/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonutils
import (
"encoding/json"
"fmt"
"io"
"strings"
)
// JSONStreamWriter writes tokens as parsed by a json.Decoder back to an output stream
type JSONStreamWriter struct {
// out is the output destination
out io.Writer
// indent is the current indent level
indent string
	// state stores a stack of the JSON state, consisting of '[', '{' and 'F' characters, where 'F' marks a field whose value is pending
state string
	// deferred temporarily buffers output so that a trailing comma is not emitted before a closing delimiter
deferred string
// path is the current stack of fields, used to support the Path() function
path []string
}
// NewJSONStreamWriter is the constructor for a JSONStreamWriter
func NewJSONStreamWriter(out io.Writer) *JSONStreamWriter {
return &JSONStreamWriter{
out: out,
}
}
// Path returns the path to the current position in the JSON tree
func (j *JSONStreamWriter) Path() string {
return strings.Join(j.path, ".")
}
// WriteToken writes the next token to the output
func (j *JSONStreamWriter) WriteToken(token json.Token) error {
	// state is the innermost open context: 0 at the top level, '{' inside an
	// object, '[' inside an array, or 'F' when a field's value is expected.
	state := byte(0)
	if j.state != "" {
		state = j.state[len(j.state)-1]
	}
var v string
switch tt := token.(type) {
// Delim, for the four JSON delimiters [ ] { }
case json.Delim:
v = tt.String()
indent := j.indent
switch tt {
case json.Delim('{'):
j.indent += " "
j.state += "{"
case json.Delim('['):
j.indent += " "
j.state += "["
case json.Delim(']'), json.Delim('}'):
j.indent = j.indent[:len(j.indent)-2]
indent = j.indent
j.state = j.state[:len(j.state)-1]
if j.state != "" && j.state[len(j.state)-1] == 'F' {
j.state = j.state[:len(j.state)-1]
j.path = j.path[:len(j.path)-1]
}
// Don't put a comma on the last field in a block
if j.deferred == ",\n" {
j.deferred = "\n"
}
default:
return fmt.Errorf("unknown delim: %v", tt)
}
	switch state {
	case 0, '{', '[':
		if err := j.writeRaw(indent + v); err != nil {
			return err
		}
	case 'F':
		if err := j.writeRaw(v); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unhandled state for json delim serialization: %v %q", state, j.state)
	}
	switch tt {
	case json.Delim('{'), json.Delim('['):
		j.deferred = "\n"
	case json.Delim(']'), json.Delim('}'):
		j.deferred = ",\n"
	default:
		return fmt.Errorf("unknown delim: %v", tt)
	}
return nil
// bool, for JSON booleans
case bool:
v = fmt.Sprintf("%v", tt)
// string, for JSON string literals
case string:
v = "\"" + tt + "\""
// float64, for JSON numbers
case float64:
v = fmt.Sprintf("%g", tt)
// Number, for JSON numbers
case json.Number:
v = tt.String()
// nil, for JSON null
case nil:
v = "null"
default:
return fmt.Errorf("unhandled token type %T", tt)
}
switch state {
case '{':
j.state += "F"
j.path = append(j.path, fmt.Sprintf("%s", token))
return j.writeRaw(j.indent + v + ": ")
case '[':
if err := j.writeRaw(j.indent + v); err != nil {
return err
}
j.deferred = ",\n"
return nil
case 'F':
j.state = j.state[:len(j.state)-1]
j.path = j.path[:len(j.path)-1]
if err := j.writeRaw(v); err != nil {
return err
}
j.deferred = ",\n"
return nil
}
return fmt.Errorf("unhandled state for json value (%T %q) serialization: %v %q", token, v, state, j.state)
}
func (j *JSONStreamWriter) writeRaw(s string) error {
if j.deferred != "" {
if _, err := j.out.Write([]byte(j.deferred)); err != nil {
return err
}
j.deferred = ""
}
_, err := j.out.Write([]byte(s))
return err
}
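
// The function below is an illustrative usage sketch, not part of the
// original file: it pumps every token from a json.Decoder through the
// writer to re-indent a JSON document. The example name and input are
// hypothetical.
func ExampleJSONStreamWriter() {
	in := json.NewDecoder(strings.NewReader(`{"b":1,"a":[true,null]}`))
	var out strings.Builder
	w := NewJSONStreamWriter(&out)
	for {
		token, err := in.Token()
		if err == io.EOF {
			// The decoder reports io.EOF when the input is exhausted.
			break
		}
		if err != nil {
			panic(err)
		}
		if err := w.WriteToken(token); err != nil {
			panic(err)
		}
	}
	fmt.Println(out.String())
}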
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonutils
import (
"encoding/json"
"fmt"
"sort"
)
// Transformer is used to transform JSON values
type Transformer struct {
stringTransforms []func(path string, value string) (string, error)
objectTransforms []func(path string, value map[string]any) error
sliceTransforms []func(path string, value []any) ([]any, error)
}
// NewTransformer is the constructor for a Transformer
func NewTransformer() *Transformer {
return &Transformer{}
}
// AddStringTransform adds a function that will be called for each string value in the JSON tree
func (t *Transformer) AddStringTransform(fn func(path string, value string) (string, error)) {
t.stringTransforms = append(t.stringTransforms, fn)
}
// AddObjectTransform adds a function that will be called for each object in the JSON tree
func (t *Transformer) AddObjectTransform(fn func(path string, value map[string]any) error) {
t.objectTransforms = append(t.objectTransforms, fn)
}
// AddSliceTransform adds a function that will be called for each slice in the JSON tree
func (t *Transformer) AddSliceTransform(fn func(path string, value []any) ([]any, error)) {
t.sliceTransforms = append(t.sliceTransforms, fn)
}
// Transform applies the transformations to the JSON tree
func (o *Transformer) Transform(v map[string]any) error {
_, err := o.visitAny(v, "")
return err
}
// visitAny is a helper function that visits any value in the JSON tree
func (o *Transformer) visitAny(v any, path string) (any, error) {
if v == nil {
return v, nil
}
switch v := v.(type) {
case map[string]any:
if err := o.visitMap(v, path); err != nil {
return nil, err
}
return v, nil
case []any:
return o.visitSlice(v, path)
case int64, float64, bool:
return o.visitPrimitive(v, path)
case string:
return o.visitString(v, path)
default:
return nil, fmt.Errorf("unhandled type at path %q: %T", path, v)
}
}
func (o *Transformer) visitMap(m map[string]any, path string) error {
for _, fn := range o.objectTransforms {
if err := fn(path, m); err != nil {
return err
}
}
for k, v := range m {
childPath := path + "." + k
v2, err := o.visitAny(v, childPath)
if err != nil {
return err
}
m[k] = v2
}
return nil
}
// visitSlice is a helper function that visits a slice in the JSON tree
func (o *Transformer) visitSlice(s []any, path string) (any, error) {
for _, fn := range o.sliceTransforms {
var err error
s, err = fn(path+"[]", s)
if err != nil {
return nil, err
}
}
for i, v := range s {
v2, err := o.visitAny(v, path+"[]")
if err != nil {
return nil, err
}
s[i] = v2
}
return s, nil
}
// SortSlice sorts a slice of any values, ordered by their JSON representations.
// This is not very efficient, but is convenient for small slices whose element types we don't know.
func SortSlice(s []any) ([]any, error) {
type entry struct {
o any
sortKey string
}
var entries []entry
for i := range s {
j, err := json.Marshal(s[i])
if err != nil {
return nil, fmt.Errorf("error converting to json: %w", err)
}
entries = append(entries, entry{o: s[i], sortKey: string(j)})
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].sortKey < entries[j].sortKey
})
out := make([]any, 0, len(s))
for i := range s {
out = append(out, entries[i].o)
}
return out, nil
}
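
// An illustrative sketch (not in the original source): because SortSlice
// compares JSON encodings as strings, mixed-type slices sort deterministically,
// but numbers order lexically by their serialized form (10 sorts before 2).
func ExampleSortSlice() {
	sorted, err := SortSlice([]any{true, float64(10), "a", float64(2)})
	if err != nil {
		panic(err)
	}
	fmt.Println(sorted)
}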
// visitPrimitive is a helper function that visits a primitive value in the JSON tree
func (o *Transformer) visitPrimitive(v any, _ string) (any, error) {
return v, nil
}
// visitString is a helper function that visits a string value in the JSON tree
func (o *Transformer) visitString(v string, path string) (string, error) {
for _, fn := range o.stringTransforms {
var err error
v, err = fn(path, v)
if err != nil {
return "", err
}
}
return v, nil
}
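
// A minimal usage sketch, assumed rather than taken from the original source:
// redact every string value in a parsed JSON document. Paths are dot-separated
// as built by visitMap, with "[]" appended for slice elements.
func ExampleTransformer() {
	doc := map[string]any{
		"name":  "web",
		"ports": []any{"80", "443"},
	}
	t := NewTransformer()
	t.AddStringTransform(func(path string, value string) (string, error) {
		// Replace every string value, regardless of its path.
		return "REDACTED", nil
	})
	if err := t.Transform(doc); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(doc)
	fmt.Println(string(out))
}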
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzz
import (
"bytes"
"encoding/json"
"strings"
"k8s.io/kops/pkg/jsonutils"
)
// FuzzWriteToken exercises JSONStreamWriter.WriteToken with fuzzer-generated
// input. The return value follows go-fuzz conventions: -1 means the input
// must not be added to the corpus, 0 is neutral, and 1 asks the fuzzer to
// prioritize the input.
func FuzzWriteToken(data []byte) int {
var buf bytes.Buffer
out := jsonutils.NewJSONStreamWriter(&buf)
in := json.NewDecoder(strings.NewReader(string(data)))
token, err := in.Token()
if err != nil {
return -1
}
err = out.WriteToken(token)
if err != nil {
return 0
}
return 1
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/node"
)
var deprecatedNodeLabels = map[string]string{
`beta.kubernetes.io/arch`: `deprecated since v1.14; use "kubernetes.io/arch" instead`,
`beta.kubernetes.io/os`: `deprecated since v1.14; use "kubernetes.io/os" instead`,
`failure-domain.beta.kubernetes.io/region`: `deprecated since v1.17; use "topology.kubernetes.io/region" instead`,
`failure-domain.beta.kubernetes.io/zone`: `deprecated since v1.17; use "topology.kubernetes.io/zone" instead`,
`beta.kubernetes.io/instance-type`: `deprecated since v1.17; use "node.kubernetes.io/instance-type" instead`,
`app.kubernetes.io/created-by`: `deprecated since v1.9`,
`scheduler.alpha.kubernetes.io/preferAvoidPods`: `deprecated since v1.22; use Taints and Tolerations instead`,
`node-role.kubernetes.io/master`: `use "node-role.kubernetes.io/control-plane" instead`,
}
// GetNodeLabelDeprecatedMessage returns the message for the deprecated node label
// and a bool indicating if the label is deprecated.
func GetNodeLabelDeprecatedMessage(key string) (string, bool) {
msg, ok := deprecatedNodeLabels[key]
return msg, ok
}
func GetWarningsForRuntimeClass(rc *node.RuntimeClass) []string {
var warnings []string
if rc != nil && rc.Scheduling != nil && rc.Scheduling.NodeSelector != nil {
// use of deprecated node labels in scheduling's node affinity
for key := range rc.Scheduling.NodeSelector {
if msg, deprecated := GetNodeLabelDeprecatedMessage(key); deprecated {
warnings = append(warnings, fmt.Sprintf("%s: %s", field.NewPath("scheduling", "nodeSelector"), msg))
}
}
}
return warnings
}
// GetWarningsForNodeSelector tests if any of the node selector requirements in the template is deprecated.
// If there are deprecated node selector requirements in either match expressions or match labels, a warning is returned.
func GetWarningsForNodeSelector(nodeSelector *metav1.LabelSelector, fieldPath *field.Path) []string {
if nodeSelector == nil {
return nil
}
var warnings []string
// use of deprecated node labels in matchLabelExpressions
for i, expression := range nodeSelector.MatchExpressions {
if msg, deprecated := GetNodeLabelDeprecatedMessage(expression.Key); deprecated {
warnings = append(
warnings,
fmt.Sprintf(
"%s: %s is %s",
fieldPath.Child("matchExpressions").Index(i).Child("key"),
expression.Key,
msg,
),
)
}
}
// use of deprecated node labels in matchLabels
for label := range nodeSelector.MatchLabels {
if msg, deprecated := GetNodeLabelDeprecatedMessage(label); deprecated {
warnings = append(warnings, fmt.Sprintf("%s: %s", fieldPath.Child("matchLabels").Child(label), msg))
}
}
return warnings
}
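
// A hypothetical sketch: a selector that still uses the deprecated
// "beta.kubernetes.io/os" key yields one warning rooted at spec.nodeSelector.
func ExampleGetWarningsForNodeSelector() {
	selector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"beta.kubernetes.io/os": "linux"},
	}
	for _, warning := range GetWarningsForNodeSelector(selector, field.NewPath("spec", "nodeSelector")) {
		fmt.Println(warning)
	}
}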
// GetWarningsForNodeSelectorTerm checks match expressions of node selector term
func GetWarningsForNodeSelectorTerm(nodeSelectorTerm api.NodeSelectorTerm, checkLabelValue bool, fieldPath *field.Path) []string {
var warnings []string
// use of deprecated node labels in matchLabelExpressions
for i, expression := range nodeSelectorTerm.MatchExpressions {
if msg, deprecated := GetNodeLabelDeprecatedMessage(expression.Key); deprecated {
warnings = append(
warnings,
fmt.Sprintf(
"%s: %s is %s",
fieldPath.Child("matchExpressions").Index(i).Child("key"),
expression.Key,
msg,
),
)
}
if checkLabelValue {
for index, value := range expression.Values {
for _, msg := range validation.IsValidLabelValue(value) {
warnings = append(warnings,
fmt.Sprintf(
"%s: %s is invalid, %s",
fieldPath.Child("matchExpressions").Index(i).Child("values").Index(index),
value,
msg,
))
}
}
}
}
return warnings
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolumeclaim
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/features"
)
const (
pvc string = "PersistentVolumeClaim"
volumeSnapshot string = "VolumeSnapshot"
deprecatedStorageClassAnnotationsMsg = `deprecated since v1.8; use "storageClassName" attribute instead`
)
// DropDisabledFields removes disabled fields from the pvc spec.
// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a pvc spec.
func DropDisabledFields(pvcSpec, oldPVCSpec *core.PersistentVolumeClaimSpec) {
// Drop the contents of the volumeAttributesClassName if the VolumeAttributesClass
// feature gate is disabled.
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) {
if oldPVCSpec == nil || oldPVCSpec.VolumeAttributesClassName == nil {
pvcSpec.VolumeAttributesClassName = nil
}
}
// Drop the contents of the dataSourceRef field if the AnyVolumeDataSource
// feature gate is disabled.
if !utilfeature.DefaultFeatureGate.Enabled(features.AnyVolumeDataSource) {
if !dataSourceRefInUse(oldPVCSpec) {
pvcSpec.DataSourceRef = nil
}
}
// Drop the contents of the dataSourceRef field if the CrossNamespaceVolumeDataSource
// feature gate is disabled and dataSourceRef.Namespace is specified.
if !utilfeature.DefaultFeatureGate.Enabled(features.CrossNamespaceVolumeDataSource) &&
pvcSpec.DataSourceRef != nil && pvcSpec.DataSourceRef.Namespace != nil && len(*pvcSpec.DataSourceRef.Namespace) != 0 {
if !dataSourceRefInUse(oldPVCSpec) {
pvcSpec.DataSourceRef = nil
}
}
}
// EnforceDataSourceBackwardsCompatibility drops the data source field under certain conditions
// to maintain backwards compatibility with old behavior.
// See KEP 1495 for details.
// Specifically, if this is an update of a PVC with no data source, or a creation of a new PVC,
// and the dataSourceRef field is not filled in, then we will drop "invalid" data sources
// (anything other than a PVC or a VolumeSnapshot) from this request as if an empty PVC had
// been requested.
// This should be called after DropDisabledFields so that if the AnyVolumeDataSource feature
// gate is disabled, dataSourceRef will be forced to empty, ensuring pre-1.22 behavior.
// This should be called before NormalizeDataSources, so that data sources other than PVCs
// and VolumeSnapshots can only be set through the dataSourceRef field and not the dataSource
// field.
func EnforceDataSourceBackwardsCompatibility(pvcSpec, oldPVCSpec *core.PersistentVolumeClaimSpec) {
	// We check whether the old PVC has a data source so that, on updates from
	// old clients that omit dataSourceRef, we preserve the data source, even
	// if it would have been invalid to specify it using the dataSource field
	// at create time.
if dataSourceInUse(oldPVCSpec) {
return
}
	// We check whether dataSourceRef is empty because, if it is not, the
	// request definitely comes from a newer client that either wants to create
	// a non-empty volume or wants to update a PVC that has a data source.
	// Whether the specified data source is valid or satisfiable is a matter
	// for validation and the volume populator code, but we can say with
	// certainty that the client is not expecting the legacy behavior of
	// ignoring invalid data sources.
if pvcSpec.DataSourceRef != nil {
return
}
// Historically, we only allow PVCs and VolumeSnapshots in the dataSource field.
// All other values are silently dropped.
if !dataSourceIsPvcOrSnapshot(pvcSpec.DataSource) {
pvcSpec.DataSource = nil
}
}
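
// A sketch of the legacy-compatibility behavior described above (names and
// values are illustrative): on a create with no dataSourceRef, a data source
// of an unsupported kind is silently dropped.
func ExampleEnforceDataSourceBackwardsCompatibility() {
	spec := &core.PersistentVolumeClaimSpec{
		DataSource: &core.TypedLocalObjectReference{Kind: "ConfigMap", Name: "not-a-volume"},
	}
	EnforceDataSourceBackwardsCompatibility(spec, nil)
	fmt.Println(spec.DataSource == nil) // the invalid data source was dropped
}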
func DropDisabledFieldsFromStatus(pvc, oldPVC *core.PersistentVolumeClaim) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) {
if oldPVC == nil || oldPVC.Status.CurrentVolumeAttributesClassName == nil {
pvc.Status.CurrentVolumeAttributesClassName = nil
}
if oldPVC == nil || oldPVC.Status.ModifyVolumeStatus == nil {
pvc.Status.ModifyVolumeStatus = nil
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
if !helper.ClaimContainsAllocatedResources(oldPVC) {
pvc.Status.AllocatedResources = nil
}
if !helper.ClaimContainsAllocatedResourceStatus(oldPVC) {
pvc.Status.AllocatedResourceStatuses = nil
}
}
}
func dataSourceInUse(oldPVCSpec *core.PersistentVolumeClaimSpec) bool {
if oldPVCSpec == nil {
return false
}
if oldPVCSpec.DataSource != nil || oldPVCSpec.DataSourceRef != nil {
return true
}
return false
}
func dataSourceIsPvcOrSnapshot(dataSource *core.TypedLocalObjectReference) bool {
if dataSource != nil {
apiGroup := ""
if dataSource.APIGroup != nil {
apiGroup = *dataSource.APIGroup
}
		if dataSource.Kind == pvc && apiGroup == "" {
return true
}
if dataSource.Kind == volumeSnapshot && apiGroup == "snapshot.storage.k8s.io" {
return true
}
}
return false
}
func dataSourceRefInUse(oldPVCSpec *core.PersistentVolumeClaimSpec) bool {
if oldPVCSpec == nil {
return false
}
if oldPVCSpec.DataSourceRef != nil {
return true
}
return false
}
// NormalizeDataSources ensures that DataSource and DataSourceRef have the same contents
// as long as both are not explicitly set.
// This should be used by creates/gets of PVCs, but not updates
func NormalizeDataSources(pvcSpec *core.PersistentVolumeClaimSpec) {
// Don't enable this behavior if the feature gate is not on
if !utilfeature.DefaultFeatureGate.Enabled(features.AnyVolumeDataSource) {
return
}
if pvcSpec.DataSource != nil && pvcSpec.DataSourceRef == nil {
// Using the old way of setting a data source
pvcSpec.DataSourceRef = &core.TypedObjectReference{
Kind: pvcSpec.DataSource.Kind,
Name: pvcSpec.DataSource.Name,
}
if pvcSpec.DataSource.APIGroup != nil {
apiGroup := *pvcSpec.DataSource.APIGroup
pvcSpec.DataSourceRef.APIGroup = &apiGroup
}
} else if pvcSpec.DataSourceRef != nil && pvcSpec.DataSource == nil {
if pvcSpec.DataSourceRef.Namespace == nil || len(*pvcSpec.DataSourceRef.Namespace) == 0 {
// Using the new way of setting a data source
pvcSpec.DataSource = &core.TypedLocalObjectReference{
Kind: pvcSpec.DataSourceRef.Kind,
Name: pvcSpec.DataSourceRef.Name,
}
if pvcSpec.DataSourceRef.APIGroup != nil {
apiGroup := *pvcSpec.DataSourceRef.APIGroup
pvcSpec.DataSource.APIGroup = &apiGroup
}
}
}
}
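
// A sketch that assumes the AnyVolumeDataSource feature gate is enabled in
// the default gate: a spec created the old way, with only dataSource set,
// comes out with a mirrored dataSourceRef.
func ExampleNormalizeDataSources() {
	spec := &core.PersistentVolumeClaimSpec{
		DataSource: &core.TypedLocalObjectReference{Kind: pvc, Name: "source-claim"},
	}
	NormalizeDataSources(spec)
	if spec.DataSourceRef != nil {
		fmt.Println(spec.DataSourceRef.Kind, spec.DataSourceRef.Name)
	}
}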
func GetWarningsForPersistentVolumeClaim(pv *core.PersistentVolumeClaim) []string {
var warnings []string
if pv == nil {
return nil
}
if _, ok := pv.ObjectMeta.Annotations[core.BetaStorageClassAnnotation]; ok {
warnings = append(warnings,
fmt.Sprintf(
"%s: %s",
field.NewPath("metadata", "annotations").Key(core.BetaStorageClassAnnotation),
deprecatedStorageClassAnnotationsMsg,
),
)
}
warnings = append(warnings, GetWarningsForPersistentVolumeClaimSpec(field.NewPath("spec"), pv.Spec)...)
return warnings
}
func GetWarningsForPersistentVolumeClaimSpec(fieldPath *field.Path, pvSpec core.PersistentVolumeClaimSpec) []string {
var warnings []string
requestValue := pvSpec.Resources.Requests[core.ResourceStorage]
if requestValue.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf(
"%s: fractional byte value %q is invalid, must be an integer",
fieldPath.Child("resources").Child("requests").Key(core.ResourceStorage.String()), requestValue.String()))
}
limitValue := pvSpec.Resources.Limits[core.ResourceStorage]
if limitValue.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf(
"%s: fractional byte value %q is invalid, must be an integer",
fieldPath.Child("resources").Child("limits").Key(core.ResourceStorage.String()), limitValue.String()))
}
return warnings
}
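
// A sketch of the fractional-byte warning above; it assumes an extra import
// of "k8s.io/apimachinery/pkg/api/resource" and that this API version uses
// core.VolumeResourceRequirements for the Resources field.
func ExampleGetWarningsForPersistentVolumeClaimSpec() {
	spec := core.PersistentVolumeClaimSpec{
		Resources: core.VolumeResourceRequirements{
			Requests: core.ResourceList{
				// 1.5 bytes: not an integer byte value, so a warning is expected.
				core.ResourceStorage: resource.MustParse("1.5"),
			},
		},
	}
	for _, warning := range GetWarningsForPersistentVolumeClaimSpec(field.NewPath("spec"), spec) {
		fmt.Println(warning)
	}
}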
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"iter"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metavalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/features"
)
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
// EphemeralContainers is for ephemeral containers
EphemeralContainers
)
// AllContainers specifies that all containers be visited
const AllContainers ContainerType = (InitContainers | Containers | EphemeralContainers)
// AllFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except for the ones guarded by feature gate.
func AllFeatureEnabledContainers() ContainerType {
return AllContainers
}
// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
type ContainerVisitor func(container *api.Container, containerType ContainerType) (shouldContinue bool)
// VisitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. VisitContainers returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainers(podSpec *api.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
for c, t := range ContainerIter(podSpec, mask) {
if !visitor(c, t) {
return false
}
}
return true
}
// ContainerIter returns an iterator over all containers in the given pod spec with a masked type.
// The iteration order is InitContainers, then main Containers, then EphemeralContainers.
func ContainerIter(podSpec *api.PodSpec, mask ContainerType) iter.Seq2[*api.Container, ContainerType] {
return func(yield func(*api.Container, ContainerType) bool) {
if mask&InitContainers != 0 {
for i := range podSpec.InitContainers {
if !yield(&podSpec.InitContainers[i], InitContainers) {
return
}
}
}
if mask&Containers != 0 {
for i := range podSpec.Containers {
if !yield(&podSpec.Containers[i], Containers) {
return
}
}
}
if mask&EphemeralContainers != 0 {
for i := range podSpec.EphemeralContainers {
if !yield((*api.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
return
}
}
}
}
}
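
// A usage sketch (hypothetical pod spec): iterate every container in the
// documented order, init containers first, and print each name with its
// ContainerType bit.
func ExampleContainerIter() {
	spec := &api.PodSpec{
		InitContainers: []api.Container{{Name: "init-db"}},
		Containers:     []api.Container{{Name: "app"}},
	}
	for c, containerType := range ContainerIter(spec, AllContainers) {
		fmt.Println(c.Name, containerType)
	}
}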
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(name string) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(name)
}
}
// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *api.Pod, visitor Visitor, containerType ContainerType) bool {
visitor = skipEmptyNames(visitor)
for _, reference := range pod.Spec.ImagePullSecrets {
if !visitor(reference.Name) {
return false
}
}
VisitContainers(&pod.Spec, containerType, func(c *api.Container, containerType ContainerType) bool {
return visitContainerSecretNames(c, visitor)
})
var source *api.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.AzureFile != nil:
if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) {
return false
}
case source.CephFS != nil:
if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
return false
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
return false
}
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].Secret != nil {
if !visitor(source.Projected.Sources[j].Secret.Name) {
return false
}
}
}
case source.RBD != nil:
if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) {
return false
}
case source.Secret != nil:
if !visitor(source.Secret.SecretName) {
return false
}
case source.ScaleIO != nil:
if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) {
return false
}
case source.ISCSI != nil:
if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) {
return false
}
case source.StorageOS != nil:
if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) {
return false
}
case source.CSI != nil:
if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) {
return false
}
}
}
return true
}
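
// A sketch with a hypothetical pod: collect every referenced secret name,
// covering both imagePullSecrets and secret-backed volumes.
func ExampleVisitPodSecretNames() {
	pod := &api.Pod{Spec: api.PodSpec{
		ImagePullSecrets: []api.LocalObjectReference{{Name: "registry-creds"}},
		Volumes: []api.Volume{{
			Name:         "certs",
			VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "tls-secret"}},
		}},
	}}
	VisitPodSecretNames(pod, func(name string) bool {
		fmt.Println(name)
		return true // keep visiting
	}, AllContainers)
}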
func visitContainerSecretNames(container *api.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.SecretRef != nil {
if !visitor(env.SecretRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil {
if !visitor(envVar.ValueFrom.SecretKeyRef.Name) {
return false
}
}
}
return true
}
// VisitPodConfigmapNames invokes the visitor function with the name of every configmap
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *api.Pod, visitor Visitor, containerType ContainerType) bool {
visitor = skipEmptyNames(visitor)
VisitContainers(&pod.Spec, containerType, func(c *api.Container, containerType ContainerType) bool {
return visitContainerConfigmapNames(c, visitor)
})
var source *api.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].ConfigMap != nil {
if !visitor(source.Projected.Sources[j].ConfigMap.Name) {
return false
}
}
}
case source.ConfigMap != nil:
if !visitor(source.ConfigMap.Name) {
return false
}
}
}
return true
}
func visitContainerConfigmapNames(container *api.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.ConfigMapRef != nil {
if !visitor(env.ConfigMapRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil {
if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) {
return false
}
}
}
return true
}
// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *api.Pod) bool {
return IsPodReadyConditionTrue(pod.Status)
}
// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status api.PodStatus) bool {
condition := GetPodReadyCondition(status)
return condition != nil && condition.Status == api.ConditionTrue
}
// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status api.PodStatus) *api.PodCondition {
_, condition := GetPodCondition(&status, api.PodReady)
return condition
}
// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns -1 and nil if the condition is not present; otherwise it returns the
// index and a pointer to the located condition.
func GetPodCondition(status *api.PodStatus, conditionType api.PodConditionType) (int, *api.PodCondition) {
if status == nil {
return -1, nil
}
for i := range status.Conditions {
if status.Conditions[i].Type == conditionType {
return i, &status.Conditions[i]
}
}
return -1, nil
}
// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *api.PodStatus, condition *api.PodCondition) bool {
condition.LastTransitionTime = metav1.Now()
// Try to find this pod condition.
conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
if oldCondition == nil {
// We are adding new pod condition.
status.Conditions = append(status.Conditions, *condition)
return true
}
// We are updating an existing condition, so we need to check if it has changed.
if condition.Status == oldCondition.Status {
condition.LastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)
status.Conditions[conditionIndex] = *condition
	// Return true if any of the fields has changed.
return !isEqual
}
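
// A sketch: adding a Ready condition to an empty status reports a change and
// leaves exactly one condition behind.
func ExampleUpdatePodCondition() {
	status := &api.PodStatus{}
	changed := UpdatePodCondition(status, &api.PodCondition{
		Type:   api.PodReady,
		Status: api.ConditionTrue,
	})
	fmt.Println(changed, len(status.Conditions))
}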
func checkContainerUseIndivisibleHugePagesValues(container api.Container) bool {
for resourceName, quantity := range container.Resources.Limits {
if helper.IsHugePageResourceName(resourceName) {
if !helper.IsHugePageResourceValueDivisible(resourceName, quantity) {
return true
}
}
}
for resourceName, quantity := range container.Resources.Requests {
if helper.IsHugePageResourceName(resourceName) {
if !helper.IsHugePageResourceValueDivisible(resourceName, quantity) {
return true
}
}
}
return false
}
// usesIndivisibleHugePagesValues returns true if any of the containers uses a
// non-integer multiple of the huge page unit size
func usesIndivisibleHugePagesValues(podSpec *api.PodSpec) bool {
foundIndivisibleHugePagesValue := false
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if checkContainerUseIndivisibleHugePagesValues(*c) {
foundIndivisibleHugePagesValue = true
}
return !foundIndivisibleHugePagesValue // continue visiting if we haven't seen an invalid value yet
})
if foundIndivisibleHugePagesValue {
return true
}
for resourceName, quantity := range podSpec.Overhead {
if helper.IsHugePageResourceName(resourceName) {
if !helper.IsHugePageResourceValueDivisible(resourceName, quantity) {
return true
}
}
}
return false
}
// hasInvalidTopologySpreadConstraintLabelSelector returns true if spec.TopologySpreadConstraints has any entry with an invalid labelSelector
func hasInvalidTopologySpreadConstraintLabelSelector(spec *api.PodSpec) bool {
for _, constraint := range spec.TopologySpreadConstraints {
errs := metavalidation.ValidateLabelSelector(constraint.LabelSelector, metavalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, nil)
if len(errs) != 0 {
return true
}
}
return false
}
// hasInvalidTopologySpreadConstrainMatchLabelKeys returns true if spec.TopologySpreadConstraints has any entry with invalid MatchLabelKeys
func hasInvalidTopologySpreadConstrainMatchLabelKeys(spec *api.PodSpec) bool {
for _, constraint := range spec.TopologySpreadConstraints {
errs := apivalidation.ValidateMatchLabelKeysAndMismatchLabelKeys(nil, constraint.MatchLabelKeys, nil, constraint.LabelSelector)
if len(errs) != 0 {
return true
}
}
return false
}
// hasLegacyInvalidTopologySpreadConstrainMatchLabelKeys returns true if spec.TopologySpreadConstraints has any entry whose MatchLabelKeys fail legacy validation
func hasLegacyInvalidTopologySpreadConstrainMatchLabelKeys(spec *api.PodSpec) bool {
for _, constraint := range spec.TopologySpreadConstraints {
errs := apivalidation.ValidateMatchLabelKeysInTopologySpread(nil, constraint.MatchLabelKeys, constraint.LabelSelector)
if len(errs) != 0 {
return true
}
}
return false
}
// hasNonLocalProjectedTokenPath returns true if spec.Volumes has any entry with a non-local projected token path
func hasNonLocalProjectedTokenPath(spec *api.PodSpec) bool {
for _, volume := range spec.Volumes {
if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ServiceAccountToken == nil {
continue
}
errs := apivalidation.ValidateLocalNonReservedPath(source.ServiceAccountToken.Path, nil)
if len(errs) != 0 {
return true
}
}
}
}
return false
}
// GetValidationOptionsFromPodSpecAndMeta returns validation options based on pod specs and metadata
func GetValidationOptionsFromPodSpecAndMeta(podSpec, oldPodSpec *api.PodSpec, podMeta, oldPodMeta *metav1.ObjectMeta) apivalidation.PodValidationOptions {
// default pod validation options based on feature gate
opts := apivalidation.PodValidationOptions{
AllowInvalidPodDeletionCost: !utilfeature.DefaultFeatureGate.Enabled(features.PodDeletionCost),
	// Do not allow pod spec to use non-integer multiples of huge page unit size by default
AllowIndivisibleHugePagesValues: false,
AllowInvalidLabelValueInSelector: false,
AllowInvalidTopologySpreadConstraintLabelSelector: false,
AllowNamespacedSysctlsForHostNetAndHostIPC: false,
AllowNonLocalProjectedTokenPath: false,
AllowPodLifecycleSleepActionZeroValue: utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepActionAllowZero),
PodLevelResourcesEnabled: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
AllowInvalidLabelValueInRequiredNodeAffinity: false,
AllowSidecarResizePolicy: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
AllowMatchLabelKeysInPodTopologySpread: utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
AllowMatchLabelKeysInPodTopologySpreadSelectorMerge: utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpreadSelectorMerge),
OldPodViolatesMatchLabelKeysValidation: false,
OldPodViolatesLegacyMatchLabelKeysValidation: false,
AllowContainerRestartPolicyRules: utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules),
AllowUserNamespacesWithVolumeDevices: false,
}
	// If the old spec used relaxed validation, or the RelaxedEnvironmentVariableValidation
	// feature gate is enabled, we must allow it
opts.AllowRelaxedEnvironmentVariableValidation = useRelaxedEnvironmentVariableValidation(podSpec, oldPodSpec)
opts.AllowRelaxedDNSSearchValidation = useRelaxedDNSSearchValidation(oldPodSpec)
opts.AllowEnvFilesValidation = useAllowEnvFilesValidation(oldPodSpec)
opts.AllowOnlyRecursiveSELinuxChangePolicy = useOnlyRecursiveSELinuxChangePolicy(oldPodSpec)
if oldPodSpec != nil {
	// if the old spec used a non-integer multiple of huge page unit size, we must allow it
opts.AllowIndivisibleHugePagesValues = usesIndivisibleHugePagesValues(oldPodSpec)
opts.AllowInvalidLabelValueInSelector = hasInvalidLabelValueInAffinitySelector(oldPodSpec)
opts.AllowInvalidLabelValueInRequiredNodeAffinity = hasInvalidLabelValueInRequiredNodeAffinity(oldPodSpec)
// if old spec has invalid labelSelector in topologySpreadConstraint, we must allow it
opts.AllowInvalidTopologySpreadConstraintLabelSelector = hasInvalidTopologySpreadConstraintLabelSelector(oldPodSpec)
if opts.AllowMatchLabelKeysInPodTopologySpread {
if opts.AllowMatchLabelKeysInPodTopologySpreadSelectorMerge {
	// If the old spec has invalid MatchLabelKeys, we must allow it
opts.OldPodViolatesMatchLabelKeysValidation = hasInvalidTopologySpreadConstrainMatchLabelKeys(oldPodSpec)
} else {
	// If the old spec has MatchLabelKeys that are invalid under legacy validation, we must allow it
opts.OldPodViolatesLegacyMatchLabelKeysValidation = hasLegacyInvalidTopologySpreadConstrainMatchLabelKeys(oldPodSpec)
}
}
// if old spec has an invalid projected token volume path, we must allow it
opts.AllowNonLocalProjectedTokenPath = hasNonLocalProjectedTokenPath(oldPodSpec)
	// if the old spec has an invalid sysctl with hostNet or hostIPC, we must allow it on update
if oldPodSpec.SecurityContext != nil && len(oldPodSpec.SecurityContext.Sysctls) != 0 {
for _, s := range oldPodSpec.SecurityContext.Sysctls {
err := apivalidation.ValidateHostSysctl(s.Name, oldPodSpec.SecurityContext, nil)
if err != nil {
opts.AllowNamespacedSysctlsForHostNetAndHostIPC = true
break
}
}
}
opts.AllowPodLifecycleSleepActionZeroValue = opts.AllowPodLifecycleSleepActionZeroValue || podLifecycleSleepActionZeroValueInUse(oldPodSpec)
	// If the old pod has a resize policy set on a restartable init container, we must allow it
opts.AllowSidecarResizePolicy = opts.AllowSidecarResizePolicy || hasRestartableInitContainerResizePolicy(oldPodSpec)
opts.AllowContainerRestartPolicyRules = opts.AllowContainerRestartPolicyRules || containerRestartRulesInUse(oldPodSpec)
	// If the old spec combines user namespaces with volume devices (a
	// combination that does not work), we still allow modifications to it.
opts.AllowUserNamespacesWithVolumeDevices = hasUserNamespacesWithVolumeDevices(oldPodSpec)
}
if oldPodMeta != nil && !opts.AllowInvalidPodDeletionCost {
// This is an update, so validate only if the existing object was valid.
_, err := helper.GetDeletionCostFromPodAnnotations(oldPodMeta.Annotations)
opts.AllowInvalidPodDeletionCost = err != nil
}
return opts
}
func useRelaxedEnvironmentVariableValidation(podSpec, oldPodSpec *api.PodSpec) bool {
if utilfeature.DefaultFeatureGate.Enabled(features.RelaxedEnvironmentVariableValidation) {
return true
}
var oldPodEnvVarNames, podEnvVarNames sets.Set[string]
if oldPodSpec != nil {
oldPodEnvVarNames = gatherPodEnvVarNames(oldPodSpec)
}
if podSpec != nil {
podEnvVarNames = gatherPodEnvVarNames(podSpec)
}
for env := range podEnvVarNames {
if relaxedEnvVarUsed(env, oldPodEnvVarNames) {
return true
}
}
return false
}
func useRelaxedDNSSearchValidation(oldPodSpec *api.PodSpec) bool {
// Return true early if feature gate is enabled
if utilfeature.DefaultFeatureGate.Enabled(features.RelaxedDNSSearchValidation) {
return true
}
// Return false early if there is no DNSConfig or Searches.
if oldPodSpec == nil || oldPodSpec.DNSConfig == nil || oldPodSpec.DNSConfig.Searches == nil {
return false
}
return hasDotOrUnderscore(oldPodSpec.DNSConfig.Searches)
}
// Helper function to check if any domain is a dot or contains an underscore.
func hasDotOrUnderscore(searches []string) bool {
for _, domain := range searches {
if domain == "." || strings.Contains(domain, "_") {
return true
}
}
return false
}
func useAllowEnvFilesValidation(oldPodSpec *api.PodSpec) bool {
// Return true early if feature gate is enabled
if utilfeature.DefaultFeatureGate.Enabled(features.EnvFiles) {
return true
}
if oldPodSpec == nil {
return false
}
for _, container := range oldPodSpec.Containers {
if hasEnvFileKeyRef(container.Env) {
return true
}
}
for _, container := range oldPodSpec.InitContainers {
if hasEnvFileKeyRef(container.Env) {
return true
}
}
for _, container := range oldPodSpec.EphemeralContainers {
if hasEnvFileKeyRef(container.Env) {
return true
}
}
return false
}
func hasEnvFileKeyRef(envs []api.EnvVar) bool {
for _, env := range envs {
if env.ValueFrom != nil && env.ValueFrom.FileKeyRef != nil {
return true
}
}
return false
}
func gatherPodEnvVarNames(podSpec *api.PodSpec) sets.Set[string] {
podEnvVarNames := sets.Set[string]{}
for _, c := range podSpec.Containers {
for _, env := range c.Env {
podEnvVarNames.Insert(env.Name)
}
for _, env := range c.EnvFrom {
podEnvVarNames.Insert(env.Prefix)
}
}
for _, c := range podSpec.InitContainers {
for _, env := range c.Env {
podEnvVarNames.Insert(env.Name)
}
for _, env := range c.EnvFrom {
podEnvVarNames.Insert(env.Prefix)
}
}
for _, c := range podSpec.EphemeralContainers {
for _, env := range c.Env {
podEnvVarNames.Insert(env.Name)
}
for _, env := range c.EnvFrom {
podEnvVarNames.Insert(env.Prefix)
}
}
return podEnvVarNames
}
func relaxedEnvVarUsed(name string, oldPodEnvVarNames sets.Set[string]) bool {
	// A length of 0 means either this is not an update request or the old pod
	// had no environment variable names to compare against.
	// We will let the feature gate decide whether to use relaxed rules.
if oldPodEnvVarNames.Len() == 0 {
return false
}
if len(validation.IsEnvVarName(name)) == 0 || len(validation.IsRelaxedEnvVarName(name)) != 0 {
// It's either a valid name by strict rules or an invalid name under relaxed rules.
// Either way, we'll use strict rules to validate.
return false
}
// The name in question failed strict rules but passed relaxed rules.
if oldPodEnvVarNames.Has(name) {
// This relaxed-rules name was already in use.
return true
}
return false
}
// GetValidationOptionsFromPodTemplate will return pod validation options for the specified template.
func GetValidationOptionsFromPodTemplate(podTemplate, oldPodTemplate *api.PodTemplateSpec) apivalidation.PodValidationOptions {
var newPodSpec, oldPodSpec *api.PodSpec
var newPodMeta, oldPodMeta *metav1.ObjectMeta
// we have to be careful about nil pointers here
// replication controller in particular is prone to passing nil
if podTemplate != nil {
newPodSpec = &podTemplate.Spec
newPodMeta = &podTemplate.ObjectMeta
}
if oldPodTemplate != nil {
oldPodSpec = &oldPodTemplate.Spec
oldPodMeta = &oldPodTemplate.ObjectMeta
}
return GetValidationOptionsFromPodSpecAndMeta(newPodSpec, oldPodSpec, newPodMeta, oldPodMeta)
}
// DropDisabledTemplateFields removes disabled fields from the pod template metadata and spec.
// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a PodTemplateSpec
func DropDisabledTemplateFields(podTemplate, oldPodTemplate *api.PodTemplateSpec) {
var (
podSpec *api.PodSpec
podAnnotations map[string]string
oldPodSpec *api.PodSpec
oldPodAnnotations map[string]string
)
if podTemplate != nil {
podSpec = &podTemplate.Spec
podAnnotations = podTemplate.Annotations
}
if oldPodTemplate != nil {
oldPodSpec = &oldPodTemplate.Spec
oldPodAnnotations = oldPodTemplate.Annotations
}
dropDisabledFields(podSpec, podAnnotations, oldPodSpec, oldPodAnnotations)
}
// DropDisabledPodFields removes disabled fields from the pod metadata and spec.
// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a Pod
func DropDisabledPodFields(pod, oldPod *api.Pod) {
var (
podSpec *api.PodSpec
podStatus *api.PodStatus
podAnnotations map[string]string
oldPodSpec *api.PodSpec
oldPodStatus *api.PodStatus
oldPodAnnotations map[string]string
)
if pod != nil {
podSpec = &pod.Spec
podStatus = &pod.Status
podAnnotations = pod.Annotations
}
if oldPod != nil {
oldPodSpec = &oldPod.Spec
oldPodStatus = &oldPod.Status
oldPodAnnotations = oldPod.Annotations
}
dropDisabledFields(podSpec, podAnnotations, oldPodSpec, oldPodAnnotations)
dropDisabledPodStatusFields(podStatus, oldPodStatus, podSpec, oldPodSpec)
}
// dropDisabledFields removes disabled fields from the pod metadata and spec.
func dropDisabledFields(
podSpec *api.PodSpec, podAnnotations map[string]string,
oldPodSpec *api.PodSpec, oldPodAnnotations map[string]string,
) {
// the new spec must always be non-nil
if podSpec == nil {
podSpec = &api.PodSpec{}
}
// If the feature is disabled and not in use, drop the hostUsers field.
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) && !hostUsersInUse(oldPodSpec) {
// Drop the field in podSpec only if SecurityContext is not nil.
// If it is nil, there is no need to set hostUsers=nil (it will be nil too).
if podSpec.SecurityContext != nil {
podSpec.SecurityContext.HostUsers = nil
}
}
// If the feature is disabled and not in use, drop the SupplementalGroupsPolicy field.
if !utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) && !supplementalGroupsPolicyInUse(oldPodSpec) {
// Drop the field in podSpec only if SecurityContext is not nil.
// If it is nil, there is no need to set supplementalGroupsPolicy=nil (it will be nil too).
if podSpec.SecurityContext != nil {
podSpec.SecurityContext.SupplementalGroupsPolicy = nil
}
}
dropDisabledPodLevelResources(podSpec, oldPodSpec)
dropDisabledProcMountField(podSpec, oldPodSpec)
dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec)
dropDisabledMatchLabelKeysFieldInTopologySpread(podSpec, oldPodSpec)
dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec)
dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec)
dropDisabledClusterTrustBundleProjection(podSpec, oldPodSpec)
dropDisabledPodCertificateProjection(podSpec, oldPodSpec)
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
// Drop ResizePolicy fields. Don't drop updates to Resources field as template.spec.resources
// field is mutable for certain controllers. Let ValidatePodUpdate handle it.
for i := range podSpec.Containers {
podSpec.Containers[i].ResizePolicy = nil
}
for i := range podSpec.InitContainers {
podSpec.InitContainers[i].ResizePolicy = nil
}
for i := range podSpec.EphemeralContainers {
podSpec.EphemeralContainers[i].ResizePolicy = nil
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && !restartableInitContainersInUse(oldPodSpec) {
// Drop the RestartPolicy field of init containers.
for i := range podSpec.InitContainers {
podSpec.InitContainers[i].RestartPolicy = nil
}
// For other types of containers, validateContainers will handle them.
}
if !utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) && !containerRestartRulesInUse(oldPodSpec) {
dropContainerRestartRules(podSpec)
}
if !utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) && !rroInUse(oldPodSpec) {
for i := range podSpec.Containers {
for j := range podSpec.Containers[i].VolumeMounts {
podSpec.Containers[i].VolumeMounts[j].RecursiveReadOnly = nil
}
}
for i := range podSpec.InitContainers {
for j := range podSpec.InitContainers[i].VolumeMounts {
podSpec.InitContainers[i].VolumeMounts[j].RecursiveReadOnly = nil
}
}
for i := range podSpec.EphemeralContainers {
for j := range podSpec.EphemeralContainers[i].VolumeMounts {
podSpec.EphemeralContainers[i].VolumeMounts[j].RecursiveReadOnly = nil
}
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.HostnameOverride) && !setHostnameOverrideInUse(oldPodSpec) {
// Set HostnameOverride to nil only if feature is disabled and it is not used
podSpec.HostnameOverride = nil
}
dropFileKeyRefInUse(podSpec, oldPodSpec)
dropPodLifecycleSleepAction(podSpec, oldPodSpec)
dropImageVolumes(podSpec, oldPodSpec)
dropSELinuxChangePolicy(podSpec, oldPodSpec)
dropContainerStopSignals(podSpec, oldPodSpec)
}
// setHostnameOverrideInUse returns true if the pod spec sets the HostnameOverride field.
func setHostnameOverrideInUse(podSpec *api.PodSpec) bool {
if podSpec == nil || podSpec.HostnameOverride == nil {
return false
}
return true
}
func dropFileKeyRefInUse(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.EnvFiles) || podFileKeyRefInUse(oldPodSpec) {
return
}
VisitContainers(podSpec, AllContainers, func(c *api.Container, _ ContainerType) bool {
for i := range c.Env {
if c.Env[i].ValueFrom != nil && c.Env[i].ValueFrom.FileKeyRef != nil {
c.Env[i].ValueFrom.FileKeyRef = nil
}
}
return true
})
}
func podFileKeyRefInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, _ ContainerType) bool {
for _, env := range c.Env {
if env.ValueFrom != nil && env.ValueFrom.FileKeyRef != nil {
inUse = true
return false
}
}
return true
})
return inUse
}
func dropContainerStopSignals(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerStopSignals) || containerStopSignalsInUse(oldPodSpec) {
return
}
wipeLifecycle := func(ctr *api.Container) {
if ctr.Lifecycle == nil {
return
}
if ctr.Lifecycle.StopSignal != nil {
ctr.Lifecycle.StopSignal = nil
if *ctr.Lifecycle == (api.Lifecycle{}) {
ctr.Lifecycle = nil
}
}
}
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.Lifecycle == nil {
return true
}
wipeLifecycle(c)
return true
})
}
func containerStopSignalsInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.Lifecycle == nil {
return true
}
if c.Lifecycle.StopSignal != nil {
inUse = true
return false
}
return true
})
return inUse
}
func dropDisabledPodLevelResources(podSpec, oldPodSpec *api.PodSpec) {
// If the feature is disabled and not in use, drop Resources at the pod-level
// from PodSpec.
if !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && !podLevelResourcesInUse(oldPodSpec) {
podSpec.Resources = nil
}
}
func dropPodLifecycleSleepAction(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) || podLifecycleSleepActionInUse(oldPodSpec) {
return
}
adjustLifecycle := func(lifecycle *api.Lifecycle) {
if lifecycle.PreStop != nil && lifecycle.PreStop.Sleep != nil {
lifecycle.PreStop.Sleep = nil
if lifecycle.PreStop.Exec == nil && lifecycle.PreStop.HTTPGet == nil && lifecycle.PreStop.TCPSocket == nil {
lifecycle.PreStop = nil
}
}
if lifecycle.PostStart != nil && lifecycle.PostStart.Sleep != nil {
lifecycle.PostStart.Sleep = nil
if lifecycle.PostStart.Exec == nil && lifecycle.PostStart.HTTPGet == nil && lifecycle.PostStart.TCPSocket == nil {
lifecycle.PostStart = nil
}
}
}
for i := range podSpec.Containers {
if podSpec.Containers[i].Lifecycle == nil {
continue
}
adjustLifecycle(podSpec.Containers[i].Lifecycle)
if podSpec.Containers[i].Lifecycle.PreStop == nil && podSpec.Containers[i].Lifecycle.PostStart == nil && podSpec.Containers[i].Lifecycle.StopSignal == nil {
podSpec.Containers[i].Lifecycle = nil
}
}
for i := range podSpec.InitContainers {
if podSpec.InitContainers[i].Lifecycle == nil {
continue
}
adjustLifecycle(podSpec.InitContainers[i].Lifecycle)
if podSpec.InitContainers[i].Lifecycle.PreStop == nil && podSpec.InitContainers[i].Lifecycle.PostStart == nil && podSpec.InitContainers[i].Lifecycle.StopSignal == nil {
podSpec.InitContainers[i].Lifecycle = nil
}
}
for i := range podSpec.EphemeralContainers {
if podSpec.EphemeralContainers[i].Lifecycle == nil {
continue
}
adjustLifecycle(podSpec.EphemeralContainers[i].Lifecycle)
if podSpec.EphemeralContainers[i].Lifecycle.PreStop == nil && podSpec.EphemeralContainers[i].Lifecycle.PostStart == nil && podSpec.EphemeralContainers[i].Lifecycle.StopSignal == nil {
podSpec.EphemeralContainers[i].Lifecycle = nil
}
}
}
func podLifecycleSleepActionInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.Lifecycle == nil {
return true
}
if c.Lifecycle.PreStop != nil && c.Lifecycle.PreStop.Sleep != nil {
inUse = true
return false
}
if c.Lifecycle.PostStart != nil && c.Lifecycle.PostStart.Sleep != nil {
inUse = true
return false
}
return true
})
return inUse
}
func podLifecycleSleepActionZeroValueInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.Lifecycle == nil {
return true
}
if c.Lifecycle.PreStop != nil && c.Lifecycle.PreStop.Sleep != nil && c.Lifecycle.PreStop.Sleep.Seconds == 0 {
inUse = true
return false
}
if c.Lifecycle.PostStart != nil && c.Lifecycle.PostStart.Sleep != nil && c.Lifecycle.PostStart.Sleep.Seconds == 0 {
inUse = true
return false
}
return true
})
return inUse
}
// dropDisabledPodStatusFields removes disabled fields from the pod status
func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec, oldPodSpec *api.PodSpec) {
	// the new status must always be non-nil
if podStatus == nil {
podStatus = &api.PodStatus{}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
// Drop Resources fields
dropResourcesField := func(csl []api.ContainerStatus) {
for i := range csl {
csl[i].Resources = nil
}
}
dropResourcesField(podStatus.ContainerStatuses)
dropResourcesField(podStatus.InitContainerStatuses)
dropResourcesField(podStatus.EphemeralContainerStatuses)
// Drop AllocatedResources field
dropAllocatedResourcesField := func(csl []api.ContainerStatus) {
for i := range csl {
csl[i].AllocatedResources = nil
}
}
dropAllocatedResourcesField(podStatus.ContainerStatuses)
dropAllocatedResourcesField(podStatus.InitContainerStatuses)
dropAllocatedResourcesField(podStatus.EphemeralContainerStatuses)
}
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
podStatus.ResourceClaimStatuses = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.DRAExtendedResource) && !draExendedResourceInUse(oldPodStatus) {
podStatus.ExtendedResourceClaimStatus = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) && !rroInUse(oldPodSpec) {
for i := range podStatus.ContainerStatuses {
podStatus.ContainerStatuses[i].VolumeMounts = nil
}
for i := range podStatus.InitContainerStatuses {
podStatus.InitContainerStatuses[i].VolumeMounts = nil
}
for i := range podStatus.EphemeralContainerStatuses {
podStatus.EphemeralContainerStatuses[i].VolumeMounts = nil
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) {
setAllocatedResourcesStatusToNil := func(csl []api.ContainerStatus) {
for i := range csl {
csl[i].AllocatedResourcesStatus = nil
}
}
setAllocatedResourcesStatusToNil(podStatus.ContainerStatuses)
setAllocatedResourcesStatusToNil(podStatus.InitContainerStatuses)
setAllocatedResourcesStatusToNil(podStatus.EphemeralContainerStatuses)
}
	// Drop the ContainerStatus.User field when the SupplementalGroupsPolicy feature is disabled and not in use
if !utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) && !supplementalGroupsPolicyInUse(oldPodSpec) {
dropUserField := func(csl []api.ContainerStatus) {
for i := range csl {
csl[i].User = nil
}
}
dropUserField(podStatus.InitContainerStatuses)
dropUserField(podStatus.ContainerStatuses)
dropUserField(podStatus.EphemeralContainerStatuses)
}
if !utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) && !podObservedGenerationTrackingInUse(oldPodStatus) {
podStatus.ObservedGeneration = 0
for i := range podStatus.Conditions {
podStatus.Conditions[i].ObservedGeneration = 0
}
}
}
// dropDisabledDynamicResourceAllocationFields removes pod claim references from
// container specs and pod-level resource claims unless they are already used
// by the old pod spec.
func dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec *api.PodSpec) {
if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
dropResourceClaimRequests(podSpec.Containers)
dropResourceClaimRequests(podSpec.InitContainers)
dropEphemeralResourceClaimRequests(podSpec.EphemeralContainers)
podSpec.ResourceClaims = nil
}
}
func draExtendedResourceInUse(podStatus *api.PodStatus) bool {
if podStatus != nil && podStatus.ExtendedResourceClaimStatus != nil {
return true
}
return false
}
func dynamicResourceAllocationInUse(podSpec *api.PodSpec) bool {
// We only need to check this field because the containers cannot have
// resource requirements entries for claims without a corresponding
// entry at the pod spec level.
if podSpec != nil && len(podSpec.ResourceClaims) > 0 {
return true
}
return false
}
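// exampleDynamicResourceAllocationInUse is an illustrative sketch, not part of
// the original source: a single pod-level claim is enough to count as "in use",
// which is why the containers never need to be walked. The claim name "gpu" is
// an arbitrary sample.
func exampleDynamicResourceAllocationInUse() bool {
	spec := &api.PodSpec{
		ResourceClaims: []api.PodResourceClaim{{Name: "gpu"}},
	}
	return dynamicResourceAllocationInUse(spec) // true
}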
func dropResourceClaimRequests(containers []api.Container) {
for i := range containers {
containers[i].Resources.Claims = nil
}
}
func dropEphemeralResourceClaimRequests(containers []api.EphemeralContainer) {
for i := range containers {
containers[i].Resources.Claims = nil
}
}
// dropDisabledProcMountField removes disabled fields from PodSpec related
// to ProcMount only if it is not already used by the old spec
func dropDisabledProcMountField(podSpec, oldPodSpec *api.PodSpec) {
if !utilfeature.DefaultFeatureGate.Enabled(features.ProcMountType) && !procMountInUse(oldPodSpec) {
defaultProcMount := api.DefaultProcMount
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.SecurityContext != nil && c.SecurityContext.ProcMount != nil {
// The ProcMount field was improperly forced to non-nil in 1.12.
// If the feature is disabled, and the existing object is not using any non-default values, and the ProcMount field is present in the incoming object, force to the default value.
// Note: we cannot force the field to nil when the feature is disabled because it causes a diff against previously persisted data.
c.SecurityContext.ProcMount = &defaultProcMount
}
return true
})
}
}
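// exampleDropDisabledProcMountField is an illustrative sketch, not part of the
// original source: with the ProcMountType gate off and no prior use, an
// incoming Unmasked ProcMount is forced back to api.DefaultProcMount rather
// than nil, avoiding a diff against previously persisted objects.
func exampleDropDisabledProcMountField() *api.ProcMountType {
	unmasked := api.UnmaskedProcMount
	spec := &api.PodSpec{Containers: []api.Container{{
		Name:            "app",
		SecurityContext: &api.SecurityContext{ProcMount: &unmasked},
	}}}
	dropDisabledProcMountField(spec, nil)
	// Points at api.DefaultProcMount when the gate is off; Unmasked otherwise.
	return spec.Containers[0].SecurityContext.ProcMount
}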
// dropDisabledNodeInclusionPolicyFields removes disabled fields from PodSpec related
// to NodeInclusionPolicy only if it is not used by the old spec.
func dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec *api.PodSpec) {
if !utilfeature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread) && podSpec != nil {
if !nodeTaintsPolicyInUse(oldPodSpec) {
for i := range podSpec.TopologySpreadConstraints {
podSpec.TopologySpreadConstraints[i].NodeTaintsPolicy = nil
}
}
if !nodeAffinityPolicyInUse(oldPodSpec) {
for i := range podSpec.TopologySpreadConstraints {
podSpec.TopologySpreadConstraints[i].NodeAffinityPolicy = nil
}
}
}
}
// dropDisabledMatchLabelKeysFieldInPodAffinity removes disabled fields from PodSpec related
// to MatchLabelKeys in required/preferred PodAffinity/PodAntiAffinity only if it is not already used by the old spec.
func dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec *api.PodSpec) {
if podSpec == nil || podSpec.Affinity == nil || utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodAffinity) || matchLabelKeysFieldInPodAffinityInUse(oldPodSpec) {
return
}
if affinity := podSpec.Affinity.PodAffinity; affinity != nil {
		dropMatchLabelKeysFieldInPodAffinityTerm(affinity.RequiredDuringSchedulingIgnoredDuringExecution)
		dropMatchLabelKeysFieldInWeightedPodAffinityTerm(affinity.PreferredDuringSchedulingIgnoredDuringExecution)
}
if antiaffinity := podSpec.Affinity.PodAntiAffinity; antiaffinity != nil {
		dropMatchLabelKeysFieldInPodAffinityTerm(antiaffinity.RequiredDuringSchedulingIgnoredDuringExecution)
		dropMatchLabelKeysFieldInWeightedPodAffinityTerm(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution)
}
}
// dropDisabledMatchLabelKeysFieldInTopologySpread removes disabled fields from PodSpec related
// to MatchLabelKeys in TopologySpread only if it is not already used by the old spec.
func dropDisabledMatchLabelKeysFieldInTopologySpread(podSpec, oldPodSpec *api.PodSpec) {
if !utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread) && !matchLabelKeysInTopologySpreadInUse(oldPodSpec) {
for i := range podSpec.TopologySpreadConstraints {
podSpec.TopologySpreadConstraints[i].MatchLabelKeys = nil
}
}
}
// dropMatchLabelKeysFieldInWeightedPodAffinityTerm removes the MatchLabelKeys and MismatchLabelKeys fields from WeightedPodAffinityTerms
func dropMatchLabelKeysFieldInWeightedPodAffinityTerm(terms []api.WeightedPodAffinityTerm) {
for i := range terms {
terms[i].PodAffinityTerm.MatchLabelKeys = nil
terms[i].PodAffinityTerm.MismatchLabelKeys = nil
}
}
// dropMatchLabelKeysFieldInPodAffinityTerm removes the MatchLabelKeys and MismatchLabelKeys fields from PodAffinityTerms
func dropMatchLabelKeysFieldInPodAffinityTerm(terms []api.PodAffinityTerm) {
for i := range terms {
terms[i].MatchLabelKeys = nil
terms[i].MismatchLabelKeys = nil
}
}
// matchLabelKeysFieldInPodAffinityInUse returns true if the pod spec's pod affinity or anti-affinity terms have MatchLabelKeys or MismatchLabelKeys set.
func matchLabelKeysFieldInPodAffinityInUse(podSpec *api.PodSpec) bool {
if podSpec == nil || podSpec.Affinity == nil {
return false
}
if affinity := podSpec.Affinity.PodAffinity; affinity != nil {
for _, c := range affinity.RequiredDuringSchedulingIgnoredDuringExecution {
if len(c.MatchLabelKeys) > 0 || len(c.MismatchLabelKeys) > 0 {
return true
}
}
for _, c := range affinity.PreferredDuringSchedulingIgnoredDuringExecution {
if len(c.PodAffinityTerm.MatchLabelKeys) > 0 || len(c.PodAffinityTerm.MismatchLabelKeys) > 0 {
return true
}
}
}
if antiAffinity := podSpec.Affinity.PodAntiAffinity; antiAffinity != nil {
for _, c := range antiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
if len(c.MatchLabelKeys) > 0 || len(c.MismatchLabelKeys) > 0 {
return true
}
}
for _, c := range antiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
if len(c.PodAffinityTerm.MatchLabelKeys) > 0 || len(c.PodAffinityTerm.MismatchLabelKeys) > 0 {
return true
}
}
}
return false
}
// matchLabelKeysInTopologySpreadInUse returns true if the pod spec is non-nil
// and has MatchLabelKeys field set in TopologySpreadConstraints.
func matchLabelKeysInTopologySpreadInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, c := range podSpec.TopologySpreadConstraints {
if len(c.MatchLabelKeys) > 0 {
return true
}
}
return false
}
// nodeAffinityPolicyInUse returns true if the pod spec is non-nil and has NodeAffinityPolicy field set
// in TopologySpreadConstraints
func nodeAffinityPolicyInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, c := range podSpec.TopologySpreadConstraints {
if c.NodeAffinityPolicy != nil {
return true
}
}
return false
}
// nodeTaintsPolicyInUse returns true if the pod spec is non-nil and has NodeTaintsPolicy field set
// in TopologySpreadConstraints
func nodeTaintsPolicyInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, c := range podSpec.TopologySpreadConstraints {
if c.NodeTaintsPolicy != nil {
return true
}
}
return false
}
// hostUsersInUse returns true if the pod spec has spec.hostUsers field set.
func hostUsersInUse(podSpec *api.PodSpec) bool {
return podSpec != nil && podSpec.SecurityContext != nil && podSpec.SecurityContext.HostUsers != nil
}
func supplementalGroupsPolicyInUse(podSpec *api.PodSpec) bool {
return podSpec != nil && podSpec.SecurityContext != nil && podSpec.SecurityContext.SupplementalGroupsPolicy != nil
}
func podObservedGenerationTrackingInUse(podStatus *api.PodStatus) bool {
if podStatus == nil {
return false
}
if podStatus.ObservedGeneration != 0 {
return true
}
for _, condition := range podStatus.Conditions {
if condition.ObservedGeneration != 0 {
return true
}
}
return false
}
// podLevelResourcesInUse returns true if the pod spec is non-nil and the
// pod-level Resources field has non-empty Requests or Limits.
func podLevelResourcesInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
if podSpec.Resources == nil {
return false
}
if len(podSpec.Resources.Requests) > 0 {
return true
}
if len(podSpec.Resources.Limits) > 0 {
return true
}
return false
}
// inPlacePodVerticalScalingInUse returns true if the pod spec is non-nil and any container or init container has ResizePolicy set
func inPlacePodVerticalScalingInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
containersMask := Containers | InitContainers
VisitContainers(podSpec, containersMask, func(c *api.Container, containerType ContainerType) bool {
if len(c.ResizePolicy) > 0 {
inUse = true
return false
}
return true
})
return inUse
}
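// exampleInPlacePodVerticalScalingInUse is an illustrative sketch, not part of
// the original source: returning false from the visit function above stops the
// walk as soon as the first ResizePolicy entry is found.
func exampleInPlacePodVerticalScalingInUse() bool {
	spec := &api.PodSpec{Containers: []api.Container{{
		Name:         "app",
		ResizePolicy: []api.ContainerResizePolicy{{ResourceName: api.ResourceCPU, RestartPolicy: api.NotRequired}},
	}}}
	return inPlacePodVerticalScalingInUse(spec) // true
}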
// procMountInUse returns true if the pod spec is non-nil and any container's SecurityContext has the ProcMount field set to a non-default value
func procMountInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool {
if c.SecurityContext == nil || c.SecurityContext.ProcMount == nil {
return true
}
if *c.SecurityContext.ProcMount != api.DefaultProcMount {
inUse = true
return false
}
return true
})
return inUse
}
// restartableInitContainersInUse returns true if the pod spec is non-nil and
// it has any init container with ContainerRestartPolicyAlways.
func restartableInitContainersInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, InitContainers, func(c *api.Container, containerType ContainerType) bool {
if c.RestartPolicy != nil && *c.RestartPolicy == api.ContainerRestartPolicyAlways {
inUse = true
return false
}
return true
})
return inUse
}
func clusterTrustBundleProjectionInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, v := range podSpec.Volumes {
if v.Projected == nil {
continue
}
for _, s := range v.Projected.Sources {
if s.ClusterTrustBundle != nil {
return true
}
}
}
return false
}
func rroInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
var inUse bool
VisitContainers(podSpec, AllContainers, func(c *api.Container, _ ContainerType) bool {
for _, f := range c.VolumeMounts {
if f.RecursiveReadOnly != nil {
inUse = true
return false
}
}
return true
})
return inUse
}
func dropDisabledClusterTrustBundleProjection(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
return
}
if podSpec == nil {
return
}
// If the pod was already using it, it can keep using it.
if clusterTrustBundleProjectionInUse(oldPodSpec) {
return
}
for i := range podSpec.Volumes {
if podSpec.Volumes[i].Projected == nil {
continue
}
for j := range podSpec.Volumes[i].Projected.Sources {
podSpec.Volumes[i].Projected.Sources[j].ClusterTrustBundle = nil
}
}
}
func podCertificateProjectionInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, v := range podSpec.Volumes {
if v.Projected == nil {
continue
}
for _, s := range v.Projected.Sources {
if s.PodCertificate != nil {
return true
}
}
}
return false
}
func dropDisabledPodCertificateProjection(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
return
}
if podSpec == nil {
return
}
// If the pod was already using it, it can keep using it.
if podCertificateProjectionInUse(oldPodSpec) {
return
}
for i := range podSpec.Volumes {
if podSpec.Volumes[i].Projected == nil {
continue
}
for j := range podSpec.Volumes[i].Projected.Sources {
podSpec.Volumes[i].Projected.Sources[j].PodCertificate = nil
}
}
}
func hasInvalidLabelValueInAffinitySelector(spec *api.PodSpec) bool {
if spec.Affinity != nil {
if spec.Affinity.PodAffinity != nil {
for _, term := range spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
allErrs := apivalidation.ValidatePodAffinityTermSelector(term, false, nil)
if len(allErrs) != 0 {
return true
}
}
for _, term := range spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
allErrs := apivalidation.ValidatePodAffinityTermSelector(term.PodAffinityTerm, false, nil)
if len(allErrs) != 0 {
return true
}
}
}
if spec.Affinity.PodAntiAffinity != nil {
for _, term := range spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
allErrs := apivalidation.ValidatePodAffinityTermSelector(term, false, nil)
if len(allErrs) != 0 {
return true
}
}
for _, term := range spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
allErrs := apivalidation.ValidatePodAffinityTermSelector(term.PodAffinityTerm, false, nil)
if len(allErrs) != 0 {
return true
}
}
}
}
return false
}
// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways.
// This function is not checking if the container passed to it is indeed an init container.
// It is just checking if the container restart policy has been set to always.
func IsRestartableInitContainer(initContainer *api.Container) bool {
if initContainer == nil || initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == api.ContainerRestartPolicyAlways
}
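// exampleIsRestartableInitContainer is an illustrative sketch, not part of the
// original source: the helper only inspects RestartPolicy, so the caller must
// already know the container came from the init list.
func exampleIsRestartableInitContainer() bool {
	always := api.ContainerRestartPolicyAlways
	sidecar := api.Container{Name: "log-shipper", RestartPolicy: &always}
	return IsRestartableInitContainer(&sidecar) // true
}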
func hasInvalidLabelValueInRequiredNodeAffinity(spec *api.PodSpec) bool {
if spec == nil ||
spec.Affinity == nil ||
spec.Affinity.NodeAffinity == nil ||
spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return false
}
return helper.HasInvalidLabelValueInNodeSelectorTerms(spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms)
}
// KEP: https://kep.k8s.io/4639
func dropImageVolumes(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) || imageVolumesInUse(oldPodSpec) {
return
}
imageVolumeNames := sets.New[string]()
var newVolumes []api.Volume
for _, v := range podSpec.Volumes {
if v.Image != nil {
imageVolumeNames.Insert(v.Name)
continue
}
newVolumes = append(newVolumes, v)
}
podSpec.Volumes = newVolumes
dropVolumeMounts := func(givenMounts []api.VolumeMount) (newVolumeMounts []api.VolumeMount) {
for _, m := range givenMounts {
if !imageVolumeNames.Has(m.Name) {
newVolumeMounts = append(newVolumeMounts, m)
}
}
return newVolumeMounts
}
for i, c := range podSpec.Containers {
podSpec.Containers[i].VolumeMounts = dropVolumeMounts(c.VolumeMounts)
}
for i, c := range podSpec.InitContainers {
podSpec.InitContainers[i].VolumeMounts = dropVolumeMounts(c.VolumeMounts)
}
for i, c := range podSpec.EphemeralContainers {
podSpec.EphemeralContainers[i].VolumeMounts = dropVolumeMounts(c.VolumeMounts)
}
}
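// exampleDropImageVolumes is an illustrative sketch, not part of the original
// source: with the ImageVolume gate off and no prior use, the image volume and
// every mount that references it by name are removed together, so no dangling
// mounts survive. The image reference is an arbitrary sample.
func exampleDropImageVolumes() *api.PodSpec {
	spec := &api.PodSpec{
		Volumes: []api.Volume{{
			Name:         "model",
			VolumeSource: api.VolumeSource{Image: &api.ImageVolumeSource{Reference: "example.test/model:v1"}},
		}},
		Containers: []api.Container{{
			Name:         "app",
			VolumeMounts: []api.VolumeMount{{Name: "model", MountPath: "/model"}},
		}},
	}
	dropImageVolumes(spec, nil)
	// With the gate off, both spec.Volumes and the "model" mount are gone.
	return spec
}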
func imageVolumesInUse(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, v := range podSpec.Volumes {
if v.Image != nil {
return true
}
}
return false
}
func dropSELinuxChangePolicy(podSpec, oldPodSpec *api.PodSpec) {
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxChangePolicy) || seLinuxChangePolicyInUse(oldPodSpec) {
return
}
if podSpec == nil || podSpec.SecurityContext == nil {
return
}
podSpec.SecurityContext.SELinuxChangePolicy = nil
}
func seLinuxChangePolicyInUse(podSpec *api.PodSpec) bool {
if podSpec == nil || podSpec.SecurityContext == nil {
return false
}
return podSpec.SecurityContext.SELinuxChangePolicy != nil
}
func useOnlyRecursiveSELinuxChangePolicy(oldPodSpec *api.PodSpec) bool {
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMount) {
// All policies are allowed
return false
}
if seLinuxChangePolicyInUse(oldPodSpec) {
// The old pod spec has *any* policy: we need to keep that object update-able.
return false
}
// No feature gate + no value in the old object -> only Recursive is allowed
return true
}
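// exampleUseOnlyRecursiveSELinuxChangePolicy is an illustrative sketch, not
// part of the original source: an old object that already carries any policy
// stays update-able, so the answer here is false whether or not the
// SELinuxMount gate is enabled.
func exampleUseOnlyRecursiveSELinuxChangePolicy() bool {
	recursive := api.SELinuxChangePolicyRecursive
	oldSpec := &api.PodSpec{SecurityContext: &api.PodSecurityContext{SELinuxChangePolicy: &recursive}}
	return useOnlyRecursiveSELinuxChangePolicy(oldSpec) // false
}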
func hasUserNamespacesWithVolumeDevices(podSpec *api.PodSpec) bool {
if podSpec.SecurityContext == nil || podSpec.SecurityContext.HostUsers == nil || *podSpec.SecurityContext.HostUsers {
return false
}
hasVolumeDevices := false
VisitContainers(podSpec, AllContainers, func(c *api.Container, _ ContainerType) bool {
if len(c.VolumeDevices) > 0 {
hasVolumeDevices = true
return false // stop iterating
}
return true // keep iterating
})
return hasVolumeDevices
}
// hasRestartableInitContainerResizePolicy returns true if the pod spec is non-nil and
// it has any init container with ContainerRestartPolicyAlways and non-nil ResizePolicy.
func hasRestartableInitContainerResizePolicy(podSpec *api.PodSpec) bool {
if podSpec == nil {
return false
}
for _, c := range podSpec.InitContainers {
if IsRestartableInitContainer(&c) && len(c.ResizePolicy) > 0 {
return true
}
}
return false
}
// HasAPIObjectReference returns true if a reference to an API object is found in the pod spec,
// along with the plural resource of the referenced API type, or an error if an unknown field is encountered.
func HasAPIObjectReference(pod *api.Pod) (bool, string, error) {
if pod.Spec.ServiceAccountName != "" {
return true, "serviceaccounts", nil
}
hasSecrets := false
VisitPodSecretNames(pod, func(name string) (shouldContinue bool) { hasSecrets = true; return false }, AllContainers)
if hasSecrets {
return true, "secrets", nil
}
hasConfigMaps := false
VisitPodConfigmapNames(pod, func(name string) (shouldContinue bool) { hasConfigMaps = true; return false }, AllContainers)
if hasConfigMaps {
return true, "configmaps", nil
}
if len(pod.Spec.ResourceClaims) > 0 {
return true, "resourceclaims", nil
}
for _, v := range pod.Spec.Volumes {
switch {
case v.AWSElasticBlockStore != nil, v.AzureDisk != nil, v.CephFS != nil, v.Cinder != nil,
v.DownwardAPI != nil, v.EmptyDir != nil, v.FC != nil, v.FlexVolume != nil, v.Flocker != nil, v.GCEPersistentDisk != nil,
v.GitRepo != nil, v.HostPath != nil, v.Image != nil, v.ISCSI != nil, v.NFS != nil, v.PhotonPersistentDisk != nil,
v.PortworxVolume != nil, v.Quobyte != nil, v.RBD != nil, v.ScaleIO != nil, v.StorageOS != nil, v.VsphereVolume != nil:
continue
case v.ConfigMap != nil:
return true, "configmaps (via configmap volumes)", nil
case v.Secret != nil:
return true, "secrets (via secret volumes)", nil
case v.CSI != nil:
return true, "csidrivers (via CSI volumes)", nil
case v.Glusterfs != nil:
return true, "endpoints (via glusterFS volumes)", nil
case v.PersistentVolumeClaim != nil:
return true, "persistentvolumeclaims", nil
case v.Ephemeral != nil:
return true, "persistentvolumeclaims (via ephemeral volumes)", nil
case v.AzureFile != nil:
return true, "secrets (via azureFile volumes)", nil
case v.Projected != nil:
for _, s := range v.Projected.Sources {
// Reject projected volume sources that require the Kubernetes API
switch {
case s.ConfigMap != nil:
return true, "configmaps (via projected volumes)", nil
case s.Secret != nil:
return true, "secrets (via projected volumes)", nil
case s.ServiceAccountToken != nil:
return true, "serviceaccounts (via projected volumes)", nil
case s.ClusterTrustBundle != nil:
return true, "clustertrustbundles", nil
case s.PodCertificate != nil:
return true, "podcertificates", nil
case s.DownwardAPI != nil:
// Allow projected volume sources that don't require the Kubernetes API
continue
default:
// Reject unknown volume types
return true, "", fmt.Errorf("unknown source for projected volume %q", v.Name)
}
}
default:
return true, "", fmt.Errorf("unknown volume type for volume %q", v.Name)
}
}
return false, "", nil
}
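// exampleHasAPIObjectReference is an illustrative sketch, not part of the
// original source: a PVC volume is not caught by the secret/configmap
// visitors, so it falls through to the volume switch and reports the
// "persistentvolumeclaims" resource.
func exampleHasAPIObjectReference() (bool, string, error) {
	pod := &api.Pod{Spec: api.PodSpec{
		Volumes: []api.Volume{{
			Name:         "data",
			VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "data"}},
		}},
	}}
	return HasAPIObjectReference(pod) // true, "persistentvolumeclaims", nil
}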
// ApparmorFieldForAnnotation takes a pod annotation and returns the converted
// AppArmor profile field.
func ApparmorFieldForAnnotation(annotation string) *api.AppArmorProfile {
if annotation == api.DeprecatedAppArmorAnnotationValueUnconfined {
return &api.AppArmorProfile{Type: api.AppArmorProfileTypeUnconfined}
}
if annotation == api.DeprecatedAppArmorAnnotationValueRuntimeDefault {
return &api.AppArmorProfile{Type: api.AppArmorProfileTypeRuntimeDefault}
}
if strings.HasPrefix(annotation, api.DeprecatedAppArmorAnnotationValueLocalhostPrefix) {
localhostProfile := strings.TrimPrefix(annotation, api.DeprecatedAppArmorAnnotationValueLocalhostPrefix)
if localhostProfile != "" {
return &api.AppArmorProfile{
Type: api.AppArmorProfileTypeLocalhost,
LocalhostProfile: &localhostProfile,
}
}
}
// we can only reach this code path if the localhostProfile name has a zero
// length or if the annotation has an unrecognized value
return nil
}
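// exampleApparmorFieldForAnnotation is an illustrative sketch, not part of the
// original source; "my-profile" is a hypothetical profile name.
func exampleApparmorFieldForAnnotation() *api.AppArmorProfile {
	annotation := api.DeprecatedAppArmorAnnotationValueLocalhostPrefix + "my-profile"
	// Yields Type: Localhost with LocalhostProfile pointing at "my-profile".
	return ApparmorFieldForAnnotation(annotation)
}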
func dropContainerRestartRules(podSpec *api.PodSpec) {
if podSpec == nil {
return
}
for i, c := range podSpec.InitContainers {
if c.RestartPolicy != nil && *c.RestartPolicy != api.ContainerRestartPolicyAlways {
podSpec.InitContainers[i].RestartPolicy = nil
}
podSpec.InitContainers[i].RestartPolicyRules = nil
}
for i := range podSpec.Containers {
podSpec.Containers[i].RestartPolicy = nil
podSpec.Containers[i].RestartPolicyRules = nil
}
for i := range podSpec.EphemeralContainers {
podSpec.EphemeralContainers[i].RestartPolicy = nil
podSpec.EphemeralContainers[i].RestartPolicyRules = nil
}
}
func containerRestartRulesInUse(oldPodSpec *api.PodSpec) bool {
if oldPodSpec == nil {
return false
}
for _, c := range oldPodSpec.InitContainers {
if c.RestartPolicy != nil && *c.RestartPolicy != api.ContainerRestartPolicyAlways {
return true
}
if len(c.RestartPolicyRules) > 0 {
return true
}
}
for _, c := range oldPodSpec.Containers {
if c.RestartPolicy != nil {
return true
}
}
return false
}
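// exampleContainerRestartRulesInUse is an illustrative sketch, not part of the
// original source: any init container whose restart policy is not Always marks
// the feature as in use. The "Never" literal is an assumed sample value.
func exampleContainerRestartRulesInUse() bool {
	never := api.ContainerRestartPolicy("Never")
	spec := &api.PodSpec{InitContainers: []api.Container{{Name: "init", RestartPolicy: &never}}}
	return containerRestartRulesInUse(spec) // true
}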
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"fmt"
"os"
"strings"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
nodeapi "k8s.io/kubernetes/pkg/api/node"
pvcutil "k8s.io/kubernetes/pkg/api/persistentvolumeclaim"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/pods"
"k8s.io/kubernetes/pkg/features"
)
func GetWarningsForPod(ctx context.Context, pod, oldPod *api.Pod) []string {
if pod == nil {
return nil
}
var (
oldSpec *api.PodSpec
oldMeta *metav1.ObjectMeta
)
if oldPod != nil {
oldSpec = &oldPod.Spec
oldMeta = &oldPod.ObjectMeta
}
return warningsForPodSpecAndMeta(nil, &pod.Spec, &pod.ObjectMeta, oldSpec, oldMeta)
}
func GetWarningsForPodTemplate(ctx context.Context, fieldPath *field.Path, podTemplate, oldPodTemplate *api.PodTemplateSpec) []string {
if podTemplate == nil {
return nil
}
var (
oldSpec *api.PodSpec
oldMeta *metav1.ObjectMeta
)
if oldPodTemplate != nil {
oldSpec = &oldPodTemplate.Spec
oldMeta = &oldPodTemplate.ObjectMeta
}
return warningsForPodSpecAndMeta(fieldPath, &podTemplate.Spec, &podTemplate.ObjectMeta, oldSpec, oldMeta)
}
var deprecatedAnnotations = []struct {
key string
prefix string
message string
}{
{
key: `scheduler.alpha.kubernetes.io/critical-pod`,
message: `non-functional in v1.16+; use the "priorityClassName" field instead`,
},
{
key: `security.alpha.kubernetes.io/sysctls`,
message: `non-functional in v1.11+; use the "sysctls" field instead`,
},
{
key: `security.alpha.kubernetes.io/unsafe-sysctls`,
message: `non-functional in v1.11+; use the "sysctls" field instead`,
},
}
func warningsForPodSpecAndMeta(fieldPath *field.Path, podSpec *api.PodSpec, meta *metav1.ObjectMeta, oldPodSpec *api.PodSpec, oldMeta *metav1.ObjectMeta) []string {
var warnings []string
// use of deprecated node labels in selectors/affinity/topology
for k := range podSpec.NodeSelector {
if msg, deprecated := nodeapi.GetNodeLabelDeprecatedMessage(k); deprecated {
warnings = append(warnings, fmt.Sprintf("%s: %s", fieldPath.Child("spec", "nodeSelector").Key(k), msg))
}
}
if podSpec.Affinity != nil && podSpec.Affinity.NodeAffinity != nil {
n := podSpec.Affinity.NodeAffinity
if n.RequiredDuringSchedulingIgnoredDuringExecution != nil {
termFldPath := fieldPath.Child("spec", "affinity", "nodeAffinity", "requiredDuringSchedulingIgnoredDuringExecution", "nodeSelectorTerms")
for i, term := range n.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
warnings = append(warnings, nodeapi.GetWarningsForNodeSelectorTerm(term, false, termFldPath.Index(i))...)
}
}
preferredFldPath := fieldPath.Child("spec", "affinity", "nodeAffinity", "preferredDuringSchedulingIgnoredDuringExecution")
for i, term := range n.PreferredDuringSchedulingIgnoredDuringExecution {
warnings = append(warnings, nodeapi.GetWarningsForNodeSelectorTerm(term.Preference, true, preferredFldPath.Index(i).Child("preference"))...)
}
}
for i, t := range podSpec.TopologySpreadConstraints {
if msg, deprecated := nodeapi.GetNodeLabelDeprecatedMessage(t.TopologyKey); deprecated {
warnings = append(warnings, fmt.Sprintf(
"%s: %s is %s",
fieldPath.Child("spec", "topologySpreadConstraints").Index(i).Child("topologyKey"),
t.TopologyKey,
msg,
))
}
		// warn if labelSelector is null, which matches no pods.
if t.LabelSelector == nil {
warnings = append(warnings, fmt.Sprintf("%s: a null labelSelector results in matching no pod", fieldPath.Child("spec", "topologySpreadConstraints").Index(i).Child("labelSelector")))
}
}
// use of deprecated annotations
for _, deprecated := range deprecatedAnnotations {
if _, exists := meta.Annotations[deprecated.key]; exists {
warnings = append(warnings, fmt.Sprintf("%s: %s", fieldPath.Child("metadata", "annotations").Key(deprecated.key), deprecated.message))
}
if len(deprecated.prefix) > 0 {
for k := range meta.Annotations {
if strings.HasPrefix(k, deprecated.prefix) {
warnings = append(warnings, fmt.Sprintf("%s: %s", fieldPath.Child("metadata", "annotations").Key(k), deprecated.message))
break
}
}
}
}
// deprecated and removed volume plugins
for i, v := range podSpec.Volumes {
if v.PhotonPersistentDisk != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.11, non-functional in v1.16+", fieldPath.Child("spec", "volumes").Index(i).Child("photonPersistentDisk")))
}
if v.GitRepo != nil {
if !utilfeature.DefaultFeatureGate.Enabled(features.GitRepoVolumeDriver) {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.11, and disabled by default in v1.33+", fieldPath.Child("spec", "volumes").Index(i).Child("gitRepo")))
} else {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.11", fieldPath.Child("spec", "volumes").Index(i).Child("gitRepo")))
}
}
if v.ScaleIO != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.16, non-functional in v1.22+", fieldPath.Child("spec", "volumes").Index(i).Child("scaleIO")))
}
if v.Flocker != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.22, non-functional in v1.25+", fieldPath.Child("spec", "volumes").Index(i).Child("flocker")))
}
if v.StorageOS != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.22, non-functional in v1.25+", fieldPath.Child("spec", "volumes").Index(i).Child("storageOS")))
}
if v.Quobyte != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.22, non-functional in v1.25+", fieldPath.Child("spec", "volumes").Index(i).Child("quobyte")))
}
if v.Glusterfs != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.25, non-functional in v1.26+", fieldPath.Child("spec", "volumes").Index(i).Child("glusterfs")))
}
if v.Ephemeral != nil && v.Ephemeral.VolumeClaimTemplate != nil {
warnings = append(warnings, pvcutil.GetWarningsForPersistentVolumeClaimSpec(fieldPath.Child("spec", "volumes").Index(i).Child("ephemeral").Child("volumeClaimTemplate").Child("spec"), v.Ephemeral.VolumeClaimTemplate.Spec)...)
}
if v.CephFS != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.28, non-functional in v1.31+", fieldPath.Child("spec", "volumes").Index(i).Child("cephfs")))
}
if v.RBD != nil {
warnings = append(warnings, fmt.Sprintf("%s: deprecated in v1.28, non-functional in v1.31+", fieldPath.Child("spec", "volumes").Index(i).Child("rbd")))
}
}
if overlaps := warningsForOverlappingVirtualPaths(podSpec.Volumes); len(overlaps) > 0 {
warnings = append(warnings, overlaps...)
}
// duplicate hostAliases (#91670, #58477)
if len(podSpec.HostAliases) > 1 {
items := sets.New[string]()
for i, item := range podSpec.HostAliases {
if items.Has(item.IP) {
warnings = append(warnings, fmt.Sprintf("%s: duplicate ip %q", fieldPath.Child("spec", "hostAliases").Index(i).Child("ip"), item.IP))
} else {
items.Insert(item.IP)
}
}
}
// duplicate imagePullSecrets (#91629, #58477)
if len(podSpec.ImagePullSecrets) > 1 {
items := sets.New[string]()
for i, item := range podSpec.ImagePullSecrets {
if items.Has(item.Name) {
warnings = append(warnings, fmt.Sprintf("%s: duplicate name %q", fieldPath.Child("spec", "imagePullSecrets").Index(i).Child("name"), item.Name))
} else {
items.Insert(item.Name)
}
}
}
// imagePullSecrets with empty name (#99454#issuecomment-787838112)
for i, item := range podSpec.ImagePullSecrets {
if len(item.Name) == 0 {
warnings = append(warnings, fmt.Sprintf("%s: invalid empty name %q", fieldPath.Child("spec", "imagePullSecrets").Index(i).Child("name"), item.Name))
}
}
// fractional memory/ephemeral-storage requests/limits (#79950, #49442, #18538)
if value, ok := podSpec.Overhead[api.ResourceMemory]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", fieldPath.Child("spec", "overhead").Key(string(api.ResourceMemory)), value.String()))
}
if value, ok := podSpec.Overhead[api.ResourceEphemeralStorage]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", fieldPath.Child("spec", "overhead").Key(string(api.ResourceEphemeralStorage)), value.String()))
}
// use of pod seccomp annotation without accompanying field
if podSpec.SecurityContext == nil || podSpec.SecurityContext.SeccompProfile == nil {
if _, exists := meta.Annotations[api.SeccompPodAnnotationKey]; exists {
warnings = append(warnings, fmt.Sprintf(`%s: non-functional in v1.27+; use the "seccompProfile" field instead`, fieldPath.Child("metadata", "annotations").Key(api.SeccompPodAnnotationKey)))
}
}
var podAppArmorProfile *api.AppArmorProfile
if podSpec.SecurityContext != nil {
podAppArmorProfile = podSpec.SecurityContext.AppArmorProfile
}
pods.VisitContainersWithPath(podSpec, fieldPath.Child("spec"), func(c *api.Container, p *field.Path) bool {
// use of container seccomp annotation without accompanying field
if c.SecurityContext == nil || c.SecurityContext.SeccompProfile == nil {
if _, exists := meta.Annotations[api.SeccompContainerAnnotationKeyPrefix+c.Name]; exists {
warnings = append(warnings, fmt.Sprintf(`%s: non-functional in v1.27+; use the "seccompProfile" field instead`, fieldPath.Child("metadata", "annotations").Key(api.SeccompContainerAnnotationKeyPrefix+c.Name)))
}
}
// use of container AppArmor annotation without accompanying field
isPodTemplate := fieldPath != nil // Pod warnings are emitted through applyAppArmorVersionSkew instead.
hasAppArmorField := c.SecurityContext != nil && c.SecurityContext.AppArmorProfile != nil
if isPodTemplate && !hasAppArmorField {
key := api.DeprecatedAppArmorAnnotationKeyPrefix + c.Name
if annotation, exists := meta.Annotations[key]; exists {
// Only warn if the annotation doesn't match the pod profile.
if podAppArmorProfile == nil || !apiequality.Semantic.DeepEqual(podAppArmorProfile, ApparmorFieldForAnnotation(annotation)) {
warnings = append(warnings, fmt.Sprintf(`%s: deprecated since v1.30; use the "appArmorProfile" field instead`, fieldPath.Child("metadata", "annotations").Key(key)))
}
}
}
// fractional memory/ephemeral-storage requests/limits (#79950, #49442, #18538)
if value, ok := c.Resources.Limits[api.ResourceMemory]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", p.Child("resources", "limits").Key(string(api.ResourceMemory)), value.String()))
}
if value, ok := c.Resources.Requests[api.ResourceMemory]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", p.Child("resources", "requests").Key(string(api.ResourceMemory)), value.String()))
}
if value, ok := c.Resources.Limits[api.ResourceEphemeralStorage]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", p.Child("resources", "limits").Key(string(api.ResourceEphemeralStorage)), value.String()))
}
if value, ok := c.Resources.Requests[api.ResourceEphemeralStorage]; ok && value.MilliValue()%int64(1000) != int64(0) {
warnings = append(warnings, fmt.Sprintf("%s: fractional byte value %q is invalid, must be an integer", p.Child("resources", "requests").Key(string(api.ResourceEphemeralStorage)), value.String()))
}
// duplicate containers[*].env (#86163, #93266, #58477)
if len(c.Env) > 1 {
items := sets.New[string]()
for i, item := range c.Env {
if items.Has(item.Name) {
// a previous value exists, but it might be OK
bad := false
					ref := fmt.Sprintf("$(%s)", item.Name) // what a reference to this name looks like
// if we are replacing it with a valueFrom, warn
if item.ValueFrom != nil {
bad = true
}
// if this is X="$(X)", warn
if item.Value == ref {
bad = true
}
// if the new value does not contain a reference to the old
// value (e.g. X="abc"; X="$(X)123"), warn
if !strings.Contains(item.Value, ref) {
bad = true
}
if bad {
warnings = append(warnings, fmt.Sprintf("%s: hides previous definition of %q, which may be dropped when using apply", p.Child("env").Index(i), item.Name))
}
} else {
items.Insert(item.Name)
}
}
}
return true
})
type portBlock struct {
field *field.Path
port api.ContainerPort
}
// Accumulate ports across all containers
allPorts := map[string][]portBlock{}
pods.VisitContainersWithPath(podSpec, fieldPath.Child("spec"), func(c *api.Container, fldPath *field.Path) bool {
for i, port := range c.Ports {
if port.HostIP != "" && port.HostPort == 0 {
warnings = append(warnings, fmt.Sprintf("%s: hostIP set without hostPort: %+v",
fldPath.Child("ports").Index(i), port))
}
k := fmt.Sprintf("%d/%s", port.ContainerPort, port.Protocol)
if others, found := allPorts[k]; found {
				// Someone else has this protocol+port, but it still might not be a conflict.
for _, other := range others {
if port.HostIP == other.port.HostIP && port.HostPort == other.port.HostPort {
// Exactly-equal is obvious. Validation should already filter for this except when these are unspecified.
warnings = append(warnings, fmt.Sprintf("%s: duplicate port definition with %s", fldPath.Child("ports").Index(i), other.field))
} else if port.HostPort == 0 || other.port.HostPort == 0 {
// HostPort = 0 is redundant with any other value, which is odd but not really dangerous. HostIP doesn't matter here.
warnings = append(warnings, fmt.Sprintf("%s: overlapping port definition with %s", fldPath.Child("ports").Index(i), other.field))
} else if a, b := port.HostIP == "", other.port.HostIP == ""; port.HostPort == other.port.HostPort && ((a || b) && !(a && b)) {
						// If the HostPorts are the same and one HostIP is specified while the other is not, the behavior is undefined.
warnings = append(warnings, fmt.Sprintf("%s: dangerously ambiguous port definition with %s", fldPath.Child("ports").Index(i), other.field))
}
}
allPorts[k] = append(allPorts[k], portBlock{field: fldPath.Child("ports").Index(i), port: port})
} else {
allPorts[k] = []portBlock{{field: fldPath.Child("ports").Index(i), port: port}}
}
}
return true
})
// Accumulate port names of containers and sidecar containers
allPortsNames := map[string]*field.Path{}
pods.VisitContainersWithPath(podSpec, fieldPath.Child("spec"), func(c *api.Container, fldPath *field.Path) bool {
for i, port := range c.Ports {
if port.Name != "" {
if other, found := allPortsNames[port.Name]; found {
warnings = append(warnings, fmt.Sprintf("%s: duplicate port name %q with %s, services and probes that select ports by name will use %s", fldPath.Child("ports").Index(i), port.Name, other, other))
} else {
allPortsNames[port.Name] = fldPath.Child("ports").Index(i)
}
}
}
return true
})
// warn if the terminationGracePeriodSeconds is negative.
if podSpec.TerminationGracePeriodSeconds != nil && *podSpec.TerminationGracePeriodSeconds < 0 {
warnings = append(warnings, fmt.Sprintf("%s: must be >= 0; negative values are invalid and will be treated as 1", fieldPath.Child("spec", "terminationGracePeriodSeconds")))
}
if podSpec.Affinity != nil {
if affinity := podSpec.Affinity.PodAffinity; affinity != nil {
warnings = append(warnings, warningsForPodAffinityTerms(affinity.RequiredDuringSchedulingIgnoredDuringExecution, fieldPath.Child("spec", "affinity", "podAffinity", "requiredDuringSchedulingIgnoredDuringExecution"))...)
warnings = append(warnings, warningsForWeightedPodAffinityTerms(affinity.PreferredDuringSchedulingIgnoredDuringExecution, fieldPath.Child("spec", "affinity", "podAffinity", "preferredDuringSchedulingIgnoredDuringExecution"))...)
}
if affinity := podSpec.Affinity.PodAntiAffinity; affinity != nil {
warnings = append(warnings, warningsForPodAffinityTerms(affinity.RequiredDuringSchedulingIgnoredDuringExecution, fieldPath.Child("spec", "affinity", "podAntiAffinity", "requiredDuringSchedulingIgnoredDuringExecution"))...)
warnings = append(warnings, warningsForWeightedPodAffinityTerms(affinity.PreferredDuringSchedulingIgnoredDuringExecution, fieldPath.Child("spec", "affinity", "podAntiAffinity", "preferredDuringSchedulingIgnoredDuringExecution"))...)
}
}
// Deprecated IP address formats
if podSpec.DNSConfig != nil {
for i, ns := range podSpec.DNSConfig.Nameservers {
warnings = append(warnings, validation.GetWarningsForIP(fieldPath.Child("spec", "dnsConfig", "nameservers").Index(i), ns)...)
}
}
for i, hostAlias := range podSpec.HostAliases {
warnings = append(warnings, validation.GetWarningsForIP(fieldPath.Child("spec", "hostAliases").Index(i).Child("ip"), hostAlias.IP)...)
}
return warnings
}
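// exampleWarningsForPodSpecAndMeta is an illustrative sketch, not part of the
// original source: a duplicated imagePullSecrets name is one of the conditions
// above that yields a warning. The secret name "regcred" is an arbitrary sample.
func exampleWarningsForPodSpecAndMeta() []string {
	spec := &api.PodSpec{
		ImagePullSecrets: []api.LocalObjectReference{{Name: "regcred"}, {Name: "regcred"}},
	}
	// Includes a duplicate-name warning for spec.imagePullSecrets[1].name.
	return warningsForPodSpecAndMeta(nil, spec, &metav1.ObjectMeta{}, nil, nil)
}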
func warningsForPodAffinityTerms(terms []api.PodAffinityTerm, fieldPath *field.Path) []string {
var warnings []string
for i, t := range terms {
if t.LabelSelector == nil {
warnings = append(warnings, fmt.Sprintf("%s: a null labelSelector results in matching no pod", fieldPath.Index(i).Child("labelSelector")))
}
}
return warnings
}
func warningsForWeightedPodAffinityTerms(terms []api.WeightedPodAffinityTerm, fieldPath *field.Path) []string {
var warnings []string
for i, t := range terms {
		// warn if labelSelector is null, which matches no pods.
if t.PodAffinityTerm.LabelSelector == nil {
warnings = append(warnings, fmt.Sprintf("%s: a null labelSelector results in matching no pod", fieldPath.Index(i).Child("podAffinityTerm", "labelSelector")))
}
}
return warnings
}
// warningsForOverlappingVirtualPaths validates that there are no overlapping paths in a single ConfigMapVolume, SecretVolume, DownwardAPIVolume or ProjectedVolume.
// A volume can map different keys to the same path, in which case the value from the last registered key overwrites the others.
// Another possible scenario is when one path contains another key's path. Example:
// configMap:
//
//	name: myconfig
//	items:
//	  - key: key1
//	    path: path
//	  - key: key2
//	    path: path/path2
//
// In such cases we get either an `is a directory` or a `file exists` error.
func warningsForOverlappingVirtualPaths(volumes []api.Volume) []string {
var warnings []string
mkWarn := func(volName, volDesc, body string) string {
return fmt.Sprintf("volume %q (%s): overlapping paths: %s", volName, volDesc, body)
}
for _, v := range volumes {
if v.ConfigMap != nil && v.ConfigMap.Items != nil {
overlaps := checkVolumeMappingForOverlap(extractPaths(v.ConfigMap.Items, ""))
for _, ol := range overlaps {
warnings = append(warnings, mkWarn(v.Name, fmt.Sprintf("ConfigMap %q", v.ConfigMap.Name), ol))
}
}
if v.Secret != nil && v.Secret.Items != nil {
overlaps := checkVolumeMappingForOverlap(extractPaths(v.Secret.Items, ""))
for _, ol := range overlaps {
warnings = append(warnings, mkWarn(v.Name, fmt.Sprintf("Secret %q", v.Secret.SecretName), ol))
}
}
if v.DownwardAPI != nil && v.DownwardAPI.Items != nil {
overlaps := checkVolumeMappingForOverlap(extractPathsDownwardAPI(v.DownwardAPI.Items, ""))
for _, ol := range overlaps {
warnings = append(warnings, mkWarn(v.Name, "DownwardAPI", ol))
}
}
if v.Projected != nil {
var allPaths []pathAndSource
for _, source := range v.Projected.Sources {
if source == (api.VolumeProjection{}) {
warnings = append(warnings, fmt.Sprintf("volume %q (Projected) has no sources provided", v.Name))
continue
}
var sourcePaths []pathAndSource
switch {
case source.ConfigMap != nil && source.ConfigMap.Items != nil:
sourcePaths = extractPaths(source.ConfigMap.Items, fmt.Sprintf("ConfigMap %q", source.ConfigMap.Name))
case source.Secret != nil && source.Secret.Items != nil:
sourcePaths = extractPaths(source.Secret.Items, fmt.Sprintf("Secret %q", source.Secret.Name))
case source.DownwardAPI != nil && source.DownwardAPI.Items != nil:
sourcePaths = extractPathsDownwardAPI(source.DownwardAPI.Items, "DownwardAPI")
case source.ServiceAccountToken != nil:
sourcePaths = []pathAndSource{{source.ServiceAccountToken.Path, "ServiceAccountToken"}}
case source.ClusterTrustBundle != nil:
name := ""
if source.ClusterTrustBundle.Name != nil {
name = *source.ClusterTrustBundle.Name
} else {
name = *source.ClusterTrustBundle.SignerName
}
sourcePaths = []pathAndSource{{source.ClusterTrustBundle.Path, fmt.Sprintf("ClusterTrustBundle %q", name)}}
case source.PodCertificate != nil:
sourcePaths = []pathAndSource{}
if len(source.PodCertificate.CertificateChainPath) != 0 {
sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.CertificateChainPath, "PodCertificate chain"})
}
if len(source.PodCertificate.KeyPath) != 0 {
sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.KeyPath, "PodCertificate key"})
}
if len(source.PodCertificate.CredentialBundlePath) != 0 {
sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.CredentialBundlePath, "PodCertificate credential bundle"})
}
}
if len(sourcePaths) == 0 {
continue
}
for _, ps := range sourcePaths {
ps.path = strings.TrimRight(ps.path, string(os.PathSeparator))
if collisions := checkForOverlap(allPaths, ps); len(collisions) > 0 {
for _, c := range collisions {
warnings = append(warnings, mkWarn(v.Name, "Projected", fmt.Sprintf("%s with %s", ps.String(), c.String())))
}
}
allPaths = append(allPaths, ps)
}
}
}
}
return warnings
}
// this lets us track a path and where it came from, for better errors
type pathAndSource struct {
path string
source string
}
func (ps pathAndSource) String() string {
if ps.source != "" {
return fmt.Sprintf("%q (%s)", ps.path, ps.source)
}
return fmt.Sprintf("%q", ps.path)
}
func extractPaths(mapping []api.KeyToPath, source string) []pathAndSource {
result := make([]pathAndSource, 0, len(mapping))
for _, v := range mapping {
result = append(result, pathAndSource{v.Path, source})
}
return result
}
func extractPathsDownwardAPI(mapping []api.DownwardAPIVolumeFile, source string) []pathAndSource {
result := make([]pathAndSource, 0, len(mapping))
for _, v := range mapping {
result = append(result, pathAndSource{v.Path, source})
}
return result
}
func checkVolumeMappingForOverlap(paths []pathAndSource) []string {
pathSeparator := string(os.PathSeparator)
var warnings []string
var allPaths []pathAndSource
for _, ps := range paths {
ps.path = strings.TrimRight(ps.path, pathSeparator)
if collisions := checkForOverlap(allPaths, ps); len(collisions) > 0 {
for _, c := range collisions {
warnings = append(warnings, fmt.Sprintf("%s with %s", ps.String(), c.String()))
}
}
allPaths = append(allPaths, ps)
}
return warnings
}
func checkForOverlap(haystack []pathAndSource, needle pathAndSource) []pathAndSource {
pathSeparator := `/` // this check runs in the API server, use the OS-agnostic separator
if needle.path == "" {
return nil
}
var result []pathAndSource
for _, item := range haystack {
switch {
case item.path == "":
continue
case item == needle:
result = append(result, item)
case strings.HasPrefix(item.path+pathSeparator, needle.path+pathSeparator):
result = append(result, item)
case strings.HasPrefix(needle.path+pathSeparator, item.path+pathSeparator):
result = append(result, item)
}
}
return result
}
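// exampleCheckForOverlap is an illustrative sketch, not part of the original
// source: "certs/ca.crt" nests under "certs", so the needle collides with the
// existing entry. Both paths and sources are arbitrary samples.
func exampleCheckForOverlap() []pathAndSource {
	haystack := []pathAndSource{{path: "certs", source: `Secret "tls"`}}
	needle := pathAndSource{path: "certs/ca.crt", source: `ConfigMap "ca"`}
	return checkForOverlap(haystack, needle) // returns the "certs" entry
}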
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"strings"
api "k8s.io/kubernetes/pkg/apis/core"
utilnet "k8s.io/utils/net"
)
const (
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// IsAllowAll checks whether the utilnet.IPNetSet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets utilnet.IPNetSet) bool {
for _, s := range ipnets.StringSlice() {
if s == "0.0.0.0/0" {
return true
}
}
return false
}
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field of a service.
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation,
// extracting the source ranges to allow; if that is also absent it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *api.Service) (utilnet.IPNetSet, error) {
var ipnets utilnet.IPNetSet
var err error
// if SourceRange field is specified, ignore sourceRange annotation
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
specs := service.Spec.LoadBalancerSourceRanges
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
}
} else {
val := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey]
val = strings.TrimSpace(val)
if val == "" {
val = defaultLoadBalancerSourceRanges
}
specs := strings.Split(val, ",")
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", api.AnnotationLoadBalancerSourceRangesKey, val)
}
}
return ipnets, nil
}
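// exampleGetLoadBalancerSourceRanges is an illustrative sketch, not part of
// the original source: when the spec field is set, the annotation is ignored
// entirely. The CIDR is an arbitrary sample.
func exampleGetLoadBalancerSourceRanges() (utilnet.IPNetSet, error) {
	svc := &api.Service{}
	svc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/24"}
	return GetLoadBalancerSourceRanges(svc) // parses the single spec range
}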
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *api.Service) bool {
return service.Spec.Type == api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeNodePort ||
(service.Spec.Type == api.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// RequestsOnlyLocalTraffic checks if service requests OnlyLocal traffic.
func RequestsOnlyLocalTraffic(service *api.Service) bool {
if service.Spec.Type != api.ServiceTypeLoadBalancer &&
service.Spec.Type != api.ServiceTypeNodePort {
return false
}
return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.
func NeedsHealthCheck(service *api.Service) bool {
if service.Spec.Type != api.ServiceTypeLoadBalancer {
return false
}
return RequestsOnlyLocalTraffic(service)
}
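// exampleNeedsHealthCheck is an illustrative sketch, not part of the original
// source: only LoadBalancer services with a Local external traffic policy need
// a health check.
func exampleNeedsHealthCheck() bool {
	svc := &api.Service{}
	svc.Spec.Type = api.ServiceTypeLoadBalancer
	svc.Spec.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyLocal
	return NeedsHealthCheck(svc) // true
}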
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
func GetWarningsForService(service, oldService *api.Service) []string {
if service == nil {
return nil
}
var warnings []string
if _, ok := service.Annotations[api.DeprecatedAnnotationTopologyAwareHints]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %s is deprecated, please use %s instead", api.DeprecatedAnnotationTopologyAwareHints, api.AnnotationTopologyMode))
}
if helper.IsServiceIPSet(service) {
for i, clusterIP := range service.Spec.ClusterIPs {
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...)
}
}
if isHeadlessService(service) {
if service.Spec.LoadBalancerIP != "" {
warnings = append(warnings, "spec.loadBalancerIP is ignored for headless services")
}
if len(service.Spec.ExternalIPs) > 0 {
warnings = append(warnings, "spec.externalIPs is ignored for headless services")
}
if service.Spec.SessionAffinity != "" {
warnings = append(warnings, "spec.SessionAffinity is ignored for headless services")
}
}
for i, externalIP := range service.Spec.ExternalIPs {
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...)
}
if len(service.Spec.LoadBalancerIP) > 0 {
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...)
}
for i, cidr := range service.Spec.LoadBalancerSourceRanges {
warnings = append(warnings, utilvalidation.GetWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...)
}
if service.Spec.Type == api.ServiceTypeExternalName && len(service.Spec.ExternalIPs) > 0 {
warnings = append(warnings, fmt.Sprintf("spec.externalIPs is ignored when spec.type is %q", api.ServiceTypeExternalName))
}
if service.Spec.Type != api.ServiceTypeExternalName && service.Spec.ExternalName != "" {
warnings = append(warnings, fmt.Sprintf("spec.externalName is ignored when spec.type is not %q", api.ServiceTypeExternalName))
}
return warnings
}
func isHeadlessService(service *api.Service) bool {
return service != nil && service.Spec.Type == api.ServiceTypeClusterIP && service.Spec.ClusterIP == api.ClusterIPNone
}
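// exampleGetWarningsForService is an illustrative sketch, not part of the
// original source: a headless service with loadBalancerIP set draws one of the
// "ignored" warnings above. The address is from the documentation range.
func exampleGetWarningsForService() []string {
	svc := &api.Service{}
	svc.Spec.Type = api.ServiceTypeClusterIP
	svc.Spec.ClusterIP = api.ClusterIPNone
	svc.Spec.LoadBalancerIP = "192.0.2.10"
	return GetWarningsForService(svc, nil) // includes the loadBalancerIP warning
}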
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"testing"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// TestSelectableFieldLabelConversionsOfKind verifies that the given resource has a field
// label conversion defined for each of its selectable fields.
// fields contains the selectable fields of the resource.
// labelMap maps deprecated labels to their canonical names.
func TestSelectableFieldLabelConversionsOfKind(t *testing.T, apiVersion string, kind string, fields fields.Set, labelMap map[string]string) {
badFieldLabels := []string{
"name",
".name",
"bad",
"metadata",
"foo.bar",
}
value := "value"
gv, err := schema.ParseGroupVersion(apiVersion)
if err != nil {
t.Errorf("kind=%s: got unexpected error: %v", kind, err)
return
}
gvk := gv.WithKind(kind)
if len(fields) == 0 {
t.Logf("no selectable fields for kind %q, skipping", kind)
}
for label := range fields {
if label == "name" {
t.Logf("FIXME: \"name\" is deprecated by \"metadata.name\", it should be removed from selectable fields of kind=%s", kind)
continue
}
newLabel, newValue, err := legacyscheme.Scheme.ConvertFieldLabel(gvk, label, value)
if err != nil {
t.Errorf("kind=%s label=%s: got unexpected error: %v", kind, label, err)
} else {
expectedLabel := label
if l, exists := labelMap[label]; exists {
expectedLabel = l
}
if newLabel != expectedLabel {
t.Errorf("kind=%s label=%s: got unexpected label name (%q != %q)", kind, label, newLabel, expectedLabel)
}
if newValue != value {
t.Errorf("kind=%s label=%s: got unexpected new value (%q != %q)", kind, label, newValue, value)
}
}
}
for _, label := range badFieldLabels {
_, _, err := legacyscheme.Scheme.ConvertFieldLabel(gvk, label, "value")
if err == nil {
t.Errorf("kind=%s label=%s: got unexpected non-error", kind, label)
}
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"sigs.k8s.io/randfill"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apitesting "k8s.io/apimachinery/pkg/api/apitesting"
"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
metafuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
admissionregistrationfuzzer "k8s.io/kubernetes/pkg/apis/admissionregistration/fuzzer"
"k8s.io/kubernetes/pkg/apis/apps"
appsfuzzer "k8s.io/kubernetes/pkg/apis/apps/fuzzer"
autoscalingfuzzer "k8s.io/kubernetes/pkg/apis/autoscaling/fuzzer"
batchfuzzer "k8s.io/kubernetes/pkg/apis/batch/fuzzer"
certificatesfuzzer "k8s.io/kubernetes/pkg/apis/certificates/fuzzer"
api "k8s.io/kubernetes/pkg/apis/core"
corefuzzer "k8s.io/kubernetes/pkg/apis/core/fuzzer"
discoveryfuzzer "k8s.io/kubernetes/pkg/apis/discovery/fuzzer"
extensionsfuzzer "k8s.io/kubernetes/pkg/apis/extensions/fuzzer"
flowcontrolfuzzer "k8s.io/kubernetes/pkg/apis/flowcontrol/fuzzer"
networkingfuzzer "k8s.io/kubernetes/pkg/apis/networking/fuzzer"
policyfuzzer "k8s.io/kubernetes/pkg/apis/policy/fuzzer"
rbacfuzzer "k8s.io/kubernetes/pkg/apis/rbac/fuzzer"
resourcefuzzer "k8s.io/kubernetes/pkg/apis/resource/fuzzer"
schedulingfuzzer "k8s.io/kubernetes/pkg/apis/scheduling/fuzzer"
storagefuzzer "k8s.io/kubernetes/pkg/apis/storage/fuzzer"
)
// overrideGenericFuncs overrides some generic fuzzer funcs from k8s.io/apiserver in order to have more realistic
// values in a Kubernetes context.
func overrideGenericFuncs(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(j *runtime.Object, c randfill.Continue) {
// TODO: uncomment when round trip starts from a versioned object
if true { // c.Bool() {
*j = &runtime.Unknown{
// We do not set TypeMeta here because it is not carried through a round trip
Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`),
ContentType: runtime.ContentTypeJSON,
}
} else {
types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
t := types[c.Rand.Intn(len(types))]
c.Fill(t)
*j = t
}
},
func(r *runtime.RawExtension, c randfill.Continue) {
// Pick an arbitrary type and fuzz it
types := []runtime.Object{&api.Pod{}, &apps.Deployment{}, &api.Service{}}
obj := types[c.Rand.Intn(len(types))]
c.Fill(obj)
var codec runtime.Codec
switch obj.(type) {
case *apps.Deployment:
codec = apitesting.TestCodec(codecs, appsv1.SchemeGroupVersion)
default:
codec = apitesting.TestCodec(codecs, v1.SchemeGroupVersion)
}
// Convert the object to raw bytes
bytes, err := runtime.Encode(codec, obj)
if err != nil {
panic(fmt.Sprintf("Failed to encode object: %v", err))
}
// Set the bytes field on the RawExtension
r.Raw = bytes
},
}
}
// FuzzerFuncs is a list of fuzzer functions
var FuzzerFuncs = fuzzer.MergeFuzzerFuncs(
overrideGenericFuncs,
corefuzzer.Funcs,
extensionsfuzzer.Funcs,
appsfuzzer.Funcs,
batchfuzzer.Funcs,
autoscalingfuzzer.Funcs,
rbacfuzzer.Funcs,
policyfuzzer.Funcs,
resourcefuzzer.Funcs,
certificatesfuzzer.Funcs,
admissionregistrationfuzzer.Funcs,
storagefuzzer.Funcs,
networkingfuzzer.Funcs,
metafuzzer.Funcs,
schedulingfuzzer.Funcs,
discoveryfuzzer.Funcs,
flowcontrolfuzzer.Funcs,
)
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"bytes"
"sort"
"strconv"
"testing"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtimetest "k8s.io/apimachinery/pkg/runtime/testing"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// VerifyVersionedValidationEquivalence tests that all versions of an API return equivalent validation errors.
func VerifyVersionedValidationEquivalence(t *testing.T, obj, old k8sruntime.Object, subresources ...string) {
t.Helper()
// Accumulate errors from all versioned validation, per version.
all := map[string]field.ErrorList{}
accumulate := func(t *testing.T, gv string, errs field.ErrorList) {
all[gv] = errs
}
// Convert versioned object to internal format before validation.
// runtimetest.RunValidationForEachVersion requires unversioned (internal) objects as input.
internalObj, err := convertToInternal(t, legacyscheme.Scheme, obj)
if err != nil {
t.Fatal(err)
}
if internalObj == nil {
return
}
if old == nil {
runtimetest.RunValidationForEachVersion(t, legacyscheme.Scheme, []string{}, internalObj, accumulate, subresources...)
} else {
// Convert old versioned object to internal format before validation.
// runtimetest.RunUpdateValidationForEachVersion requires unversioned (internal) objects as input.
internalOld, err := convertToInternal(t, legacyscheme.Scheme, old)
if err != nil {
t.Fatal(err)
}
if internalOld == nil {
return
}
runtimetest.RunUpdateValidationForEachVersion(t, legacyscheme.Scheme, []string{}, internalObj, internalOld, accumulate, subresources...)
}
// Make a copy so we can modify it.
other := map[string]field.ErrorList{}
// Index for nicer output.
keys := []string{}
for k, v := range all {
other[k] = v
keys = append(keys, k)
}
sort.Strings(keys)
// Compare each lhs to each rhs.
for _, lk := range keys {
lv := all[lk]
// remove lk to prevent comparison with itself; this iteration will
// compare it to every version it has not yet been compared to.
// e.g. [1, 2, 3] vs. [1, 2, 3] yields:
// 1 vs. 2
// 1 vs. 3
// 2 vs. 3
delete(other, lk)
// compare lk against the remaining versions
for _, rk := range keys {
rv, found := other[rk]
if !found {
continue // done already
}
if len(lv) != len(rv) {
t.Errorf("different error count (%d vs. %d)\n%s: %v\n%s: %v", len(lv), len(rv), lk, fmtErrs(lv), rk, fmtErrs(rv))
continue
}
next := false
for i := range lv {
if l, r := lv[i], rv[i]; l.Type != r.Type || l.Detail != r.Detail {
t.Errorf("different errors\n%s: %v\n%s: %v", lk, fmtErrs(lv), rk, fmtErrs(rv))
next = true
break
}
}
if next {
continue
}
}
}
}
// helper for nicer output
func fmtErrs(errs field.ErrorList) string {
if len(errs) == 0 {
return "<no errors>"
}
if len(errs) == 1 {
return strconv.Quote(errs[0].Error())
}
buf := bytes.Buffer{}
for _, e := range errs {
buf.WriteString("\n")
buf.WriteString(strconv.Quote(e.Error()))
}
return buf.String()
}
func convertToInternal(t *testing.T, scheme *k8sruntime.Scheme, obj k8sruntime.Object) (k8sruntime.Object, error) {
t.Helper()
gvks, _, err := scheme.ObjectKinds(obj)
if err != nil {
t.Fatal(err)
}
if len(gvks) == 0 {
t.Fatal("no GVKs found for object")
}
gvk := gvks[0]
if gvk.Version == k8sruntime.APIVersionInternal {
return obj, nil
}
gvk.Version = k8sruntime.APIVersionInternal
if !scheme.Recognizes(gvk) {
t.Logf("no internal object found for GroupKind %s", gvk.GroupKind().String())
return nil, nil
}
return scheme.ConvertToVersion(obj, schema.GroupVersion{Group: gvk.Group, Version: k8sruntime.APIVersionInternal})
}
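// Illustrative sketch (hypothetical test, not part of this package): verifying
// that create and update validation agree across all versions, assuming a
// versioned object such as a core v1 Pod is available:
//
//	func TestPodValidationEquivalence(t *testing.T) {
//		pod := &v1.Pod{ /* populated test object */ }
//		VerifyVersionedValidationEquivalence(t, pod, nil)            // create
//		VerifyVersionedValidationEquivalence(t, pod, pod.DeepCopy()) // update
//	}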
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"iter"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
// EphemeralContainers is for ephemeral containers
EphemeralContainers
)
// AllContainers specifies that all containers be visited
const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers
// AllFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except those guarded by a feature gate.
func AllFeatureEnabledContainers() ContainerType {
return AllContainers
}
// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)
func skipEmptyNames(visitor Visitor) Visitor {
return func(name string) bool {
if len(name) == 0 {
// continue visiting
return true
}
// delegate to visitor
return visitor(name)
}
}
// VisitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. VisitContainers returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
for c, t := range ContainerIter(podSpec, mask) {
if !visitor(c, t) {
return false
}
}
return true
}
// ContainerIter returns an iterator over all containers in the given pod spec whose type is set in mask.
// The iteration order is InitContainers, then main Containers, then EphemeralContainers.
func ContainerIter(podSpec *v1.PodSpec, mask ContainerType) iter.Seq2[*v1.Container, ContainerType] {
return func(yield func(*v1.Container, ContainerType) bool) {
if mask&InitContainers != 0 {
for i := range podSpec.InitContainers {
if !yield(&podSpec.InitContainers[i], InitContainers) {
return
}
}
}
if mask&Containers != 0 {
for i := range podSpec.Containers {
if !yield(&podSpec.Containers[i], Containers) {
return
}
}
}
if mask&EphemeralContainers != 0 {
for i := range podSpec.EphemeralContainers {
if !yield((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
return
}
}
}
}
}
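// Illustrative sketch: ranging over ContainerIter with a mask. The iterator
// yields a pointer, so the container can be mutated in place:
//
//	for c, t := range ContainerIter(&pod.Spec, InitContainers|Containers) {
//		fmt.Printf("%s is a container of type %v\n", c.Name, t)
//	}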
// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
for _, reference := range pod.Spec.ImagePullSecrets {
if !visitor(reference.Name) {
return false
}
}
VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool {
return visitContainerSecretNames(c, visitor)
})
var source *v1.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.AzureFile != nil:
if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) {
return false
}
case source.CephFS != nil:
if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
return false
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
return false
}
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].Secret != nil {
if !visitor(source.Projected.Sources[j].Secret.Name) {
return false
}
}
}
case source.RBD != nil:
if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) {
return false
}
case source.Secret != nil:
if !visitor(source.Secret.SecretName) {
return false
}
case source.ScaleIO != nil:
if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) {
return false
}
case source.ISCSI != nil:
if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) {
return false
}
case source.StorageOS != nil:
if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) {
return false
}
case source.CSI != nil:
if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) {
return false
}
}
}
return true
}
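// Illustrative sketch: collecting every secret name a pod references into a
// set, using a visitor that never short-circuits:
//
//	names := map[string]struct{}{}
//	VisitPodSecretNames(pod, func(name string) bool {
//		names[name] = struct{}{}
//		return true // keep visiting
//	})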
// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference
func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.SecretRef != nil {
if !visitor(env.SecretRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil {
if !visitor(envVar.ValueFrom.SecretKeyRef.Name) {
return false
}
}
}
return true
}
// VisitPodConfigmapNames invokes the visitor function with the name of every configmap
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
visitor = skipEmptyNames(visitor)
VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool {
return visitContainerConfigmapNames(c, visitor)
})
var source *v1.VolumeSource
for i := range pod.Spec.Volumes {
source = &pod.Spec.Volumes[i].VolumeSource
switch {
case source.Projected != nil:
for j := range source.Projected.Sources {
if source.Projected.Sources[j].ConfigMap != nil {
if !visitor(source.Projected.Sources[j].ConfigMap.Name) {
return false
}
}
}
case source.ConfigMap != nil:
if !visitor(source.ConfigMap.Name) {
return false
}
}
}
return true
}
// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference
func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool {
for _, env := range container.EnvFrom {
if env.ConfigMapRef != nil {
if !visitor(env.ConfigMapRef.Name) {
return false
}
}
}
for _, envVar := range container.Env {
if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil {
if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) {
return false
}
}
}
return true
}
// GetContainerStatus extracts the status of container "name" from "statuses".
// It returns the status and true if "name" exists, else a zero status and false.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
for i := range statuses {
if statuses[i].Name == name {
return statuses[i], true
}
}
return v1.ContainerStatus{}, false
}
// GetExistingContainerStatus extracts the status of container "name" from "statuses"
// and returns it. If "name" does not exist, a zero-valued ContainerStatus is returned.
func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus {
status, _ := GetContainerStatus(statuses, name)
return status
}
// GetIndexOfContainerStatus gets the index of the status of container "name" in "statuses".
// It returns (index, true) if "name" exists, else (0, false).
func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) {
for i := range statuses {
if statuses[i].Name == name {
return i, true
}
}
return 0, false
}
// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds <= current time
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
if !IsPodReady(pod) {
return false
}
c := GetPodReadyCondition(pod.Status)
minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Compare(now.Time) <= 0) {
return true
}
return false
}
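// Worked example: with minReadySeconds=10 and a Ready condition whose
// LastTransitionTime is 15s in the past, LastTransitionTime+10s <= now, so the
// pod is available:
//
//	now := metav1.Now()
//	// assumes pod.Status carries a Ready=True condition transitioned 15s ago
//	available := IsPodAvailable(pod, 10, now) // true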
// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *v1.Pod) bool {
return IsPodReadyConditionTrue(pod.Status)
}
// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress.
func IsPodTerminal(pod *v1.Pod) bool {
return IsPodPhaseTerminal(pod.Status.Phase)
}
// IsPodPhaseTerminal returns true if the pod's phase is terminal.
func IsPodPhaseTerminal(phase v1.PodPhase) bool {
return phase == v1.PodFailed || phase == v1.PodSucceeded
}
// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
condition := GetPodReadyCondition(status)
return condition != nil && condition.Status == v1.ConditionTrue
}
// IsContainersReadyConditionTrue returns true if the pod's ContainersReady condition is true; false otherwise.
func IsContainersReadyConditionTrue(status v1.PodStatus) bool {
condition := GetContainersReadyCondition(status)
return condition != nil && condition.Status == v1.ConditionTrue
}
// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
_, condition := GetPodCondition(&status, v1.PodReady)
return condition
}
// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition {
_, condition := GetPodCondition(&status, v1.ContainersReady)
return condition
}
// GetPodCondition extracts the provided condition from the given status.
// Returns -1 and nil if the condition is not present; otherwise returns the
// index of the located condition and the condition itself.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if status == nil {
return -1, nil
}
return GetPodConditionFromList(status.Conditions, conditionType)
}
// GetPodConditionFromList extracts the provided condition from the given list of conditions and
// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
condition.LastTransitionTime = metav1.Now()
// Try to find this pod condition.
conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
if oldCondition == nil {
// We are adding new pod condition.
status.Conditions = append(status.Conditions, *condition)
return true
}
// We are updating an existing condition, so we need to check if it has changed.
if condition.Status == oldCondition.Status {
condition.LastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)
status.Conditions[conditionIndex] = *condition
// Return true if one of the fields have changed.
return !isEqual
}
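// Illustrative sketch: marking a pod Ready and reacting only when the
// condition actually changed:
//
//	changed := UpdatePodCondition(&pod.Status, &v1.PodCondition{
//		Type:   v1.PodReady,
//		Status: v1.ConditionTrue,
//		Reason: "ContainersReady",
//	})
//	if changed {
//		// persist the updated status
//	}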
// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways.
// This function does not check whether the container passed to it is actually an init
// container; it only checks whether the container's restart policy is set to Always.
func IsRestartableInitContainer(initContainer *v1.Container) bool {
if initContainer == nil || initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}
// IsContainerRestartable returns true if the container can be restarted. A container can be
// restarted if it has a pod-level restart policy "Always" or "OnFailure" that is not overridden
// by a container-level restart policy, a container-level restart policy "Always" or "OnFailure",
// or a container-level restart rule with action "Restart".
func IsContainerRestartable(pod v1.PodSpec, container v1.Container) bool {
if container.RestartPolicy != nil {
for _, rule := range container.RestartPolicyRules {
if rule.Action == v1.ContainerRestartRuleActionRestart {
return true
}
}
return *container.RestartPolicy != v1.ContainerRestartPolicyNever
}
return pod.RestartPolicy != v1.RestartPolicyNever
}
// ContainerShouldRestart checks if a container should be restarted by its restart policy.
// First, the container-level restartPolicyRules are evaluated in order, and an action is
// taken if any rule matches. Second, the container-level restart policy is used. Lastly, if
// no container-level policy is specified, the pod-level restart policy is used.
func ContainerShouldRestart(container v1.Container, pod v1.PodSpec, exitCode int32) bool {
if container.RestartPolicy != nil {
rule, ok := findMatchingContainerRestartRule(container, exitCode)
if ok {
switch rule.Action {
case v1.ContainerRestartRuleActionRestart:
return true
default:
// Do nothing, fallback to container-level restart policy.
}
}
// Check container-level restart policy if no rules matched.
switch *container.RestartPolicy {
case v1.ContainerRestartPolicyAlways:
return true
case v1.ContainerRestartPolicyOnFailure:
return exitCode != 0
case v1.ContainerRestartPolicyNever:
return false
default:
// Do nothing, fallback to pod-level restart policy.
}
}
switch pod.RestartPolicy {
case v1.RestartPolicyAlways:
return true
case v1.RestartPolicyOnFailure:
return exitCode != 0
case v1.RestartPolicyNever:
return false
default:
// Default policy is Always, so we return true here.
return true
}
}
// findMatchingContainerRestartRule returns a rule and true if the exitCode matched
// one of the restart rules for the given container. Returns an empty rule and
// false if no rules matched.
func findMatchingContainerRestartRule(container v1.Container, exitCode int32) (rule v1.ContainerRestartRule, found bool) {
for _, rule := range container.RestartPolicyRules {
if rule.ExitCodes != nil {
exitCodeMatched := false
for _, code := range rule.ExitCodes.Values {
if code == exitCode {
exitCodeMatched = true
}
}
switch rule.ExitCodes.Operator {
case v1.ContainerRestartRuleOnExitCodesOpIn:
if exitCodeMatched {
return rule, true
}
case v1.ContainerRestartRuleOnExitCodesOpNotIn:
if !exitCodeMatched {
return rule, true
}
default:
// Do nothing, continue to the next rule.
}
}
}
return v1.ContainerRestartRule{}, false
}
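// Illustrative sketch (field and type names assumed from this file's usage of
// the restart-rule API): a container whose rules force a restart on exit code
// 42 but whose OnFailure policy ignores a clean exit:
//
//	onFailure := v1.ContainerRestartPolicyOnFailure
//	c := v1.Container{
//		RestartPolicy: &onFailure,
//		RestartPolicyRules: []v1.ContainerRestartRule{{
//			Action: v1.ContainerRestartRuleActionRestart,
//			ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
//				Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
//				Values:   []int32{42},
//			},
//		}},
//	}
//	spec := v1.PodSpec{RestartPolicy: v1.RestartPolicyNever}
//	ContainerShouldRestart(c, spec, 42) // true: rule matched
//	ContainerShouldRestart(c, spec, 0)  // false: OnFailure with exit code 0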
// CalculatePodStatusObservedGeneration calculates the observedGeneration for the pod status.
// This is used to track the generation of the pod that was observed by the kubelet.
// The observedGeneration is set to the pod's generation when the feature gate
// PodObservedGenerationTracking is enabled OR if status.observedGeneration is already set.
// This protects against an infinite loop of kubelet trying to clear the value after the FG is turned off, and
// the API server preserving existing values when an incoming update tries to clear it.
func CalculatePodStatusObservedGeneration(pod *v1.Pod) int64 {
if pod.Status.ObservedGeneration != 0 || utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) {
return pod.Generation
}
return 0
}
// CalculatePodConditionObservedGeneration calculates the observedGeneration for a particular pod condition.
// The observedGeneration is set to the pod's generation when the feature gate
// PodObservedGenerationTracking is enabled OR if condition[].observedGeneration is already set.
// This protects against an infinite loop of kubelet trying to clear the value after the FG is turned off, and
// the API server preserving existing values when an incoming update tries to clear it.
func CalculatePodConditionObservedGeneration(podStatus *v1.PodStatus, generation int64, conditionType v1.PodConditionType) int64 {
if podStatus == nil {
return 0
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) {
return generation
}
for _, condition := range podStatus.Conditions {
if condition.Type == conditionType && condition.ObservedGeneration != 0 {
return generation
}
}
return 0
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"fmt"
"math"
"strconv"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/features"
)
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
requestQuantity := resource.Quantity{}
switch resourceName {
case v1.ResourceCPU:
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
requestQuantity = resource.Quantity{Format: resource.BinarySI}
default:
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
}
// Supported pod level resources will be used instead of container level ones when available
hasPodLevelResources := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod)
if rQuantity, ok := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{SkipContainerLevelResources: hasPodLevelResources, ExcludeOverhead: true})[resourceName]; ok {
requestQuantity.Add(rQuantity)
}
// Add overhead for running a pod
// to the total requests if the resource total is non-zero
if pod.Spec.Overhead != nil {
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
requestQuantity.Add(podOverhead)
}
}
return requestQuantity
}
// GetResourceRequest finds and returns the request value for a specific resource.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
if resource == v1.ResourcePods {
return 1
}
requestQuantity := GetResourceRequestQuantity(pod, resource)
if resource == v1.ResourceCPU {
return requestQuantity.MilliValue()
}
return requestQuantity.Value()
}
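// Worked example: CPU requests are reported in millicores, other resources in
// base units. For a pod requesting cpu=500m and memory=128Mi:
//
//	GetResourceRequest(pod, v1.ResourceCPU)    // 500
//	GetResourceRequest(pod, v1.ResourceMemory) // 134217728
//	GetResourceRequest(pod, v1.ResourcePods)   // always 1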
// ExtractResourceValueByContainerName extracts the value of a resource
// for the container with the given name.
func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string) (string, error) {
container, err := findContainerInPod(pod, containerName)
if err != nil {
return "", err
}
return ExtractContainerResourceValue(fs, container)
}
// ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource
// for the container with the given name, merging node allocatable into unset container limits first.
func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) {
realContainer, err := findContainerInPod(pod, containerName)
if err != nil {
return "", err
}
container := realContainer.DeepCopy()
MergeContainerResourceLimits(container, nodeAllocatable)
return ExtractContainerResourceValue(fs, container)
}
// ExtractContainerResourceValue extracts the value of a resource
// in an already known container
func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) {
divisor := resource.Quantity{}
if divisor.Cmp(fs.Divisor) == 0 {
divisor = resource.MustParse("1")
} else {
divisor = fs.Divisor
}
switch fs.Resource {
case "limits.cpu":
return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor)
case "limits.memory":
return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor)
case "limits.ephemeral-storage":
return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor)
case "requests.cpu":
return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor)
case "requests.memory":
return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor)
case "requests.ephemeral-storage":
return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor)
}
// handle extended standard resources with dynamic names
// example: requests.hugepages-<pageSize> or limits.hugepages-<pageSize>
if strings.HasPrefix(fs.Resource, "requests.") {
resourceName := v1.ResourceName(strings.TrimPrefix(fs.Resource, "requests."))
if IsHugePageResourceName(resourceName) {
return convertResourceHugePagesToString(container.Resources.Requests.Name(resourceName, resource.BinarySI), divisor)
}
}
if strings.HasPrefix(fs.Resource, "limits.") {
resourceName := v1.ResourceName(strings.TrimPrefix(fs.Resource, "limits."))
if IsHugePageResourceName(resourceName) {
return convertResourceHugePagesToString(container.Resources.Limits.Name(resourceName, resource.BinarySI), divisor)
}
}
return "", fmt.Errorf("unsupported container resource : %v", fs.Resource)
}
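// Illustrative sketch: the divisor controls the unit of the result, and values
// are rounded up. For a container with limits.cpu=500m:
//
//	fs := &v1.ResourceFieldSelector{
//		Resource: "limits.cpu",
//		Divisor:  resource.MustParse("1m"),
//	}
//	v, err := ExtractContainerResourceValue(fs, container) // "500"
//	// with the default divisor of "1", the result would be "1" (ceil of 0.5)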
// convertResourceCPUToString converts cpu value to the format of divisor and returns
// ceiling of the value.
func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) {
c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue())))
return strconv.FormatInt(c, 10), nil
}
// convertResourceMemoryToString converts memory value to the format of divisor and returns
// ceiling of the value.
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
return strconv.FormatInt(m, 10), nil
}
// convertResourceHugePagesToString converts hugepages value to the format of divisor and returns
// ceiling of the value.
func convertResourceHugePagesToString(hugePages *resource.Quantity, divisor resource.Quantity) (string, error) {
m := int64(math.Ceil(float64(hugePages.Value()) / float64(divisor.Value())))
return strconv.FormatInt(m, 10), nil
}
// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns
// ceiling of the value.
func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) {
m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value())))
return strconv.FormatInt(m, 10), nil
}
// findContainerInPod finds a container by its name in the provided pod
func findContainerInPod(pod *v1.Pod, containerName string) (*v1.Container, error) {
for _, container := range pod.Spec.Containers {
if container.Name == containerName {
return &container, nil
}
}
for _, container := range pod.Spec.InitContainers {
if container.Name == containerName {
return &container, nil
}
}
return nil, fmt.Errorf("container %s not found", containerName)
}
// MergeContainerResourceLimits checks if a limit is applied for
// the container, and if not, it sets the limit to the passed resource list.
func MergeContainerResourceLimits(container *v1.Container,
allocatable v1.ResourceList) {
if container.Resources.Limits == nil {
container.Resources.Limits = make(v1.ResourceList)
}
// NOTE: we exclude hugepages-* resources because hugepages are never overcommitted.
// This means that the container always has a limit specified.
for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage} {
if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
if cap, exists := allocatable[resource]; exists {
container.Resources.Limits[resource] = cap.DeepCopy()
}
}
}
}
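// Illustrative sketch: defaulting unset limits from node allocatable before
// resolving a resource field selector:
//
//	allocatable := v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("4"),
//		v1.ResourceMemory: resource.MustParse("16Gi"),
//	}
//	MergeContainerResourceLimits(container, allocatable)
//	// any unset or zero cpu/memory/ephemeral-storage limit now equals the
//	// node's allocatable quantity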
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
utilnet "k8s.io/utils/net"
)
const (
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// IsAllowAll checks whether the given utilnet.IPNetSet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets utilnet.IPNetSet) bool {
for _, s := range ipnets.StringSlice() {
if s == "0.0.0.0/0" {
return true
}
}
return false
}
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field of a service.
// If that field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey
// annotation, extracting the source ranges to allow; if neither is present, it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *v1.Service) (utilnet.IPNetSet, error) {
var ipnets utilnet.IPNetSet
var err error
// if SourceRange field is specified, ignore sourceRange annotation
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
specs := service.Spec.LoadBalancerSourceRanges
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
}
} else {
val := service.Annotations[v1.AnnotationLoadBalancerSourceRangesKey]
val = strings.TrimSpace(val)
if val == "" {
val = defaultLoadBalancerSourceRanges
}
specs := strings.Split(val, ",")
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", v1.AnnotationLoadBalancerSourceRangesKey, val)
}
}
return ipnets, nil
}
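// Illustrative sketch: resolving a service's allowed source ranges and
// checking for the allow-all default:
//
//	svc := &v1.Service{Spec: v1.ServiceSpec{
//		LoadBalancerSourceRanges: []string{"10.0.0.0/24", "192.168.2.0/24"},
//	}}
//	ipnets, err := GetLoadBalancerSourceRanges(svc)
//	if err == nil && !IsAllowAll(ipnets) {
//		// restrict the load balancer to the parsed CIDRs
//	}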
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *v1.Service) bool {
return service.Spec.Type == v1.ServiceTypeLoadBalancer ||
service.Spec.Type == v1.ServiceTypeNodePort ||
(service.Spec.Type == v1.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// ExternalPolicyLocal checks if service is externally accessible and has ETP = Local.
func ExternalPolicyLocal(service *v1.Service) bool {
if !ExternallyAccessible(service) {
return false
}
return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal
}
// InternalPolicyLocal checks if service has ITP = Local.
func InternalPolicyLocal(service *v1.Service) bool {
if service.Spec.InternalTrafficPolicy == nil {
return false
}
return *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.
func NeedsHealthCheck(service *v1.Service) bool {
if service.Spec.Type != v1.ServiceTypeLoadBalancer {
return false
}
return ExternalPolicyLocal(service)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the admission API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/admission"
v1 "k8s.io/kubernetes/pkg/apis/admission/v1"
"k8s.io/kubernetes/pkg/apis/admission/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(admission.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "admission.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder for this package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is the handler that adds items to the scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&AdmissionReview{},
)
return nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "admission.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &admissionv1.SchemeBuilder
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
admission "k8s.io/kubernetes/pkg/apis/admission"
authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*admissionv1.AdmissionRequest)(nil), (*admission.AdmissionRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AdmissionRequest_To_admission_AdmissionRequest(a.(*admissionv1.AdmissionRequest), b.(*admission.AdmissionRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionRequest)(nil), (*admissionv1.AdmissionRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionRequest_To_v1_AdmissionRequest(a.(*admission.AdmissionRequest), b.(*admissionv1.AdmissionRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionv1.AdmissionResponse)(nil), (*admission.AdmissionResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AdmissionResponse_To_admission_AdmissionResponse(a.(*admissionv1.AdmissionResponse), b.(*admission.AdmissionResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionResponse)(nil), (*admissionv1.AdmissionResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionResponse_To_v1_AdmissionResponse(a.(*admission.AdmissionResponse), b.(*admissionv1.AdmissionResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionv1.AdmissionReview)(nil), (*admission.AdmissionReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AdmissionReview_To_admission_AdmissionReview(a.(*admissionv1.AdmissionReview), b.(*admission.AdmissionReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionReview)(nil), (*admissionv1.AdmissionReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionReview_To_v1_AdmissionReview(a.(*admission.AdmissionReview), b.(*admissionv1.AdmissionReview), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_AdmissionRequest_To_admission_AdmissionRequest(in *admissionv1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Kind = in.Kind
out.Resource = in.Resource
out.SubResource = in.SubResource
out.RequestKind = (*metav1.GroupVersionKind)(unsafe.Pointer(in.RequestKind))
out.RequestResource = (*metav1.GroupVersionResource)(unsafe.Pointer(in.RequestResource))
out.RequestSubResource = in.RequestSubResource
out.Name = in.Name
out.Namespace = in.Namespace
out.Operation = admission.Operation(in.Operation)
if err := authenticationv1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.DryRun = (*bool)(unsafe.Pointer(in.DryRun))
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Options, &out.Options, s); err != nil {
return err
}
return nil
}
// Convert_v1_AdmissionRequest_To_admission_AdmissionRequest is an autogenerated conversion function.
func Convert_v1_AdmissionRequest_To_admission_AdmissionRequest(in *admissionv1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error {
return autoConvert_v1_AdmissionRequest_To_admission_AdmissionRequest(in, out, s)
}
func autoConvert_admission_AdmissionRequest_To_v1_AdmissionRequest(in *admission.AdmissionRequest, out *admissionv1.AdmissionRequest, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Kind = in.Kind
out.Resource = in.Resource
out.SubResource = in.SubResource
out.RequestKind = (*metav1.GroupVersionKind)(unsafe.Pointer(in.RequestKind))
out.RequestResource = (*metav1.GroupVersionResource)(unsafe.Pointer(in.RequestResource))
out.RequestSubResource = in.RequestSubResource
out.Name = in.Name
out.Namespace = in.Namespace
out.Operation = admissionv1.Operation(in.Operation)
if err := authenticationv1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.DryRun = (*bool)(unsafe.Pointer(in.DryRun))
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Options, &out.Options, s); err != nil {
return err
}
return nil
}
// Convert_admission_AdmissionRequest_To_v1_AdmissionRequest is an autogenerated conversion function.
func Convert_admission_AdmissionRequest_To_v1_AdmissionRequest(in *admission.AdmissionRequest, out *admissionv1.AdmissionRequest, s conversion.Scope) error {
return autoConvert_admission_AdmissionRequest_To_v1_AdmissionRequest(in, out, s)
}
func autoConvert_v1_AdmissionResponse_To_admission_AdmissionResponse(in *admissionv1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Allowed = in.Allowed
out.Result = (*metav1.Status)(unsafe.Pointer(in.Result))
out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch))
out.PatchType = (*admission.PatchType)(unsafe.Pointer(in.PatchType))
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
out.Warnings = *(*[]string)(unsafe.Pointer(&in.Warnings))
return nil
}
// Convert_v1_AdmissionResponse_To_admission_AdmissionResponse is an autogenerated conversion function.
func Convert_v1_AdmissionResponse_To_admission_AdmissionResponse(in *admissionv1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error {
return autoConvert_v1_AdmissionResponse_To_admission_AdmissionResponse(in, out, s)
}
func autoConvert_admission_AdmissionResponse_To_v1_AdmissionResponse(in *admission.AdmissionResponse, out *admissionv1.AdmissionResponse, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Allowed = in.Allowed
out.Result = (*metav1.Status)(unsafe.Pointer(in.Result))
out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch))
out.PatchType = (*admissionv1.PatchType)(unsafe.Pointer(in.PatchType))
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
out.Warnings = *(*[]string)(unsafe.Pointer(&in.Warnings))
return nil
}
// Convert_admission_AdmissionResponse_To_v1_AdmissionResponse is an autogenerated conversion function.
func Convert_admission_AdmissionResponse_To_v1_AdmissionResponse(in *admission.AdmissionResponse, out *admissionv1.AdmissionResponse, s conversion.Scope) error {
return autoConvert_admission_AdmissionResponse_To_v1_AdmissionResponse(in, out, s)
}
func autoConvert_v1_AdmissionReview_To_admission_AdmissionReview(in *admissionv1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(admission.AdmissionRequest)
if err := Convert_v1_AdmissionRequest_To_admission_AdmissionRequest(*in, *out, s); err != nil {
return err
}
} else {
out.Request = nil
}
out.Response = (*admission.AdmissionResponse)(unsafe.Pointer(in.Response))
return nil
}
// Convert_v1_AdmissionReview_To_admission_AdmissionReview is an autogenerated conversion function.
func Convert_v1_AdmissionReview_To_admission_AdmissionReview(in *admissionv1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
return autoConvert_v1_AdmissionReview_To_admission_AdmissionReview(in, out, s)
}
func autoConvert_admission_AdmissionReview_To_v1_AdmissionReview(in *admission.AdmissionReview, out *admissionv1.AdmissionReview, s conversion.Scope) error {
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(admissionv1.AdmissionRequest)
if err := Convert_admission_AdmissionRequest_To_v1_AdmissionRequest(*in, *out, s); err != nil {
return err
}
} else {
out.Request = nil
}
out.Response = (*admissionv1.AdmissionResponse)(unsafe.Pointer(in.Response))
return nil
}
// Convert_admission_AdmissionReview_To_v1_AdmissionReview is an autogenerated conversion function.
func Convert_admission_AdmissionReview_To_v1_AdmissionReview(in *admission.AdmissionReview, out *admissionv1.AdmissionReview, s conversion.Scope) error {
return autoConvert_admission_AdmissionReview_To_v1_AdmissionReview(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
admissionv1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "admission.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &admissionv1beta1.SchemeBuilder
// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
admission "k8s.io/kubernetes/pkg/apis/admission"
authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*admissionv1beta1.AdmissionRequest)(nil), (*admission.AdmissionRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(a.(*admissionv1beta1.AdmissionRequest), b.(*admission.AdmissionRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionRequest)(nil), (*admissionv1beta1.AdmissionRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(a.(*admission.AdmissionRequest), b.(*admissionv1beta1.AdmissionRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionv1beta1.AdmissionResponse)(nil), (*admission.AdmissionResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(a.(*admissionv1beta1.AdmissionResponse), b.(*admission.AdmissionResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionResponse)(nil), (*admissionv1beta1.AdmissionResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(a.(*admission.AdmissionResponse), b.(*admissionv1beta1.AdmissionResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionv1beta1.AdmissionReview)(nil), (*admission.AdmissionReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview(a.(*admissionv1beta1.AdmissionReview), b.(*admission.AdmissionReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admission.AdmissionReview)(nil), (*admissionv1beta1.AdmissionReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview(a.(*admission.AdmissionReview), b.(*admissionv1beta1.AdmissionReview), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *admissionv1beta1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Kind = in.Kind
out.Resource = in.Resource
out.SubResource = in.SubResource
out.RequestKind = (*v1.GroupVersionKind)(unsafe.Pointer(in.RequestKind))
out.RequestResource = (*v1.GroupVersionResource)(unsafe.Pointer(in.RequestResource))
out.RequestSubResource = in.RequestSubResource
out.Name = in.Name
out.Namespace = in.Namespace
out.Operation = admission.Operation(in.Operation)
if err := authenticationv1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.DryRun = (*bool)(unsafe.Pointer(in.DryRun))
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Options, &out.Options, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest is an autogenerated conversion function.
func Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *admissionv1beta1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error {
return autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in, out, s)
}
func autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admission.AdmissionRequest, out *admissionv1beta1.AdmissionRequest, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Kind = in.Kind
out.Resource = in.Resource
out.SubResource = in.SubResource
out.RequestKind = (*v1.GroupVersionKind)(unsafe.Pointer(in.RequestKind))
out.RequestResource = (*v1.GroupVersionResource)(unsafe.Pointer(in.RequestResource))
out.RequestSubResource = in.RequestSubResource
out.Name = in.Name
out.Namespace = in.Namespace
out.Operation = admissionv1beta1.Operation(in.Operation)
if err := authenticationv1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.DryRun = (*bool)(unsafe.Pointer(in.DryRun))
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Options, &out.Options, s); err != nil {
return err
}
return nil
}
// Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest is an autogenerated conversion function.
func Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admission.AdmissionRequest, out *admissionv1beta1.AdmissionRequest, s conversion.Scope) error {
return autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in, out, s)
}
func autoConvert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in *admissionv1beta1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Allowed = in.Allowed
out.Result = (*v1.Status)(unsafe.Pointer(in.Result))
out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch))
out.PatchType = (*admission.PatchType)(unsafe.Pointer(in.PatchType))
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
out.Warnings = *(*[]string)(unsafe.Pointer(&in.Warnings))
return nil
}
// Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse is an autogenerated conversion function.
func Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in *admissionv1beta1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error {
return autoConvert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in, out, s)
}
func autoConvert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in *admission.AdmissionResponse, out *admissionv1beta1.AdmissionResponse, s conversion.Scope) error {
out.UID = types.UID(in.UID)
out.Allowed = in.Allowed
out.Result = (*v1.Status)(unsafe.Pointer(in.Result))
out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch))
out.PatchType = (*admissionv1beta1.PatchType)(unsafe.Pointer(in.PatchType))
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
out.Warnings = *(*[]string)(unsafe.Pointer(&in.Warnings))
return nil
}
// Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse is an autogenerated conversion function.
func Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in *admission.AdmissionResponse, out *admissionv1beta1.AdmissionResponse, s conversion.Scope) error {
return autoConvert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in, out, s)
}
func autoConvert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in *admissionv1beta1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(admission.AdmissionRequest)
if err := Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(*in, *out, s); err != nil {
return err
}
} else {
out.Request = nil
}
out.Response = (*admission.AdmissionResponse)(unsafe.Pointer(in.Response))
return nil
}
// Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview is an autogenerated conversion function.
func Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in *admissionv1beta1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
return autoConvert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in, out, s)
}
func autoConvert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in *admission.AdmissionReview, out *admissionv1beta1.AdmissionReview, s conversion.Scope) error {
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(admissionv1beta1.AdmissionRequest)
if err := Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(*in, *out, s); err != nil {
return err
}
} else {
out.Request = nil
}
out.Response = (*admissionv1beta1.AdmissionResponse)(unsafe.Pointer(in.Response))
return nil
}
// Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview is an autogenerated conversion function.
func Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in *admission.AdmissionReview, out *admissionv1beta1.AdmissionReview, s conversion.Scope) error {
return autoConvert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in, out, s)
}
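// Example (illustrative sketch, not part of the generated file): callers
// typically decode a versioned review and use the exported wrapper above to
// obtain the internal type. The helper name toInternalReview is hypothetical;
// passing a nil conversion.Scope is common when calling these generated
// functions directly.
func toInternalReview(in *admissionv1beta1.AdmissionReview) (*admission.AdmissionReview, error) {
out := &admission.AdmissionReview{}
if err := Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in, out, nil); err != nil {
return nil, err
}
return out, nil
}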
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package admission
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) {
*out = *in
out.Kind = in.Kind
out.Resource = in.Resource
if in.RequestKind != nil {
in, out := &in.RequestKind, &out.RequestKind
*out = new(v1.GroupVersionKind)
**out = **in
}
if in.RequestResource != nil {
in, out := &in.RequestResource, &out.RequestResource
*out = new(v1.GroupVersionResource)
**out = **in
}
in.UserInfo.DeepCopyInto(&out.UserInfo)
if in.Object != nil {
out.Object = in.Object.DeepCopyObject()
}
if in.OldObject != nil {
out.OldObject = in.OldObject.DeepCopyObject()
}
if in.DryRun != nil {
in, out := &in.DryRun, &out.DryRun
*out = new(bool)
**out = **in
}
if in.Options != nil {
out.Options = in.Options.DeepCopyObject()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest.
func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {
if in == nil {
return nil
}
out := new(AdmissionRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) {
*out = *in
if in.Result != nil {
in, out := &in.Result, &out.Result
*out = new(v1.Status)
(*in).DeepCopyInto(*out)
}
if in.Patch != nil {
in, out := &in.Patch, &out.Patch
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.PatchType != nil {
in, out := &in.PatchType, &out.PatchType
*out = new(PatchType)
**out = **in
}
if in.AuditAnnotations != nil {
in, out := &in.AuditAnnotations, &out.AuditAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Warnings != nil {
in, out := &in.Warnings, &out.Warnings
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse.
func (in *AdmissionResponse) DeepCopy() *AdmissionResponse {
if in == nil {
return nil
}
out := new(AdmissionResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(AdmissionRequest)
(*in).DeepCopyInto(*out)
}
if in.Response != nil {
in, out := &in.Response, &out.Response
*out = new(AdmissionResponse)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview.
func (in *AdmissionReview) DeepCopy() *AdmissionReview {
if in == nil {
return nil
}
out := new(AdmissionReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AdmissionReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
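// Example (illustrative, not part of the generated file): DeepCopy produces a
// fully independent object, so mutating the copy never aliases the original.
func exampleDeepCopy(in *AdmissionReview) {
clone := in.DeepCopy()
if clone.Request != nil {
clone.Request.Name = "mutated-copy" // in.Request.Name is unchanged
}
}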
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/admissionregistration"
)
// Funcs returns the fuzzer functions for the admissionregistration API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *admissionregistration.Rule, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.Scope == nil {
s := admissionregistration.AllScopes
obj.Scope = &s
}
},
func(obj *admissionregistration.ValidatingWebhook, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.FailurePolicy == nil {
p := admissionregistration.FailurePolicyType("Fail")
obj.FailurePolicy = &p
}
if obj.MatchPolicy == nil {
m := admissionregistration.MatchPolicyType("Exact")
obj.MatchPolicy = &m
}
if obj.SideEffects == nil {
s := admissionregistration.SideEffectClassUnknown
obj.SideEffects = &s
}
if obj.TimeoutSeconds == nil {
i := int32(30)
obj.TimeoutSeconds = &i
}
obj.AdmissionReviewVersions = []string{"v1beta1"}
},
func(obj *admissionregistration.MutatingWebhook, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.FailurePolicy == nil {
p := admissionregistration.FailurePolicyType("Fail")
obj.FailurePolicy = &p
}
if obj.MatchPolicy == nil {
m := admissionregistration.MatchPolicyType("Exact")
obj.MatchPolicy = &m
}
if obj.SideEffects == nil {
s := admissionregistration.SideEffectClassUnknown
obj.SideEffects = &s
}
if obj.ReinvocationPolicy == nil {
r := admissionregistration.NeverReinvocationPolicy
obj.ReinvocationPolicy = &r
}
if obj.TimeoutSeconds == nil {
i := int32(30)
obj.TimeoutSeconds = &i
}
obj.AdmissionReviewVersions = []string{"v1beta1"}
},
func(obj *admissionregistration.ValidatingAdmissionPolicySpec, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.FailurePolicy == nil {
p := admissionregistration.FailurePolicyType("Fail")
obj.FailurePolicy = &p
}
},
func(obj *admissionregistration.ValidatingAdmissionPolicyBindingSpec, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.ValidationActions == nil {
obj.ValidationActions = []admissionregistration.ValidationAction{admissionregistration.Deny}
}
},
func(obj *admissionregistration.MatchResources, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.MatchPolicy == nil {
m := admissionregistration.MatchPolicyType("Exact")
obj.MatchPolicy = &m
}
},
func(obj *admissionregistration.ParamRef, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
// Populate required field
if obj.ParameterNotFoundAction == nil {
v := admissionregistration.DenyAction
obj.ParameterNotFoundAction = &v
}
},
func(obj *admissionregistration.MutatingAdmissionPolicySpec, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.FailurePolicy == nil {
p := admissionregistration.FailurePolicyType("Fail")
obj.FailurePolicy = &p
}
obj.ReinvocationPolicy = admissionregistration.NeverReinvocationPolicy
},
func(obj *admissionregistration.Mutation, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
patchTypes := []admissionregistration.PatchType{admissionregistration.PatchTypeJSONPatch, admissionregistration.PatchTypeApplyConfiguration}
obj.PatchType = patchTypes[c.Rand.Intn(len(patchTypes))]
if obj.PatchType == admissionregistration.PatchTypeJSONPatch {
obj.JSONPatch = &admissionregistration.JSONPatch{}
c.Fill(obj.JSONPatch) // fill the freshly allocated value so the pointer stays non-nil
obj.ApplyConfiguration = nil
} else if obj.PatchType == admissionregistration.PatchTypeApplyConfiguration {
obj.ApplyConfiguration = &admissionregistration.ApplyConfiguration{}
c.Fill(obj.ApplyConfiguration)
obj.JSONPatch = nil
}
},
}
}
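// Example (illustrative sketch): these funcs are normally fed to the generic
// round-trip test, which fuzzes internal objects and checks that encoding to
// each external version and back is lossless. The wiring below assumes the
// apitesting helpers in k8s.io/apimachinery/pkg/api/apitesting/roundtrip and
// this group's install package; the test would live in a *_test.go file.
//
//	func TestRoundTripTypes(t *testing.T) {
//		roundtrip.RoundTripTestForAPIGroup(t, install.Install, fuzzer.Funcs)
//	}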
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/admissionregistration"
v1 "k8s.io/kubernetes/pkg/apis/admissionregistration/v1"
"k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1"
"k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(admissionregistration.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
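// Example (illustrative, not part of this file): Install can also target a
// fresh scheme instead of the global legacyscheme, e.g. in tests.
func exampleScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
Install(scheme)
return scheme
}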
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admissionregistration
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the name used for this API group
const GroupName = "admissionregistration.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a group-qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&ValidatingWebhookConfiguration{},
&ValidatingWebhookConfigurationList{},
&MutatingWebhookConfiguration{},
&MutatingWebhookConfigurationList{},
&ValidatingAdmissionPolicy{},
&ValidatingAdmissionPolicyList{},
&ValidatingAdmissionPolicyBinding{},
&ValidatingAdmissionPolicyBindingList{},
&MutatingAdmissionPolicy{},
&MutatingAdmissionPolicyList{},
&MutatingAdmissionPolicyBinding{},
&MutatingAdmissionPolicyBindingList{},
)
return nil
}
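// Example (illustrative): the Kind and Resource helpers above qualify bare
// names with this group, which is how callers build group-qualified API
// errors and REST mappings.
//
//	gk := Kind("ValidatingWebhookConfiguration")      // Group: admissionregistration.k8s.io, Kind: ValidatingWebhookConfiguration
//	gr := Resource("validatingwebhookconfigurations") // Group: admissionregistration.k8s.io, Resource: validatingwebhookconfigurations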
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/admissionregistration/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration"
)
// Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations delegates to the autogenerated conversion function.
// It is hand-exported here because it is referenced from other API groups, but is invisible at code-generation time because of the build tags.
func Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1.RuleWithOperations, s conversion.Scope) error {
return autoConvert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in, out, s)
}
// Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations delegates to the autogenerated conversion function.
// It is hand-exported here because it is referenced from other API groups, but is invisible at code-generation time because of the build tags.
func Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error {
return autoConvert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in, out, s)
}
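// Example (illustrative sketch): another API group can invoke the exported
// wrapper directly, without going through a scheme. The helper name
// exampleRuleToV1 is hypothetical; a nil conversion.Scope is acceptable here.
func exampleRuleToV1(in *admissionregistration.RuleWithOperations) (v1.RuleWithOperations, error) {
var out v1.RuleWithOperations
err := Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in, &out, nil)
return out, err
}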
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_ValidatingWebhook sets defaults for a validating webhook
func SetDefaults_ValidatingWebhook(obj *admissionregistrationv1.ValidatingWebhook) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1.Fail
obj.FailurePolicy = &policy
}
if obj.MatchPolicy == nil {
policy := admissionregistrationv1.Equivalent
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
if obj.TimeoutSeconds == nil {
obj.TimeoutSeconds = new(int32)
*obj.TimeoutSeconds = 10
}
}
// SetDefaults_MutatingWebhook sets defaults for a mutating webhook
func SetDefaults_MutatingWebhook(obj *admissionregistrationv1.MutatingWebhook) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1.Fail
obj.FailurePolicy = &policy
}
if obj.MatchPolicy == nil {
policy := admissionregistrationv1.Equivalent
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
if obj.TimeoutSeconds == nil {
obj.TimeoutSeconds = new(int32)
*obj.TimeoutSeconds = 10
}
if obj.ReinvocationPolicy == nil {
never := admissionregistrationv1.NeverReinvocationPolicy
obj.ReinvocationPolicy = &never
}
}
// SetDefaults_Rule sets defaults for a webhook rule
func SetDefaults_Rule(obj *admissionregistrationv1.Rule) {
if obj.Scope == nil {
s := admissionregistrationv1.AllScopes
obj.Scope = &s
}
}
// SetDefaults_ServiceReference sets defaults for a webhook's ServiceReference
func SetDefaults_ServiceReference(obj *admissionregistrationv1.ServiceReference) {
if obj.Port == nil {
obj.Port = ptr.To[int32](443)
}
}
// SetDefaults_ValidatingAdmissionPolicySpec sets defaults for ValidatingAdmissionPolicySpec
func SetDefaults_ValidatingAdmissionPolicySpec(obj *admissionregistrationv1.ValidatingAdmissionPolicySpec) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1.Fail
obj.FailurePolicy = &policy
}
}
// SetDefaults_MatchResources sets defaults for MatchResources
func SetDefaults_MatchResources(obj *admissionregistrationv1.MatchResources) {
if obj.MatchPolicy == nil {
policy := admissionregistrationv1.Equivalent
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
}
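// Example (illustrative, not part of this file): each SetDefaults_* function
// above only fills fields that are nil, so explicitly set values survive
// defaulting.
func exampleDefaulting() {
w := &admissionregistrationv1.ValidatingWebhook{}
SetDefaults_ValidatingWebhook(w)
// Now *w.FailurePolicy == Fail, *w.MatchPolicy == Equivalent,
// *w.TimeoutSeconds == 10, and both selectors are empty (match-all).
}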
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "admissionregistration.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &admissionregistrationv1.SchemeBuilder
// AddToScheme is a handler that adds items to the scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.AuditAnnotation)(nil), (*admissionregistration.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AuditAnnotation_To_admissionregistration_AuditAnnotation(a.(*admissionregistrationv1.AuditAnnotation), b.(*admissionregistration.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.AuditAnnotation)(nil), (*admissionregistrationv1.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_AuditAnnotation_To_v1_AuditAnnotation(a.(*admissionregistration.AuditAnnotation), b.(*admissionregistrationv1.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ExpressionWarning)(nil), (*admissionregistration.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExpressionWarning_To_admissionregistration_ExpressionWarning(a.(*admissionregistrationv1.ExpressionWarning), b.(*admissionregistration.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ExpressionWarning)(nil), (*admissionregistrationv1.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ExpressionWarning_To_v1_ExpressionWarning(a.(*admissionregistration.ExpressionWarning), b.(*admissionregistrationv1.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.MatchCondition)(nil), (*admissionregistration.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MatchCondition_To_admissionregistration_MatchCondition(a.(*admissionregistrationv1.MatchCondition), b.(*admissionregistration.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchCondition)(nil), (*admissionregistrationv1.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchCondition_To_v1_MatchCondition(a.(*admissionregistration.MatchCondition), b.(*admissionregistrationv1.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.MatchResources)(nil), (*admissionregistration.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MatchResources_To_admissionregistration_MatchResources(a.(*admissionregistrationv1.MatchResources), b.(*admissionregistration.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchResources)(nil), (*admissionregistrationv1.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchResources_To_v1_MatchResources(a.(*admissionregistration.MatchResources), b.(*admissionregistrationv1.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.MutatingWebhook)(nil), (*admissionregistration.MutatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook(a.(*admissionregistrationv1.MutatingWebhook), b.(*admissionregistration.MutatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhook)(nil), (*admissionregistrationv1.MutatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook(a.(*admissionregistration.MutatingWebhook), b.(*admissionregistrationv1.MutatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.MutatingWebhookConfiguration)(nil), (*admissionregistration.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(a.(*admissionregistrationv1.MutatingWebhookConfiguration), b.(*admissionregistration.MutatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfiguration)(nil), (*admissionregistrationv1.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration(a.(*admissionregistration.MutatingWebhookConfiguration), b.(*admissionregistrationv1.MutatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.MutatingWebhookConfigurationList)(nil), (*admissionregistration.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(a.(*admissionregistrationv1.MutatingWebhookConfigurationList), b.(*admissionregistration.MutatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfigurationList)(nil), (*admissionregistrationv1.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1_MutatingWebhookConfigurationList(a.(*admissionregistration.MutatingWebhookConfigurationList), b.(*admissionregistrationv1.MutatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.NamedRuleWithOperations)(nil), (*admissionregistration.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(a.(*admissionregistrationv1.NamedRuleWithOperations), b.(*admissionregistration.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.NamedRuleWithOperations)(nil), (*admissionregistrationv1.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(a.(*admissionregistration.NamedRuleWithOperations), b.(*admissionregistrationv1.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ParamKind)(nil), (*admissionregistration.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ParamKind_To_admissionregistration_ParamKind(a.(*admissionregistrationv1.ParamKind), b.(*admissionregistration.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamKind)(nil), (*admissionregistrationv1.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamKind_To_v1_ParamKind(a.(*admissionregistration.ParamKind), b.(*admissionregistrationv1.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ParamRef)(nil), (*admissionregistration.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ParamRef_To_admissionregistration_ParamRef(a.(*admissionregistrationv1.ParamRef), b.(*admissionregistration.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamRef)(nil), (*admissionregistrationv1.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamRef_To_v1_ParamRef(a.(*admissionregistration.ParamRef), b.(*admissionregistrationv1.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.Rule)(nil), (*admissionregistration.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Rule_To_admissionregistration_Rule(a.(*admissionregistrationv1.Rule), b.(*admissionregistration.Rule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Rule)(nil), (*admissionregistrationv1.Rule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Rule_To_v1_Rule(a.(*admissionregistration.Rule), b.(*admissionregistrationv1.Rule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ServiceReference)(nil), (*admissionregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceReference_To_admissionregistration_ServiceReference(a.(*admissionregistrationv1.ServiceReference), b.(*admissionregistration.ServiceReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ServiceReference)(nil), (*admissionregistrationv1.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ServiceReference_To_v1_ServiceReference(a.(*admissionregistration.ServiceReference), b.(*admissionregistrationv1.ServiceReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.TypeChecking)(nil), (*admissionregistration.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TypeChecking_To_admissionregistration_TypeChecking(a.(*admissionregistrationv1.TypeChecking), b.(*admissionregistration.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.TypeChecking)(nil), (*admissionregistrationv1.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_TypeChecking_To_v1_TypeChecking(a.(*admissionregistration.TypeChecking), b.(*admissionregistrationv1.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicy)(nil), (*admissionregistration.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(a.(*admissionregistrationv1.ValidatingAdmissionPolicy), b.(*admissionregistration.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicy)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy(a.(*admissionregistration.ValidatingAdmissionPolicy), b.(*admissionregistrationv1.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(a.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding), b.(*admissionregistration.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding(a.(*admissionregistration.ValidatingAdmissionPolicyBinding), b.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(a.(*admissionregistrationv1.ValidatingAdmissionPolicyBindingList), b.(*admissionregistration.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1_ValidatingAdmissionPolicyBindingList(a.(*admissionregistration.ValidatingAdmissionPolicyBindingList), b.(*admissionregistrationv1.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicyList)(nil), (*admissionregistration.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(a.(*admissionregistrationv1.ValidatingAdmissionPolicyList), b.(*admissionregistration.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyList)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1_ValidatingAdmissionPolicyList(a.(*admissionregistration.ValidatingAdmissionPolicyList), b.(*admissionregistrationv1.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicySpec)(nil), (*admissionregistration.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(a.(*admissionregistrationv1.ValidatingAdmissionPolicySpec), b.(*admissionregistration.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicySpec)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec(a.(*admissionregistration.ValidatingAdmissionPolicySpec), b.(*admissionregistrationv1.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(a.(*admissionregistrationv1.ValidatingAdmissionPolicyStatus), b.(*admissionregistration.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistrationv1.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus(a.(*admissionregistration.ValidatingAdmissionPolicyStatus), b.(*admissionregistrationv1.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingWebhook)(nil), (*admissionregistration.ValidatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(a.(*admissionregistrationv1.ValidatingWebhook), b.(*admissionregistration.ValidatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhook)(nil), (*admissionregistrationv1.ValidatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook(a.(*admissionregistration.ValidatingWebhook), b.(*admissionregistrationv1.ValidatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingWebhookConfiguration)(nil), (*admissionregistration.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(a.(*admissionregistrationv1.ValidatingWebhookConfiguration), b.(*admissionregistration.ValidatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfiguration)(nil), (*admissionregistrationv1.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration(a.(*admissionregistration.ValidatingWebhookConfiguration), b.(*admissionregistrationv1.ValidatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.ValidatingWebhookConfigurationList)(nil), (*admissionregistration.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(a.(*admissionregistrationv1.ValidatingWebhookConfigurationList), b.(*admissionregistration.ValidatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfigurationList)(nil), (*admissionregistrationv1.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1_ValidatingWebhookConfigurationList(a.(*admissionregistration.ValidatingWebhookConfigurationList), b.(*admissionregistrationv1.ValidatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.Validation)(nil), (*admissionregistration.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Validation_To_admissionregistration_Validation(a.(*admissionregistrationv1.Validation), b.(*admissionregistration.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Validation)(nil), (*admissionregistrationv1.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Validation_To_v1_Validation(a.(*admissionregistration.Validation), b.(*admissionregistrationv1.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.Variable)(nil), (*admissionregistration.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Variable_To_admissionregistration_Variable(a.(*admissionregistrationv1.Variable), b.(*admissionregistration.Variable), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Variable)(nil), (*admissionregistrationv1.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Variable_To_v1_Variable(a.(*admissionregistration.Variable), b.(*admissionregistrationv1.Variable), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1.WebhookClientConfig)(nil), (*admissionregistration.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(a.(*admissionregistrationv1.WebhookClientConfig), b.(*admissionregistration.WebhookClientConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.WebhookClientConfig)(nil), (*admissionregistrationv1.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(a.(*admissionregistration.WebhookClientConfig), b.(*admissionregistrationv1.WebhookClientConfig), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*admissionregistration.RuleWithOperations)(nil), (*admissionregistrationv1.RuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(a.(*admissionregistration.RuleWithOperations), b.(*admissionregistrationv1.RuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*admissionregistrationv1.RuleWithOperations)(nil), (*admissionregistration.RuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(a.(*admissionregistrationv1.RuleWithOperations), b.(*admissionregistration.RuleWithOperations), scope)
}); err != nil {
return err
}
return nil
}
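// Example (illustrative sketch, not part of the generated file): once the
// conversions above are registered (via localSchemeBuilder in init), callers
// convert through the scheme rather than invoking the generated functions
// directly. The helper name exampleSchemeConvert is hypothetical.
func exampleSchemeConvert(scheme *runtime.Scheme, in *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) {
out := &admissionregistration.ValidatingWebhookConfiguration{}
if err := scheme.Convert(in, out, nil); err != nil {
return nil, err
}
return out, nil
}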
func autoConvert_v1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_v1_AuditAnnotation_To_admissionregistration_AuditAnnotation is an autogenerated conversion function.
func Convert_v1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
return autoConvert_v1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in, out, s)
}
func autoConvert_admissionregistration_AuditAnnotation_To_v1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_admissionregistration_AuditAnnotation_To_v1_AuditAnnotation is an autogenerated conversion function.
func Convert_admissionregistration_AuditAnnotation_To_v1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1.AuditAnnotation, s conversion.Scope) error {
return autoConvert_admissionregistration_AuditAnnotation_To_v1_AuditAnnotation(in, out, s)
}
func autoConvert_v1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_v1_ExpressionWarning_To_admissionregistration_ExpressionWarning is an autogenerated conversion function.
func Convert_v1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
return autoConvert_v1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in, out, s)
}
func autoConvert_admissionregistration_ExpressionWarning_To_v1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_admissionregistration_ExpressionWarning_To_v1_ExpressionWarning is an autogenerated conversion function.
func Convert_admissionregistration_ExpressionWarning_To_v1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1.ExpressionWarning, s conversion.Scope) error {
return autoConvert_admissionregistration_ExpressionWarning_To_v1_ExpressionWarning(in, out, s)
}
func autoConvert_v1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1_MatchCondition_To_admissionregistration_MatchCondition is an autogenerated conversion function.
func Convert_v1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
return autoConvert_v1_MatchCondition_To_admissionregistration_MatchCondition(in, out, s)
}
func autoConvert_admissionregistration_MatchCondition_To_v1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_MatchCondition_To_v1_MatchCondition is an autogenerated conversion function.
func Convert_admissionregistration_MatchCondition_To_v1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1.MatchCondition, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchCondition_To_v1_MatchCondition(in, out, s)
}
func autoConvert_v1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_v1_MatchResources_To_admissionregistration_MatchResources is an autogenerated conversion function.
func Convert_v1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
return autoConvert_v1_MatchResources_To_admissionregistration_MatchResources(in, out, s)
}
func autoConvert_admissionregistration_MatchResources_To_v1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistrationv1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistrationv1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistrationv1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_admissionregistration_MatchResources_To_v1_MatchResources is an autogenerated conversion function.
func Convert_admissionregistration_MatchResources_To_v1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1.MatchResources, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchResources_To_v1_MatchResources(in, out, s)
}
func autoConvert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in *admissionregistrationv1.MutatingWebhook, out *admissionregistration.MutatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistration.RuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.ReinvocationPolicy = (*admissionregistration.ReinvocationPolicyType)(unsafe.Pointer(in.ReinvocationPolicy))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook is an autogenerated conversion function.
func Convert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in *admissionregistrationv1.MutatingWebhook, out *admissionregistration.MutatingWebhook, s conversion.Scope) error {
return autoConvert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook(in *admissionregistration.MutatingWebhook, out *admissionregistrationv1.MutatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistrationv1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistrationv1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistrationv1.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.ReinvocationPolicy = (*admissionregistrationv1.ReinvocationPolicyType)(unsafe.Pointer(in.ReinvocationPolicy))
out.MatchConditions = *(*[]admissionregistrationv1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook(in *admissionregistration.MutatingWebhook, out *admissionregistrationv1.MutatingWebhook, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook(in, out, s)
}
func autoConvert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *admissionregistrationv1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistration.MutatingWebhook, len(*in))
for i := range *in {
if err := Convert_v1_MutatingWebhook_To_admissionregistration_MutatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration is an autogenerated conversion function.
func Convert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *admissionregistrationv1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *admissionregistrationv1.MutatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistrationv1.MutatingWebhook, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingWebhook_To_v1_MutatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *admissionregistrationv1.MutatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration(in, out, s)
}
func autoConvert_v1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *admissionregistrationv1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_v1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_v1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *admissionregistrationv1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_v1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *admissionregistrationv1.MutatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1.MutatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingWebhookConfiguration_To_v1_MutatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1_MutatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *admissionregistrationv1.MutatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1_MutatingWebhookConfigurationList(in, out, s)
}
func autoConvert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_v1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in, out, s)
}
func autoConvert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_admissionregistration_NamedRuleWithOperations_To_v1_NamedRuleWithOperations(in, out, s)
}
func autoConvert_v1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_v1_ParamKind_To_admissionregistration_ParamKind is an autogenerated conversion function.
func Convert_v1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
return autoConvert_v1_ParamKind_To_admissionregistration_ParamKind(in, out, s)
}
func autoConvert_admissionregistration_ParamKind_To_v1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_admissionregistration_ParamKind_To_v1_ParamKind is an autogenerated conversion function.
func Convert_admissionregistration_ParamKind_To_v1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1.ParamKind, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamKind_To_v1_ParamKind(in, out, s)
}
func autoConvert_v1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistration.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_v1_ParamRef_To_admissionregistration_ParamRef is an autogenerated conversion function.
func Convert_v1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
return autoConvert_v1_ParamRef_To_admissionregistration_ParamRef(in, out, s)
}
func autoConvert_admissionregistration_ParamRef_To_v1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistrationv1.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_admissionregistration_ParamRef_To_v1_ParamRef is an autogenerated conversion function.
func Convert_admissionregistration_ParamRef_To_v1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1.ParamRef, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamRef_To_v1_ParamRef(in, out, s)
}
func autoConvert_v1_Rule_To_admissionregistration_Rule(in *admissionregistrationv1.Rule, out *admissionregistration.Rule, s conversion.Scope) error {
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.Scope = (*admissionregistration.ScopeType)(unsafe.Pointer(in.Scope))
return nil
}
// Convert_v1_Rule_To_admissionregistration_Rule is an autogenerated conversion function.
func Convert_v1_Rule_To_admissionregistration_Rule(in *admissionregistrationv1.Rule, out *admissionregistration.Rule, s conversion.Scope) error {
return autoConvert_v1_Rule_To_admissionregistration_Rule(in, out, s)
}
func autoConvert_admissionregistration_Rule_To_v1_Rule(in *admissionregistration.Rule, out *admissionregistrationv1.Rule, s conversion.Scope) error {
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.Scope = (*admissionregistrationv1.ScopeType)(unsafe.Pointer(in.Scope))
return nil
}
// Convert_admissionregistration_Rule_To_v1_Rule is an autogenerated conversion function.
func Convert_admissionregistration_Rule_To_v1_Rule(in *admissionregistration.Rule, out *admissionregistrationv1.Rule, s conversion.Scope) error {
return autoConvert_admissionregistration_Rule_To_v1_Rule(in, out, s)
}
func autoConvert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *admissionregistrationv1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error {
out.Operations = *(*[]admissionregistration.OperationType)(unsafe.Pointer(&in.Operations))
if err := Convert_v1_Rule_To_admissionregistration_Rule(&in.Rule, &out.Rule, s); err != nil {
return err
}
return nil
}
// Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations is an autogenerated conversion function.
func Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *admissionregistrationv1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error {
return autoConvert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in, out, s)
}
func autoConvert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *admissionregistrationv1.RuleWithOperations, s conversion.Scope) error {
out.Operations = *(*[]admissionregistrationv1.OperationType)(unsafe.Pointer(&in.Operations))
if err := Convert_admissionregistration_Rule_To_v1_Rule(&in.Rule, &out.Rule, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations is an autogenerated conversion function.
func Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *admissionregistrationv1.RuleWithOperations, s conversion.Scope) error {
return autoConvert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(in, out, s)
}
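// Note: every conversion in this file follows the same two-layer pattern.
// The autoConvert_* functions carry the generated field-by-field logic, and
// the public Convert_* wrappers are the entry points registered with the
// scheme. For types that need hand-written conversion logic, conversion-gen
// omits the wrapper so a manual Convert_* implementation can take its place
// while still delegating the mechanical fields to autoConvert_*.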
func autoConvert_v1_ServiceReference_To_admissionregistration_ServiceReference(in *admissionregistrationv1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.Path = (*string)(unsafe.Pointer(in.Path))
if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_v1_ServiceReference_To_admissionregistration_ServiceReference is an autogenerated conversion function.
func Convert_v1_ServiceReference_To_admissionregistration_ServiceReference(in *admissionregistrationv1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error {
return autoConvert_v1_ServiceReference_To_admissionregistration_ServiceReference(in, out, s)
}
func autoConvert_admissionregistration_ServiceReference_To_v1_ServiceReference(in *admissionregistration.ServiceReference, out *admissionregistrationv1.ServiceReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.Path = (*string)(unsafe.Pointer(in.Path))
if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function.
func Convert_admissionregistration_ServiceReference_To_v1_ServiceReference(in *admissionregistration.ServiceReference, out *admissionregistrationv1.ServiceReference, s conversion.Scope) error {
return autoConvert_admissionregistration_ServiceReference_To_v1_ServiceReference(in, out, s)
}
func autoConvert_v1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistration.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_v1_TypeChecking_To_admissionregistration_TypeChecking is an autogenerated conversion function.
func Convert_v1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
return autoConvert_v1_TypeChecking_To_admissionregistration_TypeChecking(in, out, s)
}
func autoConvert_admissionregistration_TypeChecking_To_v1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistrationv1.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_admissionregistration_TypeChecking_To_v1_TypeChecking is an autogenerated conversion function.
func Convert_admissionregistration_TypeChecking_To_v1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1.TypeChecking, s conversion.Scope) error {
return autoConvert_admissionregistration_TypeChecking_To_v1_TypeChecking(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_v1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistration.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistration.MatchResources)
if err := Convert_v1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistration.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistrationv1.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistrationv1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistrationv1.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_v1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistration.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistration.MatchResources)
if err := Convert_v1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistration.Validation)(unsafe.Pointer(&in.Validations))
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistration.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.Variables = *(*[]admissionregistration.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistrationv1.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistrationv1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistrationv1.Validation)(unsafe.Pointer(&in.Validations))
out.FailurePolicy = (*admissionregistrationv1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistrationv1.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.MatchConditions = *(*[]admissionregistrationv1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.Variables = *(*[]admissionregistrationv1.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistration.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_v1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistrationv1.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1_ValidatingAdmissionPolicyStatus(in, out, s)
}
func autoConvert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in *admissionregistrationv1.ValidatingWebhook, out *admissionregistration.ValidatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistration.RuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook is an autogenerated conversion function.
func Convert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in *admissionregistrationv1.ValidatingWebhook, out *admissionregistration.ValidatingWebhook, s conversion.Scope) error {
return autoConvert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook(in *admissionregistration.ValidatingWebhook, out *admissionregistrationv1.ValidatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistrationv1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistrationv1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistrationv1.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.MatchConditions = *(*[]admissionregistrationv1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook(in *admissionregistration.ValidatingWebhook, out *admissionregistrationv1.ValidatingWebhook, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook(in, out, s)
}
func autoConvert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *admissionregistrationv1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistration.ValidatingWebhook, len(*in))
for i := range *in {
if err := Convert_v1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration is an autogenerated conversion function.
func Convert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *admissionregistrationv1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *admissionregistrationv1.ValidatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistrationv1.ValidatingWebhook, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingWebhook_To_v1_ValidatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *admissionregistrationv1.ValidatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration(in, out, s)
}
func autoConvert_v1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *admissionregistrationv1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_v1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_v1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *admissionregistrationv1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_v1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *admissionregistrationv1.ValidatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1.ValidatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1_ValidatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1_ValidatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *admissionregistrationv1.ValidatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1_ValidatingWebhookConfigurationList(in, out, s)
}
func autoConvert_v1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*metav1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_v1_Validation_To_admissionregistration_Validation is an autogenerated conversion function.
func Convert_v1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
return autoConvert_v1_Validation_To_admissionregistration_Validation(in, out, s)
}
func autoConvert_admissionregistration_Validation_To_v1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*metav1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_admissionregistration_Validation_To_v1_Validation is an autogenerated conversion function.
func Convert_admissionregistration_Validation_To_v1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1.Validation, s conversion.Scope) error {
return autoConvert_admissionregistration_Validation_To_v1_Validation(in, out, s)
}
func autoConvert_v1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1_Variable_To_admissionregistration_Variable is an autogenerated conversion function.
func Convert_v1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
return autoConvert_v1_Variable_To_admissionregistration_Variable(in, out, s)
}
func autoConvert_admissionregistration_Variable_To_v1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_Variable_To_v1_Variable is an autogenerated conversion function.
func Convert_admissionregistration_Variable_To_v1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1.Variable, s conversion.Scope) error {
return autoConvert_admissionregistration_Variable_To_v1_Variable(in, out, s)
}
func autoConvert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *admissionregistrationv1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error {
out.URL = (*string)(unsafe.Pointer(in.URL))
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(admissionregistration.ServiceReference)
if err := Convert_v1_ServiceReference_To_admissionregistration_ServiceReference(*in, *out, s); err != nil {
return err
}
} else {
out.Service = nil
}
out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
return nil
}
// Convert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig is an autogenerated conversion function.
func Convert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *admissionregistrationv1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error {
return autoConvert_v1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in, out, s)
}
func autoConvert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *admissionregistrationv1.WebhookClientConfig, s conversion.Scope) error {
out.URL = (*string)(unsafe.Pointer(in.URL))
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(admissionregistrationv1.ServiceReference)
if err := Convert_admissionregistration_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil {
return err
}
} else {
out.Service = nil
}
out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
return nil
}
// Convert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig is an autogenerated conversion function.
func Convert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *admissionregistrationv1.WebhookClientConfig, s conversion.Scope) error {
return autoConvert_admissionregistration_WebhookClientConfig_To_v1_WebhookClientConfig(in, out, s)
}
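// Illustrative sketch (not generated code): the conversions above cast
// pointers and slices straight through unsafe.Pointer instead of copying
// field by field. conversion-gen emits these casts only when the source and
// destination types have identical memory layouts; layout-divergent fields
// (for example ServiceReference, whose Port is *int32 in v1 but int32
// internally) go through an explicit per-field conversion instead. The
// hypothetical helper below shows the same zero-copy pattern in miniature.
func exampleUnsafeSliceCast(in []admissionregistration.OperationType) []admissionregistrationv1.OperationType {
// Both OperationType variants are declared as `type OperationType string`,
// so the slice header can be reinterpreted without copying the backing array.
return *(*[]admissionregistrationv1.OperationType)(unsafe.Pointer(&in))
}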
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.MutatingWebhookConfiguration{}, func(obj interface{}) {
SetObjectDefaults_MutatingWebhookConfiguration(obj.(*admissionregistrationv1.MutatingWebhookConfiguration))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.MutatingWebhookConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_MutatingWebhookConfigurationList(obj.(*admissionregistrationv1.MutatingWebhookConfigurationList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingAdmissionPolicy{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicy(obj.(*admissionregistrationv1.ValidatingAdmissionPolicy))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBinding(obj.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingAdmissionPolicyBindingList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBindingList(obj.(*admissionregistrationv1.ValidatingAdmissionPolicyBindingList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingAdmissionPolicyList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyList(obj.(*admissionregistrationv1.ValidatingAdmissionPolicyList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingWebhookConfiguration{}, func(obj interface{}) {
SetObjectDefaults_ValidatingWebhookConfiguration(obj.(*admissionregistrationv1.ValidatingWebhookConfiguration))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1.ValidatingWebhookConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingWebhookConfigurationList(obj.(*admissionregistrationv1.ValidatingWebhookConfigurationList))
})
return nil
}
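// Illustrative sketch (not generated code), assuming the standard
// apimachinery flow: callers register these defaulters on a scheme once, and
// Scheme.Default then dispatches to the SetObjectDefaults_* function that
// matches the object's concrete type. The helper name is hypothetical.
func exampleApplyDefaults() *admissionregistrationv1.ValidatingWebhookConfiguration {
scheme := runtime.NewScheme()
// Error ignored for brevity; real callers should handle it.
_ = RegisterDefaults(scheme)
cfg := &admissionregistrationv1.ValidatingWebhookConfiguration{
Webhooks: []admissionregistrationv1.ValidatingWebhook{{Name: "example.k8s.io"}},
}
// Invokes SetObjectDefaults_ValidatingWebhookConfiguration, which defaults
// fields such as FailurePolicy and MatchPolicy on each webhook and the
// scope on each rule.
scheme.Default(cfg)
return cfg
}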
func SetObjectDefaults_MutatingWebhookConfiguration(in *admissionregistrationv1.MutatingWebhookConfiguration) {
for i := range in.Webhooks {
a := &in.Webhooks[i]
SetDefaults_MutatingWebhook(a)
if a.ClientConfig.Service != nil {
SetDefaults_ServiceReference(a.ClientConfig.Service)
}
for j := range a.Rules {
b := &a.Rules[j]
SetDefaults_Rule(&b.Rule)
}
}
}
func SetObjectDefaults_MutatingWebhookConfigurationList(in *admissionregistrationv1.MutatingWebhookConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingWebhookConfiguration(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicy(in *admissionregistrationv1.ValidatingAdmissionPolicy) {
SetDefaults_ValidatingAdmissionPolicySpec(&in.Spec)
if in.Spec.MatchConstraints != nil {
SetDefaults_MatchResources(in.Spec.MatchConstraints)
for i := range in.Spec.MatchConstraints.ResourceRules {
a := &in.Spec.MatchConstraints.ResourceRules[i]
SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchConstraints.ExcludeResourceRules {
a := &in.Spec.MatchConstraints.ExcludeResourceRules[i]
SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1.ValidatingAdmissionPolicyBinding) {
if in.Spec.MatchResources != nil {
SetDefaults_MatchResources(in.Spec.MatchResources)
for i := range in.Spec.MatchResources.ResourceRules {
a := &in.Spec.MatchResources.ResourceRules[i]
SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchResources.ExcludeResourceRules {
a := &in.Spec.MatchResources.ExcludeResourceRules[i]
SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1.ValidatingAdmissionPolicyBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicyBinding(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyList(in *admissionregistrationv1.ValidatingAdmissionPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicy(a)
}
}
func SetObjectDefaults_ValidatingWebhookConfiguration(in *admissionregistrationv1.ValidatingWebhookConfiguration) {
for i := range in.Webhooks {
a := &in.Webhooks[i]
SetDefaults_ValidatingWebhook(a)
if a.ClientConfig.Service != nil {
SetDefaults_ServiceReference(a.ClientConfig.Service)
}
for j := range a.Rules {
b := &a.Rules[j]
SetDefaults_Rule(&b.Rule)
}
}
}
func SetObjectDefaults_ValidatingWebhookConfigurationList(in *admissionregistrationv1.ValidatingWebhookConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingWebhookConfiguration(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_ValidatingAdmissionPolicySpec sets defaults for ValidatingAdmissionPolicySpec
func SetDefaults_ValidatingAdmissionPolicySpec(obj *admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1alpha1.Fail
obj.FailurePolicy = &policy
}
}
// SetDefaults_MatchResources sets defaults for MatchResources
func SetDefaults_MatchResources(obj *admissionregistrationv1alpha1.MatchResources) {
if obj.MatchPolicy == nil {
policy := admissionregistrationv1alpha1.Equivalent
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
}
// SetDefaults_ParamRef sets defaults for ParamRef
func SetDefaults_ParamRef(obj *admissionregistrationv1alpha1.ParamRef) {
if obj.ParameterNotFoundAction == nil {
v := admissionregistrationv1alpha1.DenyAction
obj.ParameterNotFoundAction = &v
}
}
// SetDefaults_MutatingAdmissionPolicySpec sets defaults for MutatingAdmissionPolicySpec
func SetDefaults_MutatingAdmissionPolicySpec(obj *admissionregistrationv1alpha1.MutatingAdmissionPolicySpec) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1alpha1.Fail
obj.FailurePolicy = &policy
}
}
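// Illustrative sketch (not part of the original file): what the defaulters
// above do when invoked directly. In normal operation Scheme.Default reaches
// them through the functions registered by RegisterDefaults; the helper name
// is hypothetical.
func exampleSpecDefaults() {
spec := &admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec{}
SetDefaults_ValidatingAdmissionPolicySpec(spec)
// spec.FailurePolicy now points at admissionregistrationv1alpha1.Fail.
match := &admissionregistrationv1alpha1.MatchResources{}
SetDefaults_MatchResources(match)
// match.MatchPolicy is Equivalent, and both selectors are the empty
// LabelSelector, which matches every namespace and object.
}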
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "admissionregistration.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
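// Illustrative sketch (not part of the original file): Resource pairs a bare
// resource name with this package's group, which is how callers build
// GroupResource values for API errors and discovery. The resource name below
// is only an example.
func exampleResource() schema.GroupResource {
// Yields schema.GroupResource{Group: "admissionregistration.k8s.io",
// Resource: "validatingadmissionpolicies"}.
return Resource("validatingadmissionpolicies")
}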
var (
localSchemeBuilder = &admissionregistrationv1alpha1.SchemeBuilder
// AddToScheme is a handler that adds these items to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
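// Illustrative sketch (not part of the original file): a consumer typically
// wires this group into a scheme once at start-up, which installs both the
// generated registrations and the manual defaulting functions registered in
// init. Shown as a comment because this file does not import runtime:
//
// scheme := runtime.NewScheme()
// if err := AddToScheme(scheme); err != nil {
// panic(err)
// }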
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration"
apisadmissionregistrationv1 "k8s.io/kubernetes/pkg/apis/admissionregistration/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ApplyConfiguration)(nil), (*admissionregistration.ApplyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(a.(*admissionregistrationv1alpha1.ApplyConfiguration), b.(*admissionregistration.ApplyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ApplyConfiguration)(nil), (*admissionregistrationv1alpha1.ApplyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ApplyConfiguration_To_v1alpha1_ApplyConfiguration(a.(*admissionregistration.ApplyConfiguration), b.(*admissionregistrationv1alpha1.ApplyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.AuditAnnotation)(nil), (*admissionregistration.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AuditAnnotation_To_admissionregistration_AuditAnnotation(a.(*admissionregistrationv1alpha1.AuditAnnotation), b.(*admissionregistration.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.AuditAnnotation)(nil), (*admissionregistrationv1alpha1.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_AuditAnnotation_To_v1alpha1_AuditAnnotation(a.(*admissionregistration.AuditAnnotation), b.(*admissionregistrationv1alpha1.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ExpressionWarning)(nil), (*admissionregistration.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ExpressionWarning_To_admissionregistration_ExpressionWarning(a.(*admissionregistrationv1alpha1.ExpressionWarning), b.(*admissionregistration.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ExpressionWarning)(nil), (*admissionregistrationv1alpha1.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ExpressionWarning_To_v1alpha1_ExpressionWarning(a.(*admissionregistration.ExpressionWarning), b.(*admissionregistrationv1alpha1.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.JSONPatch)(nil), (*admissionregistration.JSONPatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_JSONPatch_To_admissionregistration_JSONPatch(a.(*admissionregistrationv1alpha1.JSONPatch), b.(*admissionregistration.JSONPatch), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.JSONPatch)(nil), (*admissionregistrationv1alpha1.JSONPatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_JSONPatch_To_v1alpha1_JSONPatch(a.(*admissionregistration.JSONPatch), b.(*admissionregistrationv1alpha1.JSONPatch), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MatchCondition)(nil), (*admissionregistration.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MatchCondition_To_admissionregistration_MatchCondition(a.(*admissionregistrationv1alpha1.MatchCondition), b.(*admissionregistration.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchCondition)(nil), (*admissionregistrationv1alpha1.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchCondition_To_v1alpha1_MatchCondition(a.(*admissionregistration.MatchCondition), b.(*admissionregistrationv1alpha1.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MatchResources)(nil), (*admissionregistration.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(a.(*admissionregistrationv1alpha1.MatchResources), b.(*admissionregistration.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchResources)(nil), (*admissionregistrationv1alpha1.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(a.(*admissionregistration.MatchResources), b.(*admissionregistrationv1alpha1.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicy)(nil), (*admissionregistration.MutatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicy), b.(*admissionregistration.MutatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicy)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy(a.(*admissionregistration.MutatingAdmissionPolicy), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding)(nil), (*admissionregistration.MutatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding), b.(*admissionregistration.MutatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBinding)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding(a.(*admissionregistration.MutatingAdmissionPolicyBinding), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList)(nil), (*admissionregistration.MutatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList), b.(*admissionregistration.MutatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBindingList)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1alpha1_MutatingAdmissionPolicyBindingList(a.(*admissionregistration.MutatingAdmissionPolicyBindingList), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec)(nil), (*admissionregistration.MutatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec), b.(*admissionregistration.MutatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBindingSpec)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec(a.(*admissionregistration.MutatingAdmissionPolicyBindingSpec), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicyList)(nil), (*admissionregistration.MutatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyList), b.(*admissionregistration.MutatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyList)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1alpha1_MutatingAdmissionPolicyList(a.(*admissionregistration.MutatingAdmissionPolicyList), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.MutatingAdmissionPolicySpec)(nil), (*admissionregistration.MutatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(a.(*admissionregistrationv1alpha1.MutatingAdmissionPolicySpec), b.(*admissionregistration.MutatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicySpec)(nil), (*admissionregistrationv1alpha1.MutatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec(a.(*admissionregistration.MutatingAdmissionPolicySpec), b.(*admissionregistrationv1alpha1.MutatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.Mutation)(nil), (*admissionregistration.Mutation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Mutation_To_admissionregistration_Mutation(a.(*admissionregistrationv1alpha1.Mutation), b.(*admissionregistration.Mutation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Mutation)(nil), (*admissionregistrationv1alpha1.Mutation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Mutation_To_v1alpha1_Mutation(a.(*admissionregistration.Mutation), b.(*admissionregistrationv1alpha1.Mutation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.NamedRuleWithOperations)(nil), (*admissionregistration.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(a.(*admissionregistrationv1alpha1.NamedRuleWithOperations), b.(*admissionregistration.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.NamedRuleWithOperations)(nil), (*admissionregistrationv1alpha1.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(a.(*admissionregistration.NamedRuleWithOperations), b.(*admissionregistrationv1alpha1.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ParamKind)(nil), (*admissionregistration.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ParamKind_To_admissionregistration_ParamKind(a.(*admissionregistrationv1alpha1.ParamKind), b.(*admissionregistration.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamKind)(nil), (*admissionregistrationv1alpha1.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamKind_To_v1alpha1_ParamKind(a.(*admissionregistration.ParamKind), b.(*admissionregistrationv1alpha1.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ParamRef)(nil), (*admissionregistration.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ParamRef_To_admissionregistration_ParamRef(a.(*admissionregistrationv1alpha1.ParamRef), b.(*admissionregistration.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamRef)(nil), (*admissionregistrationv1alpha1.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamRef_To_v1alpha1_ParamRef(a.(*admissionregistration.ParamRef), b.(*admissionregistrationv1alpha1.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.TypeChecking)(nil), (*admissionregistration.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TypeChecking_To_admissionregistration_TypeChecking(a.(*admissionregistrationv1alpha1.TypeChecking), b.(*admissionregistration.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.TypeChecking)(nil), (*admissionregistrationv1alpha1.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_TypeChecking_To_v1alpha1_TypeChecking(a.(*admissionregistration.TypeChecking), b.(*admissionregistrationv1alpha1.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicy)(nil), (*admissionregistration.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicy), b.(*admissionregistration.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicy)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy(a.(*admissionregistration.ValidatingAdmissionPolicy), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding), b.(*admissionregistration.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding(a.(*admissionregistration.ValidatingAdmissionPolicyBinding), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList), b.(*admissionregistration.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1alpha1_ValidatingAdmissionPolicyBindingList(a.(*admissionregistration.ValidatingAdmissionPolicyBindingList), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList)(nil), (*admissionregistration.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList), b.(*admissionregistration.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyList)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1alpha1_ValidatingAdmissionPolicyList(a.(*admissionregistration.ValidatingAdmissionPolicyList), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec)(nil), (*admissionregistration.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec), b.(*admissionregistration.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicySpec)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec(a.(*admissionregistration.ValidatingAdmissionPolicySpec), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(a.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus), b.(*admissionregistration.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus(a.(*admissionregistration.ValidatingAdmissionPolicyStatus), b.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.Validation)(nil), (*admissionregistration.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Validation_To_admissionregistration_Validation(a.(*admissionregistrationv1alpha1.Validation), b.(*admissionregistration.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Validation)(nil), (*admissionregistrationv1alpha1.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Validation_To_v1alpha1_Validation(a.(*admissionregistration.Validation), b.(*admissionregistrationv1alpha1.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1alpha1.Variable)(nil), (*admissionregistration.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Variable_To_admissionregistration_Variable(a.(*admissionregistrationv1alpha1.Variable), b.(*admissionregistration.Variable), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Variable)(nil), (*admissionregistrationv1alpha1.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Variable_To_v1alpha1_Variable(a.(*admissionregistration.Variable), b.(*admissionregistrationv1alpha1.Variable), scope)
}); err != nil {
return err
}
return nil
}
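// exampleConvert is an illustrative sketch (not part of the generated file):
// once RegisterConversions has run against a runtime.Scheme, versioned and
// internal types can be converted through the scheme directly.
func exampleConvert(s *runtime.Scheme) error {
	in := &admissionregistrationv1alpha1.ParamKind{APIVersion: "v1", Kind: "ConfigMap"}
	out := &admissionregistration.ParamKind{}
	// Convert dispatches to Convert_v1alpha1_ParamKind_To_admissionregistration_ParamKind,
	// registered above via AddGeneratedConversionFunc.
	return s.Convert(in, out, nil)
}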
func autoConvert_v1alpha1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in *admissionregistrationv1alpha1.ApplyConfiguration, out *admissionregistration.ApplyConfiguration, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1alpha1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in *admissionregistrationv1alpha1.ApplyConfiguration, out *admissionregistration.ApplyConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in, out, s)
}
func autoConvert_admissionregistration_ApplyConfiguration_To_v1alpha1_ApplyConfiguration(in *admissionregistration.ApplyConfiguration, out *admissionregistrationv1alpha1.ApplyConfiguration, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_ApplyConfiguration_To_v1alpha1_ApplyConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_ApplyConfiguration_To_v1alpha1_ApplyConfiguration(in *admissionregistration.ApplyConfiguration, out *admissionregistrationv1alpha1.ApplyConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_ApplyConfiguration_To_v1alpha1_ApplyConfiguration(in, out, s)
}
func autoConvert_v1alpha1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1alpha1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_v1alpha1_AuditAnnotation_To_admissionregistration_AuditAnnotation is an autogenerated conversion function.
func Convert_v1alpha1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1alpha1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
return autoConvert_v1alpha1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in, out, s)
}
func autoConvert_admissionregistration_AuditAnnotation_To_v1alpha1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1alpha1.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_admissionregistration_AuditAnnotation_To_v1alpha1_AuditAnnotation is an autogenerated conversion function.
func Convert_admissionregistration_AuditAnnotation_To_v1alpha1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1alpha1.AuditAnnotation, s conversion.Scope) error {
return autoConvert_admissionregistration_AuditAnnotation_To_v1alpha1_AuditAnnotation(in, out, s)
}
func autoConvert_v1alpha1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1alpha1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_v1alpha1_ExpressionWarning_To_admissionregistration_ExpressionWarning is an autogenerated conversion function.
func Convert_v1alpha1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1alpha1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
return autoConvert_v1alpha1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in, out, s)
}
func autoConvert_admissionregistration_ExpressionWarning_To_v1alpha1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1alpha1.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_admissionregistration_ExpressionWarning_To_v1alpha1_ExpressionWarning is an autogenerated conversion function.
func Convert_admissionregistration_ExpressionWarning_To_v1alpha1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1alpha1.ExpressionWarning, s conversion.Scope) error {
return autoConvert_admissionregistration_ExpressionWarning_To_v1alpha1_ExpressionWarning(in, out, s)
}
func autoConvert_v1alpha1_JSONPatch_To_admissionregistration_JSONPatch(in *admissionregistrationv1alpha1.JSONPatch, out *admissionregistration.JSONPatch, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1alpha1_JSONPatch_To_admissionregistration_JSONPatch is an autogenerated conversion function.
func Convert_v1alpha1_JSONPatch_To_admissionregistration_JSONPatch(in *admissionregistrationv1alpha1.JSONPatch, out *admissionregistration.JSONPatch, s conversion.Scope) error {
return autoConvert_v1alpha1_JSONPatch_To_admissionregistration_JSONPatch(in, out, s)
}
func autoConvert_admissionregistration_JSONPatch_To_v1alpha1_JSONPatch(in *admissionregistration.JSONPatch, out *admissionregistrationv1alpha1.JSONPatch, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_JSONPatch_To_v1alpha1_JSONPatch is an autogenerated conversion function.
func Convert_admissionregistration_JSONPatch_To_v1alpha1_JSONPatch(in *admissionregistration.JSONPatch, out *admissionregistrationv1alpha1.JSONPatch, s conversion.Scope) error {
return autoConvert_admissionregistration_JSONPatch_To_v1alpha1_JSONPatch(in, out, s)
}
func autoConvert_v1alpha1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1alpha1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1alpha1_MatchCondition_To_admissionregistration_MatchCondition is an autogenerated conversion function.
func Convert_v1alpha1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1alpha1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
return autoConvert_v1alpha1_MatchCondition_To_admissionregistration_MatchCondition(in, out, s)
}
func autoConvert_admissionregistration_MatchCondition_To_v1alpha1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1alpha1.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_MatchCondition_To_v1alpha1_MatchCondition is an autogenerated conversion function.
func Convert_admissionregistration_MatchCondition_To_v1alpha1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1alpha1.MatchCondition, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchCondition_To_v1alpha1_MatchCondition(in, out, s)
}
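// Note on the generated code below: conversion-gen emits a direct
// unsafe.Pointer cast when the source and destination types share an
// identical memory layout (for example the LabelSelector and MatchPolicy
// fields), and falls back to element-wise Convert_* calls where the layouts
// differ (the NamedRuleWithOperations slices, whose element types convert
// through the shared v1 RuleWithOperations).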
func autoConvert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1alpha1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources is an autogenerated conversion function.
func Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1alpha1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
return autoConvert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(in, out, s)
}
func autoConvert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1alpha1.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistrationv1alpha1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistrationv1alpha1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistrationv1alpha1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources is an autogenerated conversion function.
func Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1alpha1.MatchResources, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in *admissionregistrationv1alpha1.MutatingAdmissionPolicy, out *admissionregistration.MutatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in *admissionregistrationv1alpha1.MutatingAdmissionPolicy, out *admissionregistration.MutatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy(in *admissionregistration.MutatingAdmissionPolicy, out *admissionregistrationv1alpha1.MutatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy(in *admissionregistration.MutatingAdmissionPolicy, out *admissionregistrationv1alpha1.MutatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, out *admissionregistration.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, out *admissionregistration.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding(in *admissionregistration.MutatingAdmissionPolicyBinding, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding(in *admissionregistration.MutatingAdmissionPolicyBinding, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, out *admissionregistration.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_v1alpha1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, out *admissionregistration.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1alpha1_MutatingAdmissionPolicyBindingList(in *admissionregistration.MutatingAdmissionPolicyBindingList, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1alpha1_MutatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1alpha1_MutatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1alpha1_MutatingAdmissionPolicyBindingList(in *admissionregistration.MutatingAdmissionPolicyBindingList, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1alpha1_MutatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec, out *admissionregistration.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistration.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistration.MatchResources)
if err := Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec, out *admissionregistration.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec(in *admissionregistration.MutatingAdmissionPolicyBindingSpec, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistrationv1alpha1.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistrationv1alpha1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec(in *admissionregistration.MutatingAdmissionPolicyBindingSpec, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1alpha1_MutatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, out *admissionregistration.MutatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_v1alpha1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, out *admissionregistration.MutatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyList_To_v1alpha1_MutatingAdmissionPolicyList(in *admissionregistration.MutatingAdmissionPolicyList, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1alpha1.MutatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingAdmissionPolicy_To_v1alpha1_MutatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1alpha1_MutatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1alpha1_MutatingAdmissionPolicyList(in *admissionregistration.MutatingAdmissionPolicyList, out *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyList_To_v1alpha1_MutatingAdmissionPolicyList(in, out, s)
}
func autoConvert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in *admissionregistrationv1alpha1.MutatingAdmissionPolicySpec, out *admissionregistration.MutatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistration.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistration.MatchResources)
if err := Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Variables = *(*[]admissionregistration.Variable)(unsafe.Pointer(&in.Variables))
out.Mutations = *(*[]admissionregistration.Mutation)(unsafe.Pointer(&in.Mutations))
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.ReinvocationPolicy = admissionregistration.ReinvocationPolicyType(in.ReinvocationPolicy)
return nil
}
// Convert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in *admissionregistrationv1alpha1.MutatingAdmissionPolicySpec, out *admissionregistration.MutatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_v1alpha1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec(in *admissionregistration.MutatingAdmissionPolicySpec, out *admissionregistrationv1alpha1.MutatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistrationv1alpha1.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistrationv1alpha1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Variables = *(*[]admissionregistrationv1alpha1.Variable)(unsafe.Pointer(&in.Variables))
out.Mutations = *(*[]admissionregistrationv1alpha1.Mutation)(unsafe.Pointer(&in.Mutations))
out.FailurePolicy = (*admissionregistrationv1alpha1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchConditions = *(*[]admissionregistrationv1alpha1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.ReinvocationPolicy = admissionregistrationv1.ReinvocationPolicyType(in.ReinvocationPolicy)
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec(in *admissionregistration.MutatingAdmissionPolicySpec, out *admissionregistrationv1alpha1.MutatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicySpec_To_v1alpha1_MutatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_v1alpha1_Mutation_To_admissionregistration_Mutation(in *admissionregistrationv1alpha1.Mutation, out *admissionregistration.Mutation, s conversion.Scope) error {
out.PatchType = admissionregistration.PatchType(in.PatchType)
out.ApplyConfiguration = (*admissionregistration.ApplyConfiguration)(unsafe.Pointer(in.ApplyConfiguration))
out.JSONPatch = (*admissionregistration.JSONPatch)(unsafe.Pointer(in.JSONPatch))
return nil
}
// Convert_v1alpha1_Mutation_To_admissionregistration_Mutation is an autogenerated conversion function.
func Convert_v1alpha1_Mutation_To_admissionregistration_Mutation(in *admissionregistrationv1alpha1.Mutation, out *admissionregistration.Mutation, s conversion.Scope) error {
return autoConvert_v1alpha1_Mutation_To_admissionregistration_Mutation(in, out, s)
}
func autoConvert_admissionregistration_Mutation_To_v1alpha1_Mutation(in *admissionregistration.Mutation, out *admissionregistrationv1alpha1.Mutation, s conversion.Scope) error {
out.PatchType = admissionregistrationv1alpha1.PatchType(in.PatchType)
out.ApplyConfiguration = (*admissionregistrationv1alpha1.ApplyConfiguration)(unsafe.Pointer(in.ApplyConfiguration))
out.JSONPatch = (*admissionregistrationv1alpha1.JSONPatch)(unsafe.Pointer(in.JSONPatch))
return nil
}
// Convert_admissionregistration_Mutation_To_v1alpha1_Mutation is an autogenerated conversion function.
func Convert_admissionregistration_Mutation_To_v1alpha1_Mutation(in *admissionregistration.Mutation, out *admissionregistrationv1alpha1.Mutation, s conversion.Scope) error {
return autoConvert_admissionregistration_Mutation_To_v1alpha1_Mutation(in, out, s)
}
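// The conversions below delegate the embedded RuleWithOperations to the v1
// package's converters, since v1alpha1 reuses the v1 RuleWithOperations type
// (via a type alias) rather than declaring its own.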
func autoConvert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1alpha1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := apisadmissionregistrationv1.Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1alpha1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_v1alpha1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in, out, s)
}
func autoConvert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1alpha1.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := apisadmissionregistrationv1.Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1alpha1.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_admissionregistration_NamedRuleWithOperations_To_v1alpha1_NamedRuleWithOperations(in, out, s)
}
func autoConvert_v1alpha1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1alpha1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_v1alpha1_ParamKind_To_admissionregistration_ParamKind is an autogenerated conversion function.
func Convert_v1alpha1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1alpha1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
return autoConvert_v1alpha1_ParamKind_To_admissionregistration_ParamKind(in, out, s)
}
func autoConvert_admissionregistration_ParamKind_To_v1alpha1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1alpha1.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_admissionregistration_ParamKind_To_v1alpha1_ParamKind is an autogenerated conversion function.
func Convert_admissionregistration_ParamKind_To_v1alpha1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1alpha1.ParamKind, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamKind_To_v1alpha1_ParamKind(in, out, s)
}
func autoConvert_v1alpha1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1alpha1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistration.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_v1alpha1_ParamRef_To_admissionregistration_ParamRef is an autogenerated conversion function.
func Convert_v1alpha1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1alpha1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
return autoConvert_v1alpha1_ParamRef_To_admissionregistration_ParamRef(in, out, s)
}
func autoConvert_admissionregistration_ParamRef_To_v1alpha1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1alpha1.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistrationv1alpha1.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_admissionregistration_ParamRef_To_v1alpha1_ParamRef is an autogenerated conversion function.
func Convert_admissionregistration_ParamRef_To_v1alpha1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1alpha1.ParamRef, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamRef_To_v1alpha1_ParamRef(in, out, s)
}
func autoConvert_v1alpha1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1alpha1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistration.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_v1alpha1_TypeChecking_To_admissionregistration_TypeChecking is an autogenerated conversion function.
func Convert_v1alpha1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1alpha1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
return autoConvert_v1alpha1_TypeChecking_To_admissionregistration_TypeChecking(in, out, s)
}
func autoConvert_admissionregistration_TypeChecking_To_v1alpha1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1alpha1.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistrationv1alpha1.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_admissionregistration_TypeChecking_To_v1alpha1_TypeChecking is an autogenerated conversion function.
func Convert_admissionregistration_TypeChecking_To_v1alpha1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1alpha1.TypeChecking, s conversion.Scope) error {
return autoConvert_admissionregistration_TypeChecking_To_v1alpha1_TypeChecking(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_v1alpha1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
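// The Items slice above is converted element by element rather than through an
// unsafe cast because each element's conversion is a real function call (its
// spec contains MatchResources, which has its own Convert_* helper). The
// generic shape, with hypothetical names for illustration:
//
//	outItems := make([]Internal, len(inItems))
//	for i := range inItems {
//		if err := convertElem(&inItems[i], &outItems[i], scope); err != nil {
//			return err
//		}
//	}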
// Convert_v1alpha1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1alpha1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1alpha1_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1alpha1_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1alpha1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1alpha1_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistration.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistration.MatchResources)
if err := Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistration.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistrationv1alpha1.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistrationv1alpha1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistrationv1alpha1.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1alpha1_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
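// Note how a single spec conversion mixes both strategies: ParamRef and
// ValidationActions are layout-identical across versions and are cast with
// unsafe.Pointer, while MatchResources needs its named Convert_* helper and is
// therefore allocated and converted explicitly.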
func autoConvert_v1alpha1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_v1alpha1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1alpha1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1alpha1.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1alpha1_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1alpha1_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1alpha1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1alpha1_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistration.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistration.MatchResources)
if err := Convert_v1alpha1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistration.Validation)(unsafe.Pointer(&in.Validations))
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistration.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.Variables = *(*[]admissionregistration.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistrationv1alpha1.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistrationv1alpha1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1alpha1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistrationv1alpha1.Validation)(unsafe.Pointer(&in.Validations))
out.MatchConditions = *(*[]admissionregistrationv1alpha1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.FailurePolicy = (*admissionregistrationv1alpha1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistrationv1alpha1.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.Variables = *(*[]admissionregistrationv1alpha1.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1alpha1_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistration.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistrationv1alpha1.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1alpha1_ValidatingAdmissionPolicyStatus(in, out, s)
}
func autoConvert_v1alpha1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1alpha1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*v1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_v1alpha1_Validation_To_admissionregistration_Validation is an autogenerated conversion function.
func Convert_v1alpha1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1alpha1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
return autoConvert_v1alpha1_Validation_To_admissionregistration_Validation(in, out, s)
}
func autoConvert_admissionregistration_Validation_To_v1alpha1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1alpha1.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*v1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_admissionregistration_Validation_To_v1alpha1_Validation is an autogenerated conversion function.
func Convert_admissionregistration_Validation_To_v1alpha1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1alpha1.Validation, s conversion.Scope) error {
return autoConvert_admissionregistration_Validation_To_v1alpha1_Validation(in, out, s)
}
func autoConvert_v1alpha1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1alpha1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1alpha1_Variable_To_admissionregistration_Variable is an autogenerated conversion function.
func Convert_v1alpha1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1alpha1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
return autoConvert_v1alpha1_Variable_To_admissionregistration_Variable(in, out, s)
}
func autoConvert_admissionregistration_Variable_To_v1alpha1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1alpha1.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_Variable_To_v1alpha1_Variable is an autogenerated conversion function.
func Convert_admissionregistration_Variable_To_v1alpha1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1alpha1.Variable, s conversion.Scope) error {
return autoConvert_admissionregistration_Variable_To_v1alpha1_Variable(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/kubernetes/pkg/apis/admissionregistration/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.MutatingAdmissionPolicy{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicy(obj.(*admissionregistrationv1alpha1.MutatingAdmissionPolicy))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyBinding(obj.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyBindingList(obj.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.MutatingAdmissionPolicyList{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyList(obj.(*admissionregistrationv1alpha1.MutatingAdmissionPolicyList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicy(obj.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicy))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBinding(obj.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBindingList(obj.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyList(obj.(*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList))
})
return nil
}
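// Illustrative wiring sketch (assumed caller code): RegisterDefaults is
// normally hooked up through the package's SchemeBuilder, but it can also be
// applied to a scheme directly, after which Scheme.Default runs every
// defaulter registered for an object's type.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		// handle the registration error
//	}
//	policy := &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}
//	scheme.Default(policy) // runs SetObjectDefaults_ValidatingAdmissionPolicy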
func SetObjectDefaults_MutatingAdmissionPolicy(in *admissionregistrationv1alpha1.MutatingAdmissionPolicy) {
SetDefaults_MutatingAdmissionPolicySpec(&in.Spec)
if in.Spec.MatchConstraints != nil {
SetDefaults_MatchResources(in.Spec.MatchConstraints)
for i := range in.Spec.MatchConstraints.ResourceRules {
a := &in.Spec.MatchConstraints.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchConstraints.ExcludeResourceRules {
a := &in.Spec.MatchConstraints.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_MutatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding) {
if in.Spec.ParamRef != nil {
SetDefaults_ParamRef(in.Spec.ParamRef)
}
if in.Spec.MatchResources != nil {
SetDefaults_MatchResources(in.Spec.MatchResources)
for i := range in.Spec.MatchResources.ResourceRules {
a := &in.Spec.MatchResources.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchResources.ExcludeResourceRules {
a := &in.Spec.MatchResources.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingAdmissionPolicyBinding(a)
}
}
func SetObjectDefaults_MutatingAdmissionPolicyList(in *admissionregistrationv1alpha1.MutatingAdmissionPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingAdmissionPolicy(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicy(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicy) {
SetDefaults_ValidatingAdmissionPolicySpec(&in.Spec)
if in.Spec.MatchConstraints != nil {
SetDefaults_MatchResources(in.Spec.MatchConstraints)
for i := range in.Spec.MatchConstraints.ResourceRules {
a := &in.Spec.MatchConstraints.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchConstraints.ExcludeResourceRules {
a := &in.Spec.MatchConstraints.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding) {
if in.Spec.ParamRef != nil {
SetDefaults_ParamRef(in.Spec.ParamRef)
}
if in.Spec.MatchResources != nil {
SetDefaults_MatchResources(in.Spec.MatchResources)
for i := range in.Spec.MatchResources.ResourceRules {
a := &in.Spec.MatchResources.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchResources.ExcludeResourceRules {
a := &in.Spec.MatchResources.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicyBinding(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyList(in *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicy(a)
}
}
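// Each SetObjectDefaults_* function above is covering: it defaults its own
// spec, then walks every nested structure (match constraints or match
// resources, and their resource rules) so that a single call defaults the
// whole object; the *List variants simply fan out over Items.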
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_ValidatingAdmissionPolicySpec sets defaults for ValidatingAdmissionPolicySpec
func SetDefaults_ValidatingAdmissionPolicySpec(obj *admissionregistrationv1beta1.ValidatingAdmissionPolicySpec) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1beta1.Fail
obj.FailurePolicy = &policy
}
}
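// Illustrative before/after sketch (assumed caller code): an unset
// FailurePolicy becomes Fail, while an explicitly set value is left untouched.
//
//	spec := admissionregistrationv1beta1.ValidatingAdmissionPolicySpec{}
//	SetDefaults_ValidatingAdmissionPolicySpec(&spec)
//	// *spec.FailurePolicy == admissionregistrationv1beta1.Fail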
// SetDefaults_MatchResources sets defaults for MatchResources
func SetDefaults_MatchResources(obj *admissionregistrationv1beta1.MatchResources) {
if obj.MatchPolicy == nil {
policy := admissionregistrationv1beta1.Equivalent
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
}
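// An empty metav1.LabelSelector matches every object, so the defaults above
// make "no selector specified" behave as "match everything" rather than
// "match nothing".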
// SetDefaults_ValidatingWebhook sets defaults for a validating webhook
func SetDefaults_ValidatingWebhook(obj *admissionregistrationv1beta1.ValidatingWebhook) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1beta1.Ignore
obj.FailurePolicy = &policy
}
if obj.MatchPolicy == nil {
policy := admissionregistrationv1beta1.Exact
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
if obj.SideEffects == nil {
// TODO: revisit/remove this default and possibly make the field required when promoting to v1
unknown := admissionregistrationv1beta1.SideEffectClassUnknown
obj.SideEffects = &unknown
}
if obj.TimeoutSeconds == nil {
obj.TimeoutSeconds = ptr.To[int32](30)
}
if len(obj.AdmissionReviewVersions) == 0 {
obj.AdmissionReviewVersions = []string{admissionregistrationv1beta1.SchemeGroupVersion.Version}
}
}
// SetDefaults_MutatingWebhook sets defaults for a mutating webhook
func SetDefaults_MutatingWebhook(obj *admissionregistrationv1beta1.MutatingWebhook) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1beta1.Ignore
obj.FailurePolicy = &policy
}
if obj.MatchPolicy == nil {
policy := admissionregistrationv1beta1.Exact
obj.MatchPolicy = &policy
}
if obj.NamespaceSelector == nil {
selector := metav1.LabelSelector{}
obj.NamespaceSelector = &selector
}
if obj.ObjectSelector == nil {
selector := metav1.LabelSelector{}
obj.ObjectSelector = &selector
}
if obj.SideEffects == nil {
// TODO: revisit/remove this default and possibly make the field required when promoting to v1
unknown := admissionregistrationv1beta1.SideEffectClassUnknown
obj.SideEffects = &unknown
}
if obj.TimeoutSeconds == nil {
obj.TimeoutSeconds = ptr.To[int32](30)
}
if obj.ReinvocationPolicy == nil {
never := admissionregistrationv1beta1.NeverReinvocationPolicy
obj.ReinvocationPolicy = &never
}
if len(obj.AdmissionReviewVersions) == 0 {
obj.AdmissionReviewVersions = []string{admissionregistrationv1beta1.SchemeGroupVersion.Version}
}
}
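// Illustrative sketch (assumed caller code): defaulting a zero-valued mutating
// webhook yields the Ignore failure policy, the Exact match policy, match-all
// selectors, a 30-second timeout, and the Never reinvocation policy.
//
//	wh := admissionregistrationv1beta1.MutatingWebhook{}
//	SetDefaults_MutatingWebhook(&wh)
//	// *wh.FailurePolicy == admissionregistrationv1beta1.Ignore
//	// *wh.TimeoutSeconds == 30
//	// *wh.ReinvocationPolicy == admissionregistrationv1beta1.NeverReinvocationPolicy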
// SetDefaults_ServiceReference sets defaults for a webhook's ServiceReference
func SetDefaults_ServiceReference(obj *admissionregistrationv1beta1.ServiceReference) {
if obj.Port == nil {
obj.Port = ptr.To[int32](443)
}
}
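// ptr.To allocates a pointer to the given value in a single expression; it is
// the generic replacement for the older k8s.io/utils/pointer helpers and keeps
// the default (the standard HTTPS port) readable.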
// SetDefaults_MutatingAdmissionPolicySpec sets defaults for MutatingAdmissionPolicySpec
func SetDefaults_MutatingAdmissionPolicySpec(obj *admissionregistrationv1beta1.MutatingAdmissionPolicySpec) {
if obj.FailurePolicy == nil {
policy := admissionregistrationv1beta1.Fail
obj.FailurePolicy = &policy
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "admissionregistration.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
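// Illustrative usage sketch (assumed caller code):
//
//	gr := Resource("validatingwebhookconfigurations")
//	// gr.Group == "admissionregistration.k8s.io"
//	// gr.Resource == "validatingwebhookconfigurations"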
var (
localSchemeBuilder = &admissionregistrationv1beta1.SchemeBuilder
// AddToScheme is a handler that adds items to the scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration"
apisadmissionregistrationv1 "k8s.io/kubernetes/pkg/apis/admissionregistration/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
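// Illustrative usage sketch (assumed caller code): once RegisterConversions
// has run, callers rarely invoke the Convert_* functions directly; instead,
// Scheme.Convert dispatches on the (source, destination) type pair that each
// AddGeneratedConversionFunc call below registers.
//
//	// given a populated *runtime.Scheme named scheme:
//	internal := &admissionregistration.ValidatingAdmissionPolicy{}
//	versioned := &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}
//	if err := scheme.Convert(versioned, internal, nil); err != nil {
//		// handle the conversion error
//	}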
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ApplyConfiguration)(nil), (*admissionregistration.ApplyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(a.(*admissionregistrationv1beta1.ApplyConfiguration), b.(*admissionregistration.ApplyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ApplyConfiguration)(nil), (*admissionregistrationv1beta1.ApplyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ApplyConfiguration_To_v1beta1_ApplyConfiguration(a.(*admissionregistration.ApplyConfiguration), b.(*admissionregistrationv1beta1.ApplyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.AuditAnnotation)(nil), (*admissionregistration.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AuditAnnotation_To_admissionregistration_AuditAnnotation(a.(*admissionregistrationv1beta1.AuditAnnotation), b.(*admissionregistration.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.AuditAnnotation)(nil), (*admissionregistrationv1beta1.AuditAnnotation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_AuditAnnotation_To_v1beta1_AuditAnnotation(a.(*admissionregistration.AuditAnnotation), b.(*admissionregistrationv1beta1.AuditAnnotation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ExpressionWarning)(nil), (*admissionregistration.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ExpressionWarning_To_admissionregistration_ExpressionWarning(a.(*admissionregistrationv1beta1.ExpressionWarning), b.(*admissionregistration.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ExpressionWarning)(nil), (*admissionregistrationv1beta1.ExpressionWarning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ExpressionWarning_To_v1beta1_ExpressionWarning(a.(*admissionregistration.ExpressionWarning), b.(*admissionregistrationv1beta1.ExpressionWarning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.JSONPatch)(nil), (*admissionregistration.JSONPatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_JSONPatch_To_admissionregistration_JSONPatch(a.(*admissionregistrationv1beta1.JSONPatch), b.(*admissionregistration.JSONPatch), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.JSONPatch)(nil), (*admissionregistrationv1beta1.JSONPatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_JSONPatch_To_v1beta1_JSONPatch(a.(*admissionregistration.JSONPatch), b.(*admissionregistrationv1beta1.JSONPatch), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MatchCondition)(nil), (*admissionregistration.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MatchCondition_To_admissionregistration_MatchCondition(a.(*admissionregistrationv1beta1.MatchCondition), b.(*admissionregistration.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchCondition)(nil), (*admissionregistrationv1beta1.MatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchCondition_To_v1beta1_MatchCondition(a.(*admissionregistration.MatchCondition), b.(*admissionregistrationv1beta1.MatchCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MatchResources)(nil), (*admissionregistration.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(a.(*admissionregistrationv1beta1.MatchResources), b.(*admissionregistration.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MatchResources)(nil), (*admissionregistrationv1beta1.MatchResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(a.(*admissionregistration.MatchResources), b.(*admissionregistrationv1beta1.MatchResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicy)(nil), (*admissionregistration.MutatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicy), b.(*admissionregistration.MutatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicy)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy(a.(*admissionregistration.MutatingAdmissionPolicy), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicyBinding)(nil), (*admissionregistration.MutatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBinding), b.(*admissionregistration.MutatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBinding)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding(a.(*admissionregistration.MutatingAdmissionPolicyBinding), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList)(nil), (*admissionregistration.MutatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList), b.(*admissionregistration.MutatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBindingList)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1beta1_MutatingAdmissionPolicyBindingList(a.(*admissionregistration.MutatingAdmissionPolicyBindingList), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec)(nil), (*admissionregistration.MutatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec), b.(*admissionregistration.MutatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyBindingSpec)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec(a.(*admissionregistration.MutatingAdmissionPolicyBindingSpec), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicyList)(nil), (*admissionregistration.MutatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicyList), b.(*admissionregistration.MutatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicyList)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1beta1_MutatingAdmissionPolicyList(a.(*admissionregistration.MutatingAdmissionPolicyList), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingAdmissionPolicySpec)(nil), (*admissionregistration.MutatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(a.(*admissionregistrationv1beta1.MutatingAdmissionPolicySpec), b.(*admissionregistration.MutatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingAdmissionPolicySpec)(nil), (*admissionregistrationv1beta1.MutatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec(a.(*admissionregistration.MutatingAdmissionPolicySpec), b.(*admissionregistrationv1beta1.MutatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingWebhook)(nil), (*admissionregistration.MutatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook(a.(*admissionregistrationv1beta1.MutatingWebhook), b.(*admissionregistration.MutatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhook)(nil), (*admissionregistrationv1beta1.MutatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook(a.(*admissionregistration.MutatingWebhook), b.(*admissionregistrationv1beta1.MutatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingWebhookConfiguration)(nil), (*admissionregistration.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(a.(*admissionregistrationv1beta1.MutatingWebhookConfiguration), b.(*admissionregistration.MutatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfiguration)(nil), (*admissionregistrationv1beta1.MutatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(a.(*admissionregistration.MutatingWebhookConfiguration), b.(*admissionregistrationv1beta1.MutatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.MutatingWebhookConfigurationList)(nil), (*admissionregistration.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(a.(*admissionregistrationv1beta1.MutatingWebhookConfigurationList), b.(*admissionregistration.MutatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.MutatingWebhookConfigurationList)(nil), (*admissionregistrationv1beta1.MutatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(a.(*admissionregistration.MutatingWebhookConfigurationList), b.(*admissionregistrationv1beta1.MutatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.Mutation)(nil), (*admissionregistration.Mutation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Mutation_To_admissionregistration_Mutation(a.(*admissionregistrationv1beta1.Mutation), b.(*admissionregistration.Mutation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Mutation)(nil), (*admissionregistrationv1beta1.Mutation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Mutation_To_v1beta1_Mutation(a.(*admissionregistration.Mutation), b.(*admissionregistrationv1beta1.Mutation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.NamedRuleWithOperations)(nil), (*admissionregistration.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(a.(*admissionregistrationv1beta1.NamedRuleWithOperations), b.(*admissionregistration.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.NamedRuleWithOperations)(nil), (*admissionregistrationv1beta1.NamedRuleWithOperations)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(a.(*admissionregistration.NamedRuleWithOperations), b.(*admissionregistrationv1beta1.NamedRuleWithOperations), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ParamKind)(nil), (*admissionregistration.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ParamKind_To_admissionregistration_ParamKind(a.(*admissionregistrationv1beta1.ParamKind), b.(*admissionregistration.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamKind)(nil), (*admissionregistrationv1beta1.ParamKind)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamKind_To_v1beta1_ParamKind(a.(*admissionregistration.ParamKind), b.(*admissionregistrationv1beta1.ParamKind), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ParamRef)(nil), (*admissionregistration.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ParamRef_To_admissionregistration_ParamRef(a.(*admissionregistrationv1beta1.ParamRef), b.(*admissionregistration.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ParamRef)(nil), (*admissionregistrationv1beta1.ParamRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ParamRef_To_v1beta1_ParamRef(a.(*admissionregistration.ParamRef), b.(*admissionregistrationv1beta1.ParamRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ServiceReference)(nil), (*admissionregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(a.(*admissionregistrationv1beta1.ServiceReference), b.(*admissionregistration.ServiceReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ServiceReference)(nil), (*admissionregistrationv1beta1.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(a.(*admissionregistration.ServiceReference), b.(*admissionregistrationv1beta1.ServiceReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.TypeChecking)(nil), (*admissionregistration.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_TypeChecking_To_admissionregistration_TypeChecking(a.(*admissionregistrationv1beta1.TypeChecking), b.(*admissionregistration.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.TypeChecking)(nil), (*admissionregistrationv1beta1.TypeChecking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_TypeChecking_To_v1beta1_TypeChecking(a.(*admissionregistration.TypeChecking), b.(*admissionregistrationv1beta1.TypeChecking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicy)(nil), (*admissionregistration.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicy), b.(*admissionregistration.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicy)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy(a.(*admissionregistration.ValidatingAdmissionPolicy), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding), b.(*admissionregistration.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBinding)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding(a.(*admissionregistration.ValidatingAdmissionPolicyBinding), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList), b.(*admissionregistration.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingList)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1beta1_ValidatingAdmissionPolicyBindingList(a.(*admissionregistration.ValidatingAdmissionPolicyBindingList), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyBindingSpec)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec(a.(*admissionregistration.ValidatingAdmissionPolicyBindingSpec), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicyList)(nil), (*admissionregistration.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyList), b.(*admissionregistration.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyList)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1beta1_ValidatingAdmissionPolicyList(a.(*admissionregistration.ValidatingAdmissionPolicyList), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicySpec)(nil), (*admissionregistration.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicySpec), b.(*admissionregistration.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicySpec)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec(a.(*admissionregistration.ValidatingAdmissionPolicySpec), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(a.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus), b.(*admissionregistration.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingAdmissionPolicyStatus)(nil), (*admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus(a.(*admissionregistration.ValidatingAdmissionPolicyStatus), b.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingWebhook)(nil), (*admissionregistration.ValidatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(a.(*admissionregistrationv1beta1.ValidatingWebhook), b.(*admissionregistration.ValidatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhook)(nil), (*admissionregistrationv1beta1.ValidatingWebhook)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook(a.(*admissionregistration.ValidatingWebhook), b.(*admissionregistrationv1beta1.ValidatingWebhook), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingWebhookConfiguration)(nil), (*admissionregistration.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(a.(*admissionregistrationv1beta1.ValidatingWebhookConfiguration), b.(*admissionregistration.ValidatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfiguration)(nil), (*admissionregistrationv1beta1.ValidatingWebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(a.(*admissionregistration.ValidatingWebhookConfiguration), b.(*admissionregistrationv1beta1.ValidatingWebhookConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.ValidatingWebhookConfigurationList)(nil), (*admissionregistration.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(a.(*admissionregistrationv1beta1.ValidatingWebhookConfigurationList), b.(*admissionregistration.ValidatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.ValidatingWebhookConfigurationList)(nil), (*admissionregistrationv1beta1.ValidatingWebhookConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(a.(*admissionregistration.ValidatingWebhookConfigurationList), b.(*admissionregistrationv1beta1.ValidatingWebhookConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.Validation)(nil), (*admissionregistration.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Validation_To_admissionregistration_Validation(a.(*admissionregistrationv1beta1.Validation), b.(*admissionregistration.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Validation)(nil), (*admissionregistrationv1beta1.Validation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Validation_To_v1beta1_Validation(a.(*admissionregistration.Validation), b.(*admissionregistrationv1beta1.Validation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.Variable)(nil), (*admissionregistration.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Variable_To_admissionregistration_Variable(a.(*admissionregistrationv1beta1.Variable), b.(*admissionregistration.Variable), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.Variable)(nil), (*admissionregistrationv1beta1.Variable)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_Variable_To_v1beta1_Variable(a.(*admissionregistration.Variable), b.(*admissionregistrationv1beta1.Variable), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistrationv1beta1.WebhookClientConfig)(nil), (*admissionregistration.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(a.(*admissionregistrationv1beta1.WebhookClientConfig), b.(*admissionregistration.WebhookClientConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*admissionregistration.WebhookClientConfig)(nil), (*admissionregistrationv1beta1.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*admissionregistration.WebhookClientConfig), b.(*admissionregistrationv1beta1.WebhookClientConfig), scope)
}); err != nil {
return err
}
return nil
}
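// The generated conversions above are wired into a live scheme via
// RegisterConversions. A minimal usage sketch (hypothetical caller code;
// variable names are illustrative, and runtime is
// k8s.io/apimachinery/pkg/runtime):
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err) // registration only fails on programmer error
//	}
//	// The scheme can now translate between the internal
//	// admissionregistration types and their v1beta1 wire forms.
//	external := &admissionregistrationv1beta1.ParamKind{APIVersion: "v1", Kind: "ConfigMap"}
//	internal := &admissionregistration.ParamKind{}
//	_ = scheme.Convert(external, internal, nil)
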
func autoConvert_v1beta1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in *admissionregistrationv1beta1.ApplyConfiguration, out *admissionregistration.ApplyConfiguration, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1beta1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration is an autogenerated conversion function.
func Convert_v1beta1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in *admissionregistrationv1beta1.ApplyConfiguration, out *admissionregistration.ApplyConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_ApplyConfiguration_To_admissionregistration_ApplyConfiguration(in, out, s)
}
func autoConvert_admissionregistration_ApplyConfiguration_To_v1beta1_ApplyConfiguration(in *admissionregistration.ApplyConfiguration, out *admissionregistrationv1beta1.ApplyConfiguration, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_ApplyConfiguration_To_v1beta1_ApplyConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_ApplyConfiguration_To_v1beta1_ApplyConfiguration(in *admissionregistration.ApplyConfiguration, out *admissionregistrationv1beta1.ApplyConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_ApplyConfiguration_To_v1beta1_ApplyConfiguration(in, out, s)
}
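// The pair above illustrates the pattern repeated for every type below: the
// unexported autoConvert_* function carries the generated field-by-field
// body, and the exported Convert_* wrapper is what callers and the
// registration block reference. conversion-gen only emits the wrapper when no
// hand-written conversion of the same name exists, which is what allows a
// package to override any single conversion without editing generated code.
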
func autoConvert_v1beta1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1beta1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_v1beta1_AuditAnnotation_To_admissionregistration_AuditAnnotation is an autogenerated conversion function.
func Convert_v1beta1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in *admissionregistrationv1beta1.AuditAnnotation, out *admissionregistration.AuditAnnotation, s conversion.Scope) error {
return autoConvert_v1beta1_AuditAnnotation_To_admissionregistration_AuditAnnotation(in, out, s)
}
func autoConvert_admissionregistration_AuditAnnotation_To_v1beta1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1beta1.AuditAnnotation, s conversion.Scope) error {
out.Key = in.Key
out.ValueExpression = in.ValueExpression
return nil
}
// Convert_admissionregistration_AuditAnnotation_To_v1beta1_AuditAnnotation is an autogenerated conversion function.
func Convert_admissionregistration_AuditAnnotation_To_v1beta1_AuditAnnotation(in *admissionregistration.AuditAnnotation, out *admissionregistrationv1beta1.AuditAnnotation, s conversion.Scope) error {
return autoConvert_admissionregistration_AuditAnnotation_To_v1beta1_AuditAnnotation(in, out, s)
}
func autoConvert_v1beta1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1beta1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_v1beta1_ExpressionWarning_To_admissionregistration_ExpressionWarning is an autogenerated conversion function.
func Convert_v1beta1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in *admissionregistrationv1beta1.ExpressionWarning, out *admissionregistration.ExpressionWarning, s conversion.Scope) error {
return autoConvert_v1beta1_ExpressionWarning_To_admissionregistration_ExpressionWarning(in, out, s)
}
func autoConvert_admissionregistration_ExpressionWarning_To_v1beta1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1beta1.ExpressionWarning, s conversion.Scope) error {
out.FieldRef = in.FieldRef
out.Warning = in.Warning
return nil
}
// Convert_admissionregistration_ExpressionWarning_To_v1beta1_ExpressionWarning is an autogenerated conversion function.
func Convert_admissionregistration_ExpressionWarning_To_v1beta1_ExpressionWarning(in *admissionregistration.ExpressionWarning, out *admissionregistrationv1beta1.ExpressionWarning, s conversion.Scope) error {
return autoConvert_admissionregistration_ExpressionWarning_To_v1beta1_ExpressionWarning(in, out, s)
}
func autoConvert_v1beta1_JSONPatch_To_admissionregistration_JSONPatch(in *admissionregistrationv1beta1.JSONPatch, out *admissionregistration.JSONPatch, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1beta1_JSONPatch_To_admissionregistration_JSONPatch is an autogenerated conversion function.
func Convert_v1beta1_JSONPatch_To_admissionregistration_JSONPatch(in *admissionregistrationv1beta1.JSONPatch, out *admissionregistration.JSONPatch, s conversion.Scope) error {
return autoConvert_v1beta1_JSONPatch_To_admissionregistration_JSONPatch(in, out, s)
}
func autoConvert_admissionregistration_JSONPatch_To_v1beta1_JSONPatch(in *admissionregistration.JSONPatch, out *admissionregistrationv1beta1.JSONPatch, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_JSONPatch_To_v1beta1_JSONPatch is an autogenerated conversion function.
func Convert_admissionregistration_JSONPatch_To_v1beta1_JSONPatch(in *admissionregistration.JSONPatch, out *admissionregistrationv1beta1.JSONPatch, s conversion.Scope) error {
return autoConvert_admissionregistration_JSONPatch_To_v1beta1_JSONPatch(in, out, s)
}
func autoConvert_v1beta1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1beta1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1beta1_MatchCondition_To_admissionregistration_MatchCondition is an autogenerated conversion function.
func Convert_v1beta1_MatchCondition_To_admissionregistration_MatchCondition(in *admissionregistrationv1beta1.MatchCondition, out *admissionregistration.MatchCondition, s conversion.Scope) error {
return autoConvert_v1beta1_MatchCondition_To_admissionregistration_MatchCondition(in, out, s)
}
func autoConvert_admissionregistration_MatchCondition_To_v1beta1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1beta1.MatchCondition, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_MatchCondition_To_v1beta1_MatchCondition is an autogenerated conversion function.
func Convert_admissionregistration_MatchCondition_To_v1beta1_MatchCondition(in *admissionregistration.MatchCondition, out *admissionregistrationv1beta1.MatchCondition, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchCondition_To_v1beta1_MatchCondition(in, out, s)
}
func autoConvert_v1beta1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1beta1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistration.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources is an autogenerated conversion function.
func Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(in *admissionregistrationv1beta1.MatchResources, out *admissionregistration.MatchResources, s conversion.Scope) error {
return autoConvert_v1beta1_MatchResources_To_admissionregistration_MatchResources(in, out, s)
}
func autoConvert_admissionregistration_MatchResources_To_v1beta1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1beta1.MatchResources, s conversion.Scope) error {
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]admissionregistrationv1beta1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ResourceRules = nil
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]admissionregistrationv1beta1.NamedRuleWithOperations, len(*in))
for i := range *in {
if err := Convert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.ExcludeResourceRules = nil
}
out.MatchPolicy = (*admissionregistrationv1beta1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
return nil
}
// Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources is an autogenerated conversion function.
func Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(in *admissionregistration.MatchResources, out *admissionregistrationv1beta1.MatchResources, s conversion.Scope) error {
return autoConvert_admissionregistration_MatchResources_To_v1beta1_MatchResources(in, out, s)
}
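// The MatchResources conversions above mix two strategies. Fields whose
// source and destination types share an identical memory layout (the label
// selectors and the *MatchPolicyType pointer) are reinterpreted in place via
// an unsafe.Pointer cast. ResourceRules and ExcludeResourceRules instead
// allocate a fresh slice and convert element by element, because
// NamedRuleWithOperations requires a registered conversion for its embedded
// RuleWithOperations. conversion-gen picks between the two forms
// automatically based on type equivalence.
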
func autoConvert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in *admissionregistrationv1beta1.MutatingAdmissionPolicy, out *admissionregistration.MutatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in *admissionregistrationv1beta1.MutatingAdmissionPolicy, out *admissionregistration.MutatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy(in *admissionregistration.MutatingAdmissionPolicy, out *admissionregistrationv1beta1.MutatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy(in *admissionregistration.MutatingAdmissionPolicy, out *admissionregistrationv1beta1.MutatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy(in, out, s)
}
func autoConvert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, out *admissionregistration.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, out *admissionregistration.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding(in *admissionregistration.MutatingAdmissionPolicyBinding, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding(in *admissionregistration.MutatingAdmissionPolicyBinding, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_v1beta1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList, out *admissionregistration.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_v1beta1_MutatingAdmissionPolicyBinding_To_admissionregistration_MutatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList, out *admissionregistration.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicyBindingList_To_admissionregistration_MutatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1beta1_MutatingAdmissionPolicyBindingList(in *admissionregistration.MutatingAdmissionPolicyBindingList, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingAdmissionPolicyBinding_To_v1beta1_MutatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1beta1_MutatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1beta1_MutatingAdmissionPolicyBindingList(in *admissionregistration.MutatingAdmissionPolicyBindingList, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBindingList_To_v1beta1_MutatingAdmissionPolicyBindingList(in, out, s)
}
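// Every *List conversion in this file follows the shape above: ListMeta is
// copied directly (the type is shared across versions) and Items is rebuilt
// with make plus a per-element Convert_* call, so item-level conversion logic
// is never bypassed. Note that a nil Items slice stays nil rather than
// becoming an empty slice, which keeps round-trips lossless.
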
func autoConvert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec, out *admissionregistration.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistration.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistration.MatchResources)
if err := Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec, out *admissionregistration.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicyBindingSpec_To_admissionregistration_MutatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec(in *admissionregistration.MutatingAdmissionPolicyBindingSpec, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistrationv1beta1.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistrationv1beta1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec(in *admissionregistration.MutatingAdmissionPolicyBindingSpec, out *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyBindingSpec_To_v1beta1_MutatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_v1beta1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyList, out *admissionregistration.MutatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_v1beta1_MutatingAdmissionPolicy_To_admissionregistration_MutatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyList, out *admissionregistration.MutatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicyList_To_admissionregistration_MutatingAdmissionPolicyList(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicyList_To_v1beta1_MutatingAdmissionPolicyList(in *admissionregistration.MutatingAdmissionPolicyList, out *admissionregistrationv1beta1.MutatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.MutatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingAdmissionPolicy_To_v1beta1_MutatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1beta1_MutatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicyList_To_v1beta1_MutatingAdmissionPolicyList(in *admissionregistration.MutatingAdmissionPolicyList, out *admissionregistrationv1beta1.MutatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicyList_To_v1beta1_MutatingAdmissionPolicyList(in, out, s)
}
func autoConvert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in *admissionregistrationv1beta1.MutatingAdmissionPolicySpec, out *admissionregistration.MutatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistration.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistration.MatchResources)
if err := Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Variables = *(*[]admissionregistration.Variable)(unsafe.Pointer(&in.Variables))
out.Mutations = *(*[]admissionregistration.Mutation)(unsafe.Pointer(&in.Mutations))
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.ReinvocationPolicy = admissionregistration.ReinvocationPolicyType(in.ReinvocationPolicy)
return nil
}
// Convert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in *admissionregistrationv1beta1.MutatingAdmissionPolicySpec, out *admissionregistration.MutatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingAdmissionPolicySpec_To_admissionregistration_MutatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec(in *admissionregistration.MutatingAdmissionPolicySpec, out *admissionregistrationv1beta1.MutatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistrationv1beta1.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistrationv1beta1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Variables = *(*[]admissionregistrationv1beta1.Variable)(unsafe.Pointer(&in.Variables))
out.Mutations = *(*[]admissionregistrationv1beta1.Mutation)(unsafe.Pointer(&in.Mutations))
out.FailurePolicy = (*admissionregistrationv1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchConditions = *(*[]admissionregistrationv1beta1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.ReinvocationPolicy = admissionregistrationv1.ReinvocationPolicyType(in.ReinvocationPolicy)
return nil
}
// Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec(in *admissionregistration.MutatingAdmissionPolicySpec, out *admissionregistrationv1beta1.MutatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingAdmissionPolicySpec_To_v1beta1_MutatingAdmissionPolicySpec(in, out, s)
}
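// One subtlety above: the internal-to-v1beta1 direction casts
// ReinvocationPolicy to admissionregistrationv1.ReinvocationPolicyType rather
// than to a v1beta1 name. That works because (as of this generation) the
// v1beta1 package declares its ReinvocationPolicyType as a type alias of the
// v1 one, so the v1 spelling is the canonical name of the same type.
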
func autoConvert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in *admissionregistrationv1beta1.MutatingWebhook, out *admissionregistration.MutatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistration.RuleWithOperations, len(*in))
for i := range *in {
if err := apisadmissionregistrationv1.Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.ReinvocationPolicy = (*admissionregistration.ReinvocationPolicyType)(unsafe.Pointer(in.ReinvocationPolicy))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook is an autogenerated conversion function.
func Convert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in *admissionregistrationv1beta1.MutatingWebhook, out *admissionregistration.MutatingWebhook, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook(in *admissionregistration.MutatingWebhook, out *admissionregistrationv1beta1.MutatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
if err := apisadmissionregistrationv1.Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistrationv1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistrationv1beta1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistrationv1beta1.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.ReinvocationPolicy = (*admissionregistrationv1.ReinvocationPolicyType)(unsafe.Pointer(in.ReinvocationPolicy))
out.MatchConditions = *(*[]admissionregistrationv1beta1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook(in *admissionregistration.MutatingWebhook, out *admissionregistrationv1beta1.MutatingWebhook, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook(in, out, s)
}
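// The webhook conversions above lean on the same cross-version aliasing:
// v1beta1 declares RuleWithOperations as an alias of the v1 type, so the
// per-rule work is delegated to the v1 package's generated helpers (the
// apisadmissionregistrationv1 import) instead of being duplicated here.
// Everything else on a webhook is either layout-identical, and therefore an
// unsafe cast, or a plain field copy.
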
func autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *admissionregistrationv1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistration.MutatingWebhook, len(*in))
for i := range *in {
if err := Convert_v1beta1_MutatingWebhook_To_admissionregistration_MutatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration is an autogenerated conversion function.
func Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *admissionregistrationv1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *admissionregistrationv1beta1.MutatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistrationv1beta1.MutatingWebhook, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingWebhook_To_v1beta1_MutatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *admissionregistrationv1beta1.MutatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in, out, s)
}
func autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *admissionregistrationv1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.MutatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *admissionregistrationv1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in, out, s)
}
func autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *admissionregistrationv1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.MutatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *admissionregistrationv1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in, out, s)
}
func autoConvert_v1beta1_Mutation_To_admissionregistration_Mutation(in *admissionregistrationv1beta1.Mutation, out *admissionregistration.Mutation, s conversion.Scope) error {
out.PatchType = admissionregistration.PatchType(in.PatchType)
out.ApplyConfiguration = (*admissionregistration.ApplyConfiguration)(unsafe.Pointer(in.ApplyConfiguration))
out.JSONPatch = (*admissionregistration.JSONPatch)(unsafe.Pointer(in.JSONPatch))
return nil
}
// Convert_v1beta1_Mutation_To_admissionregistration_Mutation is an autogenerated conversion function.
func Convert_v1beta1_Mutation_To_admissionregistration_Mutation(in *admissionregistrationv1beta1.Mutation, out *admissionregistration.Mutation, s conversion.Scope) error {
return autoConvert_v1beta1_Mutation_To_admissionregistration_Mutation(in, out, s)
}
func autoConvert_admissionregistration_Mutation_To_v1beta1_Mutation(in *admissionregistration.Mutation, out *admissionregistrationv1beta1.Mutation, s conversion.Scope) error {
out.PatchType = admissionregistrationv1beta1.PatchType(in.PatchType)
out.ApplyConfiguration = (*admissionregistrationv1beta1.ApplyConfiguration)(unsafe.Pointer(in.ApplyConfiguration))
out.JSONPatch = (*admissionregistrationv1beta1.JSONPatch)(unsafe.Pointer(in.JSONPatch))
return nil
}
// Convert_admissionregistration_Mutation_To_v1beta1_Mutation is an autogenerated conversion function.
func Convert_admissionregistration_Mutation_To_v1beta1_Mutation(in *admissionregistration.Mutation, out *admissionregistrationv1beta1.Mutation, s conversion.Scope) error {
return autoConvert_admissionregistration_Mutation_To_v1beta1_Mutation(in, out, s)
}
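// Mutation above shows the two cheapest conversion forms: PatchType is a
// defined string type on both sides, so a plain value conversion suffices,
// while ApplyConfiguration and JSONPatch are single-string structs with
// identical layouts, so their pointers are reinterpreted with unsafe.Pointer
// instead of being allocated and copied.
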
func autoConvert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1beta1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := apisadmissionregistrationv1.Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in *admissionregistrationv1beta1.NamedRuleWithOperations, out *admissionregistration.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_v1beta1_NamedRuleWithOperations_To_admissionregistration_NamedRuleWithOperations(in, out, s)
}
func autoConvert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1beta1.NamedRuleWithOperations, s conversion.Scope) error {
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
if err := apisadmissionregistrationv1.Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&in.RuleWithOperations, &out.RuleWithOperations, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations is an autogenerated conversion function.
func Convert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(in *admissionregistration.NamedRuleWithOperations, out *admissionregistrationv1beta1.NamedRuleWithOperations, s conversion.Scope) error {
return autoConvert_admissionregistration_NamedRuleWithOperations_To_v1beta1_NamedRuleWithOperations(in, out, s)
}
func autoConvert_v1beta1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1beta1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_v1beta1_ParamKind_To_admissionregistration_ParamKind is an autogenerated conversion function.
func Convert_v1beta1_ParamKind_To_admissionregistration_ParamKind(in *admissionregistrationv1beta1.ParamKind, out *admissionregistration.ParamKind, s conversion.Scope) error {
return autoConvert_v1beta1_ParamKind_To_admissionregistration_ParamKind(in, out, s)
}
func autoConvert_admissionregistration_ParamKind_To_v1beta1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1beta1.ParamKind, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
return nil
}
// Convert_admissionregistration_ParamKind_To_v1beta1_ParamKind is an autogenerated conversion function.
func Convert_admissionregistration_ParamKind_To_v1beta1_ParamKind(in *admissionregistration.ParamKind, out *admissionregistrationv1beta1.ParamKind, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamKind_To_v1beta1_ParamKind(in, out, s)
}
func autoConvert_v1beta1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1beta1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistration.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_v1beta1_ParamRef_To_admissionregistration_ParamRef is an autogenerated conversion function.
func Convert_v1beta1_ParamRef_To_admissionregistration_ParamRef(in *admissionregistrationv1beta1.ParamRef, out *admissionregistration.ParamRef, s conversion.Scope) error {
return autoConvert_v1beta1_ParamRef_To_admissionregistration_ParamRef(in, out, s)
}
func autoConvert_admissionregistration_ParamRef_To_v1beta1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1beta1.ParamRef, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ParameterNotFoundAction = (*admissionregistrationv1beta1.ParameterNotFoundActionType)(unsafe.Pointer(in.ParameterNotFoundAction))
return nil
}
// Convert_admissionregistration_ParamRef_To_v1beta1_ParamRef is an autogenerated conversion function.
func Convert_admissionregistration_ParamRef_To_v1beta1_ParamRef(in *admissionregistration.ParamRef, out *admissionregistrationv1beta1.ParamRef, s conversion.Scope) error {
return autoConvert_admissionregistration_ParamRef_To_v1beta1_ParamRef(in, out, s)
}
func autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *admissionregistrationv1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.Path = (*string)(unsafe.Pointer(in.Path))
if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference is an autogenerated conversion function.
func Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *admissionregistrationv1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in, out, s)
}
func autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *admissionregistrationv1beta1.ServiceReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.Path = (*string)(unsafe.Pointer(in.Path))
if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function.
func Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *admissionregistrationv1beta1.ServiceReference, s conversion.Scope) error {
return autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in, out, s)
}
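// ServiceReference is the one place in this file where the two versions
// disagree on field shape: Port is optional (*int32) in v1beta1 but a plain
// int32 internally. The metav1 helpers above bridge the gap: the
// pointer-to-value direction writes the zero value when the pointer is nil,
// and the value-to-pointer direction always allocates, so the field survives
// a round-trip. (Filling in the 443 default for an unset port is defaulting's
// job, not conversion's.)
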
func autoConvert_v1beta1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1beta1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistration.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_v1beta1_TypeChecking_To_admissionregistration_TypeChecking is an autogenerated conversion function.
func Convert_v1beta1_TypeChecking_To_admissionregistration_TypeChecking(in *admissionregistrationv1beta1.TypeChecking, out *admissionregistration.TypeChecking, s conversion.Scope) error {
return autoConvert_v1beta1_TypeChecking_To_admissionregistration_TypeChecking(in, out, s)
}
func autoConvert_admissionregistration_TypeChecking_To_v1beta1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1beta1.TypeChecking, s conversion.Scope) error {
out.ExpressionWarnings = *(*[]admissionregistrationv1beta1.ExpressionWarning)(unsafe.Pointer(&in.ExpressionWarnings))
return nil
}
// Convert_admissionregistration_TypeChecking_To_v1beta1_TypeChecking is an autogenerated conversion function.
func Convert_admissionregistration_TypeChecking_To_v1beta1_TypeChecking(in *admissionregistration.TypeChecking, out *admissionregistrationv1beta1.TypeChecking, s conversion.Scope) error {
return autoConvert_admissionregistration_TypeChecking_To_v1beta1_TypeChecking(in, out, s)
}
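// TypeChecking above uses the most aggressive generated form: the whole
// []ExpressionWarning slice is reinterpreted through a single unsafe.Pointer
// cast of its slice header, with no allocation and no per-element loop. That
// is only sound because the element types are identical in memory, and it
// means the converted slice aliases the input's backing array, so a mutation
// on one side is visible through the other.
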
func autoConvert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1beta1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in *admissionregistrationv1beta1.ValidatingAdmissionPolicy, out *admissionregistration.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1beta1.ValidatingAdmissionPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy(in *admissionregistration.ValidatingAdmissionPolicy, out *admissionregistrationv1beta1.ValidatingAdmissionPolicy, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, out *admissionregistration.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding(in *admissionregistration.ValidatingAdmissionPolicyBinding, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_v1beta1_ValidatingAdmissionPolicyBinding_To_admissionregistration_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, out *admissionregistration.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicyBindingList_To_admissionregistration_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1beta1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicyBinding_To_v1beta1_ValidatingAdmissionPolicyBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1beta1_ValidatingAdmissionPolicyBindingList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1beta1_ValidatingAdmissionPolicyBindingList(in *admissionregistration.ValidatingAdmissionPolicyBindingList, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingList_To_v1beta1_ValidatingAdmissionPolicyBindingList(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistration.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistration.MatchResources)
if err := Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistration.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec, out *admissionregistration.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicyBindingSpec_To_admissionregistration_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
out.PolicyName = in.PolicyName
out.ParamRef = (*admissionregistrationv1beta1.ParamRef)(unsafe.Pointer(in.ParamRef))
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(admissionregistrationv1beta1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchResources = nil
}
out.ValidationActions = *(*[]admissionregistrationv1beta1.ValidationAction)(unsafe.Pointer(&in.ValidationActions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec(in *admissionregistration.ValidatingAdmissionPolicyBindingSpec, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyBindingSpec_To_v1beta1_ValidatingAdmissionPolicyBindingSpec(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_v1beta1_ValidatingAdmissionPolicy_To_admissionregistration_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, out *admissionregistration.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicyList_To_admissionregistration_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1beta1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.ValidatingAdmissionPolicy, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingAdmissionPolicy_To_v1beta1_ValidatingAdmissionPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1beta1_ValidatingAdmissionPolicyList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyList_To_v1beta1_ValidatingAdmissionPolicyList(in *admissionregistration.ValidatingAdmissionPolicyList, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyList_To_v1beta1_ValidatingAdmissionPolicyList(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1beta1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistration.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistration.MatchResources)
if err := Convert_v1beta1_MatchResources_To_admissionregistration_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistration.Validation)(unsafe.Pointer(&in.Validations))
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistration.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.Variables = *(*[]admissionregistration.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in *admissionregistrationv1beta1.ValidatingAdmissionPolicySpec, out *admissionregistration.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicySpec_To_admissionregistration_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1beta1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
out.ParamKind = (*admissionregistrationv1beta1.ParamKind)(unsafe.Pointer(in.ParamKind))
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(admissionregistrationv1beta1.MatchResources)
if err := Convert_admissionregistration_MatchResources_To_v1beta1_MatchResources(*in, *out, s); err != nil {
return err
}
} else {
out.MatchConstraints = nil
}
out.Validations = *(*[]admissionregistrationv1beta1.Validation)(unsafe.Pointer(&in.Validations))
out.MatchConditions = *(*[]admissionregistrationv1beta1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
out.FailurePolicy = (*admissionregistrationv1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.AuditAnnotations = *(*[]admissionregistrationv1beta1.AuditAnnotation)(unsafe.Pointer(&in.AuditAnnotations))
out.Variables = *(*[]admissionregistrationv1beta1.Variable)(unsafe.Pointer(&in.Variables))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec(in *admissionregistration.ValidatingAdmissionPolicySpec, out *admissionregistrationv1beta1.ValidatingAdmissionPolicySpec, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicySpec_To_v1beta1_ValidatingAdmissionPolicySpec(in, out, s)
}
func autoConvert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistration.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus, out *admissionregistration.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingAdmissionPolicyStatus_To_admissionregistration_ValidatingAdmissionPolicyStatus(in, out, s)
}
func autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.TypeChecking = (*admissionregistrationv1beta1.TypeChecking)(unsafe.Pointer(in.TypeChecking))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus(in *admissionregistration.ValidatingAdmissionPolicyStatus, out *admissionregistrationv1beta1.ValidatingAdmissionPolicyStatus, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingAdmissionPolicyStatus_To_v1beta1_ValidatingAdmissionPolicyStatus(in, out, s)
}
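// Slice-typed fields use the same trick one level up: the address of the slice
// header itself is cast, as in *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)).
// The resulting slice shares the original backing array instead of copying it.
// Sketch with hypothetical layout-identical element types X and Y:
//
//	type X struct{ N int }
//	type Y struct{ N int }
//
//	xs := []X{{N: 1}}
//	ys := *(*[]Y)(unsafe.Pointer(&xs)) // ys aliases xs's backing array
//	ys[0].N = 2                        // visible through xs[0].N as well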
func autoConvert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in *admissionregistrationv1beta1.ValidatingWebhook, out *admissionregistration.ValidatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistration.RuleWithOperations, len(*in))
for i := range *in {
if err := apisadmissionregistrationv1.Convert_v1_RuleWithOperations_To_admissionregistration_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistration.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistration.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.MatchConditions = *(*[]admissionregistration.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook is an autogenerated conversion function.
func Convert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in *admissionregistrationv1beta1.ValidatingWebhook, out *admissionregistration.ValidatingWebhook, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook(in *admissionregistration.ValidatingWebhook, out *admissionregistrationv1beta1.ValidatingWebhook, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil {
return err
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
if err := apisadmissionregistrationv1.Convert_admissionregistration_RuleWithOperations_To_v1_RuleWithOperations(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
out.FailurePolicy = (*admissionregistrationv1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy))
out.MatchPolicy = (*admissionregistrationv1beta1.MatchPolicyType)(unsafe.Pointer(in.MatchPolicy))
out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.ObjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ObjectSelector))
out.SideEffects = (*admissionregistrationv1beta1.SideEffectClass)(unsafe.Pointer(in.SideEffects))
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
out.AdmissionReviewVersions = *(*[]string)(unsafe.Pointer(&in.AdmissionReviewVersions))
out.MatchConditions = *(*[]admissionregistrationv1beta1.MatchCondition)(unsafe.Pointer(&in.MatchConditions))
return nil
}
// Convert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook(in *admissionregistration.ValidatingWebhook, out *admissionregistrationv1beta1.ValidatingWebhook, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook(in, out, s)
}
func autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *admissionregistrationv1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistration.ValidatingWebhook, len(*in))
for i := range *in {
if err := Convert_v1beta1_ValidatingWebhook_To_admissionregistration_ValidatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration is an autogenerated conversion function.
func Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *admissionregistrationv1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *admissionregistrationv1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]admissionregistrationv1beta1.ValidatingWebhook, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingWebhook_To_v1beta1_ValidatingWebhook(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Webhooks = nil
}
return nil
}
// Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *admissionregistrationv1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in, out, s)
}
func autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistration.ValidatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in, out, s)
}
func autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]admissionregistrationv1beta1.ValidatingWebhookConfiguration, len(*in))
for i := range *in {
if err := Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList is an autogenerated conversion function.
func Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error {
return autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in, out, s)
}
func autoConvert_v1beta1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1beta1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*v1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_v1beta1_Validation_To_admissionregistration_Validation is an autogenerated conversion function.
func Convert_v1beta1_Validation_To_admissionregistration_Validation(in *admissionregistrationv1beta1.Validation, out *admissionregistration.Validation, s conversion.Scope) error {
return autoConvert_v1beta1_Validation_To_admissionregistration_Validation(in, out, s)
}
func autoConvert_admissionregistration_Validation_To_v1beta1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1beta1.Validation, s conversion.Scope) error {
out.Expression = in.Expression
out.Message = in.Message
out.Reason = (*v1.StatusReason)(unsafe.Pointer(in.Reason))
out.MessageExpression = in.MessageExpression
return nil
}
// Convert_admissionregistration_Validation_To_v1beta1_Validation is an autogenerated conversion function.
func Convert_admissionregistration_Validation_To_v1beta1_Validation(in *admissionregistration.Validation, out *admissionregistrationv1beta1.Validation, s conversion.Scope) error {
return autoConvert_admissionregistration_Validation_To_v1beta1_Validation(in, out, s)
}
func autoConvert_v1beta1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1beta1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_v1beta1_Variable_To_admissionregistration_Variable is an autogenerated conversion function.
func Convert_v1beta1_Variable_To_admissionregistration_Variable(in *admissionregistrationv1beta1.Variable, out *admissionregistration.Variable, s conversion.Scope) error {
return autoConvert_v1beta1_Variable_To_admissionregistration_Variable(in, out, s)
}
func autoConvert_admissionregistration_Variable_To_v1beta1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1beta1.Variable, s conversion.Scope) error {
out.Name = in.Name
out.Expression = in.Expression
return nil
}
// Convert_admissionregistration_Variable_To_v1beta1_Variable is an autogenerated conversion function.
func Convert_admissionregistration_Variable_To_v1beta1_Variable(in *admissionregistration.Variable, out *admissionregistrationv1beta1.Variable, s conversion.Scope) error {
return autoConvert_admissionregistration_Variable_To_v1beta1_Variable(in, out, s)
}
func autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *admissionregistrationv1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error {
out.URL = (*string)(unsafe.Pointer(in.URL))
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(admissionregistration.ServiceReference)
if err := Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(*in, *out, s); err != nil {
return err
}
} else {
out.Service = nil
}
out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
return nil
}
// Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig is an autogenerated conversion function.
func Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *admissionregistrationv1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error {
return autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in, out, s)
}
func autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *admissionregistrationv1beta1.WebhookClientConfig, s conversion.Scope) error {
out.URL = (*string)(unsafe.Pointer(in.URL))
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(admissionregistrationv1beta1.ServiceReference)
if err := Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil {
return err
}
} else {
out.Service = nil
}
out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
return nil
}
// Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function.
func Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *admissionregistrationv1beta1.WebhookClientConfig, s conversion.Scope) error {
return autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s)
}
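// The exported Convert_* wrappers in this file are normally looked up and invoked
// through a runtime.Scheme rather than called directly, but a direct call is an
// ordinary function call. The sketch below is illustrative only; it passes a nil
// conversion.Scope, which the functions here happen not to consult (real callers
// obtain a scope from the scheme):
//
//	func toInternal(in *admissionregistrationv1beta1.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) {
//		out := &admissionregistration.ValidatingWebhookConfiguration{}
//		if err := Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in, out, nil); err != nil {
//			return nil, err
//		}
//		return out, nil
//	}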
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/kubernetes/pkg/apis/admissionregistration/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingAdmissionPolicy{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicy(obj.(*admissionregistrationv1beta1.MutatingAdmissionPolicy))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingAdmissionPolicyBinding{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyBinding(obj.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBinding))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyBindingList(obj.(*admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingAdmissionPolicyList{}, func(obj interface{}) {
SetObjectDefaults_MutatingAdmissionPolicyList(obj.(*admissionregistrationv1beta1.MutatingAdmissionPolicyList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingWebhookConfiguration{}, func(obj interface{}) {
SetObjectDefaults_MutatingWebhookConfiguration(obj.(*admissionregistrationv1beta1.MutatingWebhookConfiguration))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.MutatingWebhookConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_MutatingWebhookConfigurationList(obj.(*admissionregistrationv1beta1.MutatingWebhookConfigurationList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicy(obj.(*admissionregistrationv1beta1.ValidatingAdmissionPolicy))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBinding(obj.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyBindingList(obj.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingAdmissionPolicyList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingAdmissionPolicyList(obj.(*admissionregistrationv1beta1.ValidatingAdmissionPolicyList))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, func(obj interface{}) {
SetObjectDefaults_ValidatingWebhookConfiguration(obj.(*admissionregistrationv1beta1.ValidatingWebhookConfiguration))
})
scheme.AddTypeDefaultingFunc(&admissionregistrationv1beta1.ValidatingWebhookConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_ValidatingWebhookConfigurationList(obj.(*admissionregistrationv1beta1.ValidatingWebhookConfigurationList))
})
return nil
}
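// A consumer registers these defaulters once and lets the scheme dispatch on the
// object's concrete type. Minimal sketch (illustrative, not part of the generated
// file):
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	cfg := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}
//	// Default looks up the function registered above by the object's concrete type.
//	scheme.Default(cfg)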
func SetObjectDefaults_MutatingAdmissionPolicy(in *admissionregistrationv1beta1.MutatingAdmissionPolicy) {
SetDefaults_MutatingAdmissionPolicySpec(&in.Spec)
if in.Spec.MatchConstraints != nil {
SetDefaults_MatchResources(in.Spec.MatchConstraints)
for i := range in.Spec.MatchConstraints.ResourceRules {
a := &in.Spec.MatchConstraints.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchConstraints.ExcludeResourceRules {
a := &in.Spec.MatchConstraints.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_MutatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding) {
if in.Spec.MatchResources != nil {
SetDefaults_MatchResources(in.Spec.MatchResources)
for i := range in.Spec.MatchResources.ResourceRules {
a := &in.Spec.MatchResources.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchResources.ExcludeResourceRules {
a := &in.Spec.MatchResources.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_MutatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingAdmissionPolicyBinding(a)
}
}
func SetObjectDefaults_MutatingAdmissionPolicyList(in *admissionregistrationv1beta1.MutatingAdmissionPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingAdmissionPolicy(a)
}
}
func SetObjectDefaults_MutatingWebhookConfiguration(in *admissionregistrationv1beta1.MutatingWebhookConfiguration) {
for i := range in.Webhooks {
a := &in.Webhooks[i]
SetDefaults_MutatingWebhook(a)
if a.ClientConfig.Service != nil {
SetDefaults_ServiceReference(a.ClientConfig.Service)
}
for j := range a.Rules {
b := &a.Rules[j]
v1.SetDefaults_Rule(&b.Rule)
}
}
}
func SetObjectDefaults_MutatingWebhookConfigurationList(in *admissionregistrationv1beta1.MutatingWebhookConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_MutatingWebhookConfiguration(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicy(in *admissionregistrationv1beta1.ValidatingAdmissionPolicy) {
SetDefaults_ValidatingAdmissionPolicySpec(&in.Spec)
if in.Spec.MatchConstraints != nil {
SetDefaults_MatchResources(in.Spec.MatchConstraints)
for i := range in.Spec.MatchConstraints.ResourceRules {
a := &in.Spec.MatchConstraints.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchConstraints.ExcludeResourceRules {
a := &in.Spec.MatchConstraints.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBinding(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding) {
if in.Spec.MatchResources != nil {
SetDefaults_MatchResources(in.Spec.MatchResources)
for i := range in.Spec.MatchResources.ResourceRules {
a := &in.Spec.MatchResources.ResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
for i := range in.Spec.MatchResources.ExcludeResourceRules {
a := &in.Spec.MatchResources.ExcludeResourceRules[i]
v1.SetDefaults_Rule(&a.RuleWithOperations.Rule)
}
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyBindingList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicyBinding(a)
}
}
func SetObjectDefaults_ValidatingAdmissionPolicyList(in *admissionregistrationv1beta1.ValidatingAdmissionPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingAdmissionPolicy(a)
}
}
func SetObjectDefaults_ValidatingWebhookConfiguration(in *admissionregistrationv1beta1.ValidatingWebhookConfiguration) {
for i := range in.Webhooks {
a := &in.Webhooks[i]
SetDefaults_ValidatingWebhook(a)
if a.ClientConfig.Service != nil {
SetDefaults_ServiceReference(a.ClientConfig.Service)
}
for j := range a.Rules {
b := &a.Rules[j]
v1.SetDefaults_Rule(&b.Rule)
}
}
}
func SetObjectDefaults_ValidatingWebhookConfigurationList(in *admissionregistrationv1beta1.ValidatingWebhookConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ValidatingWebhookConfiguration(a)
}
}
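// The SetObjectDefaults_* functions can also be called directly when no scheme is
// involved, e.g. on a freshly decoded object; each one cascades into every nested
// webhook, client-config service, and rule, as the bodies above show:
//
//	list := &admissionregistrationv1beta1.ValidatingWebhookConfigurationList{}
//	SetObjectDefaults_ValidatingWebhookConfigurationList(list) // defaults every item in place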
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package admissionregistration
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
if in == nil {
return nil
}
out := new(ApplyConfiguration)
in.DeepCopyInto(out)
return out
}
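// For a struct like ApplyConfiguration that contains no pointers, slices, or maps,
// the plain assignment *out = *in already copies every field, so DeepCopyInto needs
// no further work; the DeepCopy wrapper just allocates the destination. Illustrative
// use:
//
//	a := &ApplyConfiguration{Expression: "object"}
//	b := a.DeepCopy() // b is an independent copy; mutating b never affects a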
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation.
func (in *AuditAnnotation) DeepCopy() *AuditAnnotation {
if in == nil {
return nil
}
out := new(AuditAnnotation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning.
func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
if in == nil {
return nil
}
out := new(ExpressionWarning)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
func (in *JSONPatch) DeepCopy() *JSONPatch {
if in == nil {
return nil
}
out := new(JSONPatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition.
func (in *MatchCondition) DeepCopy() *MatchCondition {
if in == nil {
return nil
}
out := new(MatchCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchResources) DeepCopyInto(out *MatchResources) {
*out = *in
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ObjectSelector != nil {
in, out := &in.ObjectSelector, &out.ObjectSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]NamedRuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExcludeResourceRules != nil {
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
*out = make([]NamedRuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MatchPolicy != nil {
in, out := &in.MatchPolicy, &out.MatchPolicy
*out = new(MatchPolicyType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources.
func (in *MatchResources) DeepCopy() *MatchResources {
if in == nil {
return nil
}
out := new(MatchResources)
in.DeepCopyInto(out)
return out
}
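// MatchResources shows the other half of the pattern: every pointer field is
// re-allocated and every slice gets a fresh backing array with element-wise
// DeepCopyInto calls, so the copy shares no mutable state with the original:
//
//	orig := &MatchResources{ResourceRules: make([]NamedRuleWithOperations, 1)}
//	cp := orig.DeepCopy()
//	cp.ResourceRules[0].ResourceNames = []string{"example"} // orig is untouched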
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
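// DeepCopyObject adapts DeepCopy to the runtime.Object interface, which is what
// lets schemes, caches, and informers clone objects without knowing concrete types:
//
//	var obj runtime.Object = &MutatingAdmissionPolicy{}
//	clone := obj.DeepCopyObject() // still a *MutatingAdmissionPolicy behind the interface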
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MutatingAdmissionPolicyBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
*out = *in
if in.ParamRef != nil {
in, out := &in.ParamRef, &out.ParamRef
*out = new(ParamRef)
(*in).DeepCopyInto(*out)
}
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyBindingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MutatingAdmissionPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
*out = *in
if in.ParamKind != nil {
in, out := &in.ParamKind, &out.ParamKind
*out = new(ParamKind)
**out = **in
}
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
if in.Variables != nil {
in, out := &in.Variables, &out.Variables
*out = make([]Variable, len(*in))
copy(*out, *in)
}
if in.Mutations != nil {
in, out := &in.Mutations, &out.Mutations
*out = make([]Mutation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FailurePolicy != nil {
in, out := &in.FailurePolicy, &out.FailurePolicy
*out = new(FailurePolicyType)
**out = **in
}
if in.MatchConditions != nil {
in, out := &in.MatchConditions, &out.MatchConditions
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
if in == nil {
return nil
}
out := new(MutatingAdmissionPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
*out = *in
in.ClientConfig.DeepCopyInto(&out.ClientConfig)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]RuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FailurePolicy != nil {
in, out := &in.FailurePolicy, &out.FailurePolicy
*out = new(FailurePolicyType)
**out = **in
}
if in.MatchPolicy != nil {
in, out := &in.MatchPolicy, &out.MatchPolicy
*out = new(MatchPolicyType)
**out = **in
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ObjectSelector != nil {
in, out := &in.ObjectSelector, &out.ObjectSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.SideEffects != nil {
in, out := &in.SideEffects, &out.SideEffects
*out = new(SideEffectClass)
**out = **in
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
if in.AdmissionReviewVersions != nil {
in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ReinvocationPolicy != nil {
in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
*out = new(ReinvocationPolicyType)
**out = **in
}
if in.MatchConditions != nil {
in, out := &in.MatchConditions, &out.MatchConditions
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook.
func (in *MutatingWebhook) DeepCopy() *MutatingWebhook {
if in == nil {
return nil
}
out := new(MutatingWebhook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]MutatingWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration.
func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration {
if in == nil {
return nil
}
out := new(MutatingWebhookConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MutatingWebhookConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList.
func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList {
if in == nil {
return nil
}
out := new(MutatingWebhookConfigurationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mutation) DeepCopyInto(out *Mutation) {
*out = *in
if in.ApplyConfiguration != nil {
in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
*out = new(ApplyConfiguration)
**out = **in
}
if in.JSONPatch != nil {
in, out := &in.JSONPatch, &out.JSONPatch
*out = new(JSONPatch)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
func (in *Mutation) DeepCopy() *Mutation {
if in == nil {
return nil
}
out := new(Mutation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
*out = *in
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations.
func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations {
if in == nil {
return nil
}
out := new(NamedRuleWithOperations)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamKind) DeepCopyInto(out *ParamKind) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind.
func (in *ParamKind) DeepCopy() *ParamKind {
if in == nil {
return nil
}
out := new(ParamKind)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamRef) DeepCopyInto(out *ParamRef) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ParameterNotFoundAction != nil {
in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction
*out = new(ParameterNotFoundActionType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef.
func (in *ParamRef) DeepCopy() *ParamRef {
if in == nil {
return nil
}
out := new(ParamRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rule) DeepCopyInto(out *Rule) {
*out = *in
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIVersions != nil {
in, out := &in.APIVersions, &out.APIVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Scope != nil {
in, out := &in.Scope, &out.Scope
*out = new(ScopeType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
func (in *Rule) DeepCopy() *Rule {
if in == nil {
return nil
}
out := new(Rule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) {
*out = *in
if in.Operations != nil {
in, out := &in.Operations, &out.Operations
*out = make([]OperationType, len(*in))
copy(*out, *in)
}
in.Rule.DeepCopyInto(&out.Rule)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations.
func (in *RuleWithOperations) DeepCopy() *RuleWithOperations {
if in == nil {
return nil
}
out := new(RuleWithOperations)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
*out = *in
if in.Path != nil {
in, out := &in.Path, &out.Path
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
func (in *ServiceReference) DeepCopy() *ServiceReference {
if in == nil {
return nil
}
out := new(ServiceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypeChecking) DeepCopyInto(out *TypeChecking) {
*out = *in
if in.ExpressionWarnings != nil {
in, out := &in.ExpressionWarnings, &out.ExpressionWarnings
*out = make([]ExpressionWarning, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking.
func (in *TypeChecking) DeepCopy() *TypeChecking {
if in == nil {
return nil
}
out := new(TypeChecking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy.
func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding.
func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ValidatingAdmissionPolicyBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList.
func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) {
*out = *in
if in.ParamRef != nil {
in, out := &in.ParamRef, &out.ParamRef
*out = new(ParamRef)
(*in).DeepCopyInto(*out)
}
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
if in.ValidationActions != nil {
in, out := &in.ValidationActions, &out.ValidationActions
*out = make([]ValidationAction, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec.
func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyBindingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ValidatingAdmissionPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList.
func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) {
*out = *in
if in.ParamKind != nil {
in, out := &in.ParamKind, &out.ParamKind
*out = new(ParamKind)
**out = **in
}
if in.MatchConstraints != nil {
in, out := &in.MatchConstraints, &out.MatchConstraints
*out = new(MatchResources)
(*in).DeepCopyInto(*out)
}
if in.Validations != nil {
in, out := &in.Validations, &out.Validations
*out = make([]Validation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MatchConditions != nil {
in, out := &in.MatchConditions, &out.MatchConditions
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
if in.FailurePolicy != nil {
in, out := &in.FailurePolicy, &out.FailurePolicy
*out = new(FailurePolicyType)
**out = **in
}
if in.AuditAnnotations != nil {
in, out := &in.AuditAnnotations, &out.AuditAnnotations
*out = make([]AuditAnnotation, len(*in))
copy(*out, *in)
}
if in.Variables != nil {
in, out := &in.Variables, &out.Variables
*out = make([]Variable, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec.
func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) {
*out = *in
if in.TypeChecking != nil {
in, out := &in.TypeChecking, &out.TypeChecking
*out = new(TypeChecking)
(*in).DeepCopyInto(*out)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus.
func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) {
*out = *in
in.ClientConfig.DeepCopyInto(&out.ClientConfig)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]RuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FailurePolicy != nil {
in, out := &in.FailurePolicy, &out.FailurePolicy
*out = new(FailurePolicyType)
**out = **in
}
if in.MatchPolicy != nil {
in, out := &in.MatchPolicy, &out.MatchPolicy
*out = new(MatchPolicyType)
**out = **in
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ObjectSelector != nil {
in, out := &in.ObjectSelector, &out.ObjectSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.SideEffects != nil {
in, out := &in.SideEffects, &out.SideEffects
*out = new(SideEffectClass)
**out = **in
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
if in.AdmissionReviewVersions != nil {
in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MatchConditions != nil {
in, out := &in.MatchConditions, &out.MatchConditions
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook.
func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook {
if in == nil {
return nil
}
out := new(ValidatingWebhook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]ValidatingWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration.
func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration {
if in == nil {
return nil
}
out := new(ValidatingWebhookConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ValidatingWebhookConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList.
func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList {
if in == nil {
return nil
}
out := new(ValidatingWebhookConfigurationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Validation) DeepCopyInto(out *Validation) {
*out = *in
if in.Reason != nil {
in, out := &in.Reason, &out.Reason
*out = new(v1.StatusReason)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation.
func (in *Validation) DeepCopy() *Validation {
if in == nil {
return nil
}
out := new(Validation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Variable) DeepCopyInto(out *Variable) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable.
func (in *Variable) DeepCopy() *Variable {
if in == nil {
return nil
}
out := new(Variable)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
*out = *in
if in.URL != nil {
in, out := &in.URL, &out.URL
*out = new(string)
**out = **in
}
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(ServiceReference)
(*in).DeepCopyInto(*out)
}
if in.CABundle != nil {
in, out := &in.CABundle, &out.CABundle
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
if in == nil {
return nil
}
out := new(WebhookClientConfig)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserverinternal
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "internal.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&StorageVersion{},
&StorageVersionList{},
)
return nil
}
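// exampleRegister is an illustrative sketch, not part of the upstream file:
// it registers the internal storage-version types into a caller-owned scheme
// via the SchemeBuilder above.
func exampleRegister() error {
	scheme := runtime.NewScheme()
	return AddToScheme(scheme)
}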
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/sets"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apiserverinternal"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
)
// ValidateStorageVersion validates the storage version object.
func ValidateStorageVersion(sv *apiserverinternal.StorageVersion) field.ErrorList {
var allErrs field.ErrorList
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&sv.ObjectMeta, false, ValidateStorageVersionName, field.NewPath("metadata"))...)
allErrs = append(allErrs, validateStorageVersionStatus(sv.Status, field.NewPath("status"))...)
return allErrs
}
// ValidateStorageVersionName is a ValidateNameFunc for storage version names
func ValidateStorageVersionName(name string, prefix bool) []string {
var allErrs []string
idx := strings.LastIndex(name, ".")
if idx < 0 {
allErrs = append(allErrs, "name must be in the form of <group>.<resource>")
} else {
for _, msg := range utilvalidation.IsDNS1123Subdomain(name[:idx]) {
allErrs = append(allErrs, "the group segment "+msg)
}
for _, msg := range utilvalidation.IsDNS1035Label(name[idx+1:]) {
allErrs = append(allErrs, "the resource segment "+msg)
}
}
return allErrs
}
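// exampleStorageVersionNames is an illustrative sketch, not part of the
// upstream file: a storage version name pairs a DNS-1123 subdomain group
// with a DNS-1035 resource label, joined by the final ".".
func exampleStorageVersionNames() {
	fmt.Println(ValidateStorageVersionName("apps.deployments", false)) // valid: no messages
	fmt.Println(ValidateStorageVersionName("deployments", false))      // invalid: missing "<group>." prefix
}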
// ValidateStorageVersionUpdate tests if an update to a StorageVersion is valid.
func ValidateStorageVersionUpdate(sv, oldSV *apiserverinternal.StorageVersion) field.ErrorList {
// no error since StorageVersionSpec is an empty spec
return field.ErrorList{}
}
// ValidateStorageVersionStatusUpdate tests if an update to a StorageVersionStatus is valid.
func ValidateStorageVersionStatusUpdate(sv, oldSV *apiserverinternal.StorageVersion) field.ErrorList {
var allErrs field.ErrorList
allErrs = append(allErrs, validateStorageVersionStatus(sv.Status, field.NewPath("status"))...)
return allErrs
}
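// validateStorageVersionStatus checks each reported server storage version,
// rejects duplicate apiServerIDs, and validates the common encoding version
// and the status conditions.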
func validateStorageVersionStatus(ss apiserverinternal.StorageVersionStatus, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
allAPIServerIDs := sets.New[string]()
for i, ssv := range ss.StorageVersions {
if allAPIServerIDs.Has(ssv.APIServerID) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("storageVersions").Index(i).Child("apiServerID"), ssv.APIServerID))
} else {
allAPIServerIDs.Insert(ssv.APIServerID)
}
allErrs = append(allErrs, validateServerStorageVersion(ssv, fldPath.Child("storageVersions").Index(i))...)
}
if err := validateCommonVersion(ss, fldPath); err != nil {
allErrs = append(allErrs, err)
}
allErrs = append(allErrs, validateStorageVersionCondition(ss.Conditions, fldPath)...)
return allErrs
}
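// validateServerStorageVersion validates a single server's entry: the
// apiServerID must be a DNS subdomain, every version must be a legal
// apiVersion, and both the encodingVersion and every servedVersion must
// appear in decodableVersions.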
func validateServerStorageVersion(ssv apiserverinternal.ServerStorageVersion, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range apimachineryvalidation.NameIsDNSSubdomain(ssv.APIServerID, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiServerID"), ssv.APIServerID, msg))
}
if errs := isValidAPIVersion(ssv.EncodingVersion); len(errs) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("encodingVersion"), ssv.EncodingVersion, strings.Join(errs, ",")))
}
found := false
for i, dv := range ssv.DecodableVersions {
if errs := isValidAPIVersion(dv); len(errs) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("decodableVersions").Index(i), dv, strings.Join(errs, ",")))
}
if dv == ssv.EncodingVersion {
found = true
}
}
if !found {
allErrs = append(allErrs, field.Invalid(fldPath.Child("decodableVersions"), ssv.DecodableVersions, fmt.Sprintf("decodableVersions must include encodingVersion %s", ssv.EncodingVersion)))
}
for i, sv := range ssv.ServedVersions {
if errs := isValidAPIVersion(sv); len(errs) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("servedVersions").Index(i), sv, strings.Join(errs, ",")))
}
foundDecodableVersion := false
for _, dv := range ssv.DecodableVersions {
if sv == dv {
foundDecodableVersion = true
break
}
}
if !foundDecodableVersion {
allErrs = append(allErrs, field.Invalid(fldPath.Child("servedVersions").Index(i), sv, fmt.Sprintf("individual served version : %s must be included in decodableVersions : %s", sv, ssv.DecodableVersions)))
}
}
return allErrs
}
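// commonVersion returns the encoding version shared by all server storage
// versions, or nil if the list is empty or the servers disagree.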
func commonVersion(ssv []apiserverinternal.ServerStorageVersion) *string {
if len(ssv) == 0 {
return nil
}
commonVersion := ssv[0].EncodingVersion
for _, v := range ssv[1:] {
if v.EncodingVersion != commonVersion {
return nil
}
}
return &commonVersion
}
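// validateCommonVersion requires status.commonEncodingVersion to be set
// exactly when all servers agree on one encoding version, and to match that
// agreed version.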
func validateCommonVersion(svs apiserverinternal.StorageVersionStatus, fldPath *field.Path) *field.Error {
actualCommonVersion := commonVersion(svs.StorageVersions)
if actualCommonVersion == nil && svs.CommonEncodingVersion == nil {
return nil
}
if actualCommonVersion == nil && svs.CommonEncodingVersion != nil {
return field.Invalid(fldPath.Child("commonEncodingVersion"), *svs.CommonEncodingVersion, "should be nil if servers do not agree on the same encoding version, or if there is no server reporting the supported versions yet")
}
if actualCommonVersion != nil && svs.CommonEncodingVersion == nil {
return field.Invalid(fldPath.Child("commonEncodingVersion"), svs.CommonEncodingVersion, fmt.Sprintf("the common encoding version is %s", *actualCommonVersion))
}
if actualCommonVersion != nil && svs.CommonEncodingVersion != nil && *actualCommonVersion != *svs.CommonEncodingVersion {
return field.Invalid(fldPath.Child("commonEncodingVersion"), *svs.CommonEncodingVersion, fmt.Sprintf("the actual common encoding version is %s", *actualCommonVersion))
}
return nil
}
func validateStorageVersionCondition(conditions []apiserverinternal.StorageVersionCondition, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// We do not verify that the condition type or the condition status is
// a predefined one, because more types or statuses may be added later.
seenType := make(map[apiserverinternal.StorageVersionConditionType]int)
for i, condition := range conditions {
if ii, ok := seenType[condition.Type]; ok {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("type"), string(condition.Type),
fmt.Sprintf("the type of the condition is not unique, it also appears in conditions[%d]", ii)))
}
seenType[condition.Type] = i
allErrs = append(allErrs, apivalidation.ValidateQualifiedName(string(condition.Type), fldPath.Index(i).Child("type"))...)
allErrs = append(allErrs, apivalidation.ValidateQualifiedName(string(condition.Status), fldPath.Index(i).Child("status"))...)
if condition.Reason == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("reason"), ""))
}
if condition.Message == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("message"), ""))
}
}
return allErrs
}
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label, which must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
// isValidAPIVersion tests whether the value passed is a valid apiVersion. A
// valid apiVersion contains a version string that matches DNS_LABEL format,
// with an optional group/ prefix, where the group string matches DNS_SUBDOMAIN
// format. If the value is not valid, a list of error strings is returned.
// Otherwise an empty list (or nil) is returned.
func isValidAPIVersion(apiVersion string) []string {
var errs []string
parts := strings.Split(apiVersion, "/")
var version string
switch len(parts) {
case 1:
version = parts[0]
case 2:
var group string
group, version = parts[0], parts[1]
if len(group) == 0 {
errs = append(errs, "group part: "+utilvalidation.EmptyError())
} else if msgs := utilvalidation.IsDNS1123Subdomain(group); len(msgs) != 0 {
errs = append(errs, prefixEach(msgs, "group part: ")...)
}
default:
return append(errs, "an apiVersion is "+utilvalidation.RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")+
" with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyVersion')")
}
if len(version) == 0 {
errs = append(errs, "version part: "+utilvalidation.EmptyError())
} else if msgs := utilvalidation.IsDNS1035Label(version); len(msgs) != 0 {
errs = append(errs, prefixEach(msgs, "version part: ")...)
}
return errs
}
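// exampleAPIVersions is an illustrative sketch, not part of the upstream
// file: an apiVersion is either "<version>" or "<group>/<version>".
func exampleAPIVersions() {
	fmt.Println(isValidAPIVersion("v1"))            // valid: bare version
	fmt.Println(isValidAPIVersion("apps/v1"))       // valid: group/version
	fmt.Println(isValidAPIVersion("apps/v1/extra")) // invalid: too many segments
}

// prefixEach prepends prefix to every message in msgs, mutating msgs in place.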
func prefixEach(msgs []string, prefix string) []string {
for i := range msgs {
msgs[i] = prefix + msgs[i]
}
return msgs
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package apiserverinternal
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) {
*out = *in
if in.DecodableVersions != nil {
in, out := &in.DecodableVersions, &out.DecodableVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServedVersions != nil {
in, out := &in.ServedVersions, &out.ServedVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStorageVersion.
func (in *ServerStorageVersion) DeepCopy() *ServerStorageVersion {
if in == nil {
return nil
}
out := new(ServerStorageVersion)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersion) DeepCopyInto(out *StorageVersion) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersion.
func (in *StorageVersion) DeepCopy() *StorageVersion {
if in == nil {
return nil
}
out := new(StorageVersion)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageVersion) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersionCondition) DeepCopyInto(out *StorageVersionCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionCondition.
func (in *StorageVersionCondition) DeepCopy() *StorageVersionCondition {
if in == nil {
return nil
}
out := new(StorageVersionCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersionList) DeepCopyInto(out *StorageVersionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StorageVersion, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionList.
func (in *StorageVersionList) DeepCopy() *StorageVersionList {
if in == nil {
return nil
}
out := new(StorageVersionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageVersionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersionSpec) DeepCopyInto(out *StorageVersionSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionSpec.
func (in *StorageVersionSpec) DeepCopy() *StorageVersionSpec {
if in == nil {
return nil
}
out := new(StorageVersionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersionStatus) DeepCopyInto(out *StorageVersionStatus) {
*out = *in
if in.StorageVersions != nil {
in, out := &in.StorageVersions, &out.StorageVersions
*out = make([]ServerStorageVersion, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CommonEncodingVersion != nil {
in, out := &in.CommonEncodingVersion, &out.CommonEncodingVersion
*out = new(string)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]StorageVersionCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionStatus.
func (in *StorageVersionStatus) DeepCopy() *StorageVersionStatus {
if in == nil {
return nil
}
out := new(StorageVersionStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"sigs.k8s.io/randfill"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/apps"
)
// Funcs returns the fuzzer functions for the apps API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(r *apps.ControllerRevision, c randfill.Continue) {
c.FillNoCustom(r)
// match the fuzzer default content for runtime.Object
r.Data = runtime.RawExtension{Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`)}
},
func(s *apps.StatefulSet, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
// match defaulter
if len(s.Spec.PodManagementPolicy) == 0 {
s.Spec.PodManagementPolicy = apps.OrderedReadyPodManagement
}
if len(s.Spec.UpdateStrategy.Type) == 0 {
s.Spec.UpdateStrategy.Type = apps.RollingUpdateStatefulSetStrategyType
}
if s.Spec.PersistentVolumeClaimRetentionPolicy == nil {
s.Spec.PersistentVolumeClaimRetentionPolicy = &apps.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(s.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
s.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = apps.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(s.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
s.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = apps.RetainPersistentVolumeClaimRetentionPolicyType
}
if s.Spec.RevisionHistoryLimit == nil {
s.Spec.RevisionHistoryLimit = new(int32)
*s.Spec.RevisionHistoryLimit = 10
}
if s.Status.ObservedGeneration == nil {
s.Status.ObservedGeneration = new(int64)
}
if s.Status.CollisionCount == nil {
s.Status.CollisionCount = new(int32)
}
if s.Spec.Selector == nil {
s.Spec.Selector = &metav1.LabelSelector{MatchLabels: s.Spec.Template.Labels}
}
if len(s.Labels) == 0 {
s.Labels = s.Spec.Template.Labels
}
},
func(j *apps.Deployment, c randfill.Continue) {
c.FillNoCustom(j)
// match defaulting
if j.Spec.Selector == nil {
j.Spec.Selector = &metav1.LabelSelector{MatchLabels: j.Spec.Template.Labels}
}
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
},
func(j *apps.DeploymentSpec, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
rhl := int32(c.Rand.Int31())
pds := int32(c.Rand.Int31())
j.RevisionHistoryLimit = &rhl
j.ProgressDeadlineSeconds = &pds
},
func(j *apps.DeploymentStrategy, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values.
strategyTypes := []apps.DeploymentStrategyType{apps.RecreateDeploymentStrategyType, apps.RollingUpdateDeploymentStrategyType}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != apps.RollingUpdateDeploymentStrategyType {
j.RollingUpdate = nil
} else {
rollingUpdate := apps.RollingUpdateDeployment{}
if c.Bool() {
rollingUpdate.MaxUnavailable = intstr.FromInt32(c.Rand.Int31())
rollingUpdate.MaxSurge = intstr.FromInt32(c.Rand.Int31())
} else {
rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.Rand.Int31()))
}
j.RollingUpdate = &rollingUpdate
}
},
func(j *apps.DaemonSet, c randfill.Continue) {
c.FillNoCustom(j)
// match defaulter
j.Spec.Template.Generation = 0
if len(j.ObjectMeta.Labels) == 0 {
j.ObjectMeta.Labels = j.Spec.Template.ObjectMeta.Labels
}
},
func(j *apps.DaemonSetSpec, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
rhl := int32(c.Rand.Int31())
j.RevisionHistoryLimit = &rhl
},
func(j *apps.DaemonSetUpdateStrategy, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values.
strategyTypes := []apps.DaemonSetUpdateStrategyType{apps.RollingUpdateDaemonSetStrategyType, apps.OnDeleteDaemonSetStrategyType}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != apps.RollingUpdateDaemonSetStrategyType {
j.RollingUpdate = nil
} else {
rollingUpdate := apps.RollingUpdateDaemonSet{}
if c.Bool() {
if c.Bool() {
rollingUpdate.MaxUnavailable = intstr.FromInt32(c.Rand.Int31())
rollingUpdate.MaxSurge = intstr.FromInt32(c.Rand.Int31())
} else {
rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.Rand.Int31()))
}
}
j.RollingUpdate = &rollingUpdate
}
},
func(j *apps.ReplicaSet, c randfill.Continue) {
c.FillNoCustom(j)
// match defaulter
if j.Spec.Selector == nil {
j.Spec.Selector = &metav1.LabelSelector{MatchLabels: j.Spec.Template.Labels}
}
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
},
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the apps API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/apps/v1"
"k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/apis/apps/v1beta2"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(apps.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1beta2.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta2.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
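// exampleInstall is an illustrative sketch, not part of the upstream file:
// callers that do not want the package's init side effect on the global
// legacyscheme can install the apps group into their own scheme.
func exampleInstall() *runtime.Scheme {
	scheme := runtime.NewScheme()
	Install(scheme)
	return scheme
}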
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apps
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/autoscaling"
)
var (
// SchemeBuilder stores the functions that add this package's known types to a scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all stored functions to a scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
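// exampleQualification is an illustrative sketch, not part of the upstream
// file: Kind and Resource qualify unqualified names with the "apps" group.
func exampleQualification() {
	_ = Kind("Deployment")      // schema.GroupKind{Group: "apps", Kind: "Deployment"}
	_ = Resource("deployments") // schema.GroupResource{Group: "apps", Resource: "deployments"}
}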
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO: this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DaemonSet{},
&DaemonSetList{},
&Deployment{},
&DeploymentList{},
&DeploymentRollback{},
&autoscaling.Scale{},
&StatefulSet{},
&StatefulSetList{},
&ControllerRevision{},
&ControllerRevisionList{},
&ReplicaSet{},
&ReplicaSetList{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"strconv"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
)
// Convert_apps_DeploymentSpec_To_v1_DeploymentSpec is defined here because the public
// conversion is not auto-generated due to existing warnings.
func Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error {
if err := autoConvert_apps_DeploymentSpec_To_v1_DeploymentSpec(in, out, s); err != nil {
return err
}
return nil
}
func Convert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.Deployment, s conversion.Scope) error {
if err := autoConvert_v1_Deployment_To_apps_Deployment(in, out, s); err != nil {
return err
}
// Copy annotation to deprecated rollbackTo field for roundtrip
// TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment
if revision := in.Annotations[appsv1.DeprecatedRollbackTo]; revision != "" {
if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil {
return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1.DeprecatedRollbackTo, revision, err)
} else {
out.Spec.RollbackTo = new(apps.RollbackConfig)
out.Spec.RollbackTo.Revision = revision64
}
out.Annotations = deepCopyStringMap(out.Annotations)
delete(out.Annotations, appsv1.DeprecatedRollbackTo)
} else {
out.Spec.RollbackTo = nil
}
return nil
}
func Convert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *appsv1.Deployment, s conversion.Scope) error {
if err := autoConvert_apps_Deployment_To_v1_Deployment(in, out, s); err != nil {
return err
}
out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify it below
// Copy deprecated rollbackTo field to annotation for roundtrip
// TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment
if in.Spec.RollbackTo != nil {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
out.Annotations[appsv1.DeprecatedRollbackTo] = strconv.FormatInt(in.Spec.RollbackTo.Revision, 10)
} else {
delete(out.Annotations, appsv1.DeprecatedRollbackTo)
}
return nil
}
func Convert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error {
if err := autoConvert_apps_DaemonSet_To_v1_DaemonSet(in, out, s); err != nil {
return err
}
out.Annotations = deepCopyStringMap(out.Annotations) // deep copy annotations because we change them below
out.Annotations[appsv1.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10)
return nil
}
// Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec is defined here because the public
// conversion is not auto-generated due to existing warnings.
func Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in, out, s); err != nil {
return err
}
return nil
}
func Convert_v1_DaemonSet_To_apps_DaemonSet(in *appsv1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
if err := autoConvert_v1_DaemonSet_To_apps_DaemonSet(in, out, s); err != nil {
return err
}
if value, ok := in.Annotations[appsv1.DeprecatedTemplateGeneration]; ok {
if value64, err := strconv.ParseInt(value, 10, 64); err != nil {
return err
} else {
out.Spec.TemplateGeneration = value64
out.Annotations = deepCopyStringMap(out.Annotations)
delete(out.Annotations, appsv1.DeprecatedTemplateGeneration)
}
}
return nil
}
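// deepCopyStringMap returns a fresh copy of m so callers can add or delete
// annotations without mutating the input map.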
func deepCopyStringMap(m map[string]string) map[string]string {
ret := make(map[string]string, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
// Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve the
// < 1.17 behavior of clearing apiVersion/kind in nested persistent volume claim objects.
func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec augments auto-conversion to preserve the
// < 1.17 behavior of setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}
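// Illustrative round-trip sketch, not part of the upstream file (it assumes
// a nil conversion.Scope is acceptable, since these generated conversions do
// not dereference it):
//
//	in := apps.StatefulSetSpec{VolumeClaimTemplates: []core.PersistentVolumeClaim{{}}}
//	var out appsv1.StatefulSetSpec
//	_ = Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(&in, &out, nil)
//	// out.VolumeClaimTemplates[0].APIVersion == "v1"
//	// out.VolumeClaimTemplates[0].Kind == "PersistentVolumeClaim"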
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_Deployment sets additional defaults compared to its counterpart
// in extensions. The additional defaults are:
// - MaxUnavailable during rolling update set to 25% (1 in extensions)
// - MaxSurge during rolling update set to 25% (1 in extensions)
// - RevisionHistoryLimit set to 10 (not set in extensions)
// - ProgressDeadlineSeconds set to 600s (not set in extensions)
func SetDefaults_Deployment(obj *appsv1.Deployment) {
// Set DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Set default DeploymentStrategyType as RollingUpdate.
if strategy.Type == "" {
strategy.Type = appsv1.RollingUpdateDeploymentStrategyType
}
if strategy.Type == appsv1.RollingUpdateDeploymentStrategyType {
if strategy.RollingUpdate == nil {
rollingUpdate := appsv1.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 25%.
maxUnavailable := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 25%.
maxSurge := intstr.FromString("25%")
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
if obj.Spec.ProgressDeadlineSeconds == nil {
obj.Spec.ProgressDeadlineSeconds = new(int32)
*obj.Spec.ProgressDeadlineSeconds = 600
}
}
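// exampleDeploymentDefaults is an illustrative sketch, not part of the
// upstream file: defaulting a zero-value Deployment.
func exampleDeploymentDefaults() {
	d := &appsv1.Deployment{}
	SetDefaults_Deployment(d)
	// Now *d.Spec.Replicas == 1, the strategy is RollingUpdate with 25%
	// maxUnavailable and 25% maxSurge, *d.Spec.RevisionHistoryLimit == 10,
	// and *d.Spec.ProgressDeadlineSeconds == 600.
}

// SetDefaults_DaemonSet defaults the update strategy to RollingUpdate with
// maxUnavailable 1 and maxSurge 0, and revisionHistoryLimit to 10.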
func SetDefaults_DaemonSet(obj *appsv1.DaemonSet) {
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = appsv1.RollingUpdateDaemonSetStrategyType
}
if updateStrategy.Type == appsv1.RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := appsv1.RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
updateStrategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(1))
}
if updateStrategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 0.
updateStrategy.RollingUpdate.MaxSurge = ptr.To(intstr.FromInt32(0))
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
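// SetDefaults_StatefulSet defaults pod management to OrderedReady, the update
// strategy to RollingUpdate with partition 0, the PVC retention policy to
// Retain, replicas to 1, and revisionHistoryLimit to 10.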
func SetDefaults_StatefulSet(obj *appsv1.StatefulSet) {
if len(obj.Spec.PodManagementPolicy) == 0 {
obj.Spec.PodManagementPolicy = appsv1.OrderedReadyPodManagement
}
if obj.Spec.UpdateStrategy.Type == "" {
obj.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType
if obj.Spec.UpdateStrategy.RollingUpdate == nil {
// UpdateStrategy.RollingUpdate will take default values below.
obj.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{}
}
}
if obj.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType &&
obj.Spec.UpdateStrategy.RollingUpdate != nil {
if obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
obj.Spec.UpdateStrategy.RollingUpdate.Partition = ptr.To[int32](0)
}
if utilfeature.DefaultFeatureGate.Enabled(features.MaxUnavailableStatefulSet) {
if obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable == nil {
obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(1))
}
}
}
if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil {
obj.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
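// SetDefaults_ReplicaSet defaults spec.replicas to 1 when it is unset.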
func SetDefaults_ReplicaSet(obj *appsv1.ReplicaSet) {
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &appsv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
apps "k8s.io/kubernetes/pkg/apis/apps"
core "k8s.io/kubernetes/pkg/apis/core"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*appsv1.ControllerRevision)(nil), (*apps.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ControllerRevision_To_apps_ControllerRevision(a.(*appsv1.ControllerRevision), b.(*apps.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevision)(nil), (*appsv1.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevision_To_v1_ControllerRevision(a.(*apps.ControllerRevision), b.(*appsv1.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ControllerRevisionList)(nil), (*apps.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(a.(*appsv1.ControllerRevisionList), b.(*apps.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevisionList)(nil), (*appsv1.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(a.(*apps.ControllerRevisionList), b.(*appsv1.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*appsv1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*appsv1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*appsv1.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetList_To_apps_DaemonSetList(a.(*appsv1.DaemonSetList), b.(*apps.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*appsv1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetList_To_v1_DaemonSetList(a.(*apps.DaemonSetList), b.(*appsv1.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*appsv1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*appsv1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*appsv1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*appsv1.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*appsv1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*appsv1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*appsv1.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(a.(*appsv1.DeploymentCondition), b.(*apps.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*appsv1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*appsv1.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeploymentList_To_apps_DeploymentList(a.(*appsv1.DeploymentList), b.(*apps.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*appsv1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentList_To_v1_DeploymentList(a.(*apps.DeploymentList), b.(*appsv1.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*appsv1.DeploymentSpec), b.(*apps.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(a.(*appsv1.DeploymentStatus), b.(*apps.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*appsv1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*appsv1.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*appsv1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*appsv1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*appsv1.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicaSet_To_apps_ReplicaSet(a.(*appsv1.ReplicaSet), b.(*apps.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*appsv1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSet_To_v1_ReplicaSet(a.(*apps.ReplicaSet), b.(*appsv1.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*appsv1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*appsv1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*appsv1.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(a.(*appsv1.ReplicaSetList), b.(*apps.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*appsv1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*appsv1.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*appsv1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*appsv1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*appsv1.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*appsv1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*appsv1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*appsv1.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*appsv1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*appsv1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*appsv1.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*appsv1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*appsv1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*appsv1.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.RollingUpdateStatefulSetStrategy)(nil), (*apps.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(a.(*appsv1.RollingUpdateStatefulSetStrategy), b.(*apps.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateStatefulSetStrategy)(nil), (*appsv1.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(a.(*apps.RollingUpdateStatefulSetStrategy), b.(*appsv1.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSet)(nil), (*apps.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSet_To_apps_StatefulSet(a.(*appsv1.StatefulSet), b.(*apps.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSet)(nil), (*appsv1.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSet_To_v1_StatefulSet(a.(*apps.StatefulSet), b.(*appsv1.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetCondition)(nil), (*apps.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(a.(*appsv1.StatefulSetCondition), b.(*apps.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetCondition)(nil), (*appsv1.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(a.(*apps.StatefulSetCondition), b.(*appsv1.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetList)(nil), (*apps.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetList_To_apps_StatefulSetList(a.(*appsv1.StatefulSetList), b.(*apps.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetList)(nil), (*appsv1.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetList_To_v1_StatefulSetList(a.(*apps.StatefulSetList), b.(*appsv1.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetOrdinals)(nil), (*apps.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(a.(*appsv1.StatefulSetOrdinals), b.(*apps.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetOrdinals)(nil), (*appsv1.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetOrdinals_To_v1_StatefulSetOrdinals(a.(*apps.StatefulSetOrdinals), b.(*appsv1.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*appsv1.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetStatus)(nil), (*appsv1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*appsv1.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*appsv1.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*appsv1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*appsv1.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*appsv1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*appsv1.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*appsv1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*appsv1.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*appsv1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*appsv1.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.Deployment)(nil), (*appsv1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*appsv1.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*appsv1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*appsv1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*appsv1.DaemonSet), b.(*apps.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Deployment_To_apps_Deployment(a.(*appsv1.Deployment), b.(*apps.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*appsv1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
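// exampleConvertDeployment is an illustrative sketch, not generated code. It
// shows how the conversions registered above are typically exercised through
// runtime.Scheme.Convert. It assumes the registration function above follows
// conversion-gen's usual RegisterConversions naming; the example function
// name itself is hypothetical.
func exampleConvertDeployment(in *appsv1.Deployment) (*apps.Deployment, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	out := &apps.Deployment{}
	// Convert looks up and invokes the registered
	// Convert_v1_Deployment_To_apps_Deployment function.
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}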
func autoConvert_v1_ControllerRevision_To_apps_ControllerRevision(in *appsv1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_v1_ControllerRevision_To_apps_ControllerRevision is an autogenerated conversion function.
func Convert_v1_ControllerRevision_To_apps_ControllerRevision(in *appsv1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
return autoConvert_v1_ControllerRevision_To_apps_ControllerRevision(in, out, s)
}
func autoConvert_apps_ControllerRevision_To_v1_ControllerRevision(in *apps.ControllerRevision, out *appsv1.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_apps_ControllerRevision_To_v1_ControllerRevision is an autogenerated conversion function.
func Convert_apps_ControllerRevision_To_v1_ControllerRevision(in *apps.ControllerRevision, out *appsv1.ControllerRevision, s conversion.Scope) error {
return autoConvert_apps_ControllerRevision_To_v1_ControllerRevision(in, out, s)
}
func autoConvert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
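	// conversion-gen emits unsafe.Pointer casts like the one below only when
	// it has verified that the source and destination element types share an
	// identical memory layout, so the slice is reinterpreted in place instead
	// of being copied element by element.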
out.Items = *(*[]apps.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList is an autogenerated conversion function.
func Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in, out, s)
}
func autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]appsv1.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList is an autogenerated conversion function.
func Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in, out, s)
}
func autoConvert_v1_DaemonSet_To_apps_DaemonSet(in *appsv1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *appsv1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
out.Type = apps.DaemonSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function.
func Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *appsv1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s)
}
func autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *appsv1.DaemonSetCondition, s conversion.Scope) error {
out.Type = appsv1.DaemonSetConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function.
func Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *appsv1.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s)
}
func autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in *appsv1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.DaemonSet, len(*in))
for i := range *in {
if err := Convert_v1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function.
func Convert_v1_DaemonSetList_To_apps_DaemonSetList(in *appsv1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
return autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in, out, s)
}
func autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *appsv1.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1.DaemonSet, len(*in))
for i := range *in {
if err := Convert_apps_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function.
func Convert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *appsv1.DaemonSetList, s conversion.Scope) error {
return autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in, out, s)
}
func autoConvert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
// Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec is an autogenerated conversion function.
func Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
return autoConvert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in, out, s)
}
func autoConvert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
// WARNING: in.TemplateGeneration requires manual conversion: does not exist in peer-type
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
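// Note that no public Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec wrapper
// is generated here: because TemplateGeneration needs manual handling, the
// public conversion is hand-written elsewhere and registered above through
// AddConversionFunc instead of AddGeneratedConversionFunc. The same pattern
// applies to the DeploymentSpec and StatefulSetSpec conversions below.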
func autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *appsv1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function.
func Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *appsv1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s)
}
func autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *appsv1.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]appsv1.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function.
func Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *appsv1.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s)
}
func autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDaemonSet)
if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = appsv1.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1.RollingUpdateDaemonSet)
if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *appsv1.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
out.Type = apps.DeploymentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function.
func Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
return autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s)
}
func autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1.DeploymentCondition, s conversion.Scope) error {
out.Type = appsv1.DeploymentConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function.
func Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1.DeploymentCondition, s conversion.Scope) error {
return autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in, out, s)
}
func autoConvert_v1_DeploymentList_To_apps_DeploymentList(in *appsv1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.Deployment, len(*in))
for i := range *in {
if err := Convert_v1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function.
func Convert_v1_DeploymentList_To_apps_DeploymentList(in *appsv1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
return autoConvert_v1_DeploymentList_To_apps_DeploymentList(in, out, s)
}
func autoConvert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *appsv1.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1.Deployment, len(*in))
for i := range *in {
if err := Convert_apps_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function.
func Convert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *appsv1.DeploymentList, s conversion.Scope) error {
return autoConvert_apps_DeploymentList_To_v1_DeploymentList(in, out, s)
}
func autoConvert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
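	// The metav1 helper below (rather than a plain assignment) bridges the
	// external *int32 Replicas field and the internal int32 field; a nil
	// pointer converts to the zero value.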
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_v1_DeploymentSpec_To_apps_DeploymentSpec is an autogenerated conversion function.
func Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
return autoConvert_v1_DeploymentSpec_To_apps_DeploymentSpec(in, out, s)
}
func autoConvert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
// WARNING: in.RollbackTo requires manual conversion: does not exist in peer-type
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
func autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_v1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function.
func Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
return autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s)
}
func autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]appsv1.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_apps_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function.
func Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1.DeploymentStatus, s conversion.Scope) error {
return autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in, out, s)
}
func autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
out.Type = apps.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDeployment)
if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function.
func Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s)
}
func autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error {
out.Type = appsv1.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1.RollingUpdateDeployment)
if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy is an autogenerated conversion function.
func Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in, out, s)
}
func autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in *appsv1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function.
func Convert_v1_ReplicaSet_To_apps_ReplicaSet(in *appsv1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
return autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in, out, s)
}
func autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *appsv1.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function.
func Convert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *appsv1.ReplicaSet, s conversion.Scope) error {
return autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in, out, s)
}
func autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *appsv1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
out.Type = apps.ReplicaSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function.
func Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *appsv1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s)
}
func autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *appsv1.ReplicaSetCondition, s conversion.Scope) error {
out.Type = appsv1.ReplicaSetConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function.
func Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *appsv1.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s)
}
func autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *appsv1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_v1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function.
func Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *appsv1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
return autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s)
}
func autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *appsv1.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_apps_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function.
func Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *appsv1.ReplicaSetList, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in, out, s)
}
func autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec is an autogenerated conversion function.
func Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in, out, s)
}
func autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec is an autogenerated conversion function.
func Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in, out, s)
}
func autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *appsv1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function.
func Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *appsv1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s)
}
func autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *appsv1.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]appsv1.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function.
func Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *appsv1.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s)
}
func autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s)
}
func autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in, out, s)
}
func autoConvert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_v1_StatefulSet_To_apps_StatefulSet(in *appsv1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_StatefulSet_To_apps_StatefulSet is an autogenerated conversion function.
func Convert_v1_StatefulSet_To_apps_StatefulSet(in *appsv1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
return autoConvert_v1_StatefulSet_To_apps_StatefulSet(in, out, s)
}
func autoConvert_apps_StatefulSet_To_v1_StatefulSet(in *apps.StatefulSet, out *appsv1.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_StatefulSet_To_v1_StatefulSet is an autogenerated conversion function.
func Convert_apps_StatefulSet_To_v1_StatefulSet(in *apps.StatefulSet, out *appsv1.StatefulSet, s conversion.Scope) error {
return autoConvert_apps_StatefulSet_To_v1_StatefulSet(in, out, s)
}
func autoConvert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
out.Type = apps.StatefulSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function.
func Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s)
}
func autoConvert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1.StatefulSetCondition, s conversion.Scope) error {
out.Type = appsv1.StatefulSetConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition is an autogenerated conversion function.
func Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in, out, s)
}
func autoConvert_v1_StatefulSetList_To_apps_StatefulSetList(in *appsv1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.StatefulSet, len(*in))
for i := range *in {
if err := Convert_v1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_StatefulSetList_To_apps_StatefulSetList is an autogenerated conversion function.
func Convert_v1_StatefulSetList_To_apps_StatefulSetList(in *appsv1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
return autoConvert_v1_StatefulSetList_To_apps_StatefulSetList(in, out, s)
}
func autoConvert_apps_StatefulSetList_To_v1_StatefulSetList(in *apps.StatefulSetList, out *appsv1.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1.StatefulSet, len(*in))
for i := range *in {
if err := Convert_apps_StatefulSet_To_v1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_StatefulSetList_To_v1_StatefulSetList is an autogenerated conversion function.
func Convert_apps_StatefulSetList_To_v1_StatefulSetList(in *apps.StatefulSetList, out *appsv1.StatefulSetList, s conversion.Scope) error {
return autoConvert_apps_StatefulSetList_To_v1_StatefulSetList(in, out, s)
}
func autoConvert_v1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_v1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_v1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_v1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in, out, s)
}
func autoConvert_apps_StatefulSetOrdinals_To_v1_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_apps_StatefulSetOrdinals_To_v1_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_apps_StatefulSetOrdinals_To_v1_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_apps_StatefulSetOrdinals_To_v1_StatefulSetOrdinals(in, out, s)
}
func autoConvert_v1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_v1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_v1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_v1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = appsv1.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = appsv1.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*apps.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]corev1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = appsv1.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*appsv1.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err
}
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus is an autogenerated conversion function.
func Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s)
}
func autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int64_To_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err
}
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]appsv1.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus is an autogenerated conversion function.
func Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in, out, s)
}
func autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateStatefulSetStrategy)
if err := Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = appsv1.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1.RollingUpdateStatefulSetStrategy)
if err := Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&appsv1.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*appsv1.DaemonSet)) })
scheme.AddTypeDefaultingFunc(&appsv1.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*appsv1.DaemonSetList)) })
scheme.AddTypeDefaultingFunc(&appsv1.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*appsv1.Deployment)) })
scheme.AddTypeDefaultingFunc(&appsv1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*appsv1.DeploymentList)) })
scheme.AddTypeDefaultingFunc(&appsv1.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*appsv1.ReplicaSet)) })
scheme.AddTypeDefaultingFunc(&appsv1.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*appsv1.ReplicaSetList)) })
scheme.AddTypeDefaultingFunc(&appsv1.StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*appsv1.StatefulSet)) })
scheme.AddTypeDefaultingFunc(&appsv1.StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*appsv1.StatefulSetList)) })
return nil
}
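// exampleApplyDefaults is an illustrative sketch, not generated code. It
// shows how the defaulters registered above are applied through
// runtime.Scheme.Default; the example function name is hypothetical.
func exampleApplyDefaults(ds *appsv1.DaemonSet) error {
	scheme := runtime.NewScheme()
	if err := RegisterDefaults(scheme); err != nil {
		return err
	}
	// Default dispatches on the object's Go type and invokes the covering
	// SetObjectDefaults_DaemonSet registered above.
	scheme.Default(ds)
	return nil
}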
func SetObjectDefaults_DaemonSet(in *appsv1.DaemonSet) {
SetDefaults_DaemonSet(in)
apiscorev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
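	// The generated body below walks every nested field that has a
	// registered defaulter: each volume (and its volume source), then the
	// init containers, then the containers, so callers never need to invoke
	// the nested SetDefaults_* helpers directly.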
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *appsv1.DaemonSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_DaemonSet(a)
}
}
func SetObjectDefaults_Deployment(in *appsv1.Deployment) {
SetDefaults_Deployment(in)
apiscorev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1.DeploymentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Deployment(a)
}
}
func SetObjectDefaults_ReplicaSet(in *appsv1.ReplicaSet) {
SetDefaults_ReplicaSet(in)
apiscorev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *appsv1.ReplicaSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ReplicaSet(a)
}
}
func SetObjectDefaults_StatefulSet(in *appsv1.StatefulSet) {
SetDefaults_StatefulSet(in)
apiscorev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
apiscorev1.SetDefaults_PersistentVolumeClaim(a)
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
apiscorev1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
apiscorev1.SetDefaults_ResourceList(&a.Status.Capacity)
apiscorev1.SetDefaults_ResourceList(&a.Status.AllocatedResources)
}
}
func SetObjectDefaults_StatefulSetList(in *appsv1.StatefulSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_StatefulSet(a)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
appsv1beta1 "k8s.io/api/apps/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
apps "k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/core"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field label conversions for kinds whose selectable fields are nothing but ObjectMeta fields.
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "metadata.namespace", "status.successful":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for appsv1beta1.StatefulSet: %s", label)
}
}); err != nil {
return err
}
return nil
}
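// A sketch of the registered behavior (not part of the original file; the
// value "web" is illustrative): supported labels pass through unchanged,
// while anything else, e.g. "spec.replicas", returns the error from the
// default branch above.
func exampleFieldLabelConversion(scheme *runtime.Scheme) (string, string, error) {
return scheme.ConvertFieldLabel(SchemeGroupVersion.WithKind("StatefulSet"), "metadata.name", "web")
}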
func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta1.ScaleStatus, s conversion.Scope) error {
out.Replicas = int32(in.Replicas)
out.TargetSelector = in.Selector
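// The map-form Selector cannot represent match expressions, so it is left
// nil below unless the parsed selector is equality-only.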
out.Selector = nil
selector, err := metav1.ParseToLabelSelector(in.Selector)
if err != nil {
return fmt.Errorf("failed to parse selector: %v", err)
}
if len(selector.MatchExpressions) == 0 {
out.Selector = selector.MatchLabels
}
return nil
}
func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
if in.TargetSelector != "" {
out.Selector = in.TargetSelector
} else if in.Selector != nil {
set := labels.Set{}
for key, val := range in.Selector {
set[key] = val
}
out.Selector = labels.SelectorFromSet(set).String()
} else {
out.Selector = ""
}
return nil
}
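// A rough illustration of the two directions (a sketch, not part of the
// original file; the label pair app=demo is invented): a map selector becomes
// its string form going to the internal type, and only expression-free string
// selectors survive the trip back into map form.
func exampleScaleStatusRoundTrip() {
in := &appsv1beta1.ScaleStatus{Replicas: 3, Selector: map[string]string{"app": "demo"}}
out := &autoscaling.ScaleStatus{}
_ = Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, nil)
// out.Selector == "app=demo"
back := &appsv1beta1.ScaleStatus{}
_ = Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(out, back, nil)
// back.TargetSelector == "app=demo", back.Selector == map[string]string{"app": "demo"}
}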
// Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve
// the pre-1.17 behavior of setting apiVersion/kind in nested persistent volume claim objects.
func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec augments auto-conversion to preserve
// the pre-1.17 behavior of setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta1.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}
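// A sketch of the stamping preserved above (not part of the original file;
// passing a nil conversion.Scope is assumed safe here, since the generated
// field conversions involved do not dereference it):
func exampleVolumeClaimTemplateStamping() {
in := &apps.StatefulSetSpec{VolumeClaimTemplates: []core.PersistentVolumeClaim{{}}}
out := &appsv1beta1.StatefulSetSpec{}
_ = Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in, out, nil)
// out.VolumeClaimTemplates[0].APIVersion == "v1"
// out.VolumeClaimTemplates[0].Kind == "PersistentVolumeClaim"
}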
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
appsv1beta1 "k8s.io/api/apps/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_StatefulSet(obj *appsv1beta1.StatefulSet) {
if len(obj.Spec.PodManagementPolicy) == 0 {
obj.Spec.PodManagementPolicy = appsv1beta1.OrderedReadyPodManagement
}
if obj.Spec.UpdateStrategy.Type == "" {
obj.Spec.UpdateStrategy.Type = appsv1beta1.OnDeleteStatefulSetStrategyType
}
labels := obj.Spec.Template.Labels
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{
MatchLabels: labels,
}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil {
obj.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
if obj.Spec.UpdateStrategy.Type == appsv1beta1.RollingUpdateStatefulSetStrategyType &&
obj.Spec.UpdateStrategy.RollingUpdate != nil {
if obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
obj.Spec.UpdateStrategy.RollingUpdate.Partition = ptr.To[int32](0)
}
if utilfeature.DefaultFeatureGate.Enabled(features.MaxUnavailableStatefulSet) {
if obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable == nil {
obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(1))
}
}
}
}
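// A minimal sketch of the net effect (not part of the original file): on an
// otherwise empty object the v1beta1 defaults land as follows; note that the
// update strategy falls back to OnDelete here, unlike later API versions.
func exampleStatefulSetDefaults() {
obj := &appsv1beta1.StatefulSet{}
SetDefaults_StatefulSet(obj)
// obj.Spec.PodManagementPolicy == appsv1beta1.OrderedReadyPodManagement
// obj.Spec.UpdateStrategy.Type == appsv1beta1.OnDeleteStatefulSetStrategyType
// *obj.Spec.Replicas == 1, *obj.Spec.RevisionHistoryLimit == 10
}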
// SetDefaults_Deployment sets additional defaults compared to its counterpart
// in extensions. The additional defaults are:
// - MaxUnavailable during rolling update set to 25% (1 in extensions)
// - MaxSurge value during rolling update set to 25% (1 in extensions)
// - RevisionHistoryLimit set to 2 (not set in extensions)
// - ProgressDeadlineSeconds set to 600s (not set in extensions)
func SetDefaults_Deployment(obj *appsv1beta1.Deployment) {
// Default labels and selector to labels from pod template spec.
labels := obj.Spec.Template.Labels
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
// Set appsv1beta1.DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Default appsv1beta1.DeploymentStrategyType to RollingUpdate.
if strategy.Type == "" {
strategy.Type = appsv1beta1.RollingUpdateDeploymentStrategyType
}
if strategy.Type == appsv1beta1.RollingUpdateDeploymentStrategyType {
if strategy.RollingUpdate == nil {
rollingUpdate := appsv1beta1.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 25%.
maxUnavailable := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 25%.
maxSurge := intstr.FromString("25%")
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 2
}
if obj.Spec.ProgressDeadlineSeconds == nil {
obj.Spec.ProgressDeadlineSeconds = new(int32)
*obj.Spec.ProgressDeadlineSeconds = 600
}
}
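// A minimal sketch of the net effect (not part of the original file):
func exampleDeploymentDefaults() {
obj := &appsv1beta1.Deployment{}
SetDefaults_Deployment(obj)
// obj.Spec.Strategy.Type == appsv1beta1.RollingUpdateDeploymentStrategyType
// obj.Spec.Strategy.RollingUpdate.MaxUnavailable.String() == "25%"
// *obj.Spec.RevisionHistoryLimit == 2, *obj.Spec.ProgressDeadlineSeconds == 600
}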
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
appsv1beta1 "k8s.io/api/apps/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
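// For illustration (a sketch, not part of the original file): the helper
// qualifies a bare resource name with this package's group.
func exampleResource() schema.GroupResource {
return Resource("statefulsets") // {Group: "apps", Resource: "statefulsets"}
}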
var (
localSchemeBuilder = &appsv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
apps "k8s.io/kubernetes/pkg/apis/apps"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*appsv1beta1.ControllerRevision)(nil), (*apps.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ControllerRevision_To_apps_ControllerRevision(a.(*appsv1beta1.ControllerRevision), b.(*apps.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevision)(nil), (*appsv1beta1.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevision_To_v1beta1_ControllerRevision(a.(*apps.ControllerRevision), b.(*appsv1beta1.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.ControllerRevisionList)(nil), (*apps.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList(a.(*appsv1beta1.ControllerRevisionList), b.(*apps.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevisionList)(nil), (*appsv1beta1.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(a.(*apps.ControllerRevisionList), b.(*appsv1beta1.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*appsv1beta1.Deployment), b.(*apps.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*appsv1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*appsv1beta1.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*appsv1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*appsv1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*appsv1beta1.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*appsv1beta1.DeploymentList), b.(*apps.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*appsv1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*appsv1beta1.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*appsv1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*appsv1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*appsv1beta1.DeploymentRollback), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*appsv1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*appsv1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*appsv1beta1.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*appsv1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*appsv1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*appsv1beta1.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*appsv1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*appsv1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*appsv1beta1.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*appsv1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*appsv1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*appsv1beta1.RollbackConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*appsv1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*appsv1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*appsv1beta1.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.RollingUpdateStatefulSetStrategy)(nil), (*apps.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(a.(*appsv1beta1.RollingUpdateStatefulSetStrategy), b.(*apps.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateStatefulSetStrategy)(nil), (*appsv1beta1.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(a.(*apps.RollingUpdateStatefulSetStrategy), b.(*appsv1beta1.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.Scale)(nil), (*autoscaling.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Scale_To_autoscaling_Scale(a.(*appsv1beta1.Scale), b.(*autoscaling.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.Scale)(nil), (*appsv1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_Scale_To_v1beta1_Scale(a.(*autoscaling.Scale), b.(*appsv1beta1.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.ScaleSpec)(nil), (*autoscaling.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(a.(*appsv1beta1.ScaleSpec), b.(*autoscaling.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleSpec)(nil), (*appsv1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(a.(*autoscaling.ScaleSpec), b.(*appsv1beta1.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSet)(nil), (*apps.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSet_To_apps_StatefulSet(a.(*appsv1beta1.StatefulSet), b.(*apps.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSet)(nil), (*appsv1beta1.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSet_To_v1beta1_StatefulSet(a.(*apps.StatefulSet), b.(*appsv1beta1.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetCondition)(nil), (*apps.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(a.(*appsv1beta1.StatefulSetCondition), b.(*apps.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetCondition)(nil), (*appsv1beta1.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(a.(*apps.StatefulSetCondition), b.(*appsv1beta1.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetList)(nil), (*apps.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList(a.(*appsv1beta1.StatefulSetList), b.(*apps.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetList)(nil), (*appsv1beta1.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(a.(*apps.StatefulSetList), b.(*appsv1beta1.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetOrdinals)(nil), (*apps.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(a.(*appsv1beta1.StatefulSetOrdinals), b.(*apps.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetOrdinals)(nil), (*appsv1beta1.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetOrdinals_To_v1beta1_StatefulSetOrdinals(a.(*apps.StatefulSetOrdinals), b.(*appsv1beta1.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*appsv1beta1.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetStatus)(nil), (*appsv1beta1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*appsv1beta1.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta1.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*appsv1beta1.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*appsv1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*appsv1beta1.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*appsv1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*appsv1beta1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*appsv1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*appsv1beta1.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*appsv1beta1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*appsv1beta1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
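// Illustrative usage (an assumption, not part of the generated output): once
// the registration function above has populated a runtime.Scheme, callers
// normally convert through the scheme rather than invoking the Convert_*
// functions directly. A minimal sketch, assuming `s` is that scheme and
// `versioned` is a *appsv1beta1.Deployment populated by the caller:
//
//	internal := &apps.Deployment{}
//	if err := s.Convert(versioned, internal, nil); err != nil {
//		// handle the conversion error
//	}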
func autoConvert_v1beta1_ControllerRevision_To_apps_ControllerRevision(in *appsv1beta1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_v1beta1_ControllerRevision_To_apps_ControllerRevision is an autogenerated conversion function.
func Convert_v1beta1_ControllerRevision_To_apps_ControllerRevision(in *appsv1beta1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
return autoConvert_v1beta1_ControllerRevision_To_apps_ControllerRevision(in, out, s)
}
func autoConvert_apps_ControllerRevision_To_v1beta1_ControllerRevision(in *apps.ControllerRevision, out *appsv1beta1.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_apps_ControllerRevision_To_v1beta1_ControllerRevision is an autogenerated conversion function.
func Convert_apps_ControllerRevision_To_v1beta1_ControllerRevision(in *apps.ControllerRevision, out *appsv1beta1.ControllerRevision, s conversion.Scope) error {
return autoConvert_apps_ControllerRevision_To_v1beta1_ControllerRevision(in, out, s)
}
func autoConvert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1beta1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]apps.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList is an autogenerated conversion function.
func Convert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1beta1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_v1beta1_ControllerRevisionList_To_apps_ControllerRevisionList(in, out, s)
}
func autoConvert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1beta1.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]appsv1beta1.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList is an autogenerated conversion function.
func Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1beta1.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in, out, s)
}
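// Note on the unsafe.Pointer casts above: conversion-gen appears to emit a
// direct slice reinterpretation such as
// *(*[]apps.ControllerRevision)(unsafe.Pointer(&in.Items)) only when the two
// element types are field-for-field identical in memory, so the backing array
// is shared instead of copied. When the layouts differ, the generator falls
// back to an element-wise conversion loop (see the DeploymentList functions
// below).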
func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *appsv1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function.
func Convert_v1beta1_Deployment_To_apps_Deployment(in *appsv1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error {
return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s)
}
func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *appsv1beta1.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function.
func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *appsv1beta1.Deployment, s conversion.Scope) error {
return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s)
}
func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
out.Type = apps.DeploymentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function.
func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s)
}
func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1beta1.DeploymentCondition, s conversion.Scope) error {
out.Type = appsv1beta1.DeploymentConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function.
func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1beta1.DeploymentCondition, s conversion.Scope) error {
return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s)
}
func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *appsv1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.Deployment, len(*in))
for i := range *in {
if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function.
func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *appsv1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s)
}
func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *appsv1beta1.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta1.Deployment, len(*in))
for i := range *in {
if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function.
func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *appsv1beta1.DeploymentList, s conversion.Scope) error {
return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
}
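// Unlike ControllerRevisionList above, DeploymentList cannot reuse the
// unsafe.Pointer cast for Items: converting each Deployment requires real
// work (pointer-versus-value Replicas, the pod template conversion), so the
// generator emits an explicit per-element loop in both directions.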
func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *appsv1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
out.Name = in.Name
out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function.
func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *appsv1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s)
}
func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *appsv1beta1.DeploymentRollback, s conversion.Scope) error {
out.Name = in.Name
out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
return err
}
return nil
}
// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function.
func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *appsv1beta1.DeploymentRollback, s conversion.Scope) error {
return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
}
func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec is an autogenerated conversion function.
func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in, out, s)
}
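// For reference, the metav1.Convert_Pointer_int32_To_int32 helper used for
// the Replicas field above behaves roughly like the hypothetical sketch
// below: a nil versioned pointer collapses to the internal zero value.
//
//	func pointerInt32ToInt32(in **int32, out *int32) {
//		if *in != nil {
//			*out = **in
//		} else {
//			*out = 0
//		}
//	}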
func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*appsv1beta1.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec is an autogenerated conversion function.
func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error {
return autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in, out, s)
}
func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function.
func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s)
}
func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1beta1.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]appsv1beta1.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function.
func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1beta1.DeploymentStatus, s conversion.Scope) error {
return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
}
func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
out.Type = apps.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDeployment)
if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function.
func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s)
}
func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error {
out.Type = appsv1beta1.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1beta1.RollingUpdateDeployment)
if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy is an autogenerated conversion function.
func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in, out, s)
}
func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *appsv1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function.
func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *appsv1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s)
}
func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *appsv1beta1.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *appsv1beta1.RollbackConfig, s conversion.Scope) error {
return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
}
func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s)
}
func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in, out, s)
}
func autoConvert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1beta1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1beta1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1beta1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1beta1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_v1beta1_Scale_To_autoscaling_Scale(in *appsv1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Scale_To_autoscaling_Scale is an autogenerated conversion function.
func Convert_v1beta1_Scale_To_autoscaling_Scale(in *appsv1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
return autoConvert_v1beta1_Scale_To_autoscaling_Scale(in, out, s)
}
func autoConvert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *appsv1beta1.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_Scale_To_v1beta1_Scale is an autogenerated conversion function.
func Convert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *appsv1beta1.Scale, s conversion.Scope) error {
return autoConvert_autoscaling_Scale_To_v1beta1_Scale(in, out, s)
}
func autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *appsv1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *appsv1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s)
}
func autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *appsv1beta1.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
func Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *appsv1beta1.ScaleSpec, s conversion.Scope) error {
return autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
}
func autoConvert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string)
// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta1.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string)
return nil
}
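// The two ScaleStatus autoConvert functions above deliberately skip the
// Selector fields, which is why RegisterConversions registers hand-written
// Convert_*_ScaleStatus_* functions instead of generated wrappers. A minimal
// sketch of the v1beta1-to-autoscaling direction, assuming
// k8s.io/apimachinery/pkg/labels is available (the real hand-written function
// lives in this package's non-generated conversion file and may differ):
//
//	set := labels.Set{}
//	for key, val := range in.Selector { // map[string]string on the v1beta1 side
//		set[key] = val
//	}
//	out.Selector = labels.SelectorFromSet(set).String() // flat string on the autoscaling side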
func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *appsv1beta1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_StatefulSet_To_apps_StatefulSet is an autogenerated conversion function.
func Convert_v1beta1_StatefulSet_To_apps_StatefulSet(in *appsv1beta1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in, out, s)
}
func autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *appsv1beta1.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_StatefulSet_To_v1beta1_StatefulSet is an autogenerated conversion function.
func Convert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *appsv1beta1.StatefulSet, s conversion.Scope) error {
return autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in, out, s)
}
func autoConvert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1beta1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
out.Type = apps.StatefulSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1beta1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s)
}
func autoConvert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1beta1.StatefulSetCondition, s conversion.Scope) error {
out.Type = appsv1beta1.StatefulSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition is an autogenerated conversion function.
func Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1beta1.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in, out, s)
}
func autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *appsv1beta1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.StatefulSet, len(*in))
for i := range *in {
if err := Convert_v1beta1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *appsv1beta1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in, out, s)
}
func autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *appsv1beta1.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta1.StatefulSet, len(*in))
for i := range *in {
if err := Convert_apps_StatefulSet_To_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList is an autogenerated conversion function.
func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *appsv1beta1.StatefulSetList, s conversion.Scope) error {
return autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in, out, s)
}
func autoConvert_v1beta1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1beta1.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_v1beta1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1beta1.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in, out, s)
}
func autoConvert_apps_StatefulSetOrdinals_To_v1beta1_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1beta1.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_apps_StatefulSetOrdinals_To_v1beta1_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_apps_StatefulSetOrdinals_To_v1beta1_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1beta1.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_apps_StatefulSetOrdinals_To_v1beta1_StatefulSetOrdinals(in, out, s)
}
func autoConvert_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = appsv1beta1.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = appsv1beta1.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta1_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*apps.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta1.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = appsv1beta1.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*appsv1beta1.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s)
}
func autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1beta1.StatefulSetStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]appsv1beta1.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus is an autogenerated conversion function.
func Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1beta1.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in, out, s)
}
func autoConvert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateStatefulSetStrategy)
if err := Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta1.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = appsv1beta1.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1beta1.RollingUpdateStatefulSetStrategy)
if err := Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta1.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&appsv1beta1.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*appsv1beta1.Deployment)) })
scheme.AddTypeDefaultingFunc(&appsv1beta1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*appsv1beta1.DeploymentList)) })
scheme.AddTypeDefaultingFunc(&appsv1beta1.StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*appsv1beta1.StatefulSet)) })
scheme.AddTypeDefaultingFunc(&appsv1beta1.StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*appsv1beta1.StatefulSetList)) })
return nil
}
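// Illustrative usage (an assumption, not part of the generated output):
// defaulting is normally triggered through the scheme, which dispatches to
// the type-specific functions registered above.
//
//	scheme := runtime.NewScheme()
//	_ = RegisterDefaults(scheme)
//	d := &appsv1beta1.Deployment{}
//	scheme.Default(d) // runs SetObjectDefaults_Deployment(d)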
func SetObjectDefaults_Deployment(in *appsv1beta1.Deployment) {
SetDefaults_Deployment(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1beta1.DeploymentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Deployment(a)
}
}
func SetObjectDefaults_StatefulSet(in *appsv1beta1.StatefulSet) {
SetDefaults_StatefulSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
corev1.SetDefaults_PersistentVolumeClaim(a)
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
corev1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
corev1.SetDefaults_ResourceList(&a.Status.Capacity)
corev1.SetDefaults_ResourceList(&a.Status.AllocatedResources)
}
}
func SetObjectDefaults_StatefulSetList(in *appsv1beta1.StatefulSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_StatefulSet(a)
}
}
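// Illustrative sketch (not part of the generated code): these SetObjectDefaults_*
// functions are registered with the scheme (via RegisterDefaults) and are normally
// invoked through scheme.Default rather than called directly. Assuming a
// runtime.Scheme with this package registered:
//
//	ss := &appsv1beta1.StatefulSet{}
//	scheme.Default(ss) // dispatches to SetObjectDefaults_StatefulSet
//	// e.g. *ss.Spec.Replicas == 1, and every container port Protocol == "TCP"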
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
fmt "fmt"
appsv1beta1 "k8s.io/api/apps/v1beta1"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*appsv1beta1.Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/scale":
return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*appsv1beta1.Scale), safe.Cast[*appsv1beta1.Scale](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *appsv1beta1.Scale) (errs field.ErrorList) {
// field appsv1beta1.Scale.TypeMeta has no validation
// field appsv1beta1.Scale.ObjectMeta has no validation
// field appsv1beta1.Scale.Spec
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *appsv1beta1.ScaleSpec) (errs field.ErrorList) {
errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *appsv1beta1.Scale) *appsv1beta1.ScaleSpec { return &oldObj.Spec }))...)
// field appsv1beta1.Scale.Status has no validation
return errs
}
func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *appsv1beta1.ScaleSpec) (errs field.ErrorList) {
// field appsv1beta1.ScaleSpec.Replicas
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
// optional value-type fields with zero-value defaults are purely documentation
if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
return nil // no changes
}
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
}(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *appsv1beta1.ScaleSpec) *int32 { return &oldObj.Replicas }))...)
return errs
}
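// Illustrative sketch (not part of the generated code): the update short-circuit
// in Validate_ScaleSpec implements validation "ratcheting" -- a field that did not
// change is not re-validated, so a pre-existing out-of-range value does not block
// unrelated updates. Here updateOp is an assumed operation.Operation with
// Type == operation.Update:
//
//	old := &appsv1beta1.Scale{Spec: appsv1beta1.ScaleSpec{Replicas: -1}}
//	obj := old.DeepCopy() // replicas still -1, but unchanged
//	// Validate_Scale(ctx, updateOp, nil, obj, old) returns no errors, while a
//	// create (or a change to a different negative value) fails validate.Minimum(..., 0).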
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"fmt"
"strconv"
appsv1beta2 "k8s.io/api/apps/v1beta2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/apps"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/core"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field label conversions for kinds whose only selectable fields are ObjectMeta fields.
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("StatefulSet"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "metadata.namespace", "status.successful":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for appsv1beta2.StatefulSet: %s", label)
}
}); err != nil {
return err
}
return nil
}
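// Illustrative sketch (not part of the source): the registration above is what
// backs field selectors for this kind, surfaced through the scheme:
//
//	label, value, err := scheme.ConvertFieldLabel(
//		SchemeGroupVersion.WithKind("StatefulSet"), "metadata.name", "web")
//	// label == "metadata.name", value == "web", err == nil;
//	// an unsupported label such as "spec.replicas" returns an error instead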
func Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta2.ScaleStatus, s conversion.Scope) error {
out.Replicas = int32(in.Replicas)
out.TargetSelector = in.Selector
out.Selector = nil
selector, err := metav1.ParseToLabelSelector(in.Selector)
if err != nil {
return fmt.Errorf("failed to parse selector: %v", err)
}
if len(selector.MatchExpressions) == 0 {
out.Selector = selector.MatchLabels
}
return nil
}
func Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta2.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// Normally when 2 fields map to the same internal value we favor the old field, since
// old clients can't be expected to know about new fields but clients that know about the
// new field can be expected to know about the old field (though that's not quite true, due
// to kubectl apply). However, these fields are read-only, so any non-nil value should work.
if in.TargetSelector != "" {
out.Selector = in.TargetSelector
} else if in.Selector != nil {
set := labels.Set{}
for key, val := range in.Selector {
set[key] = val
}
out.Selector = labels.SelectorFromSet(set).String()
} else {
out.Selector = ""
}
return nil
}
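// Illustrative sketch (not part of the source): the two ScaleStatus conversions
// round-trip a selector between its map and string forms (the conversion.Scope is
// unused here, so nil suffices for the sketch):
//
//	in := &autoscaling.ScaleStatus{Replicas: 3, Selector: "app=web"}
//	out := &appsv1beta2.ScaleStatus{}
//	_ = Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(in, out, nil)
//	// out.TargetSelector == "app=web", out.Selector == map[string]string{"app": "web"};
//	// converting back prefers TargetSelector, recovering the original string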
// Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec is defined here because the public
// conversion is not auto-generated due to existing warnings.
func Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error {
if err := autoConvert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in, out, s); err != nil {
return err
}
return nil
}
func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error {
if err := autoConvert_v1beta2_Deployment_To_apps_Deployment(in, out, s); err != nil {
return err
}
// Copy annotation to deprecated rollbackTo field for roundtrip
// TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment
if revision := in.Annotations[appsv1beta2.DeprecatedRollbackTo]; revision != "" {
if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil {
return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1beta2.DeprecatedRollbackTo, revision, err)
} else {
out.Spec.RollbackTo = new(apps.RollbackConfig)
out.Spec.RollbackTo.Revision = revision64
}
out.Annotations = deepCopyStringMap(out.Annotations)
delete(out.Annotations, appsv1beta2.DeprecatedRollbackTo)
} else {
out.Spec.RollbackTo = nil
}
return nil
}
func Convert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error {
if err := autoConvert_apps_Deployment_To_v1beta2_Deployment(in, out, s); err != nil {
return err
}
out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify annotations below
// Copy deprecated rollbackTo field to annotation for roundtrip
// TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment
if in.Spec.RollbackTo != nil {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
out.Annotations[appsv1beta2.DeprecatedRollbackTo] = strconv.FormatInt(in.Spec.RollbackTo.Revision, 10)
} else {
delete(out.Annotations, appsv1beta2.DeprecatedRollbackTo)
}
return nil
}
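// Illustrative sketch (not part of the source): together, the two Deployment
// conversions above round-trip the deprecated rollbackTo field through an
// annotation (conversion.Scope elided for illustration):
//
//	internal := &apps.Deployment{Spec: apps.DeploymentSpec{RollbackTo: &apps.RollbackConfig{Revision: 2}}}
//	versioned := &appsv1beta2.Deployment{}
//	_ = Convert_apps_Deployment_To_v1beta2_Deployment(internal, versioned, nil)
//	// versioned.Annotations[appsv1beta2.DeprecatedRollbackTo] == "2";
//	// converting back parses the annotation, restores Spec.RollbackTo.Revision == 2,
//	// and removes the annotation from the result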
func Convert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error {
if err := autoConvert_apps_DaemonSet_To_v1beta2_DaemonSet(in, out, s); err != nil {
return err
}
out.Annotations = deepCopyStringMap(out.Annotations)
out.Annotations[appsv1beta2.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10)
return nil
}
// Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec is defined here because the public
// conversion is not auto-generated due to existing warnings.
func Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in, out, s); err != nil {
return err
}
return nil
}
func Convert_v1beta2_DaemonSet_To_apps_DaemonSet(in *appsv1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
if err := autoConvert_v1beta2_DaemonSet_To_apps_DaemonSet(in, out, s); err != nil {
return err
}
if value, ok := in.Annotations[appsv1beta2.DeprecatedTemplateGeneration]; ok {
if value64, err := strconv.ParseInt(value, 10, 64); err != nil {
return err
} else {
out.Spec.TemplateGeneration = value64
out.Annotations = deepCopyStringMap(out.Annotations)
delete(out.Annotations, appsv1beta2.DeprecatedTemplateGeneration)
}
}
return nil
}
func deepCopyStringMap(m map[string]string) map[string]string {
ret := make(map[string]string, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
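// Illustrative sketch (not part of the source): deepCopyStringMap guards against
// aliasing -- the autogenerated conversions may share the annotations map between
// input and output, so mutating it in place would corrupt the input object:
//
//	src := map[string]string{"a": "1"}
//	dst := deepCopyStringMap(src)
//	delete(dst, "a")
//	// src still contains "a"; only the copy changed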
// Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve the
// pre-1.17 behavior of setting apiVersion/kind in nested persistent volume claim objects.
func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec augments auto-conversion to preserve the
// pre-1.17 behavior of setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta2.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see https://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}
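// Illustrative sketch (not part of the source): the copy-before-write pattern in
// both StatefulSetSpec conversions means the caller's input is never mutated
// (conversion.Scope elided for illustration):
//
//	in := &apps.StatefulSetSpec{VolumeClaimTemplates: []core.PersistentVolumeClaim{{}}}
//	out := &appsv1beta2.StatefulSetSpec{}
//	_ = Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in, out, nil)
//	// out.VolumeClaimTemplates[0] has APIVersion "v1" and Kind "PersistentVolumeClaim",
//	// while in.VolumeClaimTemplates[0].TypeMeta stays empty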
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
appsv1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_DaemonSet(obj *appsv1beta2.DaemonSet) {
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = appsv1beta2.RollingUpdateDaemonSetStrategyType
}
if updateStrategy.Type == appsv1beta2.RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := appsv1beta2.RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
updateStrategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(1))
}
if updateStrategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 0.
updateStrategy.RollingUpdate.MaxSurge = ptr.To(intstr.FromInt32(0))
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
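// Illustrative sketch (not part of the source): a DaemonSet with an empty update
// strategy picks up the rolling-update defaults described above:
//
//	ds := &appsv1beta2.DaemonSet{}
//	SetDefaults_DaemonSet(ds)
//	// ds.Spec.UpdateStrategy.Type == appsv1beta2.RollingUpdateDaemonSetStrategyType,
//	// MaxUnavailable.IntValue() == 1, MaxSurge.IntValue() == 0,
//	// *ds.Spec.RevisionHistoryLimit == 10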
func SetDefaults_StatefulSet(obj *appsv1beta2.StatefulSet) {
if len(obj.Spec.PodManagementPolicy) == 0 {
obj.Spec.PodManagementPolicy = appsv1beta2.OrderedReadyPodManagement
}
if obj.Spec.UpdateStrategy.Type == "" {
obj.Spec.UpdateStrategy.Type = appsv1beta2.RollingUpdateStatefulSetStrategyType
if obj.Spec.UpdateStrategy.RollingUpdate == nil {
// UpdateStrategy.RollingUpdate will take default values below.
obj.Spec.UpdateStrategy.RollingUpdate = &appsv1beta2.RollingUpdateStatefulSetStrategy{}
}
}
if obj.Spec.UpdateStrategy.Type == appsv1beta2.RollingUpdateStatefulSetStrategyType &&
obj.Spec.UpdateStrategy.RollingUpdate != nil {
if obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil {
obj.Spec.UpdateStrategy.RollingUpdate.Partition = ptr.To[int32](0)
}
if utilfeature.DefaultFeatureGate.Enabled(features.MaxUnavailableStatefulSet) {
if obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable == nil {
obj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(1))
}
}
}
if obj.Spec.PersistentVolumeClaimRetentionPolicy == nil {
obj.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType
}
if len(obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled) == 0 {
obj.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled = appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
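// Illustrative sketch (not part of the source): most StatefulSet defaults are
// unconditional, but MaxUnavailable is only defaulted when the
// MaxUnavailableStatefulSet feature gate is enabled, so its post-default value
// depends on cluster configuration:
//
//	ss := &appsv1beta2.StatefulSet{}
//	SetDefaults_StatefulSet(ss)
//	// always: *ss.Spec.Replicas == 1, *ss.Spec.UpdateStrategy.RollingUpdate.Partition == 0
//	// gate on: MaxUnavailable defaults to 1; gate off: MaxUnavailable stays nil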
// SetDefaults_Deployment sets additional defaults compared to its counterpart
// in extensions. These additions are:
// - MaxUnavailable during rolling update set to 25% (1 in extensions)
// - MaxSurge value during rolling update set to 25% (1 in extensions)
// - RevisionHistoryLimit set to 10 (not set in extensions)
// - ProgressDeadlineSeconds set to 600s (not set in extensions)
func SetDefaults_Deployment(obj *appsv1beta2.Deployment) {
// Set appsv1beta2.DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Default appsv1beta2.DeploymentStrategyType to RollingUpdate.
if strategy.Type == "" {
strategy.Type = appsv1beta2.RollingUpdateDeploymentStrategyType
}
if strategy.Type == appsv1beta2.RollingUpdateDeploymentStrategyType {
if strategy.RollingUpdate == nil {
rollingUpdate := appsv1beta2.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 25%.
maxUnavailable := intstr.FromString("25%")
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 25%.
maxSurge := intstr.FromString("25%")
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
if obj.Spec.ProgressDeadlineSeconds == nil {
obj.Spec.ProgressDeadlineSeconds = new(int32)
*obj.Spec.ProgressDeadlineSeconds = 600
}
}
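// Illustrative sketch (not part of the source): the percentage defaults are what
// distinguish this group version from extensions/v1beta1, where both values
// defaulted to 1:
//
//	d := &appsv1beta2.Deployment{}
//	SetDefaults_Deployment(d)
//	// d.Spec.Strategy.RollingUpdate.MaxUnavailable.String() == "25%",
//	// d.Spec.Strategy.RollingUpdate.MaxSurge.String() == "25%",
//	// *d.Spec.RevisionHistoryLimit == 10, *d.Spec.ProgressDeadlineSeconds == 600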
func SetDefaults_ReplicaSet(obj *appsv1beta2.ReplicaSet) {
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
appsv1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "apps"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
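// Illustrative sketch (not part of the source): Resource qualifies a plain
// resource name with this package's group:
//
//	gr := Resource("statefulsets")
//	// gr == schema.GroupResource{Group: "apps", Resource: "statefulsets"}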
var (
localSchemeBuilder = &appsv1beta2.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta2
import (
unsafe "unsafe"
appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
apps "k8s.io/kubernetes/pkg/apis/apps"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ControllerRevision)(nil), (*apps.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ControllerRevision_To_apps_ControllerRevision(a.(*appsv1beta2.ControllerRevision), b.(*apps.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevision)(nil), (*appsv1beta2.ControllerRevision)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevision_To_v1beta2_ControllerRevision(a.(*apps.ControllerRevision), b.(*appsv1beta2.ControllerRevision), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ControllerRevisionList)(nil), (*apps.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ControllerRevisionList_To_apps_ControllerRevisionList(a.(*appsv1beta2.ControllerRevisionList), b.(*apps.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ControllerRevisionList)(nil), (*appsv1beta2.ControllerRevisionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(a.(*apps.ControllerRevisionList), b.(*appsv1beta2.ControllerRevisionList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*appsv1beta2.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*appsv1beta2.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*appsv1beta2.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(a.(*appsv1beta2.DaemonSetList), b.(*apps.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*appsv1beta2.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(a.(*apps.DaemonSetList), b.(*appsv1beta2.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*appsv1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*appsv1beta2.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*appsv1beta2.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*appsv1beta2.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*appsv1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*appsv1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*appsv1beta2.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(a.(*appsv1beta2.DeploymentCondition), b.(*apps.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*appsv1beta2.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*appsv1beta2.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeploymentList_To_apps_DeploymentList(a.(*appsv1beta2.DeploymentList), b.(*apps.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*appsv1beta2.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentList_To_v1beta2_DeploymentList(a.(*apps.DeploymentList), b.(*appsv1beta2.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*appsv1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(a.(*appsv1beta2.DeploymentStatus), b.(*apps.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*appsv1beta2.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*appsv1beta2.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*appsv1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*appsv1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*appsv1beta2.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(a.(*appsv1beta2.ReplicaSet), b.(*apps.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*appsv1beta2.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(a.(*apps.ReplicaSet), b.(*appsv1beta2.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*appsv1beta2.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*appsv1beta2.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*appsv1beta2.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(a.(*appsv1beta2.ReplicaSetList), b.(*apps.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*appsv1beta2.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*appsv1beta2.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*appsv1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*appsv1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*appsv1beta2.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*appsv1beta2.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*appsv1beta2.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*appsv1beta2.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*appsv1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*appsv1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*appsv1beta2.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*appsv1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*appsv1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*appsv1beta2.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.RollingUpdateStatefulSetStrategy)(nil), (*apps.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(a.(*appsv1beta2.RollingUpdateStatefulSetStrategy), b.(*apps.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateStatefulSetStrategy)(nil), (*appsv1beta2.RollingUpdateStatefulSetStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(a.(*apps.RollingUpdateStatefulSetStrategy), b.(*appsv1beta2.RollingUpdateStatefulSetStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.Scale)(nil), (*autoscaling.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Scale_To_autoscaling_Scale(a.(*appsv1beta2.Scale), b.(*autoscaling.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.Scale)(nil), (*appsv1beta2.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_Scale_To_v1beta2_Scale(a.(*autoscaling.Scale), b.(*appsv1beta2.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.ScaleSpec)(nil), (*autoscaling.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(a.(*appsv1beta2.ScaleSpec), b.(*autoscaling.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleSpec)(nil), (*appsv1beta2.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(a.(*autoscaling.ScaleSpec), b.(*appsv1beta2.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSet)(nil), (*apps.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSet_To_apps_StatefulSet(a.(*appsv1beta2.StatefulSet), b.(*apps.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSet)(nil), (*appsv1beta2.StatefulSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSet_To_v1beta2_StatefulSet(a.(*apps.StatefulSet), b.(*appsv1beta2.StatefulSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetCondition)(nil), (*apps.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(a.(*appsv1beta2.StatefulSetCondition), b.(*apps.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetCondition)(nil), (*appsv1beta2.StatefulSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(a.(*apps.StatefulSetCondition), b.(*appsv1beta2.StatefulSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetList)(nil), (*apps.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetList_To_apps_StatefulSetList(a.(*appsv1beta2.StatefulSetList), b.(*apps.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetList)(nil), (*appsv1beta2.StatefulSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetList_To_v1beta2_StatefulSetList(a.(*apps.StatefulSetList), b.(*appsv1beta2.StatefulSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetOrdinals)(nil), (*apps.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(a.(*appsv1beta2.StatefulSetOrdinals), b.(*apps.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetOrdinals)(nil), (*appsv1beta2.StatefulSetOrdinals)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetOrdinals_To_v1beta2_StatefulSetOrdinals(a.(*apps.StatefulSetOrdinals), b.(*appsv1beta2.StatefulSetOrdinals), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), (*appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy(a.(*apps.StatefulSetPersistentVolumeClaimRetentionPolicy), b.(*appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*appsv1beta2.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetStatus)(nil), (*appsv1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*appsv1beta2.StatefulSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*appsv1beta2.StatefulSetUpdateStrategy)(nil), (*apps.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(a.(*appsv1beta2.StatefulSetUpdateStrategy), b.(*apps.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*appsv1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*appsv1beta2.StatefulSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*appsv1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*appsv1beta2.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*appsv1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*appsv1beta2.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*appsv1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*appsv1beta2.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.Deployment)(nil), (*appsv1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*appsv1beta2.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*appsv1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*appsv1beta2.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*appsv1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*appsv1beta2.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*appsv1beta2.DaemonSet), b.(*apps.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*appsv1beta2.Deployment), b.(*apps.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta2.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(a.(*appsv1beta2.ScaleStatus), b.(*autoscaling.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*appsv1beta2.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*appsv1beta2.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta2_ControllerRevision_To_apps_ControllerRevision(in *appsv1beta2.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_v1beta2_ControllerRevision_To_apps_ControllerRevision is an autogenerated conversion function.
func Convert_v1beta2_ControllerRevision_To_apps_ControllerRevision(in *appsv1beta2.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error {
return autoConvert_v1beta2_ControllerRevision_To_apps_ControllerRevision(in, out, s)
}
func autoConvert_apps_ControllerRevision_To_v1beta2_ControllerRevision(in *apps.ControllerRevision, out *appsv1beta2.ControllerRevision, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Data = in.Data
out.Revision = in.Revision
return nil
}
// Convert_apps_ControllerRevision_To_v1beta2_ControllerRevision is an autogenerated conversion function.
func Convert_apps_ControllerRevision_To_v1beta2_ControllerRevision(in *apps.ControllerRevision, out *appsv1beta2.ControllerRevision, s conversion.Scope) error {
return autoConvert_apps_ControllerRevision_To_v1beta2_ControllerRevision(in, out, s)
}
func autoConvert_v1beta2_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1beta2.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]apps.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_ControllerRevisionList_To_apps_ControllerRevisionList is an autogenerated conversion function.
func Convert_v1beta2_ControllerRevisionList_To_apps_ControllerRevisionList(in *appsv1beta2.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_v1beta2_ControllerRevisionList_To_apps_ControllerRevisionList(in, out, s)
}
func autoConvert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1beta2.ControllerRevisionList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]appsv1beta2.ControllerRevision)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList is an autogenerated conversion function.
func Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in *apps.ControllerRevisionList, out *appsv1beta2.ControllerRevisionList, s conversion.Scope) error {
return autoConvert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in, out, s)
}
func autoConvert_v1beta2_DaemonSet_To_apps_DaemonSet(in *appsv1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *appsv1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
out.Type = apps.DaemonSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *appsv1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s)
}
func autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *appsv1beta2.DaemonSetCondition, s conversion.Scope) error {
out.Type = appsv1beta2.DaemonSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function.
func Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *appsv1beta2.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s)
}
func autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *appsv1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.DaemonSet, len(*in))
for i := range *in {
if err := Convert_v1beta2_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *appsv1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
return autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in, out, s)
}
func autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *appsv1beta2.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta2.DaemonSet, len(*in))
for i := range *in {
if err := Convert_apps_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function.
func Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *appsv1beta2.DaemonSetList, s conversion.Scope) error {
return autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s)
}
func autoConvert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
// Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
return autoConvert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in, out, s)
}
func autoConvert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
// WARNING: in.TemplateGeneration requires manual conversion: does not exist in peer-type
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
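// Note on the warning above: Spec.TemplateGeneration has no v1beta2 peer, so
// it cannot round-trip through this struct alone. In upstream Kubernetes the
// value is preserved at the DaemonSet level instead, written into the
// appsv1beta2.DeprecatedTemplateGeneration annotation by the manual wrapper.
// A hedged sketch of that step (strconv assumed imported):
//
//    out.Annotations[appsv1beta2.DeprecatedTemplateGeneration] =
//        strconv.FormatInt(in.Spec.TemplateGeneration, 10)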
func autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *appsv1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *appsv1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s)
}
func autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *appsv1beta2.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]appsv1beta2.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function.
func Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *appsv1beta2.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s)
}
func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDaemonSet)
if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = appsv1beta2.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1beta2.RollingUpdateDaemonSet)
if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
out.Type = apps.DeploymentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function.
func Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *appsv1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in, out, s)
}
func autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1beta2.DeploymentCondition, s conversion.Scope) error {
out.Type = appsv1beta2.DeploymentConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function.
func Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *appsv1beta2.DeploymentCondition, s conversion.Scope) error {
return autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s)
}
func autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in *appsv1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.Deployment, len(*in))
for i := range *in {
if err := Convert_v1beta2_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta2_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function.
func Convert_v1beta2_DeploymentList_To_apps_DeploymentList(in *appsv1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in, out, s)
}
func autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *appsv1beta2.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta2.Deployment, len(*in))
for i := range *in {
if err := Convert_apps_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function.
func Convert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *appsv1beta2.DeploymentList, s conversion.Scope) error {
return autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in, out, s)
}
func autoConvert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec is an autogenerated conversion function.
func Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in, out, s)
}
func autoConvert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
// WARNING: in.RollbackTo requires manual conversion: does not exist in peer-type
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
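// Note on the warning above: the internal Spec.RollbackTo field has no
// v1beta2 peer. Upstream handles this in the manual Deployment conversion by
// round-tripping the value through the appsv1beta2.DeprecatedRollbackTo
// annotation; a hedged sketch (strconv assumed imported):
//
//    if in.RollbackTo != nil {
//        out.Annotations[appsv1beta2.DeprecatedRollbackTo] =
//            strconv.FormatInt(in.RollbackTo.Revision, 10)
//    }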
func autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function.
func Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *appsv1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in, out, s)
}
func autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1beta2.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]appsv1beta2.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function.
func Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *appsv1beta2.DeploymentStatus, s conversion.Scope) error {
return autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s)
}
func autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
out.Type = apps.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDeployment)
if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function.
func Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s)
}
func autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error {
out.Type = appsv1beta2.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1beta2.RollingUpdateDeployment)
if err := Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy is an autogenerated conversion function.
func Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in, out, s)
}
func autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *appsv1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *appsv1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in, out, s)
}
func autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *appsv1beta2.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function.
func Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *appsv1beta2.ReplicaSet, s conversion.Scope) error {
return autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s)
}
func autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *appsv1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
out.Type = apps.ReplicaSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *appsv1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s)
}
func autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *appsv1beta2.ReplicaSetCondition, s conversion.Scope) error {
out.Type = appsv1beta2.ReplicaSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function.
func Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *appsv1beta2.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s)
}
func autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *appsv1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *appsv1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in, out, s)
}
func autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *appsv1beta2.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta2.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function.
func Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *appsv1beta2.ReplicaSetList, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s)
}
func autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in, out, s)
}
func autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec is an autogenerated conversion function.
func Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in, out, s)
}
func autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *appsv1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function.
func Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *appsv1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s)
}
func autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *appsv1beta2.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]appsv1beta2.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function.
func Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *appsv1beta2.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s)
}
func autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s)
}
func autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in, out, s)
}
func autoConvert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1beta2.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *appsv1beta2.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1beta2.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil {
return err
}
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
return nil
}
// Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy is an autogenerated conversion function.
func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *appsv1beta2.RollingUpdateStatefulSetStrategy, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in, out, s)
}
func autoConvert_v1beta2_Scale_To_autoscaling_Scale(in *appsv1beta2.Scale, out *autoscaling.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_Scale_To_autoscaling_Scale is an autogenerated conversion function.
func Convert_v1beta2_Scale_To_autoscaling_Scale(in *appsv1beta2.Scale, out *autoscaling.Scale, s conversion.Scope) error {
return autoConvert_v1beta2_Scale_To_autoscaling_Scale(in, out, s)
}
func autoConvert_autoscaling_Scale_To_v1beta2_Scale(in *autoscaling.Scale, out *appsv1beta2.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_Scale_To_v1beta2_Scale is an autogenerated conversion function.
func Convert_autoscaling_Scale_To_v1beta2_Scale(in *autoscaling.Scale, out *appsv1beta2.Scale, s conversion.Scope) error {
return autoConvert_autoscaling_Scale_To_v1beta2_Scale(in, out, s)
}
func autoConvert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in *appsv1beta2.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in *appsv1beta2.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s)
}
func autoConvert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in *autoscaling.ScaleSpec, out *appsv1beta2.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function.
func Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in *autoscaling.ScaleSpec, out *appsv1beta2.ScaleSpec, s conversion.Scope) error {
return autoConvert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s)
}
func autoConvert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta2.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string)
// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta2.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string)
return nil
}
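// The two ScaleStatus conversions above are completed manually because the
// selector shapes differ (map[string]string vs. a serialized selector
// string). A minimal sketch of the map-to-string direction, assuming
// k8s.io/apimachinery/pkg/labels is imported:
//
//    out.Selector = ""
//    if in.TargetSelector != "" {
//        out.Selector = in.TargetSelector
//    } else if in.Selector != nil {
//        out.Selector = labels.SelectorFromSet(labels.Set(in.Selector)).String()
//    }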
func autoConvert_v1beta2_StatefulSet_To_apps_StatefulSet(in *appsv1beta2.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_StatefulSet_To_apps_StatefulSet is an autogenerated conversion function.
func Convert_v1beta2_StatefulSet_To_apps_StatefulSet(in *appsv1beta2.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSet_To_apps_StatefulSet(in, out, s)
}
func autoConvert_apps_StatefulSet_To_v1beta2_StatefulSet(in *apps.StatefulSet, out *appsv1beta2.StatefulSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_StatefulSet_To_v1beta2_StatefulSet is an autogenerated conversion function.
func Convert_apps_StatefulSet_To_v1beta2_StatefulSet(in *apps.StatefulSet, out *appsv1beta2.StatefulSet, s conversion.Scope) error {
return autoConvert_apps_StatefulSet_To_v1beta2_StatefulSet(in, out, s)
}
func autoConvert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1beta2.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
out.Type = apps.StatefulSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in *appsv1beta2.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s)
}
func autoConvert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1beta2.StatefulSetCondition, s conversion.Scope) error {
out.Type = appsv1beta2.StatefulSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition is an autogenerated conversion function.
func Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in *apps.StatefulSetCondition, out *appsv1beta2.StatefulSetCondition, s conversion.Scope) error {
return autoConvert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in, out, s)
}
func autoConvert_v1beta2_StatefulSetList_To_apps_StatefulSetList(in *appsv1beta2.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.StatefulSet, len(*in))
for i := range *in {
if err := Convert_v1beta2_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta2_StatefulSetList_To_apps_StatefulSetList is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetList_To_apps_StatefulSetList(in *appsv1beta2.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetList_To_apps_StatefulSetList(in, out, s)
}
func autoConvert_apps_StatefulSetList_To_v1beta2_StatefulSetList(in *apps.StatefulSetList, out *appsv1beta2.StatefulSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]appsv1beta2.StatefulSet, len(*in))
for i := range *in {
if err := Convert_apps_StatefulSet_To_v1beta2_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_StatefulSetList_To_v1beta2_StatefulSetList is an autogenerated conversion function.
func Convert_apps_StatefulSetList_To_v1beta2_StatefulSetList(in *apps.StatefulSetList, out *appsv1beta2.StatefulSetList, s conversion.Scope) error {
return autoConvert_apps_StatefulSetList_To_v1beta2_StatefulSetList(in, out, s)
}
func autoConvert_v1beta2_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1beta2.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_v1beta2_StatefulSetOrdinals_To_apps_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in *appsv1beta2.StatefulSetOrdinals, out *apps.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetOrdinals_To_apps_StatefulSetOrdinals(in, out, s)
}
func autoConvert_apps_StatefulSetOrdinals_To_v1beta2_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1beta2.StatefulSetOrdinals, s conversion.Scope) error {
out.Start = in.Start
return nil
}
// Convert_apps_StatefulSetOrdinals_To_v1beta2_StatefulSetOrdinals is an autogenerated conversion function.
func Convert_apps_StatefulSetOrdinals_To_v1beta2_StatefulSetOrdinals(in *apps.StatefulSetOrdinals, out *appsv1beta2.StatefulSetOrdinals, s conversion.Scope) error {
return autoConvert_apps_StatefulSetOrdinals_To_v1beta2_StatefulSetOrdinals(in, out, s)
}
func autoConvert_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = apps.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in *appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy, out *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy_To_apps_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
out.WhenDeleted = appsv1beta2.PersistentVolumeClaimRetentionPolicyType(in.WhenDeleted)
out.WhenScaled = appsv1beta2.PersistentVolumeClaimRetentionPolicyType(in.WhenScaled)
return nil
}
// Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy is an autogenerated conversion function.
func Convert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy(in *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, out *appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetPersistentVolumeClaimRetentionPolicy_To_v1beta2_StatefulSetPersistentVolumeClaimRetentionPolicy(in, out, s)
}
func autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*apps.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*apps.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta2.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.VolumeClaimTemplates = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates))
out.ServiceName = in.ServiceName
out.PodManagementPolicy = appsv1beta2.PodManagementPolicyType(in.PodManagementPolicy)
if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.MinReadySeconds = in.MinReadySeconds
out.PersistentVolumeClaimRetentionPolicy = (*appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy)(unsafe.Pointer(in.PersistentVolumeClaimRetentionPolicy))
out.Ordinals = (*appsv1beta2.StatefulSetOrdinals)(unsafe.Pointer(in.Ordinals))
return nil
}
func autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err
}
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s)
}
func autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1beta2.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int64_To_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err
}
out.Replicas = in.Replicas
out.ReadyReplicas = in.ReadyReplicas
out.CurrentReplicas = in.CurrentReplicas
out.UpdatedReplicas = in.UpdatedReplicas
out.CurrentRevision = in.CurrentRevision
out.UpdateRevision = in.UpdateRevision
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]appsv1beta2.StatefulSetCondition)(unsafe.Pointer(&in.Conditions))
out.AvailableReplicas = in.AvailableReplicas
return nil
}
// Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus is an autogenerated conversion function.
func Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1beta2.StatefulSetStatus, s conversion.Scope) error {
return autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in, out, s)
}
func autoConvert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta2.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateStatefulSetStrategy)
if err := Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1beta2.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta2.StatefulSetUpdateStrategy, s conversion.Scope) error {
out.Type = appsv1beta2.StatefulSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(appsv1beta2.RollingUpdateStatefulSetStrategy)
if err := Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1beta2.StatefulSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta2
import (
appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&appsv1beta2.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*appsv1beta2.DaemonSet)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*appsv1beta2.DaemonSetList)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*appsv1beta2.Deployment)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*appsv1beta2.DeploymentList)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*appsv1beta2.ReplicaSet)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*appsv1beta2.ReplicaSetList)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*appsv1beta2.StatefulSet)) })
scheme.AddTypeDefaultingFunc(&appsv1beta2.StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*appsv1beta2.StatefulSetList)) })
return nil
}
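// Illustrative usage (not part of the generated output): applying the
// registered defaulters to an object. A minimal sketch:
//
//    scheme := runtime.NewScheme()
//    if err := RegisterDefaults(scheme); err != nil {
//        panic(err)
//    }
//    ds := &appsv1beta2.DaemonSet{}
//    scheme.Default(ds) // e.g. unset container ports get Protocol "TCP"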
func SetObjectDefaults_DaemonSet(in *appsv1beta2.DaemonSet) {
SetDefaults_DaemonSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *appsv1beta2.DaemonSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_DaemonSet(a)
}
}
func SetObjectDefaults_Deployment(in *appsv1beta2.Deployment) {
SetDefaults_Deployment(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *appsv1beta2.DeploymentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Deployment(a)
}
}
func SetObjectDefaults_ReplicaSet(in *appsv1beta2.ReplicaSet) {
SetDefaults_ReplicaSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *appsv1beta2.ReplicaSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ReplicaSet(a)
}
}
func SetObjectDefaults_StatefulSet(in *appsv1beta2.StatefulSet) {
SetDefaults_StatefulSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
for i := range in.Spec.VolumeClaimTemplates {
a := &in.Spec.VolumeClaimTemplates[i]
corev1.SetDefaults_PersistentVolumeClaim(a)
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
corev1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
corev1.SetDefaults_ResourceList(&a.Status.Capacity)
corev1.SetDefaults_ResourceList(&a.Status.AllocatedResources)
}
}
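// Editorial sketch (not part of the generated code above): the defaulting
// walk fills omitted scalar fields in place. Assuming a minimally populated
// object, an unset container port protocol is defaulted to "TCP":
//
//	sts := &appsv1beta2.StatefulSet{}
//	sts.Spec.Template.Spec.Containers = []v1.Container{
//		{Name: "web", Ports: []v1.ContainerPort{{ContainerPort: 80}}},
//	}
//	SetObjectDefaults_StatefulSet(sts)
//	// sts.Spec.Template.Spec.Containers[0].Ports[0].Protocol == "TCP"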
func SetObjectDefaults_StatefulSetList(in *appsv1beta2.StatefulSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_StatefulSet(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1beta2
import (
context "context"
fmt "fmt"
appsv1beta2 "k8s.io/api/apps/v1beta2"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*appsv1beta2.Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/scale":
return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*appsv1beta2.Scale), safe.Cast[*appsv1beta2.Scale](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
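// Editorial sketch: the apiserver reaches this validation through the scheme
// registration above; a direct call for a hypothetical /scale update would
// look like this (ctx, op, newScale, and oldScale supplied by the caller):
//
//	errs := Validate_Scale(ctx, op, nil /* fldPath */, newScale, oldScale)
//	if len(errs) > 0 {
//		// reject the /scale subresource request
//	}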
func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *appsv1beta2.Scale) (errs field.ErrorList) {
// field appsv1beta2.Scale.TypeMeta has no validation
// field appsv1beta2.Scale.ObjectMeta has no validation
// field appsv1beta2.Scale.Spec
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *appsv1beta2.ScaleSpec) (errs field.ErrorList) {
errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *appsv1beta2.Scale) *appsv1beta2.ScaleSpec { return &oldObj.Spec }))...)
// field appsv1beta2.Scale.Status has no validation
return errs
}
func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *appsv1beta2.ScaleSpec) (errs field.ErrorList) {
// field appsv1beta2.ScaleSpec.Replicas
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
// optional value-type fields with zero-value defaults are purely documentation
if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
return nil // no changes
}
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
}(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *appsv1beta2.ScaleSpec) *int32 { return &oldObj.Replicas }))...)
return errs
}
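// Editorial note: the update short-circuit above is validation ratcheting -
// when spec.replicas is unchanged on update, the Minimum check is skipped so
// that a previously persisted value cannot block an otherwise valid update.
// On create (hypothetical input), a negative value fails validate.Minimum:
//
//	scale.Spec.Replicas = -1
//	// Validate_ScaleSpec(ctx, createOp, field.NewPath("spec"), &scale.Spec, nil)
//	// returns an error for spec.replicas being below the minimum of 0.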
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"encoding/json"
"fmt"
"strconv"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apps"
api "k8s.io/kubernetes/pkg/apis/core"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
)
// StatefulSetValidationOptions is a struct that can be passed to ValidateStatefulSetSpec to record the validation options
type StatefulSetValidationOptions struct {
// Allow invalid DNS1123 ServiceName
AllowInvalidServiceName bool
// Skip validating pod template spec, which is used for StatefulSet update
SkipValidatePodTemplateSpec bool
// Skip validating volume claim templates, which is used for StatefulSet update
SkipValidateVolumeClaimTemplates bool
}
// ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateStatefulSetName(name string, prefix bool) []string {
// TODO: Validate that there's room for the suffix inserted by the pods.
// Currently this is just "-index". In the future we may allow a user
// specified list of suffixes and we need to validate the longest one.
return apimachineryvalidation.NameIsDNSLabel(name, prefix)
}
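// For example, DNS-1123 label rules reject uppercase letters and underscores
// (hypothetical names):
//
//	ValidateStatefulSetName("web", false)   // no errors
//	ValidateStatefulSetName("Web_0", false) // returns reasons the name is invalid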
// ValidatePodTemplateSpecForStatefulSet validates the given template and ensures that it is in accordance with the desired selector.
func ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path, opts apivalidation.PodValidationOptions,
setOpts StatefulSetValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
} else {
if !selector.Empty() {
// Verify that the StatefulSet selector matches the labels in template.
labels := labels.Set(template.Labels)
if !selector.Matches(labels) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
}
}
if !setOpts.SkipValidatePodTemplateSpec {
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath, opts)...)
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...)
allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...)
allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, &template.Spec, fldPath.Child("annotations"), opts)...)
}
return allErrs
}
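// For example (hypothetical labels), a selector of app=web combined with a
// template labeled app=db fails the selector.Matches check above and yields
// the "`selector` does not match template `labels`" error.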
func ValidatePersistentVolumeClaimRetentionPolicyType(policy apps.PersistentVolumeClaimRetentionPolicyType, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
switch policy {
case apps.RetainPersistentVolumeClaimRetentionPolicyType:
case apps.DeletePersistentVolumeClaimRetentionPolicyType:
default:
allErrs = append(allErrs, field.NotSupported(fldPath, policy, []string{string(apps.RetainPersistentVolumeClaimRetentionPolicyType), string(apps.DeletePersistentVolumeClaimRetentionPolicyType)}))
}
return allErrs
}
func ValidatePersistentVolumeClaimRetentionPolicy(policy *apps.StatefulSetPersistentVolumeClaimRetentionPolicy, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if policy != nil {
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenDeleted, fldPath.Child("whenDeleted"))...)
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicyType(policy.WhenScaled, fldPath.Child("whenScaled"))...)
}
return allErrs
}
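// For example (hypothetical value), any policy other than Retain or Delete is
// rejected with a NotSupported error:
//
//	errs := ValidatePersistentVolumeClaimRetentionPolicyType(
//		apps.PersistentVolumeClaimRetentionPolicyType("Recycle"),
//		field.NewPath("spec", "persistentVolumeClaimRetentionPolicy", "whenDeleted"))
//	// errs lists the supported values "Retain" and "Delete"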
func volumesToAddForTemplates(spec *apps.StatefulSetSpec) map[string]api.Volume {
volumes := make(map[string]api.Volume)
templates := spec.VolumeClaimTemplates
for i := range templates {
volumes[templates[i].Name] = api.Volume{
Name: templates[i].Name,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: templates[i].Name,
},
},
}
}
return volumes
}
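// Editorial sketch: a volumeClaimTemplate named "www" maps to a synthetic
// volume equivalent to
//
//	api.Volume{
//		Name: "www",
//		VolumeSource: api.VolumeSource{
//			PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "www"},
//		},
//	}
//
// so the pod template can be validated as if the claim-backed volumes were
// declared inline.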
func validateVolumeClaimTemplates(volumeClaimTemplates []api.PersistentVolumeClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
opts := apivalidation.ValidationOptionsForPersistentVolumeClaimCreate()
for i, pvc := range volumeClaimTemplates {
allErrs = append(allErrs, apivalidation.ValidatePersistentVolumeClaimSpec(&pvc.Spec, fldPath.Index(i).Child("spec"), opts)...)
}
return allErrs
}
// ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set.
func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions, setOpts StatefulSetValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
switch spec.PodManagementPolicy {
case "":
allErrs = append(allErrs, field.Required(fldPath.Child("podManagementPolicy"), ""))
case apps.OrderedReadyPodManagement, apps.ParallelPodManagement:
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("podManagementPolicy"), spec.PodManagementPolicy, fmt.Sprintf("must be '%s' or '%s'", apps.OrderedReadyPodManagement, apps.ParallelPodManagement)))
}
switch spec.UpdateStrategy.Type {
case "":
allErrs = append(allErrs, field.Required(fldPath.Child("updateStrategy"), ""))
case apps.OnDeleteStatefulSetStrategyType:
if spec.UpdateStrategy.RollingUpdate != nil {
allErrs = append(
allErrs,
field.Invalid(
fldPath.Child("updateStrategy").Child("rollingUpdate"),
spec.UpdateStrategy.RollingUpdate,
fmt.Sprintf("only allowed for updateStrategy '%s'", apps.RollingUpdateStatefulSetStrategyType)))
}
case apps.RollingUpdateStatefulSetStrategyType:
if spec.UpdateStrategy.RollingUpdate != nil {
allErrs = append(allErrs, validateRollingUpdateStatefulSet(spec.UpdateStrategy.RollingUpdate, fldPath.Child("updateStrategy", "rollingUpdate"))...)
}
default:
allErrs = append(allErrs,
field.Invalid(fldPath.Child("updateStrategy"), spec.UpdateStrategy,
fmt.Sprintf("must be '%s' or '%s'",
apps.RollingUpdateStatefulSetStrategyType,
apps.OnDeleteStatefulSetStrategyType)))
}
allErrs = append(allErrs, ValidatePersistentVolumeClaimRetentionPolicy(spec.PersistentVolumeClaimRetentionPolicy, fldPath.Child("persistentVolumeClaimRetentionPolicy"))...)
if !setOpts.SkipValidateVolumeClaimTemplates {
allErrs = append(allErrs, validateVolumeClaimTemplates(spec.VolumeClaimTemplates, fldPath.Child("volumeClaimTemplates"))...)
}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.Ordinals != nil {
replicaStartOrdinal := spec.Ordinals.Start
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(replicaStartOrdinal), fldPath.Child("ordinals.start"))...)
}
if !setOpts.AllowInvalidServiceName && len(spec.ServiceName) > 0 {
allErrs = append(allErrs, apivalidation.ValidateDNS1123Label(spec.ServiceName, fldPath.Child("serviceName"))...)
}
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
// Validate the selector strictly; spec.selector was always required, so it must pass LabelSelectorAsSelector below.
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for statefulset"))
}
}
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, ""))
} else {
templateToValidate := &spec.Template
if len(spec.Template.Spec.Subdomain) > 0 || len(spec.Template.Spec.Hostname) > 0 || len(spec.VolumeClaimTemplates) > 0 {
templateToValidate = templateToValidate.DeepCopy()
// overwritten by controller to spec.ServiceName in initIdentity, don't validate
templateToValidate.Spec.Subdomain = ""
// overwritten by controller to pod.Name in initIdentity, don't validate
templateToValidate.Spec.Hostname = ""
if len(spec.VolumeClaimTemplates) > 0 {
templateVolumes := volumesToAddForTemplates(spec)
newVolumes := make([]api.Volume, 0, len(templateVolumes))
for _, v := range templateVolumes {
newVolumes = append(newVolumes, v)
}
for _, v := range templateToValidate.Spec.Volumes {
if _, ok := templateVolumes[v.Name]; !ok {
newVolumes = append(newVolumes, v)
}
}
templateToValidate.Spec.Volumes = newVolumes
}
}
allErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(templateToValidate, selector, fldPath.Child("template"), opts, setOpts)...)
}
if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
}
if spec.Template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in StatefulSet is not supported"))
}
return allErrs
}
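// For example (hypothetical input), an empty spec fails fast on the required
// fields checked above:
//
//	errs := ValidateStatefulSetSpec(&apps.StatefulSetSpec{}, field.NewPath("spec"),
//		apivalidation.PodValidationOptions{}, StatefulSetValidationOptions{})
//	// errs includes Required errors for spec.podManagementPolicy,
//	// spec.updateStrategy, and spec.selector, among others.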
// ValidateStatefulSet validates a StatefulSet.
func ValidateStatefulSet(statefulSet *apps.StatefulSet, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath("metadata"))
setOpts := StatefulSetValidationOptions{
AllowInvalidServiceName: false, // require valid serviceNames in new StatefulSets
}
allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"), opts, setOpts)...)
return allErrs
}
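// Typical usage (sketch): validate a newly constructed object before
// persisting it, with default pod validation options:
//
//	if errs := ValidateStatefulSet(sts, apivalidation.PodValidationOptions{}); len(errs) > 0 {
//		// reject the create request
//	}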
// ValidateStatefulSetUpdate validates an update to a StatefulSet.
func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet, opts apivalidation.PodValidationOptions) field.ErrorList {
// First, validate that the new statefulset is valid. Don't call
// ValidateStatefulSet() because we don't want to revalidate the name on
// update. This is important here because we used to allow DNS subdomain
// for name, but that can't actually create pods. The only reasonable
thing to do is delete such an instance, but if there is a finalizer, it
// would need to pass update validation. Name can't change anyway.
allErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))
setOpts := StatefulSetValidationOptions{
AllowInvalidServiceName: true, // serviceName is immutable, tolerate existing invalid names on update
SkipValidateVolumeClaimTemplates: true, // volumeClaimTemplates are immutable, tolerate previously persisted invalid values on update
}
// To tolerate existing StatefulSets, skip validation of the new pod template spec when the old spec already fails validation.
if len(ValidateStatefulSetSpec(&oldStatefulSet.Spec, nil, opts, setOpts)) > 0 {
setOpts.SkipValidatePodTemplateSpec = true
}
allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"), opts, setOpts)...)
// statefulset updates aren't super common and general updates are likely to be touching spec, so we'll do this
// deep copy right away. This avoids mutating our inputs
newStatefulSetClone := statefulSet.DeepCopy()
newStatefulSetClone.Spec.Replicas = oldStatefulSet.Spec.Replicas // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.Template = oldStatefulSet.Spec.Template // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.UpdateStrategy = oldStatefulSet.Spec.UpdateStrategy // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.MinReadySeconds = oldStatefulSet.Spec.MinReadySeconds // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.Ordinals = oldStatefulSet.Spec.Ordinals // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.RevisionHistoryLimit = oldStatefulSet.Spec.RevisionHistoryLimit // +k8s:verify-mutation:reason=clone
newStatefulSetClone.Spec.PersistentVolumeClaimRetentionPolicy = oldStatefulSet.Spec.PersistentVolumeClaimRetentionPolicy // +k8s:verify-mutation:reason=clone
if !apiequality.Semantic.DeepEqual(newStatefulSetClone.Spec, oldStatefulSet.Spec) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'ordinals', 'template', 'updateStrategy', 'revisionHistoryLimit', 'persistentVolumeClaimRetentionPolicy' and 'minReadySeconds' are forbidden"))
}
return allErrs
}
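// Editorial note: the clone-and-compare block above is the standard
// immutability pattern - every mutable field is copied from the old spec onto
// a deep copy of the new spec, so any remaining difference must come from an
// immutable field. A changed spec.selector, for example, leaves the clone
// unequal to the old spec and triggers the Forbidden error.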
// ValidateStatefulSetStatus validates a StatefulSetStatus.
func ValidateStatefulSetStatus(status *apps.StatefulSetStatus, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fieldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fieldPath.Child("readyReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), fieldPath.Child("currentReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fieldPath.Child("updatedReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fieldPath.Child("availableReplicas"))...)
if status.ObservedGeneration != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.ObservedGeneration), fieldPath.Child("observedGeneration"))...)
}
if status.CollisionCount != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fieldPath.Child("collisionCount"))...)
}
msg := "cannot be greater than status.replicas"
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.CurrentReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("currentReplicas"), status.CurrentReplicas, msg))
}
if status.UpdatedReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than status.readyReplicas"))
}
return allErrs
}
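// For example (hypothetical values), a status reporting more ready replicas
// than total replicas is rejected:
//
//	status := apps.StatefulSetStatus{Replicas: 3, ReadyReplicas: 5}
//	errs := ValidateStatefulSetStatus(&status, field.NewPath("status"))
//	// errs flags status.readyReplicas: "cannot be greater than status.replicas"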
// ValidateStatefulSetStatusUpdate validates a status update to a StatefulSet.
func ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateStatefulSetStatus(&statefulSet.Status, field.NewPath("status"))...)
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))...)
// TODO: Validate status.
if apivalidation.IsDecremented(statefulSet.Status.CollisionCount, oldStatefulSet.Status.CollisionCount) {
value := int32(0)
if statefulSet.Status.CollisionCount != nil {
value = *statefulSet.Status.CollisionCount
}
allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
}
return allErrs
}
// ValidateControllerRevisionName can be used to check whether the given ControllerRevision name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateControllerRevisionName = apimachineryvalidation.NameIsDNSSubdomain
// validateControllerRevision collects errors for the fields of revision and returns those errors as an ErrorList. If
// the returned list is empty, revision is valid. Validation ensures that revision has valid ObjectMeta, that its name
// is valid, and that its revision number is nonnegative.
func validateControllerRevision(revision *apps.ControllerRevision) field.ErrorList {
errs := field.ErrorList{}
errs = append(errs, apivalidation.ValidateObjectMeta(&revision.ObjectMeta, true, ValidateControllerRevisionName, field.NewPath("metadata"))...)
errs = append(errs, apivalidation.ValidateNonnegativeField(revision.Revision, field.NewPath("revision"))...)
return errs
}
func ValidateControllerRevisionCreate(revision *apps.ControllerRevision) field.ErrorList {
errs := field.ErrorList{}
errs = append(errs, validateControllerRevision(revision)...)
var v any
if revision.Data.Raw == nil {
errs = append(errs, field.Required(field.NewPath("data"), "data is mandatory"))
} else if err := json.Unmarshal(revision.Data.Raw, &v); err != nil {
errs = append(errs, field.Invalid(field.NewPath("data"), "<value omitted>", fmt.Sprintf("error parsing data: %v", err.Error())))
} else if v == nil {
errs = append(errs, field.Required(field.NewPath("data"), "data is mandatory"))
} else if _, isObject := v.(map[string]any); !isObject {
errs = append(errs, field.Required(field.NewPath("data"), "data must be a valid JSON object"))
}
return errs
}
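// For ValidateControllerRevisionCreate, revision.Data must unmarshal to a JSON object; scalars, arrays, and null are
// all rejected. A hedged sketch (metadata validation elided):
//
//	rev := &apps.ControllerRevision{Revision: 1}
//	rev.Data.Raw = []byte(`[1, 2, 3]`) // parses, but is not an object
//	errs := ValidateControllerRevisionCreate(rev)
//	// errs includes "data must be a valid JSON object"
//	rev.Data.Raw = []byte(`{"spec": {}}`) // a JSON object: no data error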
// ValidateControllerRevisionUpdate collects errors pertaining to the mutation of a ControllerRevision object. If the
// returned ErrorList is empty the update operation is valid. Any mutation to the ControllerRevision's Data
// is considered to be invalid.
func ValidateControllerRevisionUpdate(newHistory, oldHistory *apps.ControllerRevision) field.ErrorList {
errs := field.ErrorList{}
errs = append(errs, apivalidation.ValidateObjectMetaUpdate(&newHistory.ObjectMeta, &oldHistory.ObjectMeta, field.NewPath("metadata"))...)
errs = append(errs, validateControllerRevision(newHistory)...)
errs = append(errs, apivalidation.ValidateImmutableField(newHistory.Data, oldHistory.Data, field.NewPath("data"))...)
return errs
}
// ValidateDaemonSet tests if required fields in the DaemonSet are set.
func ValidateDaemonSet(ds *apps.DaemonSet, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set.
func ValidateDaemonSetUpdate(ds, oldDS *apps.DaemonSet, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateDaemonSetSpecUpdate tests if an update to a DaemonSetSpec is valid.
func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TemplateGeneration shouldn't be decremented
if newSpec.TemplateGeneration < oldSpec.TemplateGeneration {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be decremented"))
}
// TemplateGeneration should be increased when and only when template is changed
templateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template)
if newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must be incremented upon template update"))
} else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be incremented without template update"))
}
// Spec.Selector is immutable
allErrs = append(allErrs, apivalidation.ValidateImmutableField(newSpec.Selector, oldSpec.Selector, field.NewPath("spec").Child("selector"))...)
return allErrs
}
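// The templateGeneration contract enforced above: it moves if and only if the template moves. A sketch with
// hypothetical specs (assuming the template labels map is non-nil):
//
//	newSpec := oldSpec.DeepCopy()
//	newSpec.Template.Labels["rev"] = "2" // template changed...
//	// ...so keeping newSpec.TemplateGeneration == oldSpec.TemplateGeneration yields
//	// "must be incremented upon template update"; bumping it with no template
//	// change yields the converse error.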
// validateDaemonSetStatus validates a DaemonSetStatus
func validateDaemonSetStatus(status *apps.DaemonSetStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...)
if status.CollisionCount != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
}
return allErrs
}
// ValidateDaemonSetStatusUpdate tests if required fields in the DaemonSet status are set and valid.
func ValidateDaemonSetStatusUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...)
if apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) {
value := int32(0)
if ds.Status.CollisionCount != nil {
value = *ds.Status.CollisionCount
}
allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
}
return allErrs
}
// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...)
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
}
if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for daemonset"))
}
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"), opts)...)
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
}
if spec.Template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("template", "spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in DaemonSet is not supported"))
}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...)
allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...)
if spec.RevisionHistoryLimit != nil {
// zero is a valid RevisionHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
}
return allErrs
}
// ValidateRollingUpdateDaemonSet validates a given RollingUpdateDaemonSet.
func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
// Validate both fields are positive ints or have a percentage value
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
// Validate that MaxUnavailable and MaxSurge are not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
// Validate exactly one of MaxSurge or MaxUnavailable is non-zero
hasUnavailable := getIntOrPercentValue(rollingUpdate.MaxUnavailable) != 0
hasSurge := getIntOrPercentValue(rollingUpdate.MaxSurge) != 0
switch {
case hasUnavailable && hasSurge:
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxSurge"), rollingUpdate.MaxSurge, "may not be set when maxUnavailable is non-zero"))
case !hasUnavailable && !hasSurge:
allErrs = append(allErrs, field.Required(fldPath.Child("maxUnavailable"), "cannot be 0 when maxSurge is 0"))
}
return allErrs
}
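// ValidateRollingUpdateDaemonSet therefore requires exactly one of maxUnavailable and maxSurge to be non-zero.
// Illustrative values (not from the upstream file):
//
//	ru := &apps.RollingUpdateDaemonSet{
//		MaxUnavailable: intstr.FromInt(1),
//		MaxSurge:       intstr.FromString("0%"),
//	}
//	errs := ValidateRollingUpdateDaemonSet(ru, field.NewPath("rollingUpdate"))
//	// len(errs) == 0: one knob is set and the other is zero. Making both
//	// non-zero, or both zero, produces the errors constructed above.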
// validateRollingUpdateStatefulSet validates a given RollingUpdateStatefulSet.
func validateRollingUpdateStatefulSet(rollingUpdate *apps.RollingUpdateStatefulSetStrategy, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
fldPathMaxUn := fldPath.Child("maxUnavailable")
allErrs = append(allErrs,
apivalidation.ValidateNonnegativeField(
int64(rollingUpdate.Partition),
fldPath.Child("partition"))...)
if rollingUpdate.MaxUnavailable != nil {
allErrs = append(allErrs, ValidatePositiveIntOrPercent(*rollingUpdate.MaxUnavailable, fldPathMaxUn)...)
if getIntOrPercentValue(*rollingUpdate.MaxUnavailable) == 0 {
// MaxUnavailable cannot be 0.
allErrs = append(allErrs, field.Invalid(fldPathMaxUn, *rollingUpdate.MaxUnavailable, "cannot be 0"))
}
// Validate that MaxUnavailable is not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(*rollingUpdate.MaxUnavailable, fldPathMaxUn)...)
}
return allErrs
}
// ValidateDaemonSetUpdateStrategy validates a given DaemonSetUpdateStrategy.
func ValidateDaemonSetUpdateStrategy(strategy *apps.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch strategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
case apps.RollingUpdateDaemonSetStrategyType:
// Make sure RollingUpdate field isn't nil.
if strategy.RollingUpdate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), ""))
return allErrs
}
allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
default:
validValues := []string{string(apps.RollingUpdateDaemonSetStrategyType), string(apps.OnDeleteDaemonSetStrategyType)}
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
}
return allErrs
}
// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateDaemonSetName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateDeploymentName validates that the given name can be used as a deployment name.
var ValidateDeploymentName = apimachineryvalidation.NameIsDNSSubdomain
// ValidatePositiveIntOrPercent tests if a given value is a valid int or
// percentage.
func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch intOrPercent.Type {
case intstr.String:
for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) {
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg))
}
case intstr.Int:
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...)
default:
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g. '5%')"))
}
return allErrs
}
func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) {
if intOrStringValue.Type != intstr.String {
return 0, false
}
if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 {
return 0, false
}
value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1])
return value, true
}
func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int {
value, isPercent := getPercentValue(intOrStringValue)
if isPercent {
return value
}
return intOrStringValue.IntValue()
}
// IsNotMoreThan100Percent tests whether a value can be represented as a percentage
// and, if so, whether that value is not more than 100%.
func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
value, isPercent := getPercentValue(intOrStringValue)
if !isPercent || value <= 100 {
return nil
}
allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%"))
return allErrs
}
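// The percent helpers operate on the string form: getPercentValue strips the trailing '%' and converts the rest with
// strconv.Atoi, and IsNotMoreThan100Percent only complains about percentage values above 100; plain integers pass
// through untouched. For example:
//
//	IsNotMoreThan100Percent(intstr.FromString("150%"), fldPath) // one Invalid error
//	IsNotMoreThan100Percent(intstr.FromString("90%"), fldPath)  // nil
//	IsNotMoreThan100Percent(intstr.FromInt(150), fldPath)       // nil: an int is not a percent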
// ValidateRollingUpdateDeployment validates a given RollingUpdateDeployment.
func ValidateRollingUpdateDeployment(rollingUpdate *apps.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 {
// Both MaxSurge and MaxUnavailable cannot be zero.
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0"))
}
// Validate that MaxUnavailable is not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
return allErrs
}
// ValidateDeploymentStrategy validates given DeploymentStrategy.
func ValidateDeploymentStrategy(strategy *apps.DeploymentStrategy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch strategy.Type {
case apps.RecreateDeploymentStrategyType:
if strategy.RollingUpdate != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(apps.RecreateDeploymentStrategyType+"'")))
}
case apps.RollingUpdateDeploymentStrategyType:
// This should never happen since it's set and checked in defaults.go
if strategy.RollingUpdate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil"))
} else {
allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
}
default:
validValues := []string{string(apps.RecreateDeploymentStrategyType), string(apps.RollingUpdateDeploymentStrategyType)}
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
}
return allErrs
}
// ValidateRollback validates given RollbackConfig.
func ValidateRollback(rollback *apps.RollbackConfig, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
v := rollback.Revision
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...)
return allErrs
}
// ValidateDeploymentSpec validates given deployment spec.
func ValidateDeploymentSpec(spec, oldSpec *apps.DeploymentSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
// validate selector strictly, spec.selector was always required to pass LabelSelectorAsSelector below
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
}
}
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
} else {
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"), opts)...)
}
allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.RevisionHistoryLimit != nil {
// zero is a valid RevisionHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
}
if spec.RollbackTo != nil {
allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...)
}
if spec.ProgressDeadlineSeconds != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...)
if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds {
allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds"))
}
}
return allErrs
}
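// One cross-field rule above is easy to trip over: progressDeadlineSeconds, when set, must be strictly greater than
// minReadySeconds. A sketch with made-up values:
//
//	pds := int32(30)
//	spec.MinReadySeconds = 30
//	spec.ProgressDeadlineSeconds = &pds
//	// ValidateDeploymentSpec reports "must be greater than minReadySeconds"
//	// because the two values are equal.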
// ValidateDeploymentStatus validates given deployment status.
func ValidateDeploymentStatus(status *apps.DeploymentStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...)
if status.TerminatingReplicas != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.TerminatingReplicas), fldPath.Child("terminatingReplicas"))...)
}
if status.CollisionCount != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
}
msg := "cannot be greater than status.replicas"
if status.UpdatedReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
}
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
}
return allErrs
}
// ValidateDeploymentUpdate tests if an update to a Deployment is valid.
func ValidateDeploymentUpdate(update, old *apps.Deployment, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, &old.Spec, field.NewPath("spec"), opts)...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.Spec.Selector, old.Spec.Selector, field.NewPath("spec").Child("selector"))...)
return allErrs
}
// ValidateDeploymentStatusUpdate tests if an update to a Deployment status
// is valid.
func ValidateDeploymentStatusUpdate(update, old *apps.Deployment) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
fldPath := field.NewPath("status")
allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...)
if apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) {
value := int32(0)
if update.Status.CollisionCount != nil {
value = *update.Status.CollisionCount
}
allErrs = append(allErrs, field.Invalid(fldPath.Child("collisionCount"), value, "cannot be decremented"))
}
return allErrs
}
// ValidateDeployment validates a given Deployment.
func ValidateDeployment(obj *apps.Deployment, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, nil, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateDeploymentRollback validates a given DeploymentRollback.
func ValidateDeploymentRollback(obj *apps.DeploymentRollback) field.ErrorList {
allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations"))
if len(obj.Name) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("name"), ""))
}
allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...)
return allErrs
}
// ValidateReplicaSetName can be used to check whether the given ReplicaSet
// name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateReplicaSetName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateReplicaSet tests if required fields in the ReplicaSet are set.
func ValidateReplicaSet(rs *apps.ReplicaSet, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, nil, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetUpdate(rs, oldRs *apps.ReplicaSet, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, &oldRs.Spec, field.NewPath("spec"), opts)...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(rs.Spec.Selector, oldRs.Spec.Selector, field.NewPath("spec").Child("selector"))...)
return allErrs
}
// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetStatusUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath("status"))...)
return allErrs
}
// ValidateReplicaSetStatus validates a given ReplicaSetStatus.
func ValidateReplicaSetStatus(status apps.ReplicaSetStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ObservedGeneration), fldPath.Child("observedGeneration"))...)
if status.TerminatingReplicas != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.TerminatingReplicas), fldPath.Child("terminatingReplicas"))...)
}
msg := "cannot be greater than status.replicas"
if status.FullyLabeledReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
}
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
}
return allErrs
}
// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
func ValidateReplicaSetSpec(spec, oldSpec *apps.ReplicaSetSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
// validate selector strictly, spec.selector was always required to pass LabelSelectorAsSelector below
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
}
}
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
} else {
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"), opts)...)
}
return allErrs
}
// ValidatePodTemplateSpecForReplicaSet validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
} else {
if !selector.Empty() {
// Verify that the ReplicaSet selector matches the labels in template.
labels := labels.Set(template.Labels)
if !selector.Matches(labels) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
}
}
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath, opts)...)
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
}
if template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicaSet is not Supported"))
}
}
return allErrs
}
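// ValidatePodTemplateSpecForReplicaSet layers workload-specific rules on top of the generic pod template validation:
// the labels must satisfy the selector, restartPolicy must be Always, and activeDeadlineSeconds is forbidden. A hedged
// sketch of the restartPolicy rule:
//
//	template.Spec.RestartPolicy = api.RestartPolicyOnFailure
//	errs := ValidatePodTemplateSpecForReplicaSet(template, selector, 1, fldPath, opts)
//	// errs includes a NotSupported error listing "Always" as the only accepted value.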
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package apps
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Data.DeepCopyInto(&out.Data)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
func (in *ControllerRevision) DeepCopy() *ControllerRevision {
if in == nil {
return nil
}
out := new(ControllerRevision)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevision) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
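// The generated trio above (DeepCopyInto, DeepCopy, DeepCopyObject) is the standard deepcopy-gen shape: DeepCopyInto
// does the field-by-field work, DeepCopy allocates and delegates, and DeepCopyObject adapts the result to
// runtime.Object for the scheme machinery. Illustrative use (not from the upstream file):
//
//	orig := &ControllerRevision{Revision: 7}
//	cp := orig.DeepCopy()
//	cp.Revision = 8
//	// orig.Revision is still 7; pointer, map, and slice fields are
//	// duplicated rather than aliased.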
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ControllerRevision, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList.
func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList {
if in == nil {
return nil
}
out := new(ControllerRevisionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ControllerRevisionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
if in == nil {
return nil
}
out := new(DaemonSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DaemonSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDaemonSet)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deployment) DeepCopyInto(out *Deployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
func (in *Deployment) DeepCopy() *Deployment {
if in == nil {
return nil
}
out := new(Deployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Deployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
func (in *DeploymentList) DeepCopy() *DeploymentList {
if in == nil {
return nil
}
out := new(DeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RollbackTo = in.RollbackTo
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
if in == nil {
return nil
}
out := new(DeploymentRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.Strategy.DeepCopyInto(&out.Strategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.RollbackTo != nil {
in, out := &in.RollbackTo, &out.RollbackTo
*out = new(RollbackConfig)
**out = **in
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
if in == nil {
return nil
}
out := new(DeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
if in.TerminatingReplicas != nil {
in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
if in == nil {
return nil
}
out := new(DeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDeployment)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
func (in *ReplicaSet) DeepCopy() *ReplicaSet {
if in == nil {
return nil
}
out := new(ReplicaSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
if in == nil {
return nil
}
out := new(ReplicaSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicaSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
if in == nil {
return nil
}
out := new(ReplicaSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
if in == nil {
return nil
}
out := new(ReplicaSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
if in.TerminatingReplicas != nil {
in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
if in == nil {
return nil
}
out := new(ReplicaSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
if in == nil {
return nil
}
out := new(RollbackConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
out.MaxSurge = in.MaxSurge
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
out.MaxSurge = in.MaxSurge
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
if in == nil {
return nil
}
out := new(RollingUpdateDeployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
if in == nil {
return nil
}
out := new(RollingUpdateStatefulSetStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
func (in *StatefulSet) DeepCopy() *StatefulSet {
if in == nil {
return nil
}
out := new(StatefulSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition.
func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition {
if in == nil {
return nil
}
out := new(StatefulSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StatefulSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
func (in *StatefulSetList) DeepCopy() *StatefulSetList {
if in == nil {
return nil
}
out := new(StatefulSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals.
func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals {
if in == nil {
return nil
}
out := new(StatefulSetOrdinals)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy {
if in == nil {
return nil
}
out := new(StatefulSetPersistentVolumeClaimRetentionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.VolumeClaimTemplates != nil {
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
*out = make([]core.PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.PersistentVolumeClaimRetentionPolicy != nil {
in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
if in.Ordinals != nil {
in, out := &in.Ordinals, &out.Ordinals
*out = new(StatefulSetOrdinals)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
if in == nil {
return nil
}
out := new(StatefulSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]StatefulSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
if in == nil {
return nil
}
out := new(StatefulSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateStatefulSetStrategy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
if in == nil {
return nil
}
out := new(StatefulSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the authentication API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/authentication"
"k8s.io/kubernetes/pkg/apis/authentication/v1"
"k8s.io/kubernetes/pkg/apis/authentication/v1alpha1"
"k8s.io/kubernetes/pkg/apis/authentication/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(authentication.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
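// Illustrative use of Install outside the init path (the names are real, the scenario is hypothetical): tests that
// want a private scheme instead of legacyscheme.Scheme can call Install directly.
//
//	scheme := runtime.NewScheme()
//	Install(scheme)
//	// scheme now knows the authentication types in v1, v1beta1, and v1alpha1,
//	// and prefers v1 on encode per SetVersionPriority.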
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authentication
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authentication.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
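// Illustrative sketch: Kind and Resource qualify bare names with this group,
// which is how callers build errors and REST mappings. The names below are
// examples only.
func exampleQualify() {
gk := Kind("TokenReview")      // GroupKind{Group: "authentication.k8s.io", Kind: "TokenReview"}
gr := Resource("tokenreviews") // GroupResource{Group: "authentication.k8s.io", Resource: "tokenreviews"}
_, _ = gk, gr
}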
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&TokenReview{},
&TokenRequest{},
&SelfSubjectReview{},
)
return nil
}
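// Usage sketch: applying the builder registers the internal types above under
// runtime.APIVersionInternal; AddToScheme is what the install package invokes.
func exampleInternalRegistration() error {
scheme := runtime.NewScheme()
return AddToScheme(scheme)
}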
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/authentication/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
authentication "k8s.io/kubernetes/pkg/apis/authentication"
)
// Convert_v1_UserInfo_To_authentication_UserInfo is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_v1_UserInfo_To_authentication_UserInfo(in *v1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s)
}
// Convert_authentication_UserInfo_To_v1_UserInfo is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *v1.UserInfo, s conversion.Scope) error {
return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s)
}
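// Hand-written usage sketch: because these wrappers are exported, other API
// packages can convert UserInfo values directly. A nil scope is safe here
// because the generated conversion below never consults it; the literal is
// hypothetical.
func exampleConvertUserInfo() error {
in := v1.UserInfo{Username: "jane", Groups: []string{"system:authenticated"}}
var out authentication.UserInfo
return Convert_v1_UserInfo_To_authentication_UserInfo(&in, &out, nil)
}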
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_TokenRequestSpec defaults ExpirationSeconds to one hour when it is unset.
func SetDefaults_TokenRequestSpec(obj *authenticationv1.TokenRequestSpec) {
if obj.ExpirationSeconds == nil {
hour := int64(60 * 60) // one hour, in seconds
obj.ExpirationSeconds = &hour
}
}
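// Illustrative sketch: the defaulter only fills ExpirationSeconds when the
// caller left it nil, so explicit values survive defaulting.
func exampleDefaultTokenRequestSpec() {
spec := authenticationv1.TokenRequestSpec{}
SetDefaults_TokenRequestSpec(&spec)
// *spec.ExpirationSeconds is now 3600.
explicit := int64(600)
custom := authenticationv1.TokenRequestSpec{ExpirationSeconds: &explicit}
SetDefaults_TokenRequestSpec(&custom)
// *custom.ExpirationSeconds remains 600.
}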
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authentication.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &authenticationv1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
authentication "k8s.io/kubernetes/pkg/apis/authentication"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*authenticationv1.BoundObjectReference)(nil), (*authentication.BoundObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_BoundObjectReference_To_authentication_BoundObjectReference(a.(*authenticationv1.BoundObjectReference), b.(*authentication.BoundObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.BoundObjectReference)(nil), (*authenticationv1.BoundObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_BoundObjectReference_To_v1_BoundObjectReference(a.(*authentication.BoundObjectReference), b.(*authenticationv1.BoundObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.SelfSubjectReview)(nil), (*authentication.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectReview_To_authentication_SelfSubjectReview(a.(*authenticationv1.SelfSubjectReview), b.(*authentication.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReview)(nil), (*authenticationv1.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReview_To_v1_SelfSubjectReview(a.(*authentication.SelfSubjectReview), b.(*authenticationv1.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.SelfSubjectReviewStatus)(nil), (*authentication.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(a.(*authenticationv1.SelfSubjectReviewStatus), b.(*authentication.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReviewStatus)(nil), (*authenticationv1.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus(a.(*authentication.SelfSubjectReviewStatus), b.(*authenticationv1.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenRequest)(nil), (*authentication.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenRequest_To_authentication_TokenRequest(a.(*authenticationv1.TokenRequest), b.(*authentication.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenRequest)(nil), (*authenticationv1.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenRequest_To_v1_TokenRequest(a.(*authentication.TokenRequest), b.(*authenticationv1.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenRequestSpec)(nil), (*authentication.TokenRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec(a.(*authenticationv1.TokenRequestSpec), b.(*authentication.TokenRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenRequestSpec)(nil), (*authenticationv1.TokenRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec(a.(*authentication.TokenRequestSpec), b.(*authenticationv1.TokenRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenRequestStatus)(nil), (*authentication.TokenRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus(a.(*authenticationv1.TokenRequestStatus), b.(*authentication.TokenRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenRequestStatus)(nil), (*authenticationv1.TokenRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus(a.(*authentication.TokenRequestStatus), b.(*authenticationv1.TokenRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenReview)(nil), (*authentication.TokenReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenReview_To_authentication_TokenReview(a.(*authenticationv1.TokenReview), b.(*authentication.TokenReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReview)(nil), (*authenticationv1.TokenReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReview_To_v1_TokenReview(a.(*authentication.TokenReview), b.(*authenticationv1.TokenReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenReviewSpec)(nil), (*authentication.TokenReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(a.(*authenticationv1.TokenReviewSpec), b.(*authentication.TokenReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReviewSpec)(nil), (*authenticationv1.TokenReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(a.(*authentication.TokenReviewSpec), b.(*authenticationv1.TokenReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1.TokenReviewStatus)(nil), (*authentication.TokenReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(a.(*authenticationv1.TokenReviewStatus), b.(*authentication.TokenReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReviewStatus)(nil), (*authenticationv1.TokenReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(a.(*authentication.TokenReviewStatus), b.(*authenticationv1.TokenReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*authentication.UserInfo)(nil), (*authenticationv1.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_UserInfo_To_v1_UserInfo(a.(*authentication.UserInfo), b.(*authenticationv1.UserInfo), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*authenticationv1.UserInfo)(nil), (*authentication.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_UserInfo_To_authentication_UserInfo(a.(*authenticationv1.UserInfo), b.(*authentication.UserInfo), scope)
}); err != nil {
return err
}
return nil
}
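// Usage sketch (a sketch, not a prescribed pattern): once RegisterConversions
// has run, scheme.Convert can translate between the external and internal
// types without the caller naming a specific conversion function.
func exampleSchemeConvert() error {
scheme := runtime.NewScheme()
if err := RegisterConversions(scheme); err != nil {
return err
}
in := &authenticationv1.TokenReview{}
out := &authentication.TokenReview{}
return scheme.Convert(in, out, nil)
}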
func autoConvert_v1_BoundObjectReference_To_authentication_BoundObjectReference(in *authenticationv1.BoundObjectReference, out *authentication.BoundObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.APIVersion = in.APIVersion
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_v1_BoundObjectReference_To_authentication_BoundObjectReference is an autogenerated conversion function.
func Convert_v1_BoundObjectReference_To_authentication_BoundObjectReference(in *authenticationv1.BoundObjectReference, out *authentication.BoundObjectReference, s conversion.Scope) error {
return autoConvert_v1_BoundObjectReference_To_authentication_BoundObjectReference(in, out, s)
}
func autoConvert_authentication_BoundObjectReference_To_v1_BoundObjectReference(in *authentication.BoundObjectReference, out *authenticationv1.BoundObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.APIVersion = in.APIVersion
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_authentication_BoundObjectReference_To_v1_BoundObjectReference is an autogenerated conversion function.
func Convert_authentication_BoundObjectReference_To_v1_BoundObjectReference(in *authentication.BoundObjectReference, out *authenticationv1.BoundObjectReference, s conversion.Scope) error {
return autoConvert_authentication_BoundObjectReference_To_v1_BoundObjectReference(in, out, s)
}
func autoConvert_v1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_SelfSubjectReview_To_authentication_SelfSubjectReview is an autogenerated conversion function.
func Convert_v1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectReview_To_authentication_SelfSubjectReview(in, out, s)
}
func autoConvert_authentication_SelfSubjectReview_To_v1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReview_To_v1_SelfSubjectReview is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReview_To_v1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReview_To_v1_SelfSubjectReview(in, out, s)
}
func autoConvert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in, out, s)
}
func autoConvert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReviewStatus_To_v1_SelfSubjectReviewStatus(in, out, s)
}
func autoConvert_v1_TokenRequest_To_authentication_TokenRequest(in *authenticationv1.TokenRequest, out *authentication.TokenRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_TokenRequest_To_authentication_TokenRequest is an autogenerated conversion function.
func Convert_v1_TokenRequest_To_authentication_TokenRequest(in *authenticationv1.TokenRequest, out *authentication.TokenRequest, s conversion.Scope) error {
return autoConvert_v1_TokenRequest_To_authentication_TokenRequest(in, out, s)
}
func autoConvert_authentication_TokenRequest_To_v1_TokenRequest(in *authentication.TokenRequest, out *authenticationv1.TokenRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_TokenRequest_To_v1_TokenRequest is an autogenerated conversion function.
func Convert_authentication_TokenRequest_To_v1_TokenRequest(in *authentication.TokenRequest, out *authenticationv1.TokenRequest, s conversion.Scope) error {
return autoConvert_authentication_TokenRequest_To_v1_TokenRequest(in, out, s)
}
func autoConvert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec(in *authenticationv1.TokenRequestSpec, out *authentication.TokenRequestSpec, s conversion.Scope) error {
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
if err := metav1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil {
return err
}
out.BoundObjectRef = (*authentication.BoundObjectReference)(unsafe.Pointer(in.BoundObjectRef))
return nil
}
// Convert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec is an autogenerated conversion function.
func Convert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec(in *authenticationv1.TokenRequestSpec, out *authentication.TokenRequestSpec, s conversion.Scope) error {
return autoConvert_v1_TokenRequestSpec_To_authentication_TokenRequestSpec(in, out, s)
}
func autoConvert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec(in *authentication.TokenRequestSpec, out *authenticationv1.TokenRequestSpec, s conversion.Scope) error {
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
if err := metav1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil {
return err
}
out.BoundObjectRef = (*authenticationv1.BoundObjectReference)(unsafe.Pointer(in.BoundObjectRef))
return nil
}
// Convert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec is an autogenerated conversion function.
func Convert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec(in *authentication.TokenRequestSpec, out *authenticationv1.TokenRequestSpec, s conversion.Scope) error {
return autoConvert_authentication_TokenRequestSpec_To_v1_TokenRequestSpec(in, out, s)
}
func autoConvert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus(in *authenticationv1.TokenRequestStatus, out *authentication.TokenRequestStatus, s conversion.Scope) error {
out.Token = in.Token
out.ExpirationTimestamp = in.ExpirationTimestamp
return nil
}
// Convert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus is an autogenerated conversion function.
func Convert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus(in *authenticationv1.TokenRequestStatus, out *authentication.TokenRequestStatus, s conversion.Scope) error {
return autoConvert_v1_TokenRequestStatus_To_authentication_TokenRequestStatus(in, out, s)
}
func autoConvert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus(in *authentication.TokenRequestStatus, out *authenticationv1.TokenRequestStatus, s conversion.Scope) error {
out.Token = in.Token
out.ExpirationTimestamp = in.ExpirationTimestamp
return nil
}
// Convert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus is an autogenerated conversion function.
func Convert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus(in *authentication.TokenRequestStatus, out *authenticationv1.TokenRequestStatus, s conversion.Scope) error {
return autoConvert_authentication_TokenRequestStatus_To_v1_TokenRequestStatus(in, out, s)
}
func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *authenticationv1.TokenReview, out *authentication.TokenReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_TokenReview_To_authentication_TokenReview is an autogenerated conversion function.
func Convert_v1_TokenReview_To_authentication_TokenReview(in *authenticationv1.TokenReview, out *authentication.TokenReview, s conversion.Scope) error {
return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s)
}
func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *authenticationv1.TokenReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_TokenReview_To_v1_TokenReview is an autogenerated conversion function.
func Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *authenticationv1.TokenReview, s conversion.Scope) error {
return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s)
}
func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *authenticationv1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error {
out.Token = in.Token
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
return nil
}
// Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec is an autogenerated conversion function.
func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *authenticationv1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error {
return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s)
}
func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *authenticationv1.TokenReviewSpec, s conversion.Scope) error {
out.Token = in.Token
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
return nil
}
// Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec is an autogenerated conversion function.
func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *authenticationv1.TokenReviewSpec, s conversion.Scope) error {
return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s)
}
func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *authenticationv1.TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error {
out.Authenticated = in.Authenticated
if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil {
return err
}
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
out.Error = in.Error
return nil
}
// Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus is an autogenerated conversion function.
func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *authenticationv1.TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error {
return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s)
}
func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *authenticationv1.TokenReviewStatus, s conversion.Scope) error {
out.Authenticated = in.Authenticated
if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil {
return err
}
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
out.Error = in.Error
return nil
}
// Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus is an autogenerated conversion function.
func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *authenticationv1.TokenReviewStatus, s conversion.Scope) error {
return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s)
}
func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *authenticationv1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *authenticationv1.UserInfo, s conversion.Scope) error {
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authenticationv1.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
authenticationv1 "k8s.io/api/authentication/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&authenticationv1.TokenRequest{}, func(obj interface{}) { SetObjectDefaults_TokenRequest(obj.(*authenticationv1.TokenRequest)) })
return nil
}
func SetObjectDefaults_TokenRequest(in *authenticationv1.TokenRequest) {
SetDefaults_TokenRequestSpec(&in.Spec)
}
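// Sketch of the normal call path: after RegisterDefaults, scheme.Default walks
// a typed object and applies every registered defaulting function for it.
func exampleSchemeDefault() {
scheme := runtime.NewScheme()
_ = RegisterDefaults(scheme)
tr := &authenticationv1.TokenRequest{}
scheme.Default(tr)
// tr.Spec.ExpirationSeconds now points at 3600 via SetObjectDefaults_TokenRequest.
}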
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authentication.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &authenticationv1alpha1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
authentication "k8s.io/kubernetes/pkg/apis/authentication"
v1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*authenticationv1alpha1.SelfSubjectReview)(nil), (*authentication.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_SelfSubjectReview_To_authentication_SelfSubjectReview(a.(*authenticationv1alpha1.SelfSubjectReview), b.(*authentication.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReview)(nil), (*authenticationv1alpha1.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReview_To_v1alpha1_SelfSubjectReview(a.(*authentication.SelfSubjectReview), b.(*authenticationv1alpha1.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1alpha1.SelfSubjectReviewStatus)(nil), (*authentication.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(a.(*authenticationv1alpha1.SelfSubjectReviewStatus), b.(*authentication.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReviewStatus)(nil), (*authenticationv1alpha1.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus(a.(*authentication.SelfSubjectReviewStatus), b.(*authenticationv1alpha1.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1alpha1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_SelfSubjectReview_To_authentication_SelfSubjectReview is an autogenerated conversion function.
func Convert_v1alpha1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1alpha1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_v1alpha1_SelfSubjectReview_To_authentication_SelfSubjectReview(in, out, s)
}
func autoConvert_authentication_SelfSubjectReview_To_v1alpha1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1alpha1.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReview_To_v1alpha1_SelfSubjectReview is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReview_To_v1alpha1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1alpha1.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReview_To_v1alpha1_SelfSubjectReview(in, out, s)
}
func autoConvert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1alpha1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := v1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1alpha1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in, out, s)
}
func autoConvert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1alpha1.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := v1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1alpha1.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReviewStatus_To_v1alpha1_SelfSubjectReviewStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authentication.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &authenticationv1beta1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
authentication "k8s.io/kubernetes/pkg/apis/authentication"
v1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.SelfSubjectReview)(nil), (*authentication.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectReview_To_authentication_SelfSubjectReview(a.(*authenticationv1beta1.SelfSubjectReview), b.(*authentication.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReview)(nil), (*authenticationv1beta1.SelfSubjectReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReview_To_v1beta1_SelfSubjectReview(a.(*authentication.SelfSubjectReview), b.(*authenticationv1beta1.SelfSubjectReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.SelfSubjectReviewStatus)(nil), (*authentication.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(a.(*authenticationv1beta1.SelfSubjectReviewStatus), b.(*authentication.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.SelfSubjectReviewStatus)(nil), (*authenticationv1beta1.SelfSubjectReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus(a.(*authentication.SelfSubjectReviewStatus), b.(*authenticationv1beta1.SelfSubjectReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.TokenReview)(nil), (*authentication.TokenReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_TokenReview_To_authentication_TokenReview(a.(*authenticationv1beta1.TokenReview), b.(*authentication.TokenReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReview)(nil), (*authenticationv1beta1.TokenReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReview_To_v1beta1_TokenReview(a.(*authentication.TokenReview), b.(*authenticationv1beta1.TokenReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.TokenReviewSpec)(nil), (*authentication.TokenReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(a.(*authenticationv1beta1.TokenReviewSpec), b.(*authentication.TokenReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReviewSpec)(nil), (*authenticationv1beta1.TokenReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(a.(*authentication.TokenReviewSpec), b.(*authenticationv1beta1.TokenReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.TokenReviewStatus)(nil), (*authentication.TokenReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(a.(*authenticationv1beta1.TokenReviewStatus), b.(*authentication.TokenReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.TokenReviewStatus)(nil), (*authenticationv1beta1.TokenReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(a.(*authentication.TokenReviewStatus), b.(*authenticationv1beta1.TokenReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authenticationv1beta1.UserInfo)(nil), (*authentication.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_UserInfo_To_authentication_UserInfo(a.(*authenticationv1beta1.UserInfo), b.(*authentication.UserInfo), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authentication.UserInfo)(nil), (*authenticationv1beta1.UserInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authentication_UserInfo_To_v1beta1_UserInfo(a.(*authentication.UserInfo), b.(*authenticationv1beta1.UserInfo), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1beta1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_SelfSubjectReview_To_authentication_SelfSubjectReview is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectReview_To_authentication_SelfSubjectReview(in *authenticationv1beta1.SelfSubjectReview, out *authentication.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectReview_To_authentication_SelfSubjectReview(in, out, s)
}
func autoConvert_authentication_SelfSubjectReview_To_v1beta1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1beta1.SelfSubjectReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReview_To_v1beta1_SelfSubjectReview is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReview_To_v1beta1_SelfSubjectReview(in *authentication.SelfSubjectReview, out *authenticationv1beta1.SelfSubjectReview, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReview_To_v1beta1_SelfSubjectReview(in, out, s)
}
func autoConvert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1beta1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := v1.Convert_v1_UserInfo_To_authentication_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in *authenticationv1beta1.SelfSubjectReviewStatus, out *authentication.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectReviewStatus_To_authentication_SelfSubjectReviewStatus(in, out, s)
}
func autoConvert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1beta1.SelfSubjectReviewStatus, s conversion.Scope) error {
if err := v1.Convert_authentication_UserInfo_To_v1_UserInfo(&in.UserInfo, &out.UserInfo, s); err != nil {
return err
}
return nil
}
// Convert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus is an autogenerated conversion function.
func Convert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus(in *authentication.SelfSubjectReviewStatus, out *authenticationv1beta1.SelfSubjectReviewStatus, s conversion.Scope) error {
return autoConvert_authentication_SelfSubjectReviewStatus_To_v1beta1_SelfSubjectReviewStatus(in, out, s)
}
func autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in *authenticationv1beta1.TokenReview, out *authentication.TokenReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_TokenReview_To_authentication_TokenReview is an autogenerated conversion function.
func Convert_v1beta1_TokenReview_To_authentication_TokenReview(in *authenticationv1beta1.TokenReview, out *authentication.TokenReview, s conversion.Scope) error {
return autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in, out, s)
}
func autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *authenticationv1beta1.TokenReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authentication_TokenReview_To_v1beta1_TokenReview is an autogenerated conversion function.
func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *authenticationv1beta1.TokenReview, s conversion.Scope) error {
return autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in, out, s)
}
func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *authenticationv1beta1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error {
out.Token = in.Token
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
return nil
}
// Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec is an autogenerated conversion function.
func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *authenticationv1beta1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error {
return autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s)
}
func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *authenticationv1beta1.TokenReviewSpec, s conversion.Scope) error {
out.Token = in.Token
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
return nil
}
// Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec is an autogenerated conversion function.
func Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *authenticationv1beta1.TokenReviewSpec, s conversion.Scope) error {
return autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s)
}
func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *authenticationv1beta1.TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error {
out.Authenticated = in.Authenticated
if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil {
return err
}
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
out.Error = in.Error
return nil
}
// Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus is an autogenerated conversion function.
func Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *authenticationv1beta1.TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error {
return autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s)
}
func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *authenticationv1beta1.TokenReviewStatus, s conversion.Scope) error {
out.Authenticated = in.Authenticated
if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil {
return err
}
out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
out.Error = in.Error
return nil
}
// Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus is an autogenerated conversion function.
func Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *authenticationv1beta1.TokenReviewStatus, s conversion.Scope) error {
return autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s)
}
func autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in *authenticationv1beta1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_v1beta1_UserInfo_To_authentication_UserInfo is an autogenerated conversion function.
func Convert_v1beta1_UserInfo_To_authentication_UserInfo(in *authenticationv1beta1.UserInfo, out *authentication.UserInfo, s conversion.Scope) error {
return autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in, out, s)
}
func autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *authenticationv1beta1.UserInfo, s conversion.Scope) error {
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authenticationv1beta1.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_authentication_UserInfo_To_v1beta1_UserInfo is an autogenerated conversion function.
func Convert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *authenticationv1beta1.UserInfo, s conversion.Scope) error {
return autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package authentication
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference.
func (in *BoundObjectReference) DeepCopy() *BoundObjectReference {
if in == nil {
return nil
}
out := new(BoundObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview.
func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview {
if in == nil {
return nil
}
out := new(SelfSubjectReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SelfSubjectReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
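// Editor's note (illustrative sketch, not part of the generated file):
// DeepCopyObject is what lets SelfSubjectReview satisfy runtime.Object, so
// generic machinery can copy values behind the interface without knowing the
// concrete type:
//
//	var obj runtime.Object = &SelfSubjectReview{}
//	copied := obj.DeepCopyObject() // still a *SelfSubjectReview underneath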
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) {
*out = *in
in.UserInfo.DeepCopyInto(&out.UserInfo)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus.
func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus {
if in == nil {
return nil
}
out := new(SelfSubjectReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest.
func (in *TokenRequest) DeepCopy() *TokenRequest {
if in == nil {
return nil
}
out := new(TokenRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TokenRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) {
*out = *in
if in.Audiences != nil {
in, out := &in.Audiences, &out.Audiences
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.BoundObjectRef != nil {
in, out := &in.BoundObjectRef, &out.BoundObjectRef
*out = new(BoundObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec.
func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec {
if in == nil {
return nil
}
out := new(TokenRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) {
*out = *in
in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus.
func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus {
if in == nil {
return nil
}
out := new(TokenRequestStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReview) DeepCopyInto(out *TokenReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview.
func (in *TokenReview) DeepCopy() *TokenReview {
if in == nil {
return nil
}
out := new(TokenReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TokenReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
*out = *in
if in.Audiences != nil {
in, out := &in.Audiences, &out.Audiences
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec.
func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
if in == nil {
return nil
}
out := new(TokenReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
*out = *in
in.User.DeepCopyInto(&out.User)
if in.Audiences != nil {
in, out := &in.Audiences, &out.Audiences
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus.
func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus {
if in == nil {
return nil
}
out := new(TokenReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserInfo) DeepCopyInto(out *UserInfo) {
*out = *in
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(ExtraValue, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
func (in *UserInfo) DeepCopy() *UserInfo {
if in == nil {
return nil
}
out := new(UserInfo)
in.DeepCopyInto(out)
return out
}
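// Editor's note (illustrative sketch, not part of the generated file): the
// DeepCopy helpers above reallocate every reference field (slices, maps), so
// a copy never aliases the original. For example:
//
//	orig := &UserInfo{Groups: []string{"system:authenticated"}}
//	cp := orig.DeepCopy()
//	cp.Groups[0] = "mutated"
//	// orig.Groups[0] is still "system:authenticated" because DeepCopyInto
//	// allocated a fresh Groups slice instead of sharing the original.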
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the authorization API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/authorization"
"k8s.io/kubernetes/pkg/apis/authorization/v1"
"k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(authorization.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
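// Editor's note (illustrative sketch, not part of the upstream file): callers
// that do not want the init()-time registration into legacyscheme.Scheme can
// install the group into a scheme of their own; the SetVersionPriority call
// above makes v1 the preferred serialization version over v1beta1:
//
//	scheme := runtime.NewScheme()
//	Install(scheme)
//	// scheme can now encode/decode authorization objects, preferring v1.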
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authorization
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authorization.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
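// Editor's note (illustrative sketch, not part of the upstream file): these
// helpers qualify bare kind and resource names with the authorization group,
// e.g.:
//
//	gk := Kind("SubjectAccessReview")
//	// gk.Group == "authorization.k8s.io", gk.Kind == "SubjectAccessReview"
//	gr := Resource("subjectaccessreviews")
//	// gr.Group == "authorization.k8s.io", gr.Resource == "subjectaccessreviews"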
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&SelfSubjectRulesReview{},
&SelfSubjectAccessReview{},
&SubjectAccessReview{},
&LocalSubjectAccessReview{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authorization.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &authorizationv1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
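// Editor's note (illustrative sketch, not part of the upstream file): the
// separation described above means any additional hand-written scheme setup
// is registered the same way addDefaultingFuncs is, e.g. a hypothetical
// manual registration:
//
//	localSchemeBuilder.Register(func(scheme *runtime.Scheme) error {
//		// register hand-written conversion or defaulting funcs here
//		return nil
//	})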
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
authorization "k8s.io/kubernetes/pkg/apis/authorization"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*authorizationv1.FieldSelectorAttributes)(nil), (*authorization.FieldSelectorAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FieldSelectorAttributes_To_authorization_FieldSelectorAttributes(a.(*authorizationv1.FieldSelectorAttributes), b.(*authorization.FieldSelectorAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.FieldSelectorAttributes)(nil), (*authorizationv1.FieldSelectorAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_FieldSelectorAttributes_To_v1_FieldSelectorAttributes(a.(*authorization.FieldSelectorAttributes), b.(*authorizationv1.FieldSelectorAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.LabelSelectorAttributes)(nil), (*authorization.LabelSelectorAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LabelSelectorAttributes_To_authorization_LabelSelectorAttributes(a.(*authorizationv1.LabelSelectorAttributes), b.(*authorization.LabelSelectorAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.LabelSelectorAttributes)(nil), (*authorizationv1.LabelSelectorAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_LabelSelectorAttributes_To_v1_LabelSelectorAttributes(a.(*authorization.LabelSelectorAttributes), b.(*authorizationv1.LabelSelectorAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.LocalSubjectAccessReview)(nil), (*authorization.LocalSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(a.(*authorizationv1.LocalSubjectAccessReview), b.(*authorization.LocalSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.LocalSubjectAccessReview)(nil), (*authorizationv1.LocalSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(a.(*authorization.LocalSubjectAccessReview), b.(*authorizationv1.LocalSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.NonResourceAttributes)(nil), (*authorization.NonResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(a.(*authorizationv1.NonResourceAttributes), b.(*authorization.NonResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.NonResourceAttributes)(nil), (*authorizationv1.NonResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(a.(*authorization.NonResourceAttributes), b.(*authorizationv1.NonResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.NonResourceRule)(nil), (*authorization.NonResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NonResourceRule_To_authorization_NonResourceRule(a.(*authorizationv1.NonResourceRule), b.(*authorization.NonResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.NonResourceRule)(nil), (*authorizationv1.NonResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_NonResourceRule_To_v1_NonResourceRule(a.(*authorization.NonResourceRule), b.(*authorizationv1.NonResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.ResourceAttributes)(nil), (*authorization.ResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(a.(*authorizationv1.ResourceAttributes), b.(*authorization.ResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.ResourceAttributes)(nil), (*authorizationv1.ResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(a.(*authorization.ResourceAttributes), b.(*authorizationv1.ResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.ResourceRule)(nil), (*authorization.ResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceRule_To_authorization_ResourceRule(a.(*authorizationv1.ResourceRule), b.(*authorization.ResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.ResourceRule)(nil), (*authorizationv1.ResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_ResourceRule_To_v1_ResourceRule(a.(*authorization.ResourceRule), b.(*authorizationv1.ResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SelfSubjectAccessReview)(nil), (*authorization.SelfSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(a.(*authorizationv1.SelfSubjectAccessReview), b.(*authorization.SelfSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectAccessReview)(nil), (*authorizationv1.SelfSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(a.(*authorization.SelfSubjectAccessReview), b.(*authorizationv1.SelfSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SelfSubjectAccessReviewSpec)(nil), (*authorization.SelfSubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(a.(*authorizationv1.SelfSubjectAccessReviewSpec), b.(*authorization.SelfSubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectAccessReviewSpec)(nil), (*authorizationv1.SelfSubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(a.(*authorization.SelfSubjectAccessReviewSpec), b.(*authorizationv1.SelfSubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SelfSubjectRulesReview)(nil), (*authorization.SelfSubjectRulesReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(a.(*authorizationv1.SelfSubjectRulesReview), b.(*authorization.SelfSubjectRulesReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectRulesReview)(nil), (*authorizationv1.SelfSubjectRulesReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectRulesReview_To_v1_SelfSubjectRulesReview(a.(*authorization.SelfSubjectRulesReview), b.(*authorizationv1.SelfSubjectRulesReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SelfSubjectRulesReviewSpec)(nil), (*authorization.SelfSubjectRulesReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(a.(*authorizationv1.SelfSubjectRulesReviewSpec), b.(*authorization.SelfSubjectRulesReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectRulesReviewSpec)(nil), (*authorizationv1.SelfSubjectRulesReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec(a.(*authorization.SelfSubjectRulesReviewSpec), b.(*authorizationv1.SelfSubjectRulesReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SubjectAccessReview)(nil), (*authorization.SubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(a.(*authorizationv1.SubjectAccessReview), b.(*authorization.SubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReview)(nil), (*authorizationv1.SubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(a.(*authorization.SubjectAccessReview), b.(*authorizationv1.SubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SubjectAccessReviewSpec)(nil), (*authorization.SubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(a.(*authorizationv1.SubjectAccessReviewSpec), b.(*authorization.SubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReviewSpec)(nil), (*authorizationv1.SubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(a.(*authorization.SubjectAccessReviewSpec), b.(*authorizationv1.SubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SubjectAccessReviewStatus)(nil), (*authorization.SubjectAccessReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(a.(*authorizationv1.SubjectAccessReviewStatus), b.(*authorization.SubjectAccessReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReviewStatus)(nil), (*authorizationv1.SubjectAccessReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(a.(*authorization.SubjectAccessReviewStatus), b.(*authorizationv1.SubjectAccessReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1.SubjectRulesReviewStatus)(nil), (*authorization.SubjectRulesReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(a.(*authorizationv1.SubjectRulesReviewStatus), b.(*authorization.SubjectRulesReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectRulesReviewStatus)(nil), (*authorizationv1.SubjectRulesReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus(a.(*authorization.SubjectRulesReviewStatus), b.(*authorizationv1.SubjectRulesReviewStatus), scope)
}); err != nil {
return err
}
return nil
}
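// Editor's note (illustrative sketch, not part of the generated file): once
// RegisterConversions has run, callers convert through the scheme rather than
// by naming the generated functions directly:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		// handle the registration error
//	}
//	external := &authorizationv1.SubjectAccessReview{}
//	internal := &authorization.SubjectAccessReview{}
//	if err := scheme.Convert(external, internal, nil); err != nil {
//		// handle the conversion error
//	}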
func autoConvert_v1_FieldSelectorAttributes_To_authorization_FieldSelectorAttributes(in *authorizationv1.FieldSelectorAttributes, out *authorization.FieldSelectorAttributes, s conversion.Scope) error {
out.RawSelector = in.RawSelector
out.Requirements = *(*[]metav1.FieldSelectorRequirement)(unsafe.Pointer(&in.Requirements))
return nil
}
// Convert_v1_FieldSelectorAttributes_To_authorization_FieldSelectorAttributes is an autogenerated conversion function.
func Convert_v1_FieldSelectorAttributes_To_authorization_FieldSelectorAttributes(in *authorizationv1.FieldSelectorAttributes, out *authorization.FieldSelectorAttributes, s conversion.Scope) error {
return autoConvert_v1_FieldSelectorAttributes_To_authorization_FieldSelectorAttributes(in, out, s)
}
func autoConvert_authorization_FieldSelectorAttributes_To_v1_FieldSelectorAttributes(in *authorization.FieldSelectorAttributes, out *authorizationv1.FieldSelectorAttributes, s conversion.Scope) error {
out.RawSelector = in.RawSelector
out.Requirements = *(*[]metav1.FieldSelectorRequirement)(unsafe.Pointer(&in.Requirements))
return nil
}
// Convert_authorization_FieldSelectorAttributes_To_v1_FieldSelectorAttributes is an autogenerated conversion function.
func Convert_authorization_FieldSelectorAttributes_To_v1_FieldSelectorAttributes(in *authorization.FieldSelectorAttributes, out *authorizationv1.FieldSelectorAttributes, s conversion.Scope) error {
return autoConvert_authorization_FieldSelectorAttributes_To_v1_FieldSelectorAttributes(in, out, s)
}
func autoConvert_v1_LabelSelectorAttributes_To_authorization_LabelSelectorAttributes(in *authorizationv1.LabelSelectorAttributes, out *authorization.LabelSelectorAttributes, s conversion.Scope) error {
out.RawSelector = in.RawSelector
out.Requirements = *(*[]metav1.LabelSelectorRequirement)(unsafe.Pointer(&in.Requirements))
return nil
}
// Convert_v1_LabelSelectorAttributes_To_authorization_LabelSelectorAttributes is an autogenerated conversion function.
func Convert_v1_LabelSelectorAttributes_To_authorization_LabelSelectorAttributes(in *authorizationv1.LabelSelectorAttributes, out *authorization.LabelSelectorAttributes, s conversion.Scope) error {
return autoConvert_v1_LabelSelectorAttributes_To_authorization_LabelSelectorAttributes(in, out, s)
}
func autoConvert_authorization_LabelSelectorAttributes_To_v1_LabelSelectorAttributes(in *authorization.LabelSelectorAttributes, out *authorizationv1.LabelSelectorAttributes, s conversion.Scope) error {
out.RawSelector = in.RawSelector
out.Requirements = *(*[]metav1.LabelSelectorRequirement)(unsafe.Pointer(&in.Requirements))
return nil
}
// Convert_authorization_LabelSelectorAttributes_To_v1_LabelSelectorAttributes is an autogenerated conversion function.
func Convert_authorization_LabelSelectorAttributes_To_v1_LabelSelectorAttributes(in *authorization.LabelSelectorAttributes, out *authorizationv1.LabelSelectorAttributes, s conversion.Scope) error {
return autoConvert_authorization_LabelSelectorAttributes_To_v1_LabelSelectorAttributes(in, out, s)
}
func autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *authorizationv1.LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview is an autogenerated conversion function.
func Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *authorizationv1.LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s)
}
func autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *authorizationv1.LocalSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *authorizationv1.LocalSubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in, out, s)
}
func autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *authorizationv1.NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
out.Path = in.Path
out.Verb = in.Verb
return nil
}
// Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes is an autogenerated conversion function.
func Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *authorizationv1.NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
return autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s)
}
func autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *authorizationv1.NonResourceAttributes, s conversion.Scope) error {
out.Path = in.Path
out.Verb = in.Verb
return nil
}
// Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes is an autogenerated conversion function.
func Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *authorizationv1.NonResourceAttributes, s conversion.Scope) error {
return autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in, out, s)
}
func autoConvert_v1_NonResourceRule_To_authorization_NonResourceRule(in *authorizationv1.NonResourceRule, out *authorization.NonResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1_NonResourceRule_To_authorization_NonResourceRule is an autogenerated conversion function.
func Convert_v1_NonResourceRule_To_authorization_NonResourceRule(in *authorizationv1.NonResourceRule, out *authorization.NonResourceRule, s conversion.Scope) error {
return autoConvert_v1_NonResourceRule_To_authorization_NonResourceRule(in, out, s)
}
func autoConvert_authorization_NonResourceRule_To_v1_NonResourceRule(in *authorization.NonResourceRule, out *authorizationv1.NonResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_authorization_NonResourceRule_To_v1_NonResourceRule is an autogenerated conversion function.
func Convert_authorization_NonResourceRule_To_v1_NonResourceRule(in *authorization.NonResourceRule, out *authorizationv1.NonResourceRule, s conversion.Scope) error {
return autoConvert_authorization_NonResourceRule_To_v1_NonResourceRule(in, out, s)
}
func autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *authorizationv1.ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Verb = in.Verb
out.Group = in.Group
out.Version = in.Version
out.Resource = in.Resource
out.Subresource = in.Subresource
out.Name = in.Name
out.FieldSelector = (*authorization.FieldSelectorAttributes)(unsafe.Pointer(in.FieldSelector))
out.LabelSelector = (*authorization.LabelSelectorAttributes)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes is an autogenerated conversion function.
func Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *authorizationv1.ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
return autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s)
}
func autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *authorizationv1.ResourceAttributes, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Verb = in.Verb
out.Group = in.Group
out.Version = in.Version
out.Resource = in.Resource
out.Subresource = in.Subresource
out.Name = in.Name
out.FieldSelector = (*authorizationv1.FieldSelectorAttributes)(unsafe.Pointer(in.FieldSelector))
out.LabelSelector = (*authorizationv1.LabelSelectorAttributes)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes is an autogenerated conversion function.
func Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *authorizationv1.ResourceAttributes, s conversion.Scope) error {
return autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in, out, s)
}
func autoConvert_v1_ResourceRule_To_authorization_ResourceRule(in *authorizationv1.ResourceRule, out *authorization.ResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
return nil
}
// Convert_v1_ResourceRule_To_authorization_ResourceRule is an autogenerated conversion function.
func Convert_v1_ResourceRule_To_authorization_ResourceRule(in *authorizationv1.ResourceRule, out *authorization.ResourceRule, s conversion.Scope) error {
return autoConvert_v1_ResourceRule_To_authorization_ResourceRule(in, out, s)
}
func autoConvert_authorization_ResourceRule_To_v1_ResourceRule(in *authorization.ResourceRule, out *authorizationv1.ResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
return nil
}
// Convert_authorization_ResourceRule_To_v1_ResourceRule is an autogenerated conversion function.
func Convert_authorization_ResourceRule_To_v1_ResourceRule(in *authorization.ResourceRule, out *authorizationv1.ResourceRule, s conversion.Scope) error {
return autoConvert_authorization_ResourceRule_To_v1_ResourceRule(in, out, s)
}
func autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *authorizationv1.SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview is an autogenerated conversion function.
func Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *authorizationv1.SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s)
}
func autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *authorizationv1.SelfSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *authorizationv1.SelfSubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in, out, s)
}
func autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *authorizationv1.SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
return nil
}
// Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *authorizationv1.SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s)
}
func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *authorizationv1.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorizationv1.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorizationv1.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
return nil
}
// Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *authorizationv1.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in, out, s)
}
func autoConvert_v1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in *authorizationv1.SelfSubjectRulesReview, out *authorization.SelfSubjectRulesReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview is an autogenerated conversion function.
func Convert_v1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in *authorizationv1.SelfSubjectRulesReview, out *authorization.SelfSubjectRulesReview, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in, out, s)
}
func autoConvert_authorization_SelfSubjectRulesReview_To_v1_SelfSubjectRulesReview(in *authorization.SelfSubjectRulesReview, out *authorizationv1.SelfSubjectRulesReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SelfSubjectRulesReview_To_v1_SelfSubjectRulesReview is an autogenerated conversion function.
func Convert_authorization_SelfSubjectRulesReview_To_v1_SelfSubjectRulesReview(in *authorization.SelfSubjectRulesReview, out *authorizationv1.SelfSubjectRulesReview, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectRulesReview_To_v1_SelfSubjectRulesReview(in, out, s)
}
func autoConvert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in *authorizationv1.SelfSubjectRulesReviewSpec, out *authorization.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
out.Namespace = in.Namespace
return nil
}
// Convert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec is an autogenerated conversion function.
func Convert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in *authorizationv1.SelfSubjectRulesReviewSpec, out *authorization.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
return autoConvert_v1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in, out, s)
}
func autoConvert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec(in *authorization.SelfSubjectRulesReviewSpec, out *authorizationv1.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
out.Namespace = in.Namespace
return nil
}
// Convert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec is an autogenerated conversion function.
func Convert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec(in *authorization.SelfSubjectRulesReviewSpec, out *authorizationv1.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectRulesReviewSpec_To_v1_SelfSubjectRulesReviewSpec(in, out, s)
}
func autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *authorizationv1.SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview is an autogenerated conversion function.
func Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *authorizationv1.SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s)
}
func autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *authorizationv1.SubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *authorizationv1.SubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in, out, s)
}
func autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *authorizationv1.SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
out.User = in.User
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra))
out.UID = in.UID
return nil
}
// Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *authorizationv1.SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s)
}
func autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *authorizationv1.SubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorizationv1.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorizationv1.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
out.User = in.User
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authorizationv1.ExtraValue)(unsafe.Pointer(&in.Extra))
out.UID = in.UID
return nil
}
// Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *authorizationv1.SubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in, out, s)
}
func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *authorizationv1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Denied = in.Denied
out.Reason = in.Reason
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus is an autogenerated conversion function.
func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *authorizationv1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
return autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s)
}
func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *authorizationv1.SubjectAccessReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Denied = in.Denied
out.Reason = in.Reason
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *authorizationv1.SubjectAccessReviewStatus, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in, out, s)
}
func autoConvert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in *authorizationv1.SubjectRulesReviewStatus, out *authorization.SubjectRulesReviewStatus, s conversion.Scope) error {
out.ResourceRules = *(*[]authorization.ResourceRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]authorization.NonResourceRule)(unsafe.Pointer(&in.NonResourceRules))
out.Incomplete = in.Incomplete
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus is an autogenerated conversion function.
func Convert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in *authorizationv1.SubjectRulesReviewStatus, out *authorization.SubjectRulesReviewStatus, s conversion.Scope) error {
return autoConvert_v1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in, out, s)
}
func autoConvert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus(in *authorization.SubjectRulesReviewStatus, out *authorizationv1.SubjectRulesReviewStatus, s conversion.Scope) error {
out.ResourceRules = *(*[]authorizationv1.ResourceRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]authorizationv1.NonResourceRule)(unsafe.Pointer(&in.NonResourceRules))
out.Incomplete = in.Incomplete
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus is an autogenerated conversion function.
func Convert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus(in *authorization.SubjectRulesReviewStatus, out *authorizationv1.SubjectRulesReviewStatus, s conversion.Scope) error {
return autoConvert_authorization_SubjectRulesReviewStatus_To_v1_SubjectRulesReviewStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "authorization.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &authorizationv1beta1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/authorization/v1"
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
authorization "k8s.io/kubernetes/pkg/apis/authorization"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.LocalSubjectAccessReview)(nil), (*authorization.LocalSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(a.(*authorizationv1beta1.LocalSubjectAccessReview), b.(*authorization.LocalSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.LocalSubjectAccessReview)(nil), (*authorizationv1beta1.LocalSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(a.(*authorization.LocalSubjectAccessReview), b.(*authorizationv1beta1.LocalSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.NonResourceAttributes)(nil), (*authorization.NonResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(a.(*authorizationv1beta1.NonResourceAttributes), b.(*authorization.NonResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.NonResourceAttributes)(nil), (*authorizationv1beta1.NonResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(a.(*authorization.NonResourceAttributes), b.(*authorizationv1beta1.NonResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.NonResourceRule)(nil), (*authorization.NonResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NonResourceRule_To_authorization_NonResourceRule(a.(*authorizationv1beta1.NonResourceRule), b.(*authorization.NonResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.NonResourceRule)(nil), (*authorizationv1beta1.NonResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_NonResourceRule_To_v1beta1_NonResourceRule(a.(*authorization.NonResourceRule), b.(*authorizationv1beta1.NonResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.ResourceAttributes)(nil), (*authorization.ResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(a.(*authorizationv1beta1.ResourceAttributes), b.(*authorization.ResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.ResourceAttributes)(nil), (*authorizationv1beta1.ResourceAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(a.(*authorization.ResourceAttributes), b.(*authorizationv1beta1.ResourceAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.ResourceRule)(nil), (*authorization.ResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceRule_To_authorization_ResourceRule(a.(*authorizationv1beta1.ResourceRule), b.(*authorization.ResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.ResourceRule)(nil), (*authorizationv1beta1.ResourceRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_ResourceRule_To_v1beta1_ResourceRule(a.(*authorization.ResourceRule), b.(*authorizationv1beta1.ResourceRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SelfSubjectAccessReview)(nil), (*authorization.SelfSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(a.(*authorizationv1beta1.SelfSubjectAccessReview), b.(*authorization.SelfSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectAccessReview)(nil), (*authorizationv1beta1.SelfSubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(a.(*authorization.SelfSubjectAccessReview), b.(*authorizationv1beta1.SelfSubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SelfSubjectAccessReviewSpec)(nil), (*authorization.SelfSubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(a.(*authorizationv1beta1.SelfSubjectAccessReviewSpec), b.(*authorization.SelfSubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectAccessReviewSpec)(nil), (*authorizationv1beta1.SelfSubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(a.(*authorization.SelfSubjectAccessReviewSpec), b.(*authorizationv1beta1.SelfSubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SelfSubjectRulesReview)(nil), (*authorization.SelfSubjectRulesReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(a.(*authorizationv1beta1.SelfSubjectRulesReview), b.(*authorization.SelfSubjectRulesReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectRulesReview)(nil), (*authorizationv1beta1.SelfSubjectRulesReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectRulesReview_To_v1beta1_SelfSubjectRulesReview(a.(*authorization.SelfSubjectRulesReview), b.(*authorizationv1beta1.SelfSubjectRulesReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SelfSubjectRulesReviewSpec)(nil), (*authorization.SelfSubjectRulesReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(a.(*authorizationv1beta1.SelfSubjectRulesReviewSpec), b.(*authorization.SelfSubjectRulesReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SelfSubjectRulesReviewSpec)(nil), (*authorizationv1beta1.SelfSubjectRulesReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec(a.(*authorization.SelfSubjectRulesReviewSpec), b.(*authorizationv1beta1.SelfSubjectRulesReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SubjectAccessReview)(nil), (*authorization.SubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(a.(*authorizationv1beta1.SubjectAccessReview), b.(*authorization.SubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReview)(nil), (*authorizationv1beta1.SubjectAccessReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(a.(*authorization.SubjectAccessReview), b.(*authorizationv1beta1.SubjectAccessReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SubjectAccessReviewSpec)(nil), (*authorization.SubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(a.(*authorizationv1beta1.SubjectAccessReviewSpec), b.(*authorization.SubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReviewSpec)(nil), (*authorizationv1beta1.SubjectAccessReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(a.(*authorization.SubjectAccessReviewSpec), b.(*authorizationv1beta1.SubjectAccessReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SubjectAccessReviewStatus)(nil), (*authorization.SubjectAccessReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(a.(*authorizationv1beta1.SubjectAccessReviewStatus), b.(*authorization.SubjectAccessReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectAccessReviewStatus)(nil), (*authorizationv1beta1.SubjectAccessReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(a.(*authorization.SubjectAccessReviewStatus), b.(*authorizationv1beta1.SubjectAccessReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorizationv1beta1.SubjectRulesReviewStatus)(nil), (*authorization.SubjectRulesReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(a.(*authorizationv1beta1.SubjectRulesReviewStatus), b.(*authorization.SubjectRulesReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*authorization.SubjectRulesReviewStatus)(nil), (*authorizationv1beta1.SubjectRulesReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus(a.(*authorization.SubjectRulesReviewStatus), b.(*authorizationv1beta1.SubjectRulesReviewStatus), scope)
}); err != nil {
return err
}
return nil
}
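// Illustrative sketch (hypothetical helper, not part of the generated file):
// once RegisterConversions has run against a scheme, Scheme.Convert can map
// between the external v1beta1 types and the internal types in either
// direction by looking up the registered function pair.
func convertSARToInternal(s *runtime.Scheme, in *authorizationv1beta1.SubjectAccessReview) (*authorization.SubjectAccessReview, error) {
	out := &authorization.SubjectAccessReview{}
	if err := s.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}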
func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *authorizationv1beta1.LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview is an autogenerated conversion function.
func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *authorizationv1beta1.LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s)
}
func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *authorizationv1beta1.LocalSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *authorizationv1beta1.LocalSubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s)
}
func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *authorizationv1beta1.NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
out.Path = in.Path
out.Verb = in.Verb
return nil
}
// Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes is an autogenerated conversion function.
func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *authorizationv1beta1.NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error {
return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s)
}
func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *authorizationv1beta1.NonResourceAttributes, s conversion.Scope) error {
out.Path = in.Path
out.Verb = in.Verb
return nil
}
// Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes is an autogenerated conversion function.
func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *authorizationv1beta1.NonResourceAttributes, s conversion.Scope) error {
return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s)
}
func autoConvert_v1beta1_NonResourceRule_To_authorization_NonResourceRule(in *authorizationv1beta1.NonResourceRule, out *authorization.NonResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1beta1_NonResourceRule_To_authorization_NonResourceRule is an autogenerated conversion function.
func Convert_v1beta1_NonResourceRule_To_authorization_NonResourceRule(in *authorizationv1beta1.NonResourceRule, out *authorization.NonResourceRule, s conversion.Scope) error {
return autoConvert_v1beta1_NonResourceRule_To_authorization_NonResourceRule(in, out, s)
}
func autoConvert_authorization_NonResourceRule_To_v1beta1_NonResourceRule(in *authorization.NonResourceRule, out *authorizationv1beta1.NonResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_authorization_NonResourceRule_To_v1beta1_NonResourceRule is an autogenerated conversion function.
func Convert_authorization_NonResourceRule_To_v1beta1_NonResourceRule(in *authorization.NonResourceRule, out *authorizationv1beta1.NonResourceRule, s conversion.Scope) error {
return autoConvert_authorization_NonResourceRule_To_v1beta1_NonResourceRule(in, out, s)
}
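// Note on the unsafe.Pointer casts above: when the source and destination
// field types are memory-layout identical (here, both sides are []string),
// conversion-gen emits a zero-copy reinterpretation instead of an
// element-by-element copy. A minimal sketch of the same pattern with
// hypothetical types; the result aliases the input's backing array, so it is
// only safe because the layouts match exactly:
type externalVerbs []string // hypothetical stand-in for the v1beta1 slice type
type internalVerbs []string // hypothetical stand-in for the internal slice type

func castVerbs(in *externalVerbs) internalVerbs {
	return *(*internalVerbs)(unsafe.Pointer(in)) // shares memory, no copy
}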
func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *authorizationv1beta1.ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Verb = in.Verb
out.Group = in.Group
out.Version = in.Version
out.Resource = in.Resource
out.Subresource = in.Subresource
out.Name = in.Name
out.FieldSelector = (*authorization.FieldSelectorAttributes)(unsafe.Pointer(in.FieldSelector))
out.LabelSelector = (*authorization.LabelSelectorAttributes)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes is an autogenerated conversion function.
func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *authorizationv1beta1.ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s)
}
func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *authorizationv1beta1.ResourceAttributes, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Verb = in.Verb
out.Group = in.Group
out.Version = in.Version
out.Resource = in.Resource
out.Subresource = in.Subresource
out.Name = in.Name
out.FieldSelector = (*v1.FieldSelectorAttributes)(unsafe.Pointer(in.FieldSelector))
out.LabelSelector = (*v1.LabelSelectorAttributes)(unsafe.Pointer(in.LabelSelector))
return nil
}
// Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes is an autogenerated conversion function.
func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *authorizationv1beta1.ResourceAttributes, s conversion.Scope) error {
return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s)
}
func autoConvert_v1beta1_ResourceRule_To_authorization_ResourceRule(in *authorizationv1beta1.ResourceRule, out *authorization.ResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
return nil
}
// Convert_v1beta1_ResourceRule_To_authorization_ResourceRule is an autogenerated conversion function.
func Convert_v1beta1_ResourceRule_To_authorization_ResourceRule(in *authorizationv1beta1.ResourceRule, out *authorization.ResourceRule, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceRule_To_authorization_ResourceRule(in, out, s)
}
func autoConvert_authorization_ResourceRule_To_v1beta1_ResourceRule(in *authorization.ResourceRule, out *authorizationv1beta1.ResourceRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
return nil
}
// Convert_authorization_ResourceRule_To_v1beta1_ResourceRule is an autogenerated conversion function.
func Convert_authorization_ResourceRule_To_v1beta1_ResourceRule(in *authorization.ResourceRule, out *authorizationv1beta1.ResourceRule, s conversion.Scope) error {
return autoConvert_authorization_ResourceRule_To_v1beta1_ResourceRule(in, out, s)
}
func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *authorizationv1beta1.SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *authorizationv1beta1.SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s)
}
func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *authorizationv1beta1.SelfSubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *authorizationv1beta1.SelfSubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s)
}
func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *authorizationv1beta1.SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
return nil
}
// Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *authorizationv1beta1.SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s)
}
func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *authorizationv1beta1.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorizationv1beta1.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorizationv1beta1.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
return nil
}
// Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *authorizationv1beta1.SelfSubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s)
}
func autoConvert_v1beta1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in *authorizationv1beta1.SelfSubjectRulesReview, out *authorization.SelfSubjectRulesReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in *authorizationv1beta1.SelfSubjectRulesReview, out *authorization.SelfSubjectRulesReview, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectRulesReview_To_authorization_SelfSubjectRulesReview(in, out, s)
}
func autoConvert_authorization_SelfSubjectRulesReview_To_v1beta1_SelfSubjectRulesReview(in *authorization.SelfSubjectRulesReview, out *authorizationv1beta1.SelfSubjectRulesReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SelfSubjectRulesReview_To_v1beta1_SelfSubjectRulesReview is an autogenerated conversion function.
func Convert_authorization_SelfSubjectRulesReview_To_v1beta1_SelfSubjectRulesReview(in *authorization.SelfSubjectRulesReview, out *authorizationv1beta1.SelfSubjectRulesReview, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectRulesReview_To_v1beta1_SelfSubjectRulesReview(in, out, s)
}
func autoConvert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in *authorizationv1beta1.SelfSubjectRulesReviewSpec, out *authorization.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
out.Namespace = in.Namespace
return nil
}
// Convert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec is an autogenerated conversion function.
func Convert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in *authorizationv1beta1.SelfSubjectRulesReviewSpec, out *authorization.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
return autoConvert_v1beta1_SelfSubjectRulesReviewSpec_To_authorization_SelfSubjectRulesReviewSpec(in, out, s)
}
func autoConvert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec(in *authorization.SelfSubjectRulesReviewSpec, out *authorizationv1beta1.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
out.Namespace = in.Namespace
return nil
}
// Convert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec is an autogenerated conversion function.
func Convert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec(in *authorization.SelfSubjectRulesReviewSpec, out *authorizationv1beta1.SelfSubjectRulesReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SelfSubjectRulesReviewSpec_To_v1beta1_SelfSubjectRulesReviewSpec(in, out, s)
}
func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *authorizationv1beta1.SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview is an autogenerated conversion function.
func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *authorizationv1beta1.SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error {
return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s)
}
func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *authorizationv1beta1.SubjectAccessReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *authorizationv1beta1.SubjectAccessReview, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s)
}
func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *authorizationv1beta1.SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
out.User = in.User
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra))
out.UID = in.UID
return nil
}
// Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *authorizationv1beta1.SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s)
}
func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *authorizationv1beta1.SubjectAccessReviewSpec, s conversion.Scope) error {
out.ResourceAttributes = (*authorizationv1beta1.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes))
out.NonResourceAttributes = (*authorizationv1beta1.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes))
out.User = in.User
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]authorizationv1beta1.ExtraValue)(unsafe.Pointer(&in.Extra))
out.UID = in.UID
return nil
}
// Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *authorizationv1beta1.SubjectAccessReviewSpec, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s)
}
func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *authorizationv1beta1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Denied = in.Denied
out.Reason = in.Reason
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus is an autogenerated conversion function.
func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *authorizationv1beta1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error {
return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s)
}
func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *authorizationv1beta1.SubjectAccessReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Denied = in.Denied
out.Reason = in.Reason
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus is an autogenerated conversion function.
func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *authorizationv1beta1.SubjectAccessReviewStatus, s conversion.Scope) error {
return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s)
}
func autoConvert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in *authorizationv1beta1.SubjectRulesReviewStatus, out *authorization.SubjectRulesReviewStatus, s conversion.Scope) error {
out.ResourceRules = *(*[]authorization.ResourceRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]authorization.NonResourceRule)(unsafe.Pointer(&in.NonResourceRules))
out.Incomplete = in.Incomplete
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus is an autogenerated conversion function.
func Convert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in *authorizationv1beta1.SubjectRulesReviewStatus, out *authorization.SubjectRulesReviewStatus, s conversion.Scope) error {
return autoConvert_v1beta1_SubjectRulesReviewStatus_To_authorization_SubjectRulesReviewStatus(in, out, s)
}
func autoConvert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus(in *authorization.SubjectRulesReviewStatus, out *authorizationv1beta1.SubjectRulesReviewStatus, s conversion.Scope) error {
out.ResourceRules = *(*[]authorizationv1beta1.ResourceRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]authorizationv1beta1.NonResourceRule)(unsafe.Pointer(&in.NonResourceRules))
out.Incomplete = in.Incomplete
out.EvaluationError = in.EvaluationError
return nil
}
// Convert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus is an autogenerated conversion function.
func Convert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus(in *authorization.SubjectRulesReviewStatus, out *authorizationv1beta1.SubjectRulesReviewStatus, s conversion.Scope) error {
return autoConvert_authorization_SubjectRulesReviewStatus_To_v1beta1_SubjectRulesReviewStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
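// This file registers no generated defaulters for the group/version, so the
// function above is empty. In groups that do have defaulters, defaulter-gen
// emits one AddTypeDefaultingFunc call per defaulted type; a sketch of the
// shape it takes, with hypothetical Foo and SetObjectDefaults_Foo:
//
//	func RegisterDefaults(scheme *runtime.Scheme) error {
//		scheme.AddTypeDefaultingFunc(&Foo{}, func(obj interface{}) {
//			SetObjectDefaults_Foo(obj.(*Foo))
//		})
//		return nil
//	}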
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package authorization
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) {
*out = *in
if in.Requirements != nil {
in, out := &in.Requirements, &out.Requirements
*out = make([]v1.FieldSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes.
func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes {
if in == nil {
return nil
}
out := new(FieldSelectorAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) {
*out = *in
if in.Requirements != nil {
in, out := &in.Requirements, &out.Requirements
*out = make([]v1.LabelSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes.
func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes {
if in == nil {
return nil
}
out := new(LabelSelectorAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview.
func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
if in == nil {
return nil
}
out := new(LocalSubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
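// Illustrative sketch (hypothetical helper): DeepCopyObject is what lets
// generic machinery copy any registered type through the runtime.Object
// interface without knowing the concrete type.
func copyObject(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject() // independent deep copy, safe to mutate
}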
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes.
func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes {
if in == nil {
return nil
}
out := new(NonResourceAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLs != nil {
in, out := &in.NonResourceURLs, &out.NonResourceURLs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule.
func (in *NonResourceRule) DeepCopy() *NonResourceRule {
if in == nil {
return nil
}
out := new(NonResourceRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) {
*out = *in
if in.FieldSelector != nil {
in, out := &in.FieldSelector, &out.FieldSelector
*out = new(FieldSelectorAttributes)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(LabelSelectorAttributes)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes.
func (in *ResourceAttributes) DeepCopy() *ResourceAttributes {
if in == nil {
return nil
}
out := new(ResourceAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRule) DeepCopyInto(out *ResourceRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule.
func (in *ResourceRule) DeepCopy() *ResourceRule {
if in == nil {
return nil
}
out := new(ResourceRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview.
func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview {
if in == nil {
return nil
}
out := new(SelfSubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) {
*out = *in
if in.ResourceAttributes != nil {
in, out := &in.ResourceAttributes, &out.ResourceAttributes
*out = new(ResourceAttributes)
(*in).DeepCopyInto(*out)
}
if in.NonResourceAttributes != nil {
in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
*out = new(NonResourceAttributes)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec.
func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec {
if in == nil {
return nil
}
out := new(SelfSubjectAccessReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview.
func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
if in == nil {
return nil
}
out := new(SelfSubjectRulesReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec.
func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec {
if in == nil {
return nil
}
out := new(SelfSubjectRulesReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview.
func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
if in == nil {
return nil
}
out := new(SubjectAccessReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) {
*out = *in
if in.ResourceAttributes != nil {
in, out := &in.ResourceAttributes, &out.ResourceAttributes
*out = new(ResourceAttributes)
(*in).DeepCopyInto(*out)
}
if in.NonResourceAttributes != nil {
in, out := &in.NonResourceAttributes, &out.NonResourceAttributes
*out = new(NonResourceAttributes)
**out = **in
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
var outVal []string
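// Preserve nil entries: a nil ExtraValue must remain nil after the copy
// rather than becoming an empty slice, so the two cases are handled apart.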
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(ExtraValue, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec.
func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec {
if in == nil {
return nil
}
out := new(SubjectAccessReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus.
func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus {
if in == nil {
return nil
}
out := new(SubjectAccessReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) {
*out = *in
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]ResourceRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NonResourceRules != nil {
in, out := &in.NonResourceRules, &out.NonResourceRules
*out = make([]NonResourceRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus.
func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus {
if in == nil {
return nil
}
out := new(SubjectRulesReviewStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/autoscaling"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/ptr"
)
// Funcs returns the fuzzer functions for the autoscaling API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(s *autoscaling.ScaleStatus, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
// ensure we have a valid selector
metaSelector := &metav1.LabelSelector{}
c.Fill(metaSelector)
labelSelector, _ := metav1.LabelSelectorAsSelector(metaSelector)
s.Selector = labelSelector.String()
},
func(s *autoscaling.HorizontalPodAutoscalerSpec, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
s.MinReplicas = ptr.To[int32](c.Rand.Int31())
randomQuantity := func() resource.Quantity {
var q resource.Quantity
c.Fill(&q)
// precalc the string for benchmarking purposes
_ = q.String()
return q
}
var podMetricID autoscaling.MetricIdentifier
var objMetricID autoscaling.MetricIdentifier
c.Fill(&podMetricID)
c.Fill(&objMetricID)
targetUtilization := int32(c.Uint64())
averageValue := randomQuantity()
s.Metrics = []autoscaling.MetricSpec{
{
Type: autoscaling.PodsMetricSourceType,
Pods: &autoscaling.PodsMetricSource{
Metric: podMetricID,
Target: autoscaling.MetricTarget{
Type: autoscaling.AverageValueMetricType,
AverageValue: &averageValue,
},
},
},
{
Type: autoscaling.ObjectMetricSourceType,
Object: &autoscaling.ObjectMetricSource{
Metric: objMetricID,
Target: autoscaling.MetricTarget{
Type: autoscaling.ValueMetricType,
Value: &averageValue,
},
},
},
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricSource{
Name: api.ResourceCPU,
Target: autoscaling.MetricTarget{
Type: autoscaling.UtilizationMetricType,
AverageUtilization: &targetUtilization,
},
},
},
}
stabilizationWindow := int32(c.Uint64())
maxPolicy := autoscaling.MaxPolicySelect
minPolicy := autoscaling.MinPolicySelect
s.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
StabilizationWindowSeconds: &stabilizationWindow,
SelectPolicy: &maxPolicy,
Policies: []autoscaling.HPAScalingPolicy{
{
Type: autoscaling.PodsScalingPolicy,
Value: int32(c.Uint64()),
PeriodSeconds: int32(c.Uint64()),
},
{
Type: autoscaling.PercentScalingPolicy,
Value: int32(c.Uint64()),
PeriodSeconds: int32(c.Uint64()),
},
},
},
ScaleDown: &autoscaling.HPAScalingRules{
StabilizationWindowSeconds: &stabilizationWindow,
SelectPolicy: &minPolicy,
Policies: []autoscaling.HPAScalingPolicy{
{
Type: autoscaling.PodsScalingPolicy,
Value: int32(c.Uint64()),
PeriodSeconds: int32(c.Uint64()),
},
{
Type: autoscaling.PercentScalingPolicy,
Value: int32(c.Uint64()),
PeriodSeconds: int32(c.Uint64()),
},
},
},
}
},
func(s *autoscaling.HorizontalPodAutoscalerStatus, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
randomQuantity := func() resource.Quantity {
var q resource.Quantity
c.Fill(&q)
// precalc the string for benchmarking purposes
_ = q.String()
return q
}
averageValue := randomQuantity()
currentUtilization := int32(c.Uint64())
s.CurrentMetrics = []autoscaling.MetricStatus{
{
Type: autoscaling.PodsMetricSourceType,
Pods: &autoscaling.PodsMetricStatus{
Metric: autoscaling.MetricIdentifier{
Name: c.String(0),
},
Current: autoscaling.MetricValueStatus{
AverageValue: &averageValue,
},
},
},
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricStatus{
Name: api.ResourceCPU,
Current: autoscaling.MetricValueStatus{
AverageUtilization: &currentUtilization,
AverageValue: &averageValue,
},
},
},
}
},
}
}
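// Illustrative sketch (hypothetical test helper; assumes "math/rand" and
// apitestingfuzzer "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" are also
// imported, and the post-randfill FuzzerFor/Fill helper names): in practice
// these funcs are merged with the generic fuzzer funcs and driven by the
// apitesting round-trip machinery.
func fillHPA(codecs runtimeserializer.CodecFactory) *autoscaling.HorizontalPodAutoscaler {
	f := apitestingfuzzer.FuzzerFor(Funcs, rand.NewSource(1234), codecs)
	hpa := &autoscaling.HorizontalPodAutoscaler{}
	f.Fill(hpa) // applies the custom funcs above while filling the object
	return hpa
}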
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to
// serialize round-tripped fields from later HorizontalPodAutoscaler API versions.
// It returns copied=false when no changes were made and the original input map is returned as-is.
//
// It should always be called when converting internal -> external versions, prior
// to setting any of the custom annotations:
//
// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations)
// externalObj.Annotations = annotations
//
// if internal.SomeField != nil {
// if !copiedAnnotations {
// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations)
// copiedAnnotations = true
// }
// externalObj.Annotations[...] = json.Marshal(...)
// }
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
_, hasMetricsSpecs := in[MetricSpecsAnnotation]
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
_, hasToleranceScaleDown := in[ToleranceScaleDownAnnotation]
_, hasToleranceScaleUp := in[ToleranceScaleUpAnnotation]
_, hasMetricsStatuses := in[MetricStatusesAnnotation]
_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
if hasMetricsSpecs || hasBehaviorSpecs || hasToleranceScaleDown || hasToleranceScaleUp || hasMetricsStatuses || hasConditions {
out = DeepCopyStringMap(in)
delete(out, MetricSpecsAnnotation)
delete(out, BehaviorSpecsAnnotation)
delete(out, ToleranceScaleDownAnnotation)
delete(out, ToleranceScaleUpAnnotation)
delete(out, MetricStatusesAnnotation)
delete(out, HorizontalPodAutoscalerConditionsAnnotation)
return out, true
}
return in, false
}
// DeepCopyStringMap returns a copy of the input map.
// If input is nil, an empty map is returned.
func DeepCopyStringMap(in map[string]string) map[string]string {
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}
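// Illustrative sketch (hypothetical helper): the copied return value tells
// callers whether they may mutate the returned map directly or must copy it
// first, which is exactly the pattern shown in the doc comment above.
func dropAndAnnotate(in map[string]string, key, val string) map[string]string {
	out, copied := DropRoundTripHorizontalPodAutoscalerAnnotations(in)
	if !copied {
		out = DeepCopyStringMap(out) // never mutate the caller's map
	}
	out[key] = val
	return out
}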
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the autoscaling API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/autoscaling/v1"
"k8s.io/kubernetes/pkg/apis/autoscaling/v2"
"k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1"
"k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(autoscaling.AddToScheme(scheme))
utilruntime.Must(v2beta2.AddToScheme(scheme))
utilruntime.Must(v2.AddToScheme(scheme))
utilruntime.Must(v2beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v2.SchemeGroupVersion, v1.SchemeGroupVersion, v2beta1.SchemeGroupVersion, v2beta2.SchemeGroupVersion))
}
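// Illustrative sketch (hypothetical helper; assumes
// "k8s.io/apimachinery/pkg/runtime/schema" is also imported): Install can
// target any scheme, not just legacyscheme.Scheme, and the SetVersionPriority
// call above makes autoscaling/v2 the preferred version.
func preferredAutoscalingVersion() schema.GroupVersion {
	scheme := runtime.NewScheme()
	Install(scheme)
	return scheme.PrioritizedVersionsForGroup(autoscaling.GroupName)[0] // autoscaling/v2
}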
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoscaling
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Scale{},
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
)
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"encoding/json"
autoscalingv1 "k8s.io/api/autoscaling/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/core"
)
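// The two stubs below are intentional no-ops. In autoscaling/v1,
// ObjectMetricSource.Target is a CrossVersionObjectReference, while the
// internal ObjectMetricSource.Target is a MetricTarget; the generated code
// pairs these fields by name (see
// autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource), so
// these functions exist only to satisfy the conversion machinery. The real
// mapping is done by the Object metric conversions further down.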
func Convert_autoscaling_MetricTarget_To_v1_CrossVersionObjectReference(in *autoscaling.MetricTarget, out *autoscalingv1.CrossVersionObjectReference, s conversion.Scope) error {
return nil
}
func Convert_v1_CrossVersionObjectReference_To_autoscaling_MetricTarget(in *autoscalingv1.CrossVersionObjectReference, out *autoscaling.MetricTarget, s conversion.Scope) error {
return nil
}
func Convert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv1.ExternalMetricSource, s conversion.Scope) error {
out.MetricName = in.Metric.Name
out.TargetValue = in.Target.Value
out.TargetAverageValue = in.Target.AverageValue
out.MetricSelector = in.Metric.Selector
return nil
}
func Convert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv1.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
value := in.TargetValue
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
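// v1 external sources carry TargetValue and/or TargetAverageValue; when no
// raw target value is set, the metric must be average-value based.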
if value == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.ValueMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
Value: value,
AverageValue: averageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.MetricSelector,
}
return nil
}
func Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv1.ObjectMetricSource, s conversion.Scope) error {
if in.Target.Value != nil {
out.TargetValue = *in.Target.Value
}
out.AverageValue = in.Target.AverageValue
out.Target = autoscalingv1.CrossVersionObjectReference{
Kind: in.DescribedObject.Kind,
Name: in.DescribedObject.Name,
APIVersion: in.DescribedObject.APIVersion,
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv1.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
var metricType autoscaling.MetricTargetType
if in.AverageValue == nil {
metricType = autoscaling.ValueMetricType
} else {
metricType = autoscaling.AverageValueMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
Value: &in.TargetValue,
AverageValue: in.AverageValue,
}
out.DescribedObject = autoscaling.CrossVersionObjectReference{
Kind: in.Target.Kind,
Name: in.Target.Name,
APIVersion: in.Target.APIVersion,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv1.PodsMetricSource, s conversion.Scope) error {
if in.Target.AverageValue != nil {
out.TargetAverageValue = *in.Target.AverageValue
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
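// pods-based metrics in v1 only support per-pod average values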
metricType := autoscaling.AverageValueMetricType
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: &in.TargetAverageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv1.ExternalMetricStatus, s conversion.Scope) error {
out.MetricName = in.Metric.Name
if in.Current.Value != nil {
out.CurrentValue = *in.Current.Value
}
if in.Current.AverageValue != nil {
out.CurrentAverageValue = in.Current.AverageValue
}
out.MetricSelector = in.Metric.Selector
return nil
}
func Convert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv1.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
value := in.CurrentValue
averageValue := in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
Value: &value,
AverageValue: averageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.MetricSelector,
}
return nil
}
func Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv1.ObjectMetricStatus, s conversion.Scope) error {
if in.Current.Value != nil {
out.CurrentValue = *in.Current.Value
}
if in.Current.AverageValue != nil {
currentAverageValue := *in.Current.AverageValue
out.AverageValue = &currentAverageValue
}
out.Target = autoscalingv1.CrossVersionObjectReference{
Kind: in.DescribedObject.Kind,
Name: in.DescribedObject.Name,
APIVersion: in.DescribedObject.APIVersion,
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv1.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
out.Current = autoscaling.MetricValueStatus{
Value: &in.CurrentValue,
AverageValue: in.AverageValue,
}
out.DescribedObject = autoscaling.CrossVersionObjectReference{
Kind: in.Target.Kind,
Name: in.Target.Name,
APIVersion: in.Target.APIVersion,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv1.PodsMetricStatus, s conversion.Scope) error {
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv1.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
out.Current = autoscaling.MetricValueStatus{
AverageValue: &in.CurrentAverageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
utilization := in.TargetAverageUtilization
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
if utilization == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.UtilizationMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv1.ResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.TargetAverageUtilization = in.Target.AverageUtilization
out.TargetAverageValue = in.Target.AverageValue
return nil
}
func Convert_v1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv1.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
out.Container = in.Container
utilization := in.CurrentAverageUtilization
averageValue := &in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ContainerResourceMetricStatus_To_v1_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv1.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
out.CurrentAverageUtilization = in.Current.AverageUtilization
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
return nil
}
func Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
utilization := in.CurrentAverageUtilization
averageValue := &in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv1.ResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.CurrentAverageUtilization = in.Current.AverageUtilization
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
return nil
}
func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv1.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// clear any pre-existing round-trip annotations to make sure the only ones set are ones we produced during conversion
annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
out.Annotations = annotations
otherMetrics := make([]autoscalingv1.MetricSpec, 0, len(in.Spec.Metrics))
for _, metric := range in.Spec.Metrics {
if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == core.ResourceCPU && metric.Resource.Target.AverageUtilization != nil {
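// CPU utilization round-trips through Spec.TargetCPUUtilizationPercentage
// (see Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec),
// so skip it here rather than duplicating it in the annotation.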
continue
}
convMetric := autoscalingv1.MetricSpec{}
if err := Convert_autoscaling_MetricSpec_To_v1_MetricSpec(&metric, &convMetric, s); err != nil {
return err
}
otherMetrics = append(otherMetrics, convMetric)
}
// NB: we need to save the status even if it maps to a CPU utilization status in order to save the raw value as well
currentMetrics := make([]autoscalingv1.MetricStatus, len(in.Status.CurrentMetrics))
for i, currentMetric := range in.Status.CurrentMetrics {
if err := Convert_autoscaling_MetricStatus_To_v1_MetricStatus(&currentMetric, &currentMetrics[i], s); err != nil {
return err
}
}
// store HPA conditions in an annotation
currentConditions := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(in.Status.Conditions))
for i, currentCondition := range in.Status.Conditions {
if err := Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition(&currentCondition, &currentConditions[i], s); err != nil {
return err
}
}
if len(otherMetrics) > 0 {
otherMetricsEnc, err := json.Marshal(otherMetrics)
if err != nil {
return err
}
// copy before mutating
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc)
}
if len(in.Status.CurrentMetrics) > 0 {
currentMetricsEnc, err := json.Marshal(currentMetrics)
if err != nil {
return err
}
// copy before mutating
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc)
}
if in.Spec.Behavior != nil {
// TODO: this is marshaling an internal type. Fix this without breaking backwards compatibility.
behaviorEnc, err := json.Marshal(in.Spec.Behavior)
if err != nil {
return err
}
// copy before mutating
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.BehaviorSpecsAnnotation] = string(behaviorEnc)
}
if len(in.Status.Conditions) > 0 {
currentConditionsEnc, err := json.Marshal(currentConditions)
if err != nil {
return err
}
// copy before mutating
if !copiedAnnotations {
//nolint:ineffassign
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation] = string(currentConditionsEnc)
}
return nil
}
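// The two HorizontalPodAutoscaler conversions above and below are designed to
// be lossless inverses: v1 can natively express only a CPU utilization
// target, so other metrics, the behavior spec, current metrics, and
// conditions are JSON-encoded into round-trip annotations on the way to v1
// and decoded (then dropped from the annotations) on the way back. A rough
// round-trip sketch, with internalHPA as a placeholder value and errors
// ignored for brevity:
//
//	var v1HPA autoscalingv1.HorizontalPodAutoscaler
//	_ = Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&internalHPA, &v1HPA, nil)
//	var roundTripped autoscaling.HorizontalPodAutoscaler
//	_ = Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&v1HPA, &roundTripped, nil)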
func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv1.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics {
var otherMetrics []autoscalingv1.MetricSpec
if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err == nil {
// the normal Spec conversion may already have populated out.Spec.Metrics with a single (CPU utilization) element, so account for it here
outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics))
for i, metric := range otherMetrics {
if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil {
return err
}
}
if out.Spec.Metrics != nil {
outMetrics[len(otherMetrics)] = out.Spec.Metrics[0]
}
out.Spec.Metrics = outMetrics
}
}
if behaviorEnc, hasConstraints := out.Annotations[autoscaling.BehaviorSpecsAnnotation]; hasConstraints {
// TODO: this is unmarshaling an internal type. Fix this without breaking backwards compatibility.
var behavior autoscaling.HorizontalPodAutoscalerBehavior
if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err == nil && behavior != (autoscaling.HorizontalPodAutoscalerBehavior{}) {
out.Spec.Behavior = &behavior
}
}
if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics {
// ignore any existing status values -- the ones here have more information
var currentMetrics []autoscalingv1.MetricStatus
if err := json.Unmarshal([]byte(currentMetricsEnc), &currentMetrics); err == nil {
out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics))
for i, currentMetric := range currentMetrics {
if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(&currentMetric, &out.Status.CurrentMetrics[i], s); err != nil {
return err
}
}
}
}
// autoscaling/v1 formerly had an implicit default for the CPU utilization target, applied in the controller; starting with v2beta1 the default is applied explicitly.
// We apply it here, during conversion, since this is where we have access to the full set of metrics from the annotation.
if len(out.Spec.Metrics) == 0 {
// no other metrics, no explicit CPU value set
out.Spec.Metrics = []autoscaling.MetricSpec{
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricSource{
Name: core.ResourceCPU,
Target: autoscaling.MetricTarget{
Type: autoscaling.UtilizationMetricType,
},
},
},
}
out.Spec.Metrics[0].Resource.Target.AverageUtilization = new(int32)
*out.Spec.Metrics[0].Resource.Target.AverageUtilization = autoscaling.DefaultCPUUtilization
}
if currentConditionsEnc, hasCurrentConditions := out.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]; hasCurrentConditions {
var currentConditions []autoscalingv1.HorizontalPodAutoscalerCondition
if err := json.Unmarshal([]byte(currentConditionsEnc), &currentConditions); err == nil {
out.Status.Conditions = make([]autoscaling.HorizontalPodAutoscalerCondition, len(currentConditions))
for i, currentCondition := range currentConditions {
if err := Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(&currentCondition, &out.Status.Conditions[i], s); err != nil {
return err
}
}
}
}
// drop round-tripping annotations after converting to internal
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv1.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = in.MinReplicas
out.MaxReplicas = in.MaxReplicas
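// v1 can natively express only a single CPU utilization target, so take the
// first such metric; the full list is preserved separately in an annotation.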
for _, metric := range in.Metrics {
if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == core.ResourceCPU && metric.Resource.Target.AverageUtilization != nil {
out.TargetCPUUtilizationPercentage = new(int32)
*out.TargetCPUUtilizationPercentage = *metric.Resource.Target.AverageUtilization
break
}
}
return nil
}
func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv1.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = in.MinReplicas
out.MaxReplicas = in.MaxReplicas
if in.TargetCPUUtilizationPercentage != nil {
out.Metrics = []autoscaling.MetricSpec{
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricSource{
Name: core.ResourceCPU,
Target: autoscaling.MetricTarget{
Type: autoscaling.UtilizationMetricType,
},
},
},
}
out.Metrics[0].Resource.Target.AverageUtilization = new(int32)
*out.Metrics[0].Resource.Target.AverageUtilization = *in.TargetCPUUtilizationPercentage
}
return nil
}
func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv1.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.LastScaleTime = in.LastScaleTime
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
for _, metric := range in.CurrentMetrics {
if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == core.ResourceCPU {
if metric.Resource.Current.AverageUtilization != nil {
out.CurrentCPUUtilizationPercentage = new(int32)
*out.CurrentCPUUtilizationPercentage = *metric.Resource.Current.AverageUtilization
}
}
}
return nil
}
func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv1.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.LastScaleTime = in.LastScaleTime
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentCPUUtilizationPercentage != nil {
out.CurrentMetrics = []autoscaling.MetricStatus{
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricStatus{
Name: core.ResourceCPU,
},
},
}
out.CurrentMetrics[0].Resource.Current.AverageUtilization = new(int32)
*out.CurrentMetrics[0].Resource.Current.AverageUtilization = *in.CurrentCPUUtilizationPercentage
}
return nil
}
func Convert_v1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv1.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
out.Container = in.Container
utilization := in.TargetAverageUtilization
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
if utilization == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.UtilizationMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ContainerResourceMetricSource_To_v1_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv1.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
out.TargetAverageUtilization = in.Target.AverageUtilization
out.TargetAverageValue = in.Target.AverageValue
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv1.HorizontalPodAutoscaler) {
if obj.Spec.MinReplicas == nil {
obj.Spec.MinReplicas = ptr.To[int32](1)
}
// NB: we apply a default for CPU utilization in conversion because
// we need access to the annotations to properly apply the default.
}
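// A minimal sketch of the effect (illustrative values):
//
//	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
//	hpa.Spec.MaxReplicas = 10
//	SetDefaults_HorizontalPodAutoscaler(hpa)
//	// *hpa.Spec.MinReplicas is now 1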
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
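// localSchemeBuilder extends the SchemeBuilder of the external v1 types so
// that the defaulting and conversion functions defined in this package are
// registered on the same builder.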
localSchemeBuilder = &autoscalingv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*autoscalingv1.CrossVersionObjectReference)(nil), (*autoscaling.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(a.(*autoscalingv1.CrossVersionObjectReference), b.(*autoscaling.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.CrossVersionObjectReference)(nil), (*autoscalingv1.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(a.(*autoscaling.CrossVersionObjectReference), b.(*autoscalingv1.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*autoscalingv1.HorizontalPodAutoscalerCondition), b.(*autoscaling.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerCondition)(nil), (*autoscalingv1.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition(a.(*autoscaling.HorizontalPodAutoscalerCondition), b.(*autoscalingv1.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.HorizontalPodAutoscalerList)(nil), (*autoscaling.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(a.(*autoscalingv1.HorizontalPodAutoscalerList), b.(*autoscaling.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerList)(nil), (*autoscalingv1.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(a.(*autoscaling.HorizontalPodAutoscalerList), b.(*autoscalingv1.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MetricSpec_To_autoscaling_MetricSpec(a.(*autoscalingv1.MetricSpec), b.(*autoscaling.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricSpec)(nil), (*autoscalingv1.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricSpec_To_v1_MetricSpec(a.(*autoscaling.MetricSpec), b.(*autoscalingv1.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.MetricStatus)(nil), (*autoscaling.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_MetricStatus_To_autoscaling_MetricStatus(a.(*autoscalingv1.MetricStatus), b.(*autoscaling.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricStatus)(nil), (*autoscalingv1.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricStatus_To_v1_MetricStatus(a.(*autoscaling.MetricStatus), b.(*autoscalingv1.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.Scale)(nil), (*autoscaling.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Scale_To_autoscaling_Scale(a.(*autoscalingv1.Scale), b.(*autoscaling.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.Scale)(nil), (*autoscalingv1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_Scale_To_v1_Scale(a.(*autoscaling.Scale), b.(*autoscalingv1.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.ScaleSpec)(nil), (*autoscaling.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(a.(*autoscalingv1.ScaleSpec), b.(*autoscaling.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleSpec)(nil), (*autoscalingv1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(a.(*autoscaling.ScaleSpec), b.(*autoscalingv1.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*autoscalingv1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleStatus)(nil), (*autoscalingv1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*autoscalingv1.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ContainerResourceMetricSource)(nil), (*autoscalingv1.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricSource_To_v1_ContainerResourceMetricSource(a.(*autoscaling.ContainerResourceMetricSource), b.(*autoscalingv1.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ContainerResourceMetricStatus)(nil), (*autoscalingv1.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricStatus_To_v1_ContainerResourceMetricStatus(a.(*autoscaling.ContainerResourceMetricStatus), b.(*autoscalingv1.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*autoscalingv1.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*autoscalingv1.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*autoscalingv1.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*autoscalingv1.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*autoscalingv1.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*autoscalingv1.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*autoscalingv1.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*autoscalingv1.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*autoscalingv1.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*autoscalingv1.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.MetricTarget)(nil), (*autoscalingv1.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricTarget_To_v1_CrossVersionObjectReference(a.(*autoscaling.MetricTarget), b.(*autoscalingv1.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*autoscalingv1.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*autoscalingv1.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*autoscalingv1.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*autoscalingv1.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.PodsMetricSource)(nil), (*autoscalingv1.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*autoscalingv1.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*autoscalingv1.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*autoscalingv1.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*autoscalingv1.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*autoscalingv1.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*autoscalingv1.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*autoscalingv1.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ContainerResourceMetricSource)(nil), (*autoscaling.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(a.(*autoscalingv1.ContainerResourceMetricSource), b.(*autoscaling.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ContainerResourceMetricStatus)(nil), (*autoscaling.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(a.(*autoscalingv1.ContainerResourceMetricStatus), b.(*autoscaling.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.CrossVersionObjectReference)(nil), (*autoscaling.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CrossVersionObjectReference_To_autoscaling_MetricTarget(a.(*autoscalingv1.CrossVersionObjectReference), b.(*autoscaling.MetricTarget), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*autoscalingv1.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*autoscalingv1.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*autoscalingv1.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*autoscalingv1.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*autoscalingv1.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*autoscalingv1.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*autoscalingv1.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*autoscalingv1.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*autoscalingv1.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*autoscalingv1.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv1.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*autoscalingv1.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv1.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.TargetAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
out.Container = in.Container
return nil
}
func autoConvert_autoscaling_ContainerResourceMetricSource_To_v1_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv1.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = corev1.ResourceName(in.Name)
out.Container = in.Container
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv1.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.CurrentAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
out.Container = in.Container
return nil
}
func autoConvert_autoscaling_ContainerResourceMetricStatus_To_v1_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv1.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = corev1.ResourceName(in.Name)
out.Container = in.Container
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv1.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv1.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s)
}
func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv1.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv1.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s)
}
func autoConvert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv1.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.MetricSelector requires manual conversion: does not exist in peer-type
// WARNING: in.TargetValue requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv1.ExternalMetricSource, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv1.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.MetricSelector requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentValue requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv1.ExternalMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv1.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv1.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv1.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscaling.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = autoscaling.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv1.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv1.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscalingv1.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv1.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v1_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv1.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv1.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv1.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscalingv1.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv1.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv1.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
// WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv1.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
// WARNING: in.Metrics requires manual conversion: does not exist in peer-type
// WARNING: in.Behavior requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv1.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
// WARNING: in.CurrentCPUUtilizationPercentage requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv1.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
// WARNING: in.CurrentMetrics requires manual conversion: does not exist in peer-type
// WARNING: in.Conditions requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv1.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscaling.ObjectMetricSource)
if err := Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscaling.PodsMetricSource)
if err := Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscaling.ResourceMetricSource)
if err := Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricSource)
if err := Convert_v1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscaling.ExternalMetricSource)
if err := Convert_v1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_v1_MetricSpec_To_autoscaling_MetricSpec is an autogenerated conversion function.
func Convert_v1_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv1.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
return autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in, out, s)
}
func autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv1.MetricSpec, s conversion.Scope) error {
out.Type = autoscalingv1.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscalingv1.ObjectMetricSource)
if err := Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscalingv1.PodsMetricSource)
if err := Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscalingv1.ResourceMetricSource)
if err := Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv1.ContainerResourceMetricSource)
if err := Convert_autoscaling_ContainerResourceMetricSource_To_v1_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscalingv1.ExternalMetricSource)
if err := Convert_autoscaling_ExternalMetricSource_To_v1_ExternalMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_autoscaling_MetricSpec_To_v1_MetricSpec is an autogenerated conversion function.
func Convert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv1.MetricSpec, s conversion.Scope) error {
return autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in, out, s)
}
func autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv1.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscaling.ObjectMetricStatus)
if err := Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscaling.PodsMetricStatus)
if err := Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscaling.ResourceMetricStatus)
if err := Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricStatus)
if err := Convert_v1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscaling.ExternalMetricStatus)
if err := Convert_v1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_v1_MetricStatus_To_autoscaling_MetricStatus is an autogenerated conversion function.
func Convert_v1_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv1.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
return autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in, out, s)
}
func autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv1.MetricStatus, s conversion.Scope) error {
out.Type = autoscalingv1.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscalingv1.ObjectMetricStatus)
if err := Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscalingv1.PodsMetricStatus)
if err := Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscalingv1.ResourceMetricStatus)
if err := Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv1.ContainerResourceMetricStatus)
if err := Convert_autoscaling_ContainerResourceMetricStatus_To_v1_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscalingv1.ExternalMetricStatus)
if err := Convert_autoscaling_ExternalMetricStatus_To_v1_ExternalMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_autoscaling_MetricStatus_To_v1_MetricStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv1.MetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in, out, s)
}
func autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv1.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
// WARNING: in.Target requires manual conversion: does not exist in peer-type
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.TargetValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
// WARNING: in.AverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv1.ObjectMetricSource, s conversion.Scope) error {
// WARNING: in.DescribedObject requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: does not exist in peer-type
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv1.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
// WARNING: in.Target requires manual conversion: does not exist in peer-type
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
// WARNING: in.AverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv1.ObjectMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
// WARNING: in.DescribedObject requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv1.PodsMetricSource, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv1.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv1.PodsMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.TargetAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv1.ResourceMetricSource, s conversion.Scope) error {
out.Name = corev1.ResourceName(in.Name)
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.CurrentAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv1.ResourceMetricStatus, s conversion.Scope) error {
out.Name = corev1.ResourceName(in.Name)
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1_Scale_To_autoscaling_Scale(in *autoscalingv1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Scale_To_autoscaling_Scale is an autogenerated conversion function.
func Convert_v1_Scale_To_autoscaling_Scale(in *autoscalingv1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s)
}
func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *autoscalingv1.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_Scale_To_v1_Scale is an autogenerated conversion function.
func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *autoscalingv1.Scale, s conversion.Scope) error {
return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s)
}
func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *autoscalingv1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function.
func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *autoscalingv1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s)
}
func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *autoscalingv1.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function.
func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *autoscalingv1.ScaleSpec, s conversion.Scope) error {
return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s)
}
func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *autoscalingv1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.Selector = in.Selector
return nil
}
// Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus is an autogenerated conversion function.
func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *autoscalingv1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s)
}
func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *autoscalingv1.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.Selector = in.Selector
return nil
}
// Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus is an autogenerated conversion function.
func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *autoscalingv1.ScaleStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
autoscalingv1 "k8s.io/api/autoscaling/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&autoscalingv1.HorizontalPodAutoscaler{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscaler(obj.(*autoscalingv1.HorizontalPodAutoscaler))
})
scheme.AddTypeDefaultingFunc(&autoscalingv1.HorizontalPodAutoscalerList{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*autoscalingv1.HorizontalPodAutoscalerList))
})
return nil
}
func SetObjectDefaults_HorizontalPodAutoscaler(in *autoscalingv1.HorizontalPodAutoscaler) {
SetDefaults_HorizontalPodAutoscaler(in)
}
func SetObjectDefaults_HorizontalPodAutoscalerList(in *autoscalingv1.HorizontalPodAutoscalerList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_HorizontalPodAutoscaler(a)
}
}
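// Example (illustrative sketch, not generated code): once RegisterDefaults has
// been applied to a scheme, scheme.Default dispatches to the typed defaulting
// functions registered above.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
//	scheme.Default(hpa) // calls SetObjectDefaults_HorizontalPodAutoscaler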
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1
import (
context "context"
fmt "fmt"
autoscalingv1 "k8s.io/api/autoscaling/v1"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*autoscalingv1.Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/scale":
return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*autoscalingv1.Scale), safe.Cast[*autoscalingv1.Scale](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *autoscalingv1.Scale) (errs field.ErrorList) {
// field autoscalingv1.Scale.TypeMeta has no validation
// field autoscalingv1.Scale.ObjectMeta has no validation
// field autoscalingv1.Scale.Spec
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *autoscalingv1.ScaleSpec) (errs field.ErrorList) {
errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *autoscalingv1.Scale) *autoscalingv1.ScaleSpec { return &oldObj.Spec }))...)
// field autoscalingv1.Scale.Status has no validation
return errs
}
func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *autoscalingv1.ScaleSpec) (errs field.ErrorList) {
// field autoscalingv1.ScaleSpec.Replicas
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
// optional value-type fields with zero-value defaults are purely documentation,
// so unchanged values need not be revalidated on update
if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
return nil // no changes
}
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
}(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *autoscalingv1.ScaleSpec) *int32 { return &oldObj.Replicas }))...)
return errs
}
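// Example (illustrative sketch, not generated code): invoking the generated
// ScaleSpec validator directly; ctx and op are assumed to be supplied by the
// calling request machinery.
//
//	spec := &autoscalingv1.ScaleSpec{Replicas: -1}
//	errs := Validate_ScaleSpec(ctx, op, field.NewPath("spec"), spec, nil)
//	// errs reports that spec.replicas must be greater than or equal to 0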
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/autoscaling"
)
func Convert_autoscaling_HorizontalPodAutoscaler_To_v2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// v2 round-trips to internal without any serialized annotations; make sure annotations from other versions don't get serialized
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
func Convert_v2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_v2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// v2 round-trips to internal without any serialized annotations; make sure annotations from other versions don't get serialized
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
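// Note (editorial assumption, based on the comments above):
// DropRoundTripHorizontalPodAutoscalerAnnotations strips the round-trip
// annotations that older API versions use to carry fields v2 expresses
// natively, so that converting through the internal type never leaks those
// annotations back into a serialized v2 object.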
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/utils/ptr"
)
var (
// These constants repeat the previous HPA behavior
scaleUpLimitPercent int32 = 100
scaleUpLimitMinimumPods int32 = 4
scaleUpPeriod int32 = 15
scaleUpStabilizationSeconds int32
maxPolicy = autoscalingv2.MaxChangePolicySelect
defaultHPAScaleUpRules = autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: &scaleUpStabilizationSeconds,
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{
Type: autoscalingv2.PodsScalingPolicy,
Value: scaleUpLimitMinimumPods,
PeriodSeconds: scaleUpPeriod,
},
{
Type: autoscalingv2.PercentScalingPolicy,
Value: scaleUpLimitPercent,
PeriodSeconds: scaleUpPeriod,
},
},
}
scaleDownPeriod int32 = 15
// The downscale stabilization window can currently be set from the command line,
// so we cannot override that option from here (StabilizationWindowSeconds stays nil)
scaleDownLimitPercent int32 = 100
defaultHPAScaleDownRules = autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: nil,
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{
Type: autoscalingv2.PercentScalingPolicy,
Value: scaleDownLimitPercent,
PeriodSeconds: scaleDownPeriod,
},
},
}
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2.HorizontalPodAutoscaler) {
if obj.Spec.MinReplicas == nil {
obj.Spec.MinReplicas = ptr.To[int32](1)
}
if len(obj.Spec.Metrics) == 0 {
utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization)
obj.Spec.Metrics = []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricSource{
Name: v1.ResourceCPU,
Target: autoscalingv2.MetricTarget{
Type: autoscalingv2.UtilizationMetricType,
AverageUtilization: &utilizationDefaultVal,
},
},
},
}
}
SetDefaults_HorizontalPodAutoscalerBehavior(obj)
}
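// Example (illustrative sketch, not part of this file): an HPA created with an
// empty spec receives minReplicas=1 and a single CPU utilization metric after
// defaulting.
//
//	hpa := &autoscalingv2.HorizontalPodAutoscaler{}
//	SetDefaults_HorizontalPodAutoscaler(hpa)
//	// *hpa.Spec.MinReplicas == 1
//	// hpa.Spec.Metrics[0].Resource.Name == v1.ResourceCPU
//	// *hpa.Spec.Metrics[0].Resource.Target.AverageUtilization == autoscaling.DefaultCPUUtilization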
// SetDefaults_HorizontalPodAutoscalerBehavior fills in default scaling rules
// whenever the behavior field is set (for scale-up, scale-down, or both)
func SetDefaults_HorizontalPodAutoscalerBehavior(obj *autoscalingv2.HorizontalPodAutoscaler) {
// If behavior contains a scaling rule policy (either for scale-up, scale-down, or both), we
// should fill all the unset scaling policy fields (i.e. StabilizationWindowSeconds,
// SelectPolicy, Policies) with default values
if obj.Spec.Behavior != nil {
obj.Spec.Behavior.ScaleUp = GenerateHPAScaleUpRules(obj.Spec.Behavior.ScaleUp)
obj.Spec.Behavior.ScaleDown = GenerateHPAScaleDownRules(obj.Spec.Behavior.ScaleDown)
}
}
// GenerateHPAScaleUpRules returns a fully-initialized HPAScalingRules value
// We guarantee that no pointer in the structure will be nil
func GenerateHPAScaleUpRules(scalingRules *autoscalingv2.HPAScalingRules) *autoscalingv2.HPAScalingRules {
defaultScalingRules := defaultHPAScaleUpRules.DeepCopy()
return copyHPAScalingRules(scalingRules, defaultScalingRules)
}
// GenerateHPAScaleDownRules returns a fully-initialized HPAScalingRules value
// We guarantee that no pointer in the structure will be nil,
// EXCEPT StabilizationWindowSeconds; for the reasoning, see the comment above defaultHPAScaleDownRules
func GenerateHPAScaleDownRules(scalingRules *autoscalingv2.HPAScalingRules) *autoscalingv2.HPAScalingRules {
defaultScalingRules := defaultHPAScaleDownRules.DeepCopy()
return copyHPAScalingRules(scalingRules, defaultScalingRules)
}
// copyHPAScalingRules copies all non-nil fields of the HPA scaling rules structure from `from` into `to`
func copyHPAScalingRules(from, to *autoscalingv2.HPAScalingRules) *autoscalingv2.HPAScalingRules {
if from == nil {
return to
}
if from.SelectPolicy != nil {
to.SelectPolicy = from.SelectPolicy
}
if from.StabilizationWindowSeconds != nil {
to.StabilizationWindowSeconds = from.StabilizationWindowSeconds
}
if from.Policies != nil {
to.Policies = from.Policies
}
if from.Tolerance != nil {
to.Tolerance = from.Tolerance
}
return to
}
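// Example (illustrative sketch, not part of this file): a caller-specified
// scale-up rule that only sets the stabilization window keeps the default
// select policy and policies.
//
//	window := int32(300)
//	rules := GenerateHPAScaleUpRules(&autoscalingv2.HPAScalingRules{
//		StabilizationWindowSeconds: &window,
//	})
//	// *rules.StabilizationWindowSeconds == 300
//	// *rules.SelectPolicy == autoscalingv2.MaxChangePolicySelect
//	// len(rules.Policies) == 2 (the default pods and percent policies)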
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &autoscalingv2.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
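// Example (illustrative sketch, not part of this file): wiring this package
// into a scheme. AddToScheme applies both the generated registrations and the
// manually registered defaulting functions.
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}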
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v2
import (
unsafe "unsafe"
autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ContainerResourceMetricSource)(nil), (*autoscaling.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(a.(*autoscalingv2.ContainerResourceMetricSource), b.(*autoscaling.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ContainerResourceMetricSource)(nil), (*autoscalingv2.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource(a.(*autoscaling.ContainerResourceMetricSource), b.(*autoscalingv2.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ContainerResourceMetricStatus)(nil), (*autoscaling.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(a.(*autoscalingv2.ContainerResourceMetricStatus), b.(*autoscaling.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ContainerResourceMetricStatus)(nil), (*autoscalingv2.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus(a.(*autoscaling.ContainerResourceMetricStatus), b.(*autoscalingv2.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.CrossVersionObjectReference)(nil), (*autoscaling.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(a.(*autoscalingv2.CrossVersionObjectReference), b.(*autoscaling.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.CrossVersionObjectReference)(nil), (*autoscalingv2.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(a.(*autoscaling.CrossVersionObjectReference), b.(*autoscalingv2.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*autoscalingv2.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*autoscalingv2.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricSource_To_v2_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*autoscalingv2.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*autoscalingv2.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*autoscalingv2.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricStatus_To_v2_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*autoscalingv2.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HPAScalingPolicy)(nil), (*autoscaling.HPAScalingPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(a.(*autoscalingv2.HPAScalingPolicy), b.(*autoscaling.HPAScalingPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HPAScalingPolicy)(nil), (*autoscalingv2.HPAScalingPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HPAScalingPolicy_To_v2_HPAScalingPolicy(a.(*autoscaling.HPAScalingPolicy), b.(*autoscalingv2.HPAScalingPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HPAScalingRules)(nil), (*autoscaling.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules(a.(*autoscalingv2.HPAScalingRules), b.(*autoscaling.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HPAScalingRules)(nil), (*autoscalingv2.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules(a.(*autoscaling.HPAScalingRules), b.(*autoscalingv2.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HorizontalPodAutoscalerBehavior)(nil), (*autoscaling.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(a.(*autoscalingv2.HorizontalPodAutoscalerBehavior), b.(*autoscaling.HorizontalPodAutoscalerBehavior), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerBehavior)(nil), (*autoscalingv2.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2_HorizontalPodAutoscalerBehavior(a.(*autoscaling.HorizontalPodAutoscalerBehavior), b.(*autoscalingv2.HorizontalPodAutoscalerBehavior), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*autoscalingv2.HorizontalPodAutoscalerCondition), b.(*autoscaling.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerCondition)(nil), (*autoscalingv2.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2_HorizontalPodAutoscalerCondition(a.(*autoscaling.HorizontalPodAutoscalerCondition), b.(*autoscalingv2.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HorizontalPodAutoscalerList)(nil), (*autoscaling.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(a.(*autoscalingv2.HorizontalPodAutoscalerList), b.(*autoscaling.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerList)(nil), (*autoscalingv2.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerList_To_v2_HorizontalPodAutoscalerList(a.(*autoscaling.HorizontalPodAutoscalerList), b.(*autoscalingv2.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*autoscalingv2.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*autoscalingv2.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*autoscalingv2.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*autoscalingv2.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*autoscalingv2.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*autoscalingv2.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.MetricIdentifier)(nil), (*autoscaling.MetricIdentifier)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(a.(*autoscalingv2.MetricIdentifier), b.(*autoscaling.MetricIdentifier), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricIdentifier)(nil), (*autoscalingv2.MetricIdentifier)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(a.(*autoscaling.MetricIdentifier), b.(*autoscalingv2.MetricIdentifier), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_MetricSpec_To_autoscaling_MetricSpec(a.(*autoscalingv2.MetricSpec), b.(*autoscaling.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricSpec)(nil), (*autoscalingv2.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricSpec_To_v2_MetricSpec(a.(*autoscaling.MetricSpec), b.(*autoscalingv2.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.MetricStatus)(nil), (*autoscaling.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_MetricStatus_To_autoscaling_MetricStatus(a.(*autoscalingv2.MetricStatus), b.(*autoscaling.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricStatus)(nil), (*autoscalingv2.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricStatus_To_v2_MetricStatus(a.(*autoscaling.MetricStatus), b.(*autoscalingv2.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.MetricTarget)(nil), (*autoscaling.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_MetricTarget_To_autoscaling_MetricTarget(a.(*autoscalingv2.MetricTarget), b.(*autoscaling.MetricTarget), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricTarget)(nil), (*autoscalingv2.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricTarget_To_v2_MetricTarget(a.(*autoscaling.MetricTarget), b.(*autoscalingv2.MetricTarget), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.MetricValueStatus)(nil), (*autoscaling.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(a.(*autoscalingv2.MetricValueStatus), b.(*autoscaling.MetricValueStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricValueStatus)(nil), (*autoscalingv2.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(a.(*autoscaling.MetricValueStatus), b.(*autoscalingv2.MetricValueStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*autoscalingv2.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*autoscalingv2.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricSource_To_v2_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*autoscalingv2.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*autoscalingv2.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*autoscalingv2.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricStatus_To_v2_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*autoscalingv2.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*autoscalingv2.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricSource)(nil), (*autoscalingv2.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricSource_To_v2_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*autoscalingv2.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*autoscalingv2.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*autoscalingv2.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricStatus_To_v2_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*autoscalingv2.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*autoscalingv2.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*autoscalingv2.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricSource_To_v2_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*autoscalingv2.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*autoscalingv2.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*autoscalingv2.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricStatus_To_v2_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*autoscalingv2.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*autoscalingv2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscaler_To_v2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*autoscalingv2.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*autoscalingv2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
out.Container = in.Container
return nil
}
// Convert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource is an autogenerated conversion function.
func Convert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
return autoConvert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in, out, s)
}
func autoConvert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
if err := Convert_autoscaling_MetricTarget_To_v2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2.ContainerResourceMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource(in, out, s)
}
func autoConvert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
out.Container = in.Container
return nil
}
// Convert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus is an autogenerated conversion function.
func Convert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
return autoConvert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
if err := Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2.ContainerResourceMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus(in, out, s)
}
func autoConvert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s)
}
func autoConvert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(in, out, s)
}
func autoConvert_v2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2_ExternalMetricSource_To_autoscaling_ExternalMetricSource is an autogenerated conversion function.
func Convert_v2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
return autoConvert_v2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in, out, s)
}
func autoConvert_autoscaling_ExternalMetricSource_To_v2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2.ExternalMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ExternalMetricSource_To_v2_ExternalMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ExternalMetricSource_To_v2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2.ExternalMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ExternalMetricSource_To_v2_ExternalMetricSource(in, out, s)
}
func autoConvert_v2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus is an autogenerated conversion function.
func Convert_v2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
return autoConvert_v2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ExternalMetricStatus_To_v2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2.ExternalMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ExternalMetricStatus_To_v2_ExternalMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ExternalMetricStatus_To_v2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2.ExternalMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ExternalMetricStatus_To_v2_ExternalMetricStatus(in, out, s)
}
func autoConvert_v2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in *autoscalingv2.HPAScalingPolicy, out *autoscaling.HPAScalingPolicy, s conversion.Scope) error {
out.Type = autoscaling.HPAScalingPolicyType(in.Type)
out.Value = in.Value
out.PeriodSeconds = in.PeriodSeconds
return nil
}
// Convert_v2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy is an autogenerated conversion function.
func Convert_v2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in *autoscalingv2.HPAScalingPolicy, out *autoscaling.HPAScalingPolicy, s conversion.Scope) error {
return autoConvert_v2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in, out, s)
}
func autoConvert_autoscaling_HPAScalingPolicy_To_v2_HPAScalingPolicy(in *autoscaling.HPAScalingPolicy, out *autoscalingv2.HPAScalingPolicy, s conversion.Scope) error {
out.Type = autoscalingv2.HPAScalingPolicyType(in.Type)
out.Value = in.Value
out.PeriodSeconds = in.PeriodSeconds
return nil
}
// Convert_autoscaling_HPAScalingPolicy_To_v2_HPAScalingPolicy is an autogenerated conversion function.
func Convert_autoscaling_HPAScalingPolicy_To_v2_HPAScalingPolicy(in *autoscaling.HPAScalingPolicy, out *autoscalingv2.HPAScalingPolicy, s conversion.Scope) error {
return autoConvert_autoscaling_HPAScalingPolicy_To_v2_HPAScalingPolicy(in, out, s)
}
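// The HPAScalingRules conversions below use unsafe.Pointer casts instead of
// field-by-field copies: conversion-gen emits this form when the source and
// destination types share an identical memory layout, so the cast reinterprets
// the same bytes at zero cost rather than allocating and copying.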
func autoConvert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscaling.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscaling.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
out.Tolerance = (*resource.Quantity)(unsafe.Pointer(in.Tolerance))
return nil
}
// Convert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules is an autogenerated conversion function.
func Convert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
return autoConvert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules(in, out, s)
}
func autoConvert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2.HPAScalingRules, s conversion.Scope) error {
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscalingv2.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscalingv2.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
out.Tolerance = (*resource.Quantity)(unsafe.Pointer(in.Tolerance))
return nil
}
// Convert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules is an autogenerated conversion function.
func Convert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2.HPAScalingRules, s conversion.Scope) error {
return autoConvert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules(in, out, s)
}
func autoConvert_v2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *autoscalingv2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
out.ScaleUp = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleUp))
out.ScaleDown = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleDown))
return nil
}
// Convert_v2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior is an autogenerated conversion function.
func Convert_v2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *autoscalingv2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
return autoConvert_v2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2_HorizontalPodAutoscalerBehavior(in *autoscaling.HorizontalPodAutoscalerBehavior, out *autoscalingv2.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
out.ScaleUp = (*autoscalingv2.HPAScalingRules)(unsafe.Pointer(in.ScaleUp))
out.ScaleDown = (*autoscalingv2.HPAScalingRules)(unsafe.Pointer(in.ScaleDown))
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2_HorizontalPodAutoscalerBehavior is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2_HorizontalPodAutoscalerBehavior(in *autoscaling.HorizontalPodAutoscalerBehavior, out *autoscalingv2.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2_HorizontalPodAutoscalerBehavior(in, out, s)
}
func autoConvert_v2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscaling.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = autoscaling.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_v2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_v2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscalingv2.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_v2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_v2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_v2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_v2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscalingv2.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerList_To_v2_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2_HorizontalPodAutoscalerList(in, out, s)
}
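// Note (illustrative, not part of the generated source): the list conversions
// above allocate the destination slice up front and deliberately shadow the
// parameters with `in, out := &in.Items, &out.Items` so the loop can index
// both slices directly; each element goes through the typed Convert_* helper
// rather than an unsafe cast because HorizontalPodAutoscaler differs
// structurally between the internal and v2 types.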
func autoConvert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscaling.MetricSpec, len(*in))
for i := range *in {
if err := Convert_v2_MetricSpec_To_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
out.Behavior = (*autoscaling.HorizontalPodAutoscalerBehavior)(unsafe.Pointer(in.Behavior))
return nil
}
// Convert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec is an autogenerated conversion function.
func Convert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_v2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscalingv2.MetricSpec, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricSpec_To_v2_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
out.Behavior = (*autoscalingv2.HorizontalPodAutoscalerBehavior)(unsafe.Pointer(in.Behavior))
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2_HorizontalPodAutoscalerSpec(in, out, s)
}
func autoConvert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscaling.MetricStatus, len(*in))
for i := range *in {
if err := Convert_v2_MetricStatus_To_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscaling.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_v2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscalingv2.MetricStatus, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricStatus_To_v2_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscalingv2.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *autoscalingv2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error {
out.Name = in.Name
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
return nil
}
// Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier is an autogenerated conversion function.
func Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *autoscalingv2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error {
return autoConvert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(in, out, s)
}
func autoConvert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *autoscalingv2.MetricIdentifier, s conversion.Scope) error {
out.Name = in.Name
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
return nil
}
// Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier is an autogenerated conversion function.
func Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *autoscalingv2.MetricIdentifier, s conversion.Scope) error {
return autoConvert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(in, out, s)
}
func autoConvert_v2_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object))
out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricSource)
if err := Convert_v2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscaling.ExternalMetricSource)(unsafe.Pointer(in.External))
return nil
}
// Convert_v2_MetricSpec_To_autoscaling_MetricSpec is an autogenerated conversion function.
func Convert_v2_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
return autoConvert_v2_MetricSpec_To_autoscaling_MetricSpec(in, out, s)
}
func autoConvert_autoscaling_MetricSpec_To_v2_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2.MetricSpec, s conversion.Scope) error {
out.Type = autoscalingv2.MetricSourceType(in.Type)
out.Object = (*autoscalingv2.ObjectMetricSource)(unsafe.Pointer(in.Object))
out.Pods = (*autoscalingv2.PodsMetricSource)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscalingv2.ResourceMetricSource)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2.ContainerResourceMetricSource)
if err := Convert_autoscaling_ContainerResourceMetricSource_To_v2_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscalingv2.ExternalMetricSource)(unsafe.Pointer(in.External))
return nil
}
// Convert_autoscaling_MetricSpec_To_v2_MetricSpec is an autogenerated conversion function.
func Convert_autoscaling_MetricSpec_To_v2_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2.MetricSpec, s conversion.Scope) error {
return autoConvert_autoscaling_MetricSpec_To_v2_MetricSpec(in, out, s)
}
func autoConvert_v2_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object))
out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricStatus)
if err := Convert_v2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscaling.ExternalMetricStatus)(unsafe.Pointer(in.External))
return nil
}
// Convert_v2_MetricStatus_To_autoscaling_MetricStatus is an autogenerated conversion function.
func Convert_v2_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
return autoConvert_v2_MetricStatus_To_autoscaling_MetricStatus(in, out, s)
}
func autoConvert_autoscaling_MetricStatus_To_v2_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2.MetricStatus, s conversion.Scope) error {
out.Type = autoscalingv2.MetricSourceType(in.Type)
out.Object = (*autoscalingv2.ObjectMetricStatus)(unsafe.Pointer(in.Object))
out.Pods = (*autoscalingv2.PodsMetricStatus)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscalingv2.ResourceMetricStatus)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2.ContainerResourceMetricStatus)
if err := Convert_autoscaling_ContainerResourceMetricStatus_To_v2_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscalingv2.ExternalMetricStatus)(unsafe.Pointer(in.External))
return nil
}
// Convert_autoscaling_MetricStatus_To_v2_MetricStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricStatus_To_v2_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2.MetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricStatus_To_v2_MetricStatus(in, out, s)
}
func autoConvert_v2_MetricTarget_To_autoscaling_MetricTarget(in *autoscalingv2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error {
out.Type = autoscaling.MetricTargetType(in.Type)
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_v2_MetricTarget_To_autoscaling_MetricTarget is an autogenerated conversion function.
func Convert_v2_MetricTarget_To_autoscaling_MetricTarget(in *autoscalingv2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error {
return autoConvert_v2_MetricTarget_To_autoscaling_MetricTarget(in, out, s)
}
func autoConvert_autoscaling_MetricTarget_To_v2_MetricTarget(in *autoscaling.MetricTarget, out *autoscalingv2.MetricTarget, s conversion.Scope) error {
out.Type = autoscalingv2.MetricTargetType(in.Type)
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_autoscaling_MetricTarget_To_v2_MetricTarget is an autogenerated conversion function.
func Convert_autoscaling_MetricTarget_To_v2_MetricTarget(in *autoscaling.MetricTarget, out *autoscalingv2.MetricTarget, s conversion.Scope) error {
return autoConvert_autoscaling_MetricTarget_To_v2_MetricTarget(in, out, s)
}
func autoConvert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *autoscalingv2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error {
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus is an autogenerated conversion function.
func Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *autoscalingv2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error {
return autoConvert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(in, out, s)
}
func autoConvert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *autoscalingv2.MetricValueStatus, s conversion.Scope) error {
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *autoscalingv2.MetricValueStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(in, out, s)
}
func autoConvert_v2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
if err := Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
if err := Convert_v2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
return nil
}
// Convert_v2_ObjectMetricSource_To_autoscaling_ObjectMetricSource is an autogenerated conversion function.
func Convert_v2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
return autoConvert_v2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s)
}
func autoConvert_autoscaling_ObjectMetricSource_To_v2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2.ObjectMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ObjectMetricSource_To_v2_ObjectMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ObjectMetricSource_To_v2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2.ObjectMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ObjectMetricSource_To_v2_ObjectMetricSource(in, out, s)
}
func autoConvert_v2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
if err := Convert_v2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
return nil
}
// Convert_v2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus is an autogenerated conversion function.
func Convert_v2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
return autoConvert_v2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ObjectMetricStatus_To_v2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2.ObjectMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ObjectMetricStatus_To_v2_ObjectMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ObjectMetricStatus_To_v2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2.ObjectMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ObjectMetricStatus_To_v2_ObjectMetricStatus(in, out, s)
}
func autoConvert_v2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2_PodsMetricSource_To_autoscaling_PodsMetricSource is an autogenerated conversion function.
func Convert_v2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
return autoConvert_v2_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s)
}
func autoConvert_autoscaling_PodsMetricSource_To_v2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2.PodsMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_PodsMetricSource_To_v2_PodsMetricSource is an autogenerated conversion function.
func Convert_autoscaling_PodsMetricSource_To_v2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2.PodsMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_PodsMetricSource_To_v2_PodsMetricSource(in, out, s)
}
func autoConvert_v2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
if err := Convert_v2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2_PodsMetricStatus_To_autoscaling_PodsMetricStatus is an autogenerated conversion function.
func Convert_v2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
return autoConvert_v2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s)
}
func autoConvert_autoscaling_PodsMetricStatus_To_v2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2.PodsMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_PodsMetricStatus_To_v2_PodsMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_PodsMetricStatus_To_v2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2.PodsMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_PodsMetricStatus_To_v2_PodsMetricStatus(in, out, s)
}
func autoConvert_v2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2_ResourceMetricSource_To_autoscaling_ResourceMetricSource is an autogenerated conversion function.
func Convert_v2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
return autoConvert_v2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s)
}
func autoConvert_autoscaling_ResourceMetricSource_To_v2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2.ResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
if err := Convert_autoscaling_MetricTarget_To_v2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ResourceMetricSource_To_v2_ResourceMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ResourceMetricSource_To_v2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2.ResourceMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ResourceMetricSource_To_v2_ResourceMetricSource(in, out, s)
}
func autoConvert_v2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus is an autogenerated conversion function.
func Convert_v2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
return autoConvert_v2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ResourceMetricStatus_To_v2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2.ResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
if err := Convert_autoscaling_MetricValueStatus_To_v2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ResourceMetricStatus_To_v2_ResourceMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ResourceMetricStatus_To_v2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2.ResourceMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ResourceMetricStatus_To_v2_ResourceMetricStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v2
import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&autoscalingv2.HorizontalPodAutoscaler{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscaler(obj.(*autoscalingv2.HorizontalPodAutoscaler))
})
scheme.AddTypeDefaultingFunc(&autoscalingv2.HorizontalPodAutoscalerList{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*autoscalingv2.HorizontalPodAutoscalerList))
})
return nil
}
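// Illustrative usage (a sketch, not part of the generated file): once
// registered, the defaulters run through Scheme.Default:
//
//	scheme := runtime.NewScheme()
//	_ = RegisterDefaults(scheme)
//	hpa := &autoscalingv2.HorizontalPodAutoscaler{}
//	scheme.Default(hpa) // invokes SetObjectDefaults_HorizontalPodAutoscaler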
func SetObjectDefaults_HorizontalPodAutoscaler(in *autoscalingv2.HorizontalPodAutoscaler) {
SetDefaults_HorizontalPodAutoscaler(in)
}
func SetObjectDefaults_HorizontalPodAutoscalerList(in *autoscalingv2.HorizontalPodAutoscalerList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_HorizontalPodAutoscaler(a)
}
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta1
import (
"encoding/json"
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
)
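// Note (illustrative, not part of the original source): the two stubs below
// are intentionally empty. In v2beta1 the ObjectMetricSource field named
// Target is a CrossVersionObjectReference, while the internal type's Target
// is a MetricTarget; conversion-gen pairs fields by name, so these no-op
// functions presumably exist only to satisfy it, and the real data movement
// happens in the hand-written ObjectMetricSource conversions further down.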
func Convert_autoscaling_MetricTarget_To_v2beta1_CrossVersionObjectReference(in *autoscaling.MetricTarget, out *autoscalingv2beta1.CrossVersionObjectReference, s conversion.Scope) error {
return nil
}
func Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_MetricTarget(in *autoscalingv2beta1.CrossVersionObjectReference, out *autoscaling.MetricTarget, s conversion.Scope) error {
return nil
}
func Convert_v2beta1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2beta1.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
out.Container = in.Container
utilization := in.CurrentAverageUtilization
averageValue := in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
AverageValue: &averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta1_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2beta1.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
out.CurrentAverageUtilization = in.Current.AverageUtilization
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
return nil
}
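// Note (illustrative): in the conversions above, CurrentAverageValue is
// copied into a local before its address is taken, so the internal
// MetricValueStatus never aliases the caller's struct:
//
//	q := in.CurrentAverageValue   // value copy
//	out.Current.AverageValue = &q // retains the copy, not a field of `in`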
func Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2beta1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
utilization := in.CurrentAverageUtilization
averageValue := in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
AverageValue: &averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2beta1.ResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.CurrentAverageUtilization = in.Current.AverageUtilization
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
return nil
}
func Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2beta1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
utilization := in.TargetAverageUtilization
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
if utilization == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.UtilizationMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
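// Illustrative example (assumes k8s.io/utils/ptr, which this file does not
// import): v2beta1 carries no explicit target type, so the conversion above
// infers one from which field is set:
//
//	in := &autoscalingv2beta1.ResourceMetricSource{
//		Name:                     "cpu",
//		TargetAverageUtilization: ptr.To[int32](80),
//	}
//	// after conversion:
//	//   out.Target.Type == autoscaling.UtilizationMetricType
//	//   *out.Target.AverageUtilization == 80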
func Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2beta1.ResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.TargetAverageUtilization = in.Target.AverageUtilization
out.TargetAverageValue = in.Target.AverageValue
return nil
}
func Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2beta1.ExternalMetricSource, s conversion.Scope) error {
out.MetricName = in.Metric.Name
out.TargetValue = in.Target.Value
out.TargetAverageValue = in.Target.AverageValue
out.MetricSelector = in.Metric.Selector
return nil
}
func Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2beta1.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
value := in.TargetValue
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
if value == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.ValueMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
Value: value,
AverageValue: averageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.MetricSelector,
}
return nil
}
func Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2beta1.ObjectMetricSource, s conversion.Scope) error {
if in.Target.Value != nil {
out.TargetValue = *in.Target.Value
}
out.AverageValue = in.Target.AverageValue
out.Target = autoscalingv2beta1.CrossVersionObjectReference{
Kind: in.DescribedObject.Kind,
Name: in.DescribedObject.Name,
APIVersion: in.DescribedObject.APIVersion,
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2beta1.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
var metricType autoscaling.MetricTargetType
if in.AverageValue == nil {
metricType = autoscaling.ValueMetricType
} else {
metricType = autoscaling.AverageValueMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
Value: &in.TargetValue,
AverageValue: in.AverageValue,
}
out.DescribedObject = autoscaling.CrossVersionObjectReference{
Kind: in.Target.Kind,
Name: in.Target.Name,
APIVersion: in.Target.APIVersion,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error {
if in.Target.AverageValue != nil {
targetAverageValue := *in.Target.AverageValue
out.TargetAverageValue = targetAverageValue
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2beta1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
targetAverageValue := &in.TargetAverageValue
metricType := autoscaling.AverageValueMetricType
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: targetAverageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2beta1.ExternalMetricStatus, s conversion.Scope) error {
out.CurrentAverageValue = in.Current.AverageValue
out.MetricName = in.Metric.Name
if in.Current.Value != nil {
out.CurrentValue = *in.Current.Value
}
out.MetricSelector = in.Metric.Selector
return nil
}
func Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2beta1.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
value := in.CurrentValue
averageValue := in.CurrentAverageValue
out.Current = autoscaling.MetricValueStatus{
Value: &value,
AverageValue: averageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.MetricSelector,
}
return nil
}
func Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2beta1.ObjectMetricStatus, s conversion.Scope) error {
if in.Current.Value != nil {
out.CurrentValue = *in.Current.Value
}
out.Target = autoscalingv2beta1.CrossVersionObjectReference{
Kind: in.DescribedObject.Kind,
Name: in.DescribedObject.Name,
APIVersion: in.DescribedObject.APIVersion,
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
if in.Current.AverageValue != nil {
currentAverageValue := *in.Current.AverageValue
out.AverageValue = &currentAverageValue
}
return nil
}
func Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2beta1.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
out.Current = autoscaling.MetricValueStatus{
Value: &in.CurrentValue,
AverageValue: in.AverageValue,
}
out.DescribedObject = autoscaling.CrossVersionObjectReference{
Kind: in.Target.Kind,
Name: in.Target.Name,
APIVersion: in.Target.APIVersion,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2beta1.PodsMetricStatus, s conversion.Scope) error {
if in.Current.AverageValue != nil {
out.CurrentAverageValue = *in.Current.AverageValue
}
out.MetricName = in.Metric.Name
out.Selector = in.Metric.Selector
return nil
}
func Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2beta1.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
out.Current = autoscaling.MetricValueStatus{
AverageValue: &in.CurrentAverageValue,
}
out.Metric = autoscaling.MetricIdentifier{
Name: in.MetricName,
Selector: in.Selector,
}
return nil
}
func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2beta1.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// clear any pre-existing round-trip annotations to make sure the only ones set are ones we produced during conversion
annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
out.Annotations = annotations
if in.Spec.Behavior != nil {
// TODO: this is marshaling an internal type. Fix this without breaking backwards compatibility with n-1 API servers.
behaviorEnc, err := json.Marshal(in.Spec.Behavior)
if err != nil {
return err
}
// copy before mutating
if !copiedAnnotations {
//nolint:ineffassign
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.BehaviorSpecsAnnotation] = string(behaviorEnc)
}
return nil
}
func Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta1.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
if behaviorEnc, hasBehaviors := out.Annotations[autoscaling.BehaviorSpecsAnnotation]; hasBehaviors {
// TODO: this is unmarshaling an internal type. Fix this without breaking backwards compatibility with n-1 API servers.
var behavior autoscaling.HorizontalPodAutoscalerBehavior
if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err == nil && behavior != (autoscaling.HorizontalPodAutoscalerBehavior{}) {
// only move well-formed data from annotations to fields
out.Spec.Behavior = &behavior
}
}
// drop round-tripping annotations after converting to internal
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
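// Note (illustrative, not part of the original source): the pair of
// HorizontalPodAutoscaler conversions above round-trips Spec.Behavior through
// an annotation because v2beta1 has no Behavior field: converting to v2beta1
// JSON-encodes the internal Behavior under
// autoscaling.BehaviorSpecsAnnotation, and converting back decodes the
// annotation into Spec.Behavior only when it parses cleanly and is non-empty,
// then drops the round-trip annotations so they never leak into the internal
// object.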
func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2beta1.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAutoscalerSpec(in, out, s)
}
func Convert_v2beta1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2beta1.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
utilization := in.TargetAverageUtilization
averageValue := in.TargetAverageValue
var metricType autoscaling.MetricTargetType
if utilization == nil {
metricType = autoscaling.AverageValueMetricType
} else {
metricType = autoscaling.UtilizationMetricType
}
out.Target = autoscaling.MetricTarget{
Type: metricType,
AverageValue: averageValue,
AverageUtilization: utilization,
}
return nil
}
func Convert_autoscaling_ContainerResourceMetricSource_To_v2beta1_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2beta1.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.TargetAverageUtilization = in.Target.AverageUtilization
out.TargetAverageValue = in.Target.AverageValue
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta1
import (
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2beta1.HorizontalPodAutoscaler) {
if obj.Spec.MinReplicas == nil {
obj.Spec.MinReplicas = ptr.To[int32](1)
}
if len(obj.Spec.Metrics) == 0 {
utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization)
obj.Spec.Metrics = []autoscalingv2beta1.MetricSpec{
{
Type: autoscalingv2beta1.ResourceMetricSourceType,
Resource: &autoscalingv2beta1.ResourceMetricSource{
Name: v1.ResourceCPU,
TargetAverageUtilization: &utilizationDefaultVal,
},
},
}
}
}
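// Illustrative usage (a sketch, not part of the original file):
//
//	hpa := &autoscalingv2beta1.HorizontalPodAutoscaler{}
//	SetDefaults_HorizontalPodAutoscaler(hpa)
//	// *hpa.Spec.MinReplicas == 1
//	// hpa.Spec.Metrics holds a single Resource entry targeting
//	// v1.ResourceCPU at autoscaling.DefaultCPUUtilization percent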
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta1
import (
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &autoscalingv2beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
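// Illustrative usage (a sketch, not part of the original file): AddToScheme
// runs everything registered on the shared SchemeBuilder, including the
// manual defaulting functions above and the generated conversions:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}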
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v2beta1
import (
unsafe "unsafe"
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.CrossVersionObjectReference)(nil), (*autoscaling.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(a.(*autoscalingv2beta1.CrossVersionObjectReference), b.(*autoscaling.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.CrossVersionObjectReference)(nil), (*autoscalingv2beta1.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference(a.(*autoscaling.CrossVersionObjectReference), b.(*autoscalingv2beta1.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*autoscalingv2beta1.HorizontalPodAutoscalerCondition), b.(*autoscaling.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerCondition)(nil), (*autoscalingv2beta1.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition(a.(*autoscaling.HorizontalPodAutoscalerCondition), b.(*autoscalingv2beta1.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.HorizontalPodAutoscalerList)(nil), (*autoscaling.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(a.(*autoscalingv2beta1.HorizontalPodAutoscalerList), b.(*autoscaling.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerList)(nil), (*autoscalingv2beta1.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta1_HorizontalPodAutoscalerList(a.(*autoscaling.HorizontalPodAutoscalerList), b.(*autoscalingv2beta1.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*autoscalingv2beta1.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*autoscalingv2beta1.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*autoscalingv2beta1.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*autoscalingv2beta1.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_MetricSpec_To_autoscaling_MetricSpec(a.(*autoscalingv2beta1.MetricSpec), b.(*autoscaling.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricSpec)(nil), (*autoscalingv2beta1.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricSpec_To_v2beta1_MetricSpec(a.(*autoscaling.MetricSpec), b.(*autoscalingv2beta1.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta1.MetricStatus)(nil), (*autoscaling.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_MetricStatus_To_autoscaling_MetricStatus(a.(*autoscalingv2beta1.MetricStatus), b.(*autoscaling.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricStatus)(nil), (*autoscalingv2beta1.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricStatus_To_v2beta1_MetricStatus(a.(*autoscaling.MetricStatus), b.(*autoscalingv2beta1.MetricStatus), scope)
}); err != nil {
return err
}
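// Note (illustrative, not part of the generated source): the registrations
// above use AddGeneratedConversionFunc while the ones below use
// AddConversionFunc; the latter registers the hand-written conversions in
// this package (annotation round-tripping, target-type inference), which take
// precedence over generated conversions for the same type pair.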
if err := s.AddConversionFunc((*autoscaling.ContainerResourceMetricSource)(nil), (*autoscalingv2beta1.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricSource_To_v2beta1_ContainerResourceMetricSource(a.(*autoscaling.ContainerResourceMetricSource), b.(*autoscalingv2beta1.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ContainerResourceMetricStatus)(nil), (*autoscalingv2beta1.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta1_ContainerResourceMetricStatus(a.(*autoscaling.ContainerResourceMetricStatus), b.(*autoscalingv2beta1.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*autoscalingv2beta1.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*autoscalingv2beta1.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*autoscalingv2beta1.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*autoscalingv2beta1.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*autoscalingv2beta1.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*autoscalingv2beta1.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*autoscalingv2beta1.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*autoscalingv2beta1.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.MetricTarget)(nil), (*autoscalingv2beta1.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricTarget_To_v2beta1_CrossVersionObjectReference(a.(*autoscaling.MetricTarget), b.(*autoscalingv2beta1.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*autoscalingv2beta1.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*autoscalingv2beta1.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*autoscalingv2beta1.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*autoscalingv2beta1.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.PodsMetricSource)(nil), (*autoscalingv2beta1.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*autoscalingv2beta1.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*autoscalingv2beta1.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*autoscalingv2beta1.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*autoscalingv2beta1.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*autoscalingv2beta1.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*autoscalingv2beta1.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*autoscalingv2beta1.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ContainerResourceMetricSource)(nil), (*autoscaling.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(a.(*autoscalingv2beta1.ContainerResourceMetricSource), b.(*autoscaling.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ContainerResourceMetricStatus)(nil), (*autoscaling.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(a.(*autoscalingv2beta1.ContainerResourceMetricStatus), b.(*autoscaling.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*autoscalingv2beta1.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*autoscalingv2beta1.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*autoscalingv2beta1.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*autoscalingv2beta1.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*autoscalingv2beta1.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*autoscalingv2beta1.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*autoscalingv2beta1.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*autoscalingv2beta1.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta1.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*autoscalingv2beta1.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope)
}); err != nil {
return err
}
return nil
}
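// NOTE: the helper below is an illustrative sketch added for exposition, not
// generated code. It shows how the conversions registered above are typically
// exercised through a runtime.Scheme; the function name is hypothetical.
func convertHPAFromV2beta1(scheme *runtime.Scheme, in *autoscalingv2beta1.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) {
out := &autoscaling.HorizontalPodAutoscaler{}
// Scheme.Convert dispatches to the conversion functions registered by
// RegisterConversions above.
if err := scheme.Convert(in, out, nil); err != nil {
return nil, err
}
return out, nil
}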
func autoConvert_v2beta1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2beta1.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.TargetAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
out.Container = in.Container
return nil
}
func autoConvert_autoscaling_ContainerResourceMetricSource_To_v2beta1_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2beta1.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2beta1.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.CurrentAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
out.Container = in.Container
return nil
}
func autoConvert_autoscaling_ContainerResourceMetricStatus_To_v2beta1_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2beta1.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2beta1.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2beta1.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s)
}
func autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2beta1.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2beta1.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference(in, out, s)
}
func autoConvert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2beta1.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.MetricSelector requires manual conversion: does not exist in peer-type
// WARNING: in.TargetValue requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2beta1.ExternalMetricSource, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2beta1.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.MetricSelector requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentValue requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2beta1.ExternalMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta1.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2beta1.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2beta1.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscaling.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = autoscaling.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2beta1.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_v2beta1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2beta1.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscalingv2beta1.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2beta1.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta1_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_v2beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2beta1.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v2beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_v2beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2beta1.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_v2beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2beta1.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscalingv2beta1.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta1_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2beta1.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta1_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2beta1.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_v2beta1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscaling.MetricSpec, len(*in))
for i := range *in {
if err := Convert_v2beta1_MetricSpec_To_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
return nil
}
// Convert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec is an autogenerated conversion function.
func Convert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2beta1.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_v2beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2beta1.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscalingv2beta1.MetricSpec, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricSpec_To_v2beta1_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
// WARNING: in.Behavior requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2beta1.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscaling.MetricStatus, len(*in))
for i := range *in {
if err := Convert_v2beta1_MetricStatus_To_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscaling.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2beta1.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_v2beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2beta1.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscalingv2beta1.MetricStatus, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricStatus_To_v2beta1_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscalingv2beta1.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2beta1.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta1_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_v2beta1_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2beta1.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscaling.ObjectMetricSource)
if err := Convert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscaling.PodsMetricSource)
if err := Convert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscaling.ResourceMetricSource)
if err := Convert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricSource)
if err := Convert_v2beta1_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscaling.ExternalMetricSource)
if err := Convert_v2beta1_ExternalMetricSource_To_autoscaling_ExternalMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_v2beta1_MetricSpec_To_autoscaling_MetricSpec is an autogenerated conversion function.
func Convert_v2beta1_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2beta1.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
return autoConvert_v2beta1_MetricSpec_To_autoscaling_MetricSpec(in, out, s)
}
func autoConvert_autoscaling_MetricSpec_To_v2beta1_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2beta1.MetricSpec, s conversion.Scope) error {
out.Type = autoscalingv2beta1.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscalingv2beta1.ObjectMetricSource)
if err := Convert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscalingv2beta1.PodsMetricSource)
if err := Convert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscalingv2beta1.ResourceMetricSource)
if err := Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2beta1.ContainerResourceMetricSource)
if err := Convert_autoscaling_ContainerResourceMetricSource_To_v2beta1_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscalingv2beta1.ExternalMetricSource)
if err := Convert_autoscaling_ExternalMetricSource_To_v2beta1_ExternalMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_autoscaling_MetricSpec_To_v2beta1_MetricSpec is an autogenerated conversion function.
func Convert_autoscaling_MetricSpec_To_v2beta1_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2beta1.MetricSpec, s conversion.Scope) error {
return autoConvert_autoscaling_MetricSpec_To_v2beta1_MetricSpec(in, out, s)
}
func autoConvert_v2beta1_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2beta1.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscaling.ObjectMetricStatus)
if err := Convert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscaling.PodsMetricStatus)
if err := Convert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscaling.ResourceMetricStatus)
if err := Convert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricStatus)
if err := Convert_v2beta1_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscaling.ExternalMetricStatus)
if err := Convert_v2beta1_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_v2beta1_MetricStatus_To_autoscaling_MetricStatus is an autogenerated conversion function.
func Convert_v2beta1_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2beta1.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
return autoConvert_v2beta1_MetricStatus_To_autoscaling_MetricStatus(in, out, s)
}
func autoConvert_autoscaling_MetricStatus_To_v2beta1_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2beta1.MetricStatus, s conversion.Scope) error {
out.Type = autoscalingv2beta1.MetricSourceType(in.Type)
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(autoscalingv2beta1.ObjectMetricStatus)
if err := Convert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Object = nil
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(autoscalingv2beta1.PodsMetricStatus)
if err := Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Pods = nil
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(autoscalingv2beta1.ResourceMetricStatus)
if err := Convert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.Resource = nil
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2beta1.ContainerResourceMetricStatus)
if err := Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta1_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(autoscalingv2beta1.ExternalMetricStatus)
if err := Convert_autoscaling_ExternalMetricStatus_To_v2beta1_ExternalMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.External = nil
}
return nil
}
// Convert_autoscaling_MetricStatus_To_v2beta1_MetricStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricStatus_To_v2beta1_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2beta1.MetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricStatus_To_v2beta1_MetricStatus(in, out, s)
}
func autoConvert_v2beta1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2beta1.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
// WARNING: in.Target requires manual conversion: inconvertible types (k8s.io/api/autoscaling/v2beta1.CrossVersionObjectReference vs k8s.io/kubernetes/pkg/apis/autoscaling.MetricTarget)
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.TargetValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
// WARNING: in.AverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ObjectMetricSource_To_v2beta1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2beta1.ObjectMetricSource, s conversion.Scope) error {
// WARNING: in.DescribedObject requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: inconvertible types (k8s.io/kubernetes/pkg/apis/autoscaling.MetricTarget vs k8s.io/api/autoscaling/v2beta1.CrossVersionObjectReference)
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2beta1.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
// WARNING: in.Target requires manual conversion: does not exist in peer-type
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
// WARNING: in.AverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ObjectMetricStatus_To_v2beta1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2beta1.ObjectMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
// WARNING: in.DescribedObject requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2beta1.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_PodsMetricSource_To_v2beta1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta1.PodsMetricSource, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2beta1.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
// WARNING: in.MetricName requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
// WARNING: in.Selector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2beta1.PodsMetricStatus, s conversion.Scope) error {
// WARNING: in.Metric requires manual conversion: does not exist in peer-type
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2beta1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.TargetAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.TargetAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2beta1.ResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
// WARNING: in.Target requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2beta1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
// WARNING: in.CurrentAverageUtilization requires manual conversion: does not exist in peer-type
// WARNING: in.CurrentAverageValue requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ResourceMetricStatus_To_v2beta1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2beta1.ResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
// WARNING: in.Current requires manual conversion: does not exist in peer-type
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v2beta1
import (
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&autoscalingv2beta1.HorizontalPodAutoscaler{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscaler(obj.(*autoscalingv2beta1.HorizontalPodAutoscaler))
})
scheme.AddTypeDefaultingFunc(&autoscalingv2beta1.HorizontalPodAutoscalerList{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*autoscalingv2beta1.HorizontalPodAutoscalerList))
})
return nil
}
func SetObjectDefaults_HorizontalPodAutoscaler(in *autoscalingv2beta1.HorizontalPodAutoscaler) {
SetDefaults_HorizontalPodAutoscaler(in)
}
func SetObjectDefaults_HorizontalPodAutoscalerList(in *autoscalingv2beta1.HorizontalPodAutoscalerList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_HorizontalPodAutoscaler(a)
}
}
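// NOTE: illustrative sketch, not generated code. Once RegisterDefaults has
// been applied to a scheme, defaulting an object is a single call; the helper
// name is hypothetical.
func defaultHPAExample(scheme *runtime.Scheme, obj *autoscalingv2beta1.HorizontalPodAutoscaler) {
// Scheme.Default invokes the type-defaulting function registered above, which
// calls SetObjectDefaults_HorizontalPodAutoscaler.
scheme.Default(obj)
}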
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta2
import (
"fmt"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/autoscaling"
)
func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2beta2.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// Ensure old round-trip annotations are discarded
annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
out.Annotations = annotations
behavior := in.Spec.Behavior
if behavior == nil {
return nil
}
// Save the tolerance fields in annotations for round-trip
if behavior.ScaleDown != nil && behavior.ScaleDown.Tolerance != nil {
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.ToleranceScaleDownAnnotation] = behavior.ScaleDown.Tolerance.String()
}
if behavior.ScaleUp != nil && behavior.ScaleUp.Tolerance != nil {
if !copiedAnnotations {
copiedAnnotations = true //nolint:ineffassign // Intentionally setting the variable even though it's not read because this might change when adding another if clause below in the future.
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.ToleranceScaleUpAnnotation] = behavior.ScaleUp.Tolerance.String()
}
return nil
}
func Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
if err := autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
// Restore the tolerance fields from annotations for round-trip
if tolerance, ok := out.Annotations[autoscaling.ToleranceScaleDownAnnotation]; ok {
if out.Spec.Behavior == nil {
out.Spec.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{}
}
if out.Spec.Behavior.ScaleDown == nil {
out.Spec.Behavior.ScaleDown = &autoscaling.HPAScalingRules{}
}
q, err := resource.ParseQuantity(tolerance)
if err != nil {
return fmt.Errorf("failed to parse annotation %q: %w", autoscaling.ToleranceScaleDownAnnotation, err)
}
out.Spec.Behavior.ScaleDown.Tolerance = &q
}
if tolerance, ok := out.Annotations[autoscaling.ToleranceScaleUpAnnotation]; ok {
if out.Spec.Behavior == nil {
out.Spec.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{}
}
if out.Spec.Behavior.ScaleUp == nil {
out.Spec.Behavior.ScaleUp = &autoscaling.HPAScalingRules{}
}
q, err := resource.ParseQuantity(tolerance)
if err != nil {
return fmt.Errorf("failed to parse annotation %q: %w", autoscaling.ToleranceScaleUpAnnotation, err)
}
out.Spec.Behavior.ScaleUp.Tolerance = &q
}
// Do not save round-trip annotations in internal resource
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
func Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2beta2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
// Tolerance field is handled in the HorizontalPodAutoscaler conversion function.
return autoConvert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in, out, s)
}
func Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
// Tolerance field is handled in the HorizontalPodAutoscaler conversion function.
return autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in, out, s)
}
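// NOTE: illustrative sketch, not part of this file. It shows the tolerance
// round-trip implemented above: a scale-down tolerance set on the internal
// object is carried through v2beta2 as an annotation, since v2beta2 has no
// Tolerance field. The helper name is hypothetical, and it assumes a nil
// conversion.Scope suffices for this path.
func exampleToleranceRoundTrip() error {
tolerance := resource.MustParse("0.05")
internal := &autoscaling.HorizontalPodAutoscaler{}
internal.Spec.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscaling.HPAScalingRules{Tolerance: &tolerance},
}
out := &autoscalingv2beta2.HorizontalPodAutoscaler{}
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(internal, out, nil); err != nil {
return err
}
// Prints "0.05": the tolerance now lives in the round-trip annotation and is
// restored into Spec.Behavior.ScaleDown.Tolerance on conversion back.
fmt.Println(out.Annotations[autoscaling.ToleranceScaleDownAnnotation])
return nil
}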
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta2
import (
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/utils/ptr"
)
var (
// These constants repeat the previous HPA behavior
scaleUpLimitPercent int32 = 100
scaleUpLimitMinimumPods int32 = 4
scaleUpPeriod int32 = 15
scaleUpStabilizationSeconds int32
maxPolicy = autoscalingv2beta2.MaxPolicySelect
defaultHPAScaleUpRules = autoscalingv2beta2.HPAScalingRules{
StabilizationWindowSeconds: &scaleUpStabilizationSeconds,
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2beta2.HPAScalingPolicy{
{
Type: autoscalingv2beta2.PodsScalingPolicy,
Value: scaleUpLimitMinimumPods,
PeriodSeconds: scaleUpPeriod,
},
{
Type: autoscalingv2beta2.PercentScalingPolicy,
Value: scaleUpLimitPercent,
PeriodSeconds: scaleUpPeriod,
},
},
}
scaleDownPeriod int32 = 15
// Currently the downscale stabilization window can be set from the command line,
// so we must not override that command-line option from here.
scaleDownStabilizationSeconds *int32 = nil
scaleDownLimitPercent int32 = 100
defaultHPAScaleDownRules = autoscalingv2beta2.HPAScalingRules{
StabilizationWindowSeconds: scaleDownStabilizationSeconds,
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2beta2.HPAScalingPolicy{
{
Type: autoscalingv2beta2.PercentScalingPolicy,
Value: scaleDownLimitPercent,
PeriodSeconds: scaleDownPeriod,
},
},
}
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2beta2.HorizontalPodAutoscaler) {
if obj.Spec.MinReplicas == nil {
obj.Spec.MinReplicas = ptr.To[int32](1)
}
if len(obj.Spec.Metrics) == 0 {
utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization)
obj.Spec.Metrics = []autoscalingv2beta2.MetricSpec{
{
Type: autoscalingv2beta2.ResourceMetricSourceType,
Resource: &autoscalingv2beta2.ResourceMetricSource{
Name: v1.ResourceCPU,
Target: autoscalingv2beta2.MetricTarget{
Type: autoscalingv2beta2.UtilizationMetricType,
AverageUtilization: &utilizationDefaultVal,
},
},
},
}
}
SetDefaults_HorizontalPodAutoscalerBehavior(obj)
}
// SetDefaults_HorizontalPodAutoscalerBehavior fills in the behavior if it is not nil
func SetDefaults_HorizontalPodAutoscalerBehavior(obj *autoscalingv2beta2.HorizontalPodAutoscaler) {
// if behavior is specified, we should fill all the 'nil' values with the default ones
if obj.Spec.Behavior != nil {
obj.Spec.Behavior.ScaleUp = GenerateHPAScaleUpRules(obj.Spec.Behavior.ScaleUp)
obj.Spec.Behavior.ScaleDown = GenerateHPAScaleDownRules(obj.Spec.Behavior.ScaleDown)
}
}
// GenerateHPAScaleUpRules returns a fully-initialized HPAScalingRules value
// We guarantee that no pointer in the structure will have the 'nil' value
func GenerateHPAScaleUpRules(scalingRules *autoscalingv2beta2.HPAScalingRules) *autoscalingv2beta2.HPAScalingRules {
defaultScalingRules := defaultHPAScaleUpRules.DeepCopy()
return copyHPAScalingRules(scalingRules, defaultScalingRules)
}
// GenerateHPAScaleDownRules returns a fully-initialized HPAScalingRules value
// We guarantee that no pointer in the structure will have the 'nil' value
// EXCEPT StabilizationWindowSeconds; for the reasoning, see the comment on defaultHPAScaleDownRules
func GenerateHPAScaleDownRules(scalingRules *autoscalingv2beta2.HPAScalingRules) *autoscalingv2beta2.HPAScalingRules {
defaultScalingRules := defaultHPAScaleDownRules.DeepCopy()
return copyHPAScalingRules(scalingRules, defaultScalingRules)
}
// copyHPAScalingRules copies all non-`nil` fields in the HPA constraint structure
func copyHPAScalingRules(from, to *autoscalingv2beta2.HPAScalingRules) *autoscalingv2beta2.HPAScalingRules {
if from == nil {
return to
}
if from.SelectPolicy != nil {
to.SelectPolicy = from.SelectPolicy
}
if from.StabilizationWindowSeconds != nil {
to.StabilizationWindowSeconds = from.StabilizationWindowSeconds
}
if from.Policies != nil {
to.Policies = from.Policies
}
return to
}
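// NOTE: illustrative sketch, not part of this file. A partially specified
// scale-up policy keeps the fields it sets and inherits the rest from
// defaultHPAScaleUpRules; the function name is hypothetical.
func examplePartialScaleUpRules() *autoscalingv2beta2.HPAScalingRules {
window := int32(60)
partial := &autoscalingv2beta2.HPAScalingRules{StabilizationWindowSeconds: &window}
// The result keeps the 60s window but gains the default SelectPolicy and Policies.
return GenerateHPAScaleUpRules(partial)
}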
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta2
import (
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
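// NOTE: illustrative sketch (hypothetical helper, not part of this file)
// showing the qualification performed by Resource.
func exampleGroupResource() schema.GroupResource {
// Yields schema.GroupResource{Group: "autoscaling", Resource: "horizontalpodautoscalers"}.
return Resource("horizontalpodautoscalers")
}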
var (
localSchemeBuilder = &autoscalingv2beta2.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v2beta2
import (
unsafe "unsafe"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ContainerResourceMetricSource)(nil), (*autoscaling.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(a.(*autoscalingv2beta2.ContainerResourceMetricSource), b.(*autoscaling.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ContainerResourceMetricSource)(nil), (*autoscalingv2beta2.ContainerResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource(a.(*autoscaling.ContainerResourceMetricSource), b.(*autoscalingv2beta2.ContainerResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ContainerResourceMetricStatus)(nil), (*autoscaling.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(a.(*autoscalingv2beta2.ContainerResourceMetricStatus), b.(*autoscaling.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ContainerResourceMetricStatus)(nil), (*autoscalingv2beta2.ContainerResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus(a.(*autoscaling.ContainerResourceMetricStatus), b.(*autoscalingv2beta2.ContainerResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.CrossVersionObjectReference)(nil), (*autoscaling.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(a.(*autoscalingv2beta2.CrossVersionObjectReference), b.(*autoscaling.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.CrossVersionObjectReference)(nil), (*autoscalingv2beta2.CrossVersionObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(a.(*autoscaling.CrossVersionObjectReference), b.(*autoscalingv2beta2.CrossVersionObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ExternalMetricSource)(nil), (*autoscaling.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(a.(*autoscalingv2beta2.ExternalMetricSource), b.(*autoscaling.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricSource)(nil), (*autoscalingv2beta2.ExternalMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(a.(*autoscaling.ExternalMetricSource), b.(*autoscalingv2beta2.ExternalMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ExternalMetricStatus)(nil), (*autoscaling.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(a.(*autoscalingv2beta2.ExternalMetricStatus), b.(*autoscaling.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ExternalMetricStatus)(nil), (*autoscalingv2beta2.ExternalMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(a.(*autoscaling.ExternalMetricStatus), b.(*autoscalingv2beta2.ExternalMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HPAScalingPolicy)(nil), (*autoscaling.HPAScalingPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(a.(*autoscalingv2beta2.HPAScalingPolicy), b.(*autoscaling.HPAScalingPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HPAScalingPolicy)(nil), (*autoscalingv2beta2.HPAScalingPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HPAScalingPolicy_To_v2beta2_HPAScalingPolicy(a.(*autoscaling.HPAScalingPolicy), b.(*autoscalingv2beta2.HPAScalingPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerBehavior)(nil), (*autoscaling.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(a.(*autoscalingv2beta2.HorizontalPodAutoscalerBehavior), b.(*autoscaling.HorizontalPodAutoscalerBehavior), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerBehavior)(nil), (*autoscalingv2beta2.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(a.(*autoscaling.HorizontalPodAutoscalerBehavior), b.(*autoscalingv2beta2.HorizontalPodAutoscalerBehavior), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerCondition)(nil), (*autoscaling.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(a.(*autoscalingv2beta2.HorizontalPodAutoscalerCondition), b.(*autoscaling.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerCondition)(nil), (*autoscalingv2beta2.HorizontalPodAutoscalerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(a.(*autoscaling.HorizontalPodAutoscalerCondition), b.(*autoscalingv2beta2.HorizontalPodAutoscalerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerList)(nil), (*autoscaling.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(a.(*autoscalingv2beta2.HorizontalPodAutoscalerList), b.(*autoscaling.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerList)(nil), (*autoscalingv2beta2.HorizontalPodAutoscalerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(a.(*autoscaling.HorizontalPodAutoscalerList), b.(*autoscalingv2beta2.HorizontalPodAutoscalerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerSpec)(nil), (*autoscaling.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(a.(*autoscalingv2beta2.HorizontalPodAutoscalerSpec), b.(*autoscaling.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerSpec)(nil), (*autoscalingv2beta2.HorizontalPodAutoscalerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(a.(*autoscaling.HorizontalPodAutoscalerSpec), b.(*autoscalingv2beta2.HorizontalPodAutoscalerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerStatus)(nil), (*autoscaling.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(a.(*autoscalingv2beta2.HorizontalPodAutoscalerStatus), b.(*autoscaling.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscalerStatus)(nil), (*autoscalingv2beta2.HorizontalPodAutoscalerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(a.(*autoscaling.HorizontalPodAutoscalerStatus), b.(*autoscalingv2beta2.HorizontalPodAutoscalerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.MetricIdentifier)(nil), (*autoscaling.MetricIdentifier)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(a.(*autoscalingv2beta2.MetricIdentifier), b.(*autoscaling.MetricIdentifier), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricIdentifier)(nil), (*autoscalingv2beta2.MetricIdentifier)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(a.(*autoscaling.MetricIdentifier), b.(*autoscalingv2beta2.MetricIdentifier), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.MetricSpec)(nil), (*autoscaling.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(a.(*autoscalingv2beta2.MetricSpec), b.(*autoscaling.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricSpec)(nil), (*autoscalingv2beta2.MetricSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(a.(*autoscaling.MetricSpec), b.(*autoscalingv2beta2.MetricSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.MetricStatus)(nil), (*autoscaling.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(a.(*autoscalingv2beta2.MetricStatus), b.(*autoscaling.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricStatus)(nil), (*autoscalingv2beta2.MetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(a.(*autoscaling.MetricStatus), b.(*autoscalingv2beta2.MetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.MetricTarget)(nil), (*autoscaling.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(a.(*autoscalingv2beta2.MetricTarget), b.(*autoscaling.MetricTarget), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricTarget)(nil), (*autoscalingv2beta2.MetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(a.(*autoscaling.MetricTarget), b.(*autoscalingv2beta2.MetricTarget), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.MetricValueStatus)(nil), (*autoscaling.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(a.(*autoscalingv2beta2.MetricValueStatus), b.(*autoscaling.MetricValueStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.MetricValueStatus)(nil), (*autoscalingv2beta2.MetricValueStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(a.(*autoscaling.MetricValueStatus), b.(*autoscalingv2beta2.MetricValueStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ObjectMetricSource)(nil), (*autoscaling.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(a.(*autoscalingv2beta2.ObjectMetricSource), b.(*autoscaling.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricSource)(nil), (*autoscalingv2beta2.ObjectMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(a.(*autoscaling.ObjectMetricSource), b.(*autoscalingv2beta2.ObjectMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ObjectMetricStatus)(nil), (*autoscaling.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(a.(*autoscalingv2beta2.ObjectMetricStatus), b.(*autoscaling.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ObjectMetricStatus)(nil), (*autoscalingv2beta2.ObjectMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(a.(*autoscaling.ObjectMetricStatus), b.(*autoscalingv2beta2.ObjectMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.PodsMetricSource)(nil), (*autoscaling.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(a.(*autoscalingv2beta2.PodsMetricSource), b.(*autoscaling.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricSource)(nil), (*autoscalingv2beta2.PodsMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(a.(*autoscaling.PodsMetricSource), b.(*autoscalingv2beta2.PodsMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.PodsMetricStatus)(nil), (*autoscaling.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(a.(*autoscalingv2beta2.PodsMetricStatus), b.(*autoscaling.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.PodsMetricStatus)(nil), (*autoscalingv2beta2.PodsMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(a.(*autoscaling.PodsMetricStatus), b.(*autoscalingv2beta2.PodsMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ResourceMetricSource)(nil), (*autoscaling.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(a.(*autoscalingv2beta2.ResourceMetricSource), b.(*autoscaling.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricSource)(nil), (*autoscalingv2beta2.ResourceMetricSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(a.(*autoscaling.ResourceMetricSource), b.(*autoscalingv2beta2.ResourceMetricSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.ResourceMetricStatus)(nil), (*autoscaling.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(a.(*autoscalingv2beta2.ResourceMetricStatus), b.(*autoscaling.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ResourceMetricStatus)(nil), (*autoscalingv2beta2.ResourceMetricStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(a.(*autoscaling.ResourceMetricStatus), b.(*autoscalingv2beta2.ResourceMetricStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HPAScalingRules)(nil), (*autoscalingv2beta2.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(a.(*autoscaling.HPAScalingRules), b.(*autoscalingv2beta2.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*autoscalingv2beta2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*autoscalingv2beta2.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta2.HPAScalingRules)(nil), (*autoscaling.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(a.(*autoscalingv2beta2.HPAScalingRules), b.(*autoscaling.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*autoscalingv2beta2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
return nil
}
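// Illustrative sketch, not part of the generated file: how the conversions
// registered above are typically exercised through a runtime.Scheme. The
// function name and the locally built Scheme are assumptions for
// demonstration; in the API server the registration normally happens via the
// group's install package.
func exampleConvertHorizontalPodAutoscaler() error {
s := runtime.NewScheme()
if err := RegisterConversions(s); err != nil {
return err
}
in := &autoscalingv2beta2.HorizontalPodAutoscaler{}
out := &autoscaling.HorizontalPodAutoscaler{}
// Convert looks up the function registered for this type pair and dispatches
// to Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler.
return s.Convert(in, out, nil)
}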
func autoConvert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2beta2.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
out.Container = in.Container
return nil
}
// Convert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource is an autogenerated conversion function.
func Convert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in *autoscalingv2beta2.ContainerResourceMetricSource, out *autoscaling.ContainerResourceMetricSource, s conversion.Scope) error {
return autoConvert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(in, out, s)
}
func autoConvert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2beta2.ContainerResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource(in *autoscaling.ContainerResourceMetricSource, out *autoscalingv2beta2.ContainerResourceMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource(in, out, s)
}
func autoConvert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2beta2.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
out.Container = in.Container
return nil
}
// Convert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus is an autogenerated conversion function.
func Convert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in *autoscalingv2beta2.ContainerResourceMetricStatus, out *autoscaling.ContainerResourceMetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2beta2.ContainerResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Container = in.Container
if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus(in *autoscaling.ContainerResourceMetricStatus, out *autoscalingv2beta2.ContainerResourceMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus(in, out, s)
}
func autoConvert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2beta2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *autoscalingv2beta2.CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s)
}
func autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2beta2.CrossVersionObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.APIVersion = in.APIVersion
return nil
}
// Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference is an autogenerated conversion function.
func Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *autoscalingv2beta2.CrossVersionObjectReference, s conversion.Scope) error {
return autoConvert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(in, out, s)
}
func autoConvert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2beta2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource is an autogenerated conversion function.
func Convert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in *autoscalingv2beta2.ExternalMetricSource, out *autoscaling.ExternalMetricSource, s conversion.Scope) error {
return autoConvert_v2beta2_ExternalMetricSource_To_autoscaling_ExternalMetricSource(in, out, s)
}
func autoConvert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2beta2.ExternalMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in *autoscaling.ExternalMetricSource, out *autoscalingv2beta2.ExternalMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ExternalMetricSource_To_v2beta2_ExternalMetricSource(in, out, s)
}
func autoConvert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2beta2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus is an autogenerated conversion function.
func Convert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in *autoscalingv2beta2.ExternalMetricStatus, out *autoscaling.ExternalMetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_ExternalMetricStatus_To_autoscaling_ExternalMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2beta2.ExternalMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in *autoscaling.ExternalMetricStatus, out *autoscalingv2beta2.ExternalMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ExternalMetricStatus_To_v2beta2_ExternalMetricStatus(in, out, s)
}
func autoConvert_v2beta2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in *autoscalingv2beta2.HPAScalingPolicy, out *autoscaling.HPAScalingPolicy, s conversion.Scope) error {
out.Type = autoscaling.HPAScalingPolicyType(in.Type)
out.Value = in.Value
out.PeriodSeconds = in.PeriodSeconds
return nil
}
// Convert_v2beta2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy is an autogenerated conversion function.
func Convert_v2beta2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in *autoscalingv2beta2.HPAScalingPolicy, out *autoscaling.HPAScalingPolicy, s conversion.Scope) error {
return autoConvert_v2beta2_HPAScalingPolicy_To_autoscaling_HPAScalingPolicy(in, out, s)
}
func autoConvert_autoscaling_HPAScalingPolicy_To_v2beta2_HPAScalingPolicy(in *autoscaling.HPAScalingPolicy, out *autoscalingv2beta2.HPAScalingPolicy, s conversion.Scope) error {
out.Type = autoscalingv2beta2.HPAScalingPolicyType(in.Type)
out.Value = in.Value
out.PeriodSeconds = in.PeriodSeconds
return nil
}
// Convert_autoscaling_HPAScalingPolicy_To_v2beta2_HPAScalingPolicy is an autogenerated conversion function.
func Convert_autoscaling_HPAScalingPolicy_To_v2beta2_HPAScalingPolicy(in *autoscaling.HPAScalingPolicy, out *autoscalingv2beta2.HPAScalingPolicy, s conversion.Scope) error {
return autoConvert_autoscaling_HPAScalingPolicy_To_v2beta2_HPAScalingPolicy(in, out, s)
}
func autoConvert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2beta2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
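// The unsafe.Pointer casts below are conversion-gen's fast path: the generator
// emits them only when it has proven the source and destination types identical
// in memory layout, so the cast aliases the existing memory instead of copying
// field by field.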
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscaling.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscaling.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
return nil
}
func autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscalingv2beta2.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscalingv2beta2.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
// WARNING: in.Tolerance requires manual conversion: does not exist in peer-type
return nil
}
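// The WARNING above is why this pair is registered through AddConversionFunc
// rather than AddGeneratedConversionFunc: v2beta2 has no Tolerance field, so a
// hand-written Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules
// must decide what to do with in.Tolerance. A minimal sketch, assuming the
// wrapper simply delegates here and deliberately drops the field:
//
//	func Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
//		// in.Tolerance is intentionally lost: the field does not exist in v2beta2.
//		return autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in, out, s)
//	}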
func autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2beta2.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
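// Unlike most pointer fields in this file, ScaleUp and ScaleDown are not
// converted with an unsafe cast: the internal HPAScalingRules type is not
// memory-identical to the v2beta2 one (see the Tolerance warning above), so
// each non-nil pointer gets a fresh allocation and an explicit conversion call.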
if in.ScaleUp != nil {
in, out := &in.ScaleUp, &out.ScaleUp
*out = new(autoscaling.HPAScalingRules)
if err := Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleUp = nil
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(autoscaling.HPAScalingRules)
if err := Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleDown = nil
}
return nil
}
// Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior is an autogenerated conversion function.
func Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
return autoConvert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(in *autoscaling.HorizontalPodAutoscalerBehavior, out *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
if in.ScaleUp != nil {
in, out := &in.ScaleUp, &out.ScaleUp
*out = new(autoscalingv2beta2.HPAScalingRules)
if err := Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleUp = nil
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(autoscalingv2beta2.HPAScalingRules)
if err := Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleDown = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(in *autoscaling.HorizontalPodAutoscalerBehavior, out *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(in, out, s)
}
func autoConvert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2beta2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscaling.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = autoscaling.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in *autoscalingv2beta2.HorizontalPodAutoscalerCondition, out *autoscaling.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_v2beta2_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2beta2.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
out.Type = autoscalingv2beta2.HorizontalPodAutoscalerConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in *autoscaling.HorizontalPodAutoscalerCondition, out *autoscalingv2beta2.HorizontalPodAutoscalerCondition, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalPodAutoscalerCondition(in, out, s)
}
func autoConvert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2beta2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *autoscalingv2beta2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2beta2.HorizontalPodAutoscalerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]autoscalingv2beta2.HorizontalPodAutoscaler, len(*in))
for i := range *in {
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *autoscalingv2beta2.HorizontalPodAutoscalerList, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in, out, s)
}
func autoConvert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2beta2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscaling.MetricSpec, len(*in))
for i := range *in {
if err := Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(autoscaling.HorizontalPodAutoscalerBehavior)
if err := Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(*in, *out, s); err != nil {
return err
}
} else {
out.Behavior = nil
}
return nil
}
// Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec is an autogenerated conversion function.
func Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *autoscalingv2beta2.HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2beta2.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil {
return err
}
out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas))
out.MaxReplicas = in.MaxReplicas
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]autoscalingv2beta2.MetricSpec, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Metrics = nil
}
if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(autoscalingv2beta2.HorizontalPodAutoscalerBehavior)
if err := Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(*in, *out, s); err != nil {
return err
}
} else {
out.Behavior = nil
}
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *autoscalingv2beta2.HorizontalPodAutoscalerSpec, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(in, out, s)
}
func autoConvert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2beta2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscaling.MetricStatus, len(*in))
for i := range *in {
if err := Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscaling.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *autoscalingv2beta2.HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_v2beta2_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2beta2.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.LastScaleTime = (*metav1.Time)(unsafe.Pointer(in.LastScaleTime))
out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]autoscalingv2beta2.MetricStatus, len(*in))
for i := range *in {
if err := Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.CurrentMetrics = nil
}
out.Conditions = *(*[]autoscalingv2beta2.HorizontalPodAutoscalerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus is an autogenerated conversion function.
func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *autoscalingv2beta2.HorizontalPodAutoscalerStatus, s conversion.Scope) error {
return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2beta2_HorizontalPodAutoscalerStatus(in, out, s)
}
func autoConvert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *autoscalingv2beta2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error {
out.Name = in.Name
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
return nil
}
// Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier is an autogenerated conversion function.
func Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in *autoscalingv2beta2.MetricIdentifier, out *autoscaling.MetricIdentifier, s conversion.Scope) error {
return autoConvert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(in, out, s)
}
func autoConvert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *autoscalingv2beta2.MetricIdentifier, s conversion.Scope) error {
out.Name = in.Name
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
return nil
}
// Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier is an autogenerated conversion function.
func Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in *autoscaling.MetricIdentifier, out *autoscalingv2beta2.MetricIdentifier, s conversion.Scope) error {
return autoConvert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(in, out, s)
}
func autoConvert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2beta2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object))
out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource))
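// ContainerResource is the one pointer field here that needs an explicit
// conversion call: conversion-gen could not prove the internal and v2beta2
// ContainerResourceMetricSource types memory-identical, so an unsafe cast
// would be unsound.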
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricSource)
if err := Convert_v2beta2_ContainerResourceMetricSource_To_autoscaling_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscaling.ExternalMetricSource)(unsafe.Pointer(in.External))
return nil
}
// Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec is an autogenerated conversion function.
func Convert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in *autoscalingv2beta2.MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error {
return autoConvert_v2beta2_MetricSpec_To_autoscaling_MetricSpec(in, out, s)
}
func autoConvert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2beta2.MetricSpec, s conversion.Scope) error {
out.Type = autoscalingv2beta2.MetricSourceType(in.Type)
out.Object = (*autoscalingv2beta2.ObjectMetricSource)(unsafe.Pointer(in.Object))
out.Pods = (*autoscalingv2beta2.PodsMetricSource)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscalingv2beta2.ResourceMetricSource)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2beta2.ContainerResourceMetricSource)
if err := Convert_autoscaling_ContainerResourceMetricSource_To_v2beta2_ContainerResourceMetricSource(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscalingv2beta2.ExternalMetricSource)(unsafe.Pointer(in.External))
return nil
}
// Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec is an autogenerated conversion function.
func Convert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in *autoscaling.MetricSpec, out *autoscalingv2beta2.MetricSpec, s conversion.Scope) error {
return autoConvert_autoscaling_MetricSpec_To_v2beta2_MetricSpec(in, out, s)
}
func autoConvert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2beta2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
out.Type = autoscaling.MetricSourceType(in.Type)
out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object))
out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscaling.ContainerResourceMetricStatus)
if err := Convert_v2beta2_ContainerResourceMetricStatus_To_autoscaling_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscaling.ExternalMetricStatus)(unsafe.Pointer(in.External))
return nil
}
// Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus is an autogenerated conversion function.
func Convert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in *autoscalingv2beta2.MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_MetricStatus_To_autoscaling_MetricStatus(in, out, s)
}
func autoConvert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2beta2.MetricStatus, s conversion.Scope) error {
out.Type = autoscalingv2beta2.MetricSourceType(in.Type)
out.Object = (*autoscalingv2beta2.ObjectMetricStatus)(unsafe.Pointer(in.Object))
out.Pods = (*autoscalingv2beta2.PodsMetricStatus)(unsafe.Pointer(in.Pods))
out.Resource = (*autoscalingv2beta2.ResourceMetricStatus)(unsafe.Pointer(in.Resource))
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(autoscalingv2beta2.ContainerResourceMetricStatus)
if err := Convert_autoscaling_ContainerResourceMetricStatus_To_v2beta2_ContainerResourceMetricStatus(*in, *out, s); err != nil {
return err
}
} else {
out.ContainerResource = nil
}
out.External = (*autoscalingv2beta2.ExternalMetricStatus)(unsafe.Pointer(in.External))
return nil
}
// Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in *autoscaling.MetricStatus, out *autoscalingv2beta2.MetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricStatus_To_v2beta2_MetricStatus(in, out, s)
}
func autoConvert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in *autoscalingv2beta2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error {
out.Type = autoscaling.MetricTargetType(in.Type)
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget is an autogenerated conversion function.
func Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in *autoscalingv2beta2.MetricTarget, out *autoscaling.MetricTarget, s conversion.Scope) error {
return autoConvert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(in, out, s)
}
func autoConvert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in *autoscaling.MetricTarget, out *autoscalingv2beta2.MetricTarget, s conversion.Scope) error {
out.Type = autoscalingv2beta2.MetricTargetType(in.Type)
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget is an autogenerated conversion function.
func Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in *autoscaling.MetricTarget, out *autoscalingv2beta2.MetricTarget, s conversion.Scope) error {
return autoConvert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(in, out, s)
}
func autoConvert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *autoscalingv2beta2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error {
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus is an autogenerated conversion function.
func Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in *autoscalingv2beta2.MetricValueStatus, out *autoscaling.MetricValueStatus, s conversion.Scope) error {
return autoConvert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(in, out, s)
}
func autoConvert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *autoscalingv2beta2.MetricValueStatus, s conversion.Scope) error {
out.Value = (*resource.Quantity)(unsafe.Pointer(in.Value))
out.AverageValue = (*resource.Quantity)(unsafe.Pointer(in.AverageValue))
out.AverageUtilization = (*int32)(unsafe.Pointer(in.AverageUtilization))
return nil
}
// Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus is an autogenerated conversion function.
func Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in *autoscaling.MetricValueStatus, out *autoscalingv2beta2.MetricValueStatus, s conversion.Scope) error {
return autoConvert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(in, out, s)
}
func autoConvert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2beta2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource is an autogenerated conversion function.
func Convert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *autoscalingv2beta2.ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error {
return autoConvert_v2beta2_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s)
}
func autoConvert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2beta2.ObjectMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *autoscalingv2beta2.ObjectMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ObjectMetricSource_To_v2beta2_ObjectMetricSource(in, out, s)
}
func autoConvert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2beta2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
if err := Convert_v2beta2_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus is an autogenerated conversion function.
func Convert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *autoscalingv2beta2.ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2beta2.ObjectMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
if err := Convert_autoscaling_CrossVersionObjectReference_To_v2beta2_CrossVersionObjectReference(&in.DescribedObject, &out.DescribedObject, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *autoscalingv2beta2.ObjectMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ObjectMetricStatus_To_v2beta2_ObjectMetricStatus(in, out, s)
}
func autoConvert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2beta2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource is an autogenerated conversion function.
func Convert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in *autoscalingv2beta2.PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error {
return autoConvert_v2beta2_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s)
}
func autoConvert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta2.PodsMetricSource, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource is an autogenerated conversion function.
func Convert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in *autoscaling.PodsMetricSource, out *autoscalingv2beta2.PodsMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_PodsMetricSource_To_v2beta2_PodsMetricSource(in, out, s)
}
func autoConvert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2beta2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
if err := Convert_v2beta2_MetricIdentifier_To_autoscaling_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus is an autogenerated conversion function.
func Convert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *autoscalingv2beta2.PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s)
}
func autoConvert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2beta2.PodsMetricStatus, s conversion.Scope) error {
if err := Convert_autoscaling_MetricIdentifier_To_v2beta2_MetricIdentifier(&in.Metric, &out.Metric, s); err != nil {
return err
}
if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *autoscalingv2beta2.PodsMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_PodsMetricStatus_To_v2beta2_PodsMetricStatus(in, out, s)
}
func autoConvert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2beta2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2beta2_MetricTarget_To_autoscaling_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource is an autogenerated conversion function.
func Convert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *autoscalingv2beta2.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error {
return autoConvert_v2beta2_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s)
}
func autoConvert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2beta2.ResourceMetricSource, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
if err := Convert_autoscaling_MetricTarget_To_v2beta2_MetricTarget(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource is an autogenerated conversion function.
func Convert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *autoscalingv2beta2.ResourceMetricSource, s conversion.Scope) error {
return autoConvert_autoscaling_ResourceMetricSource_To_v2beta2_ResourceMetricSource(in, out, s)
}
func autoConvert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2beta2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
if err := Convert_v2beta2_MetricValueStatus_To_autoscaling_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus is an autogenerated conversion function.
func Convert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *autoscalingv2beta2.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error {
return autoConvert_v2beta2_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s)
}
func autoConvert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2beta2.ResourceMetricStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
if err := Convert_autoscaling_MetricValueStatus_To_v2beta2_MetricValueStatus(&in.Current, &out.Current, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus is an autogenerated conversion function.
func Convert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *autoscalingv2beta2.ResourceMetricStatus, s conversion.Scope) error {
return autoConvert_autoscaling_ResourceMetricStatus_To_v2beta2_ResourceMetricStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v2beta2
import (
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&autoscalingv2beta2.HorizontalPodAutoscaler{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscaler(obj.(*autoscalingv2beta2.HorizontalPodAutoscaler))
})
scheme.AddTypeDefaultingFunc(&autoscalingv2beta2.HorizontalPodAutoscalerList{}, func(obj interface{}) {
SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*autoscalingv2beta2.HorizontalPodAutoscalerList))
})
return nil
}
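// Illustrative sketch, not generated code: once the defaulters are registered,
// defaulting is applied through the scheme. The function name and the locally
// built Scheme are assumptions for demonstration.
func exampleApplyDefaults() {
s := runtime.NewScheme()
if err := RegisterDefaults(s); err != nil {
panic(err)
}
hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{}
// Default dispatches to SetObjectDefaults_HorizontalPodAutoscaler, which in
// turn calls the hand-written SetDefaults_HorizontalPodAutoscaler.
s.Default(hpa)
}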
func SetObjectDefaults_HorizontalPodAutoscaler(in *autoscalingv2beta2.HorizontalPodAutoscaler) {
SetDefaults_HorizontalPodAutoscaler(in)
}
func SetObjectDefaults_HorizontalPodAutoscalerList(in *autoscalingv2beta2.HorizontalPodAutoscalerList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_HorizontalPodAutoscaler(a)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/autoscaling"
corevalidation "k8s.io/kubernetes/pkg/apis/core/v1/validation"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
)
const (
// MaxPeriodSeconds is the largest allowed scaling policy period (in seconds)
MaxPeriodSeconds int32 = 1800
// MaxStabilizationWindowSeconds is the largest allowed stabilization window (in seconds)
MaxStabilizationWindowSeconds int32 = 3600
)
// ValidateScale validates a Scale and returns an ErrorList with any errors.
func ValidateScale(scale *autoscaling.Scale) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
if scale.Spec.Replicas < 0 {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(scale.Spec.Replicas), field.NewPath("spec", "replicas")).MarkCoveredByDeclarative()...)
}
return allErrs
}
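// Illustrative sketch, not part of the original file: exercising ValidateScale
// with an invalid replica count. The object values are assumptions.
func exampleValidateScale() field.ErrorList {
scale := &autoscaling.Scale{}
scale.Name = "web"
scale.Namespace = "default"
scale.Spec.Replicas = -1
// The returned list contains an error for spec.replicas, which must be
// non-negative.
return ValidateScale(scale)
}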
// ValidateHorizontalPodAutoscalerName can be used to check whether the given autoscaler name is valid.
// If prefix is true, the name is validated as a prefix for generated names, in which case trailing dashes are allowed.
var ValidateHorizontalPodAutoscalerName = apivalidation.ValidateReplicationControllerName
func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < opts.MinReplicasLowerBound {
allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas,
fmt.Sprintf("must be greater than or equal to %d", opts.MinReplicasLowerBound)))
}
if autoscaler.MaxReplicas < 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0"))
}
if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`"))
}
if refErrs := ValidateCrossVersionObjectReference(autoscaler.ScaleTargetRef, fldPath.Child("scaleTargetRef"), opts.ScaleTargetRefValidationOptions); len(refErrs) > 0 {
allErrs = append(allErrs, refErrs...)
}
if refErrs := validateMetrics(autoscaler.Metrics, fldPath.Child("metrics"), autoscaler.MinReplicas, opts.ObjectMetricsValidationOptions); len(refErrs) > 0 {
allErrs = append(allErrs, refErrs...)
}
if refErrs := validateBehavior(autoscaler.Behavior, fldPath.Child("behavior"), opts); len(refErrs) > 0 {
allErrs = append(allErrs, refErrs...)
}
return allErrs
}
// ValidateCrossVersionObjectReference validates a CrossVersionObjectReference and returns an
// ErrorList with any errors.
func ValidateCrossVersionObjectReference(ref autoscaling.CrossVersionObjectReference, fldPath *field.Path, opts CrossVersionObjectReferenceValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(ref.Kind) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
} else {
for _, msg := range pathvalidation.IsValidPathSegmentName(ref.Kind) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ref.Kind, msg))
}
}
if len(ref.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
for _, msg := range pathvalidation.IsValidPathSegmentName(ref.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ref.Name, msg))
}
}
if err := ValidateAPIVersion(ref, opts); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ref.APIVersion, err.Error()))
}
return allErrs
}
func ValidateAPIVersion(ref autoscaling.CrossVersionObjectReference, opts CrossVersionObjectReferenceValidationOptions) error {
if opts.AllowInvalidAPIVersion {
return nil
}
gv, err := schema.ParseGroupVersion(ref.APIVersion)
if err != nil {
return err
} else if !opts.AllowEmptyAPIGroup && gv.Group == "" {
return fmt.Errorf("apiVersion must specify API group")
}
return nil
}
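// Example (illustrative): with both options false, "apps/v1" is accepted
// while a group-less "v1" is rejected.
//
//	ref := autoscaling.CrossVersionObjectReference{Kind: "Deployment", Name: "web", APIVersion: "v1"}
//	err := ValidateAPIVersion(ref, CrossVersionObjectReferenceValidationOptions{})
//	// err != nil: apiVersion must specify API group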
// ValidateHorizontalPodAutoscaler validates a HorizontalPodAutoscaler and returns an
// ErrorList with any errors.
func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata"))
allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateHorizontalPodAutoscalerUpdate validates an update to a HorizontalPodAutoscaler and returns an
// ErrorList with any errors.
func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateHorizontalPodAutoscalerStatusUpdate validates an update to status on a HorizontalPodAutoscaler and
// returns an ErrorList with any errors.
func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
status := newAutoscaler.Status
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), field.NewPath("status", "currentReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredReplicas), field.NewPath("status", "desiredReplicas"))...)
return allErrs
}
// CrossVersionObjectReferenceValidationOptions contains the settings used
// to validate a CrossVersionObjectReference.
type CrossVersionObjectReferenceValidationOptions struct {
// AllowEmptyAPIGroup allows the APIVersion to omit the API group.
AllowEmptyAPIGroup bool
// AllowInvalidAPIVersion skips APIVersion validation when true.
AllowInvalidAPIVersion bool
}
// HorizontalPodAutoscalerSpecValidationOptions contains the settings for
// HorizontalPodAutoscaler spec validation.
type HorizontalPodAutoscalerSpecValidationOptions struct {
// The minimum value for minReplicas.
MinReplicasLowerBound int32
ScaleTargetRefValidationOptions CrossVersionObjectReferenceValidationOptions
ObjectMetricsValidationOptions CrossVersionObjectReferenceValidationOptions
}
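// Example (illustrative; the concrete values are assumptions, not package
// defaults): options as a caller might construct them.
//
//	opts := HorizontalPodAutoscalerSpecValidationOptions{
//	    MinReplicasLowerBound: 1,
//	    ScaleTargetRefValidationOptions: CrossVersionObjectReferenceValidationOptions{
//	        AllowEmptyAPIGroup: true,
//	    },
//	}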
func validateMetrics(newMetrics []autoscaling.MetricSpec, fldPath *field.Path, minReplicas *int32, opts CrossVersionObjectReferenceValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
hasObjectMetrics := false
hasExternalMetrics := false
for i, metricSpec := range newMetrics {
idxPath := fldPath.Index(i)
if targetErrs := validateMetricSpec(metricSpec, idxPath, opts); len(targetErrs) > 0 {
allErrs = append(allErrs, targetErrs...)
}
if metricSpec.Type == autoscaling.ObjectMetricSourceType {
hasObjectMetrics = true
}
if metricSpec.Type == autoscaling.ExternalMetricSourceType {
hasExternalMetrics = true
}
}
if minReplicas != nil && *minReplicas == 0 {
if !hasObjectMetrics && !hasExternalMetrics {
allErrs = append(allErrs, field.Forbidden(fldPath, "must specify at least one Object or External metric to support scaling to zero replicas"))
}
}
return allErrs
}
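// Example (illustrative sketch): scaling to zero requires at least one
// Object or External metric, so minReplicas=0 with only a Resource metric
// fails this check (the elided Resource source would also need populating).
//
//	zero := int32(0)
//	metrics := []autoscaling.MetricSpec{{Type: autoscaling.ResourceMetricSourceType /* Resource: ... */}}
//	errs := validateMetrics(metrics, field.NewPath("metrics"), &zero, CrossVersionObjectReferenceValidationOptions{})
//	// errs includes a Forbidden error on the metrics path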
func validateBehavior(behavior *autoscaling.HorizontalPodAutoscalerBehavior, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if behavior != nil {
if scaleUpErrs := validateScalingRules(behavior.ScaleUp, fldPath.Child("scaleUp"), opts); len(scaleUpErrs) > 0 {
allErrs = append(allErrs, scaleUpErrs...)
}
if scaleDownErrs := validateScalingRules(behavior.ScaleDown, fldPath.Child("scaleDown"), opts); len(scaleDownErrs) > 0 {
allErrs = append(allErrs, scaleDownErrs...)
}
}
return allErrs
}
var validSelectPolicyTypes = sets.NewString(string(autoscaling.MaxPolicySelect), string(autoscaling.MinPolicySelect), string(autoscaling.DisabledPolicySelect))
var validSelectPolicyTypesList = validSelectPolicyTypes.List()
func validateScalingRules(rules *autoscaling.HPAScalingRules, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if rules != nil {
if rules.StabilizationWindowSeconds != nil && *rules.StabilizationWindowSeconds < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("stabilizationWindowSeconds"), rules.StabilizationWindowSeconds, "must be greater than or equal to zero"))
}
if rules.StabilizationWindowSeconds != nil && *rules.StabilizationWindowSeconds > MaxStabilizationWindowSeconds {
allErrs = append(allErrs, field.Invalid(fldPath.Child("stabilizationWindowSeconds"), rules.StabilizationWindowSeconds,
fmt.Sprintf("must be less than or equal to %v", MaxStabilizationWindowSeconds)))
}
if rules.SelectPolicy != nil && !validSelectPolicyTypes.Has(string(*rules.SelectPolicy)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("selectPolicy"), rules.SelectPolicy, validSelectPolicyTypesList))
}
policiesPath := fldPath.Child("policies")
if len(rules.Policies) == 0 {
allErrs = append(allErrs, field.Required(policiesPath, "must specify at least one Policy"))
}
for i, policy := range rules.Policies {
idxPath := policiesPath.Index(i)
if policyErrs := validateScalingPolicy(policy, idxPath); len(policyErrs) > 0 {
allErrs = append(allErrs, policyErrs...)
}
}
if rules.Tolerance != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeQuantity(*rules.Tolerance, fldPath.Child("tolerance"))...)
}
}
return allErrs
}
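// Example (illustrative; values are assumptions chosen within the documented
// bounds): a minimal set of scaling rules that passes the checks above.
//
//	window := int32(300) // within [0, MaxStabilizationWindowSeconds]
//	rules := &autoscaling.HPAScalingRules{
//	    StabilizationWindowSeconds: &window,
//	    Policies: []autoscaling.HPAScalingPolicy{
//	        {Type: autoscaling.PodsScalingPolicy, Value: 4, PeriodSeconds: 60},
//	    },
//	}
//	errs := validateScalingRules(rules, field.NewPath("scaleDown"), HorizontalPodAutoscalerSpecValidationOptions{})
//	// len(errs) == 0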
var validPolicyTypes = sets.NewString(string(autoscaling.PodsScalingPolicy), string(autoscaling.PercentScalingPolicy))
var validPolicyTypesList = validPolicyTypes.List()
func validateScalingPolicy(policy autoscaling.HPAScalingPolicy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if policy.Type != autoscaling.PodsScalingPolicy && policy.Type != autoscaling.PercentScalingPolicy {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), policy.Type, validPolicyTypesList))
}
if policy.Value <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("value"), policy.Value, "must be greater than zero"))
}
if policy.PeriodSeconds <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("periodSeconds"), policy.PeriodSeconds, "must be greater than zero"))
}
if policy.PeriodSeconds > MaxPeriodSeconds {
allErrs = append(allErrs, field.Invalid(fldPath.Child("periodSeconds"), policy.PeriodSeconds,
fmt.Sprintf("must be less than or equal to %v", MaxPeriodSeconds)))
}
return allErrs
}
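// Example (illustrative): Value and PeriodSeconds must both be positive, and
// PeriodSeconds may not exceed MaxPeriodSeconds (1800).
//
//	bad := autoscaling.HPAScalingPolicy{Type: autoscaling.PodsScalingPolicy, Value: 0, PeriodSeconds: 3600}
//	errs := validateScalingPolicy(bad, field.NewPath("policy"))
//	// errs flags value (not positive) and periodSeconds (over the maximum)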
var validMetricSourceTypes = sets.NewString(
string(autoscaling.ObjectMetricSourceType), string(autoscaling.PodsMetricSourceType),
string(autoscaling.ResourceMetricSourceType), string(autoscaling.ExternalMetricSourceType),
string(autoscaling.ContainerResourceMetricSourceType))
var validMetricSourceTypesList = validMetricSourceTypes.List()
func validateMetricSpec(spec autoscaling.MetricSpec, fldPath *field.Path, opts CrossVersionObjectReferenceValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(string(spec.Type)) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must specify a metric source type"))
}
if !validMetricSourceTypes.Has(string(spec.Type)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), spec.Type, validMetricSourceTypesList))
}
typesPresent := sets.NewString()
if spec.Object != nil {
typesPresent.Insert("object")
if typesPresent.Len() == 1 {
allErrs = append(allErrs, validateObjectSource(spec.Object, fldPath.Child("object"), opts)...)
}
}
if spec.External != nil {
typesPresent.Insert("external")
if typesPresent.Len() == 1 {
allErrs = append(allErrs, validateExternalSource(spec.External, fldPath.Child("external"))...)
}
}
if spec.Pods != nil {
typesPresent.Insert("pods")
if typesPresent.Len() == 1 {
allErrs = append(allErrs, validatePodsSource(spec.Pods, fldPath.Child("pods"))...)
}
}
if spec.Resource != nil {
typesPresent.Insert("resource")
if typesPresent.Len() == 1 {
allErrs = append(allErrs, validateResourceSource(spec.Resource, fldPath.Child("resource"))...)
}
}
if spec.ContainerResource != nil {
typesPresent.Insert("containerResource")
if typesPresent.Len() == 1 {
allErrs = append(allErrs, validateContainerResourceSource(spec.ContainerResource, fldPath.Child("containerResource"))...)
}
}
var expectedField string
switch spec.Type {
case autoscaling.ObjectMetricSourceType:
if spec.Object == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("object"), "must populate information for the given metric source"))
}
expectedField = "object"
case autoscaling.PodsMetricSourceType:
if spec.Pods == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("pods"), "must populate information for the given metric source"))
}
expectedField = "pods"
case autoscaling.ResourceMetricSourceType:
if spec.Resource == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "must populate information for the given metric source"))
}
expectedField = "resource"
case autoscaling.ExternalMetricSourceType:
if spec.External == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("external"), "must populate information for the given metric source"))
}
expectedField = "external"
case autoscaling.ContainerResourceMetricSourceType:
if spec.ContainerResource == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("containerResource"), "must populate information for the given metric source"))
}
expectedField = "containerResource"
default:
allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), spec.Type, validMetricSourceTypesList))
}
if typesPresent.Len() != 1 {
typesPresent.Delete(expectedField)
for typ := range typesPresent {
allErrs = append(allErrs, field.Forbidden(fldPath.Child(typ), "must populate the given metric source only"))
}
}
return allErrs
}
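// Example (illustrative): a spec must populate exactly the source matching
// its Type; a Pods-typed spec that also sets Resource is rejected.
//
//	spec := autoscaling.MetricSpec{
//	    Type:     autoscaling.PodsMetricSourceType,
//	    Pods:     &autoscaling.PodsMetricSource{ /* ... */ },
//	    Resource: &autoscaling.ResourceMetricSource{ /* ... */ },
//	}
//	// validateMetricSpec returns a Forbidden error on the "resource" child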
func validateObjectSource(src *autoscaling.ObjectMetricSource, fldPath *field.Path, opts CrossVersionObjectReferenceValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateCrossVersionObjectReference(src.DescribedObject, fldPath.Child("describedObject"), opts)...)
allErrs = append(allErrs, validateMetricIdentifier(src.Metric, fldPath.Child("metric"))...)
allErrs = append(allErrs, validateMetricTarget(src.Target, fldPath.Child("target"))...)
if src.Target.Value == nil && src.Target.AverageValue == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("target").Child("averageValue"), "must set either a target value or averageValue"))
}
return allErrs
}
func validateExternalSource(src *autoscaling.ExternalMetricSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validateMetricIdentifier(src.Metric, fldPath.Child("metric"))...)
allErrs = append(allErrs, validateMetricTarget(src.Target, fldPath.Child("target"))...)
if src.Target.Value == nil && src.Target.AverageValue == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("target").Child("averageValue"), "must set either a target value for metric or a per-pod target"))
}
if src.Target.Value != nil && src.Target.AverageValue != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("target").Child("value"), "may not set both a target value for metric and a per-pod target"))
}
return allErrs
}
func validatePodsSource(src *autoscaling.PodsMetricSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validateMetricIdentifier(src.Metric, fldPath.Child("metric"))...)
allErrs = append(allErrs, validateMetricTarget(src.Target, fldPath.Child("target"))...)
if src.Target.AverageValue == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("target").Child("averageValue"), "must specify a positive target averageValue"))
}
return allErrs
}
func validateContainerResourceSource(src *autoscaling.ContainerResourceMetricSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(src.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), "must specify a resource name"))
} else {
allErrs = append(allErrs, corevalidation.ValidateContainerResourceName(src.Name, fldPath.Child("name"))...)
}
if len(src.Container) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("container"), "must specify a container"))
} else {
allErrs = append(allErrs, apivalidation.ValidateDNS1123Label(src.Container, fldPath.Child("container"))...)
}
allErrs = append(allErrs, validateMetricTarget(src.Target, fldPath.Child("target"))...)
if src.Target.AverageUtilization == nil && src.Target.AverageValue == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("target").Child("averageUtilization"), "must set either a target raw value or a target utilization"))
}
if src.Target.AverageUtilization != nil && src.Target.AverageValue != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("target").Child("averageValue"), "may not set both a target raw value and a target utilization"))
}
return allErrs
}
func validateResourceSource(src *autoscaling.ResourceMetricSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(src.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), "must specify a resource name"))
}
allErrs = append(allErrs, validateMetricTarget(src.Target, fldPath.Child("target"))...)
if src.Target.AverageUtilization == nil && src.Target.AverageValue == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("target").Child("averageUtilization"), "must set either a target raw value or a target utilization"))
}
if src.Target.AverageUtilization != nil && src.Target.AverageValue != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("target").Child("averageValue"), "may not set both a target raw value and a target utilization"))
}
return allErrs
}
func validateMetricTarget(mt autoscaling.MetricTarget, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(mt.Type) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must specify a metric target type"))
}
if mt.Type != autoscaling.UtilizationMetricType &&
mt.Type != autoscaling.ValueMetricType &&
mt.Type != autoscaling.AverageValueMetricType {
allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), mt.Type, "must be either Utilization, Value, or AverageValue"))
}
if mt.Value != nil && mt.Value.Sign() != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("value"), mt.Value, "must be positive"))
}
if mt.AverageValue != nil && mt.AverageValue.Sign() != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("averageValue"), mt.AverageValue, "must be positive"))
}
if mt.AverageUtilization != nil && *mt.AverageUtilization < 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("averageUtilization"), mt.AverageUtilization, "must be greater than 0"))
}
return allErrs
}
func validateMetricIdentifier(id autoscaling.MetricIdentifier, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(id.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), "must specify a metric name"))
} else {
for _, msg := range pathvalidation.IsValidPathSegmentName(id.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), id.Name, msg))
}
}
return allErrs
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package autoscaling
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricSource) DeepCopyInto(out *ContainerResourceMetricSource) {
*out = *in
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricSource.
func (in *ContainerResourceMetricSource) DeepCopy() *ContainerResourceMetricSource {
if in == nil {
return nil
}
out := new(ContainerResourceMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResourceMetricStatus) DeepCopyInto(out *ContainerResourceMetricStatus) {
*out = *in
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourceMetricStatus.
func (in *ContainerResourceMetricStatus) DeepCopy() *ContainerResourceMetricStatus {
if in == nil {
return nil
}
out := new(ContainerResourceMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference.
func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference {
if in == nil {
return nil
}
out := new(CrossVersionObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource.
func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource {
if in == nil {
return nil
}
out := new(ExternalMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus.
func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
if in == nil {
return nil
}
out := new(ExternalMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy.
func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy {
if in == nil {
return nil
}
out := new(HPAScalingPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
*out = *in
if in.StabilizationWindowSeconds != nil {
in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds
*out = new(int32)
**out = **in
}
if in.SelectPolicy != nil {
in, out := &in.SelectPolicy, &out.SelectPolicy
*out = new(ScalingPolicySelect)
**out = **in
}
if in.Policies != nil {
in, out := &in.Policies, &out.Policies
*out = make([]HPAScalingPolicy, len(*in))
copy(*out, *in)
}
if in.Tolerance != nil {
in, out := &in.Tolerance, &out.Tolerance
x := (*in).DeepCopy()
*out = &x
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules.
func (in *HPAScalingRules) DeepCopy() *HPAScalingRules {
if in == nil {
return nil
}
out := new(HPAScalingRules)
in.DeepCopyInto(out)
return out
}
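// Example (illustrative): DeepCopy duplicates pointer fields, so mutating
// the copy leaves the original untouched.
//
//	w := int32(300)
//	orig := &HPAScalingRules{StabilizationWindowSeconds: &w}
//	cp := orig.DeepCopy()
//	*cp.StabilizationWindowSeconds = 60
//	// *orig.StabilizationWindowSeconds is still 300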
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler.
func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscaler)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) {
*out = *in
if in.ScaleUp != nil {
in, out := &in.ScaleUp, &out.ScaleUp
*out = new(HPAScalingRules)
(*in).DeepCopyInto(*out)
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(HPAScalingRules)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior.
func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerBehavior)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition.
func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HorizontalPodAutoscaler, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList.
func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
*out = *in
out.ScaleTargetRef = in.ScaleTargetRef
if in.MinReplicas != nil {
in, out := &in.MinReplicas, &out.MinReplicas
*out = new(int32)
**out = **in
}
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]MetricSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(HorizontalPodAutoscalerBehavior)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
*out = *in
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
if in.LastScaleTime != nil {
in, out := &in.LastScaleTime, &out.LastScaleTime
*out = (*in).DeepCopy()
}
if in.CurrentMetrics != nil {
in, out := &in.CurrentMetrics, &out.CurrentMetrics
*out = make([]MetricStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]HorizontalPodAutoscalerCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier.
func (in *MetricIdentifier) DeepCopy() *MetricIdentifier {
if in == nil {
return nil
}
out := new(MetricIdentifier)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(ObjectMetricSource)
(*in).DeepCopyInto(*out)
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(PodsMetricSource)
(*in).DeepCopyInto(*out)
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(ResourceMetricSource)
(*in).DeepCopyInto(*out)
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(ContainerResourceMetricSource)
(*in).DeepCopyInto(*out)
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(ExternalMetricSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
func (in *MetricSpec) DeepCopy() *MetricSpec {
if in == nil {
return nil
}
out := new(MetricSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
*out = *in
if in.Object != nil {
in, out := &in.Object, &out.Object
*out = new(ObjectMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.Pods != nil {
in, out := &in.Pods, &out.Pods
*out = new(PodsMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(ResourceMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.ContainerResource != nil {
in, out := &in.ContainerResource, &out.ContainerResource
*out = new(ContainerResourceMetricStatus)
(*in).DeepCopyInto(*out)
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(ExternalMetricStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
func (in *MetricStatus) DeepCopy() *MetricStatus {
if in == nil {
return nil
}
out := new(MetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricTarget) DeepCopyInto(out *MetricTarget) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
x := (*in).DeepCopy()
*out = &x
}
if in.AverageValue != nil {
in, out := &in.AverageValue, &out.AverageValue
x := (*in).DeepCopy()
*out = &x
}
if in.AverageUtilization != nil {
in, out := &in.AverageUtilization, &out.AverageUtilization
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget.
func (in *MetricTarget) DeepCopy() *MetricTarget {
if in == nil {
return nil
}
out := new(MetricTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
x := (*in).DeepCopy()
*out = &x
}
if in.AverageValue != nil {
in, out := &in.AverageValue, &out.AverageValue
x := (*in).DeepCopy()
*out = &x
}
if in.AverageUtilization != nil {
in, out := &in.AverageUtilization, &out.AverageUtilization
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus.
func (in *MetricValueStatus) DeepCopy() *MetricValueStatus {
if in == nil {
return nil
}
out := new(MetricValueStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
*out = *in
out.DescribedObject = in.DescribedObject
in.Target.DeepCopyInto(&out.Target)
in.Metric.DeepCopyInto(&out.Metric)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
if in == nil {
return nil
}
out := new(ObjectMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
out.DescribedObject = in.DescribedObject
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
if in == nil {
return nil
}
out := new(ObjectMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
if in == nil {
return nil
}
out := new(PodsMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
*out = *in
in.Metric.DeepCopyInto(&out.Metric)
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
if in == nil {
return nil
}
out := new(PodsMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
*out = *in
in.Target.DeepCopyInto(&out.Target)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
if in == nil {
return nil
}
out := new(ResourceMetricSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
*out = *in
in.Current.DeepCopyInto(&out.Current)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
if in == nil {
return nil
}
out := new(ResourceMetricStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scale) DeepCopyInto(out *Scale) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
func (in *Scale) DeepCopy() *Scale {
if in == nil {
return nil
}
out := new(Scale)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scale) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
func (in *ScaleStatus) DeepCopy() *ScaleStatus {
if in == nil {
return nil
}
out := new(ScaleStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"math"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/ptr"
"sigs.k8s.io/randfill"
)
// Funcs returns the fuzzer functions for the batch api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(j *batch.Job, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
// match defaulting
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
},
func(j *batch.JobSpec, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
completions := c.Int31()
parallelism := c.Int31()
backoffLimit := c.Int31()
j.Completions = &completions
j.Parallelism = &parallelism
j.BackoffLimit = &backoffLimit
j.ManualSelector = ptr.To(c.Bool())
mode := batch.NonIndexedCompletion
if c.Bool() {
mode = batch.IndexedCompletion
j.BackoffLimitPerIndex = ptr.To[int32](c.Int31())
j.MaxFailedIndexes = ptr.To[int32](c.Int31())
}
if c.Bool() {
j.BackoffLimit = ptr.To[int32](math.MaxInt32)
}
j.CompletionMode = &mode
// We're fuzzing the internal JobSpec type, not the v1 type, so we don't
// need to fuzz the nil value.
j.Suspend = ptr.To(c.Bool())
podReplacementPolicy := batch.TerminatingOrFailed
if c.Bool() {
podReplacementPolicy = batch.Failed
}
j.PodReplacementPolicy = &podReplacementPolicy
if c.Bool() {
c.Fill(j.ManagedBy)
}
},
func(sj *batch.CronJobSpec, c randfill.Continue) {
c.FillNoCustom(sj)
suspend := c.Bool()
sj.Suspend = &suspend
sds := int64(c.Uint64())
sj.StartingDeadlineSeconds = &sds
sj.Schedule = c.String(0)
successfulJobsHistoryLimit := c.Int31()
sj.SuccessfulJobsHistoryLimit = &successfulJobsHistoryLimit
failedJobsHistoryLimit := c.Int31()
sj.FailedJobsHistoryLimit = &failedJobsHistoryLimit
},
func(cp *batch.ConcurrencyPolicy, c randfill.Continue) {
policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
*cp = policies[c.Rand.Intn(len(policies))]
},
func(p *batch.PodFailurePolicyOnPodConditionsPattern, c randfill.Continue) {
c.FillNoCustom(p)
if p.Status == "" {
p.Status = api.ConditionTrue
}
},
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the batch API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/apis/batch/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(batch.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
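// Example (illustrative): installing into a standalone scheme instead of the
// global legacyscheme.
//
//	scheme := runtime.NewScheme()
//	Install(scheme)
//	// scheme now handles batch objects at both v1 and v1beta1,
//	// preferring v1 when encoding.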
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package batch
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "batch"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
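// Example (illustrative):
//
//	Kind("Job")      // schema.GroupKind{Group: "batch", Kind: "Job"}
//	Resource("jobs") // schema.GroupResource{Group: "batch", Resource: "jobs"}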
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Job{},
&JobList{},
&CronJob{},
&CronJobList{},
)
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
v1 "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/batch"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Job"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "metadata.namespace", "status.successful":
return label, value, nil
default:
return "", "", fmt.Errorf("field label %q not supported for Job", label)
}
})
}
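// Example (illustrative sketch; "scheme" is a hypothetical *runtime.Scheme
// with these conversions registered): supported labels convert unchanged.
//
//	label, value, err := scheme.ConvertFieldLabel(SchemeGroupVersion.WithKind("Job"), "status.successful", "1")
//	// label == "status.successful", value == "1", err == nil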
// The following functions don't do anything special, but they need to be added
// here due to the dependency of v1beta1 on v1.
func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec, s conversion.Scope) error {
return autoConvert_batch_JobSpec_To_v1_JobSpec(in, out, s)
}
func Convert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec, s conversion.Scope) error {
return autoConvert_v1_JobSpec_To_batch_JobSpec(in, out, s)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"math"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_Job(obj *batchv1.Job) {
// For a non-parallel job, you can leave both `.spec.completions` and
// `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
obj.Spec.Completions = ptr.To[int32](1)
obj.Spec.Parallelism = ptr.To[int32](1)
}
if obj.Spec.Parallelism == nil {
obj.Spec.Parallelism = ptr.To[int32](1)
}
if obj.Spec.BackoffLimit == nil {
if obj.Spec.BackoffLimitPerIndex != nil {
obj.Spec.BackoffLimit = ptr.To[int32](math.MaxInt32)
} else {
obj.Spec.BackoffLimit = ptr.To[int32](6)
}
}
labels := obj.Spec.Template.Labels
if labels != nil && len(obj.Labels) == 0 {
obj.Labels = labels
}
if obj.Spec.CompletionMode == nil {
mode := batchv1.NonIndexedCompletion
obj.Spec.CompletionMode = &mode
}
if obj.Spec.Suspend == nil {
obj.Spec.Suspend = ptr.To(false)
}
if utilfeature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) {
if obj.Spec.PodReplacementPolicy == nil {
if obj.Spec.PodFailurePolicy != nil {
obj.Spec.PodReplacementPolicy = ptr.To(batchv1.Failed)
} else {
obj.Spec.PodReplacementPolicy = ptr.To(batchv1.TerminatingOrFailed)
}
}
}
if obj.Spec.ManualSelector == nil {
obj.Spec.ManualSelector = ptr.To(false)
}
}
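// Example (illustrative): defaults applied to an otherwise-empty Job spec.
// PodReplacementPolicy is omitted here because it depends on a feature gate.
//
//	job := &batchv1.Job{}
//	SetDefaults_Job(job)
//	// *job.Spec.Completions == 1, *job.Spec.Parallelism == 1,
//	// *job.Spec.BackoffLimit == 6, *job.Spec.Suspend == false,
//	// *job.Spec.CompletionMode == batchv1.NonIndexedCompletion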
func SetDefaults_CronJob(obj *batchv1.CronJob) {
if obj.Spec.ConcurrencyPolicy == "" {
obj.Spec.ConcurrencyPolicy = batchv1.AllowConcurrent
}
if obj.Spec.Suspend == nil {
obj.Spec.Suspend = ptr.To(false)
}
if obj.Spec.SuccessfulJobsHistoryLimit == nil {
obj.Spec.SuccessfulJobsHistoryLimit = ptr.To[int32](3)
}
if obj.Spec.FailedJobsHistoryLimit == nil {
obj.Spec.FailedJobsHistoryLimit = ptr.To[int32](1)
}
}
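// Example (illustrative):
//
//	cj := &batchv1.CronJob{}
//	SetDefaults_CronJob(cj)
//	// ConcurrencyPolicy == batchv1.AllowConcurrent, *Suspend == false,
//	// *SuccessfulJobsHistoryLimit == 3, *FailedJobsHistoryLimit == 1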
func SetDefaults_PodFailurePolicyOnPodConditionsPattern(obj *batchv1.PodFailurePolicyOnPodConditionsPattern) {
if obj.Status == "" {
obj.Status = corev1.ConditionTrue
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
batchv1 "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "batch"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &batchv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
batch "k8s.io/kubernetes/pkg/apis/batch"
core "k8s.io/kubernetes/pkg/apis/core"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*batchv1.CronJob)(nil), (*batch.CronJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CronJob_To_batch_CronJob(a.(*batchv1.CronJob), b.(*batch.CronJob), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJob)(nil), (*batchv1.CronJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJob_To_v1_CronJob(a.(*batch.CronJob), b.(*batchv1.CronJob), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.CronJobList)(nil), (*batch.CronJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CronJobList_To_batch_CronJobList(a.(*batchv1.CronJobList), b.(*batch.CronJobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobList)(nil), (*batchv1.CronJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobList_To_v1_CronJobList(a.(*batch.CronJobList), b.(*batchv1.CronJobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.CronJobSpec)(nil), (*batch.CronJobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CronJobSpec_To_batch_CronJobSpec(a.(*batchv1.CronJobSpec), b.(*batch.CronJobSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobSpec)(nil), (*batchv1.CronJobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobSpec_To_v1_CronJobSpec(a.(*batch.CronJobSpec), b.(*batchv1.CronJobSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.CronJobStatus)(nil), (*batch.CronJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CronJobStatus_To_batch_CronJobStatus(a.(*batchv1.CronJobStatus), b.(*batch.CronJobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobStatus)(nil), (*batchv1.CronJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobStatus_To_v1_CronJobStatus(a.(*batch.CronJobStatus), b.(*batchv1.CronJobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.Job)(nil), (*batch.Job)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Job_To_batch_Job(a.(*batchv1.Job), b.(*batch.Job), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.Job)(nil), (*batchv1.Job)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_Job_To_v1_Job(a.(*batch.Job), b.(*batchv1.Job), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.JobCondition)(nil), (*batch.JobCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_JobCondition_To_batch_JobCondition(a.(*batchv1.JobCondition), b.(*batch.JobCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.JobCondition)(nil), (*batchv1.JobCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobCondition_To_v1_JobCondition(a.(*batch.JobCondition), b.(*batchv1.JobCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.JobList)(nil), (*batch.JobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_JobList_To_batch_JobList(a.(*batchv1.JobList), b.(*batch.JobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.JobList)(nil), (*batchv1.JobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobList_To_v1_JobList(a.(*batch.JobList), b.(*batchv1.JobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.JobStatus)(nil), (*batch.JobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_JobStatus_To_batch_JobStatus(a.(*batchv1.JobStatus), b.(*batch.JobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.JobStatus)(nil), (*batchv1.JobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobStatus_To_v1_JobStatus(a.(*batch.JobStatus), b.(*batchv1.JobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.JobTemplateSpec)(nil), (*batch.JobTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_JobTemplateSpec_To_batch_JobTemplateSpec(a.(*batchv1.JobTemplateSpec), b.(*batch.JobTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.JobTemplateSpec)(nil), (*batchv1.JobTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobTemplateSpec_To_v1_JobTemplateSpec(a.(*batch.JobTemplateSpec), b.(*batchv1.JobTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.PodFailurePolicy)(nil), (*batch.PodFailurePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodFailurePolicy_To_batch_PodFailurePolicy(a.(*batchv1.PodFailurePolicy), b.(*batch.PodFailurePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.PodFailurePolicy)(nil), (*batchv1.PodFailurePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_PodFailurePolicy_To_v1_PodFailurePolicy(a.(*batch.PodFailurePolicy), b.(*batchv1.PodFailurePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.PodFailurePolicyOnExitCodesRequirement)(nil), (*batch.PodFailurePolicyOnExitCodesRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodFailurePolicyOnExitCodesRequirement_To_batch_PodFailurePolicyOnExitCodesRequirement(a.(*batchv1.PodFailurePolicyOnExitCodesRequirement), b.(*batch.PodFailurePolicyOnExitCodesRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.PodFailurePolicyOnExitCodesRequirement)(nil), (*batchv1.PodFailurePolicyOnExitCodesRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_PodFailurePolicyOnExitCodesRequirement_To_v1_PodFailurePolicyOnExitCodesRequirement(a.(*batch.PodFailurePolicyOnExitCodesRequirement), b.(*batchv1.PodFailurePolicyOnExitCodesRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.PodFailurePolicyOnPodConditionsPattern)(nil), (*batch.PodFailurePolicyOnPodConditionsPattern)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodFailurePolicyOnPodConditionsPattern_To_batch_PodFailurePolicyOnPodConditionsPattern(a.(*batchv1.PodFailurePolicyOnPodConditionsPattern), b.(*batch.PodFailurePolicyOnPodConditionsPattern), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.PodFailurePolicyOnPodConditionsPattern)(nil), (*batchv1.PodFailurePolicyOnPodConditionsPattern)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_PodFailurePolicyOnPodConditionsPattern_To_v1_PodFailurePolicyOnPodConditionsPattern(a.(*batch.PodFailurePolicyOnPodConditionsPattern), b.(*batchv1.PodFailurePolicyOnPodConditionsPattern), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.PodFailurePolicyRule)(nil), (*batch.PodFailurePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodFailurePolicyRule_To_batch_PodFailurePolicyRule(a.(*batchv1.PodFailurePolicyRule), b.(*batch.PodFailurePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.PodFailurePolicyRule)(nil), (*batchv1.PodFailurePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(a.(*batch.PodFailurePolicyRule), b.(*batchv1.PodFailurePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.SuccessPolicy)(nil), (*batch.SuccessPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SuccessPolicy_To_batch_SuccessPolicy(a.(*batchv1.SuccessPolicy), b.(*batch.SuccessPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.SuccessPolicy)(nil), (*batchv1.SuccessPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_SuccessPolicy_To_v1_SuccessPolicy(a.(*batch.SuccessPolicy), b.(*batchv1.SuccessPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.SuccessPolicyRule)(nil), (*batch.SuccessPolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(a.(*batchv1.SuccessPolicyRule), b.(*batch.SuccessPolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.SuccessPolicyRule)(nil), (*batchv1.SuccessPolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(a.(*batch.SuccessPolicyRule), b.(*batchv1.SuccessPolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1.UncountedTerminatedPods)(nil), (*batch.UncountedTerminatedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(a.(*batchv1.UncountedTerminatedPods), b.(*batch.UncountedTerminatedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.UncountedTerminatedPods)(nil), (*batchv1.UncountedTerminatedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_UncountedTerminatedPods_To_v1_UncountedTerminatedPods(a.(*batch.UncountedTerminatedPods), b.(*batchv1.UncountedTerminatedPods), scope)
}); err != nil {
return err
}
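// Note: unlike the types registered above, JobSpec is registered with
// AddConversionFunc rather than AddGeneratedConversionFunc. That signals
// that its public Convert_* wrappers are written by hand elsewhere in this
// package and only delegate to the autoConvert_* helpers for the
// mechanically convertible fields.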
if err := s.AddConversionFunc((*batch.JobSpec)(nil), (*batchv1.JobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobSpec_To_v1_JobSpec(a.(*batch.JobSpec), b.(*batchv1.JobSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*batchv1.JobSpec)(nil), (*batch.JobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_JobSpec_To_batch_JobSpec(a.(*batchv1.JobSpec), b.(*batch.JobSpec), scope)
}); err != nil {
return err
}
return nil
}
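// A minimal sketch (assumed usage, not generated code) of how the functions
// registered above are exercised: the scheme dispatches Convert by the
// dynamic types of its arguments. Error handling via panic is illustrative
// only.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	in := &batchv1.Job{}
//	out := &batch.Job{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		panic(err)
//	}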
func autoConvert_v1_CronJob_To_batch_CronJob(in *batchv1.CronJob, out *batch.CronJob, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_CronJobStatus_To_batch_CronJobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_CronJob_To_batch_CronJob is an autogenerated conversion function.
func Convert_v1_CronJob_To_batch_CronJob(in *batchv1.CronJob, out *batch.CronJob, s conversion.Scope) error {
return autoConvert_v1_CronJob_To_batch_CronJob(in, out, s)
}
func autoConvert_batch_CronJob_To_v1_CronJob(in *batch.CronJob, out *batchv1.CronJob, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_batch_CronJobSpec_To_v1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_batch_CronJobStatus_To_v1_CronJobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_batch_CronJob_To_v1_CronJob is an autogenerated conversion function.
func Convert_batch_CronJob_To_v1_CronJob(in *batch.CronJob, out *batchv1.CronJob, s conversion.Scope) error {
return autoConvert_batch_CronJob_To_v1_CronJob(in, out, s)
}
func autoConvert_v1_CronJobList_To_batch_CronJobList(in *batchv1.CronJobList, out *batch.CronJobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batch.CronJob, len(*in))
for i := range *in {
if err := Convert_v1_CronJob_To_batch_CronJob(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_CronJobList_To_batch_CronJobList is an autogenerated conversion function.
func Convert_v1_CronJobList_To_batch_CronJobList(in *batchv1.CronJobList, out *batch.CronJobList, s conversion.Scope) error {
return autoConvert_v1_CronJobList_To_batch_CronJobList(in, out, s)
}
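// Note: CronJob lists are converted element by element into a freshly
// allocated slice because CronJob itself needs a function-based conversion
// (its job template ultimately contains a PodTemplateSpec, which differs
// between the versioned and internal APIs). Types with identical layouts
// are instead cast directly via unsafe.Pointer, as in the spec conversions
// below.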
func autoConvert_batch_CronJobList_To_v1_CronJobList(in *batch.CronJobList, out *batchv1.CronJobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batchv1.CronJob, len(*in))
for i := range *in {
if err := Convert_batch_CronJob_To_v1_CronJob(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_batch_CronJobList_To_v1_CronJobList is an autogenerated conversion function.
func Convert_batch_CronJobList_To_v1_CronJobList(in *batch.CronJobList, out *batchv1.CronJobList, s conversion.Scope) error {
return autoConvert_batch_CronJobList_To_v1_CronJobList(in, out, s)
}
func autoConvert_v1_CronJobSpec_To_batch_CronJobSpec(in *batchv1.CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error {
out.Schedule = in.Schedule
out.TimeZone = (*string)(unsafe.Pointer(in.TimeZone))
out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds))
out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy)
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
if err := Convert_v1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}
// Convert_v1_CronJobSpec_To_batch_CronJobSpec is an autogenerated conversion function.
func Convert_v1_CronJobSpec_To_batch_CronJobSpec(in *batchv1.CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error {
return autoConvert_v1_CronJobSpec_To_batch_CronJobSpec(in, out, s)
}
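// Note: the unsafe.Pointer casts above are conversion-gen's fast path. When
// the versioned (batchv1) and internal (batch) field types share an
// identical memory layout, the generator reinterprets the pointer rather
// than copying field by field; only JobTemplate goes through a conversion
// function, since PodTemplateSpec differs between the two APIs.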
func autoConvert_batch_CronJobSpec_To_v1_CronJobSpec(in *batch.CronJobSpec, out *batchv1.CronJobSpec, s conversion.Scope) error {
out.Schedule = in.Schedule
out.TimeZone = (*string)(unsafe.Pointer(in.TimeZone))
out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds))
out.ConcurrencyPolicy = batchv1.ConcurrencyPolicy(in.ConcurrencyPolicy)
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
if err := Convert_batch_JobTemplateSpec_To_v1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}
// Convert_batch_CronJobSpec_To_v1_CronJobSpec is an autogenerated conversion function.
func Convert_batch_CronJobSpec_To_v1_CronJobSpec(in *batch.CronJobSpec, out *batchv1.CronJobSpec, s conversion.Scope) error {
return autoConvert_batch_CronJobSpec_To_v1_CronJobSpec(in, out, s)
}
func autoConvert_v1_CronJobStatus_To_batch_CronJobStatus(in *batchv1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error {
out.Active = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Active))
out.LastScheduleTime = (*metav1.Time)(unsafe.Pointer(in.LastScheduleTime))
out.LastSuccessfulTime = (*metav1.Time)(unsafe.Pointer(in.LastSuccessfulTime))
return nil
}
// Convert_v1_CronJobStatus_To_batch_CronJobStatus is an autogenerated conversion function.
func Convert_v1_CronJobStatus_To_batch_CronJobStatus(in *batchv1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error {
return autoConvert_v1_CronJobStatus_To_batch_CronJobStatus(in, out, s)
}
func autoConvert_batch_CronJobStatus_To_v1_CronJobStatus(in *batch.CronJobStatus, out *batchv1.CronJobStatus, s conversion.Scope) error {
out.Active = *(*[]corev1.ObjectReference)(unsafe.Pointer(&in.Active))
out.LastScheduleTime = (*metav1.Time)(unsafe.Pointer(in.LastScheduleTime))
out.LastSuccessfulTime = (*metav1.Time)(unsafe.Pointer(in.LastSuccessfulTime))
return nil
}
// Convert_batch_CronJobStatus_To_v1_CronJobStatus is an autogenerated conversion function.
func Convert_batch_CronJobStatus_To_v1_CronJobStatus(in *batch.CronJobStatus, out *batchv1.CronJobStatus, s conversion.Scope) error {
return autoConvert_batch_CronJobStatus_To_v1_CronJobStatus(in, out, s)
}
func autoConvert_v1_Job_To_batch_Job(in *batchv1.Job, out *batch.Job, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Job_To_batch_Job is an autogenerated conversion function.
func Convert_v1_Job_To_batch_Job(in *batchv1.Job, out *batch.Job, s conversion.Scope) error {
return autoConvert_v1_Job_To_batch_Job(in, out, s)
}
func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *batchv1.Job, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_batch_Job_To_v1_Job is an autogenerated conversion function.
func Convert_batch_Job_To_v1_Job(in *batch.Job, out *batchv1.Job, s conversion.Scope) error {
return autoConvert_batch_Job_To_v1_Job(in, out, s)
}
func autoConvert_v1_JobCondition_To_batch_JobCondition(in *batchv1.JobCondition, out *batch.JobCondition, s conversion.Scope) error {
out.Type = batch.JobConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_JobCondition_To_batch_JobCondition is an autogenerated conversion function.
func Convert_v1_JobCondition_To_batch_JobCondition(in *batchv1.JobCondition, out *batch.JobCondition, s conversion.Scope) error {
return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s)
}
func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *batchv1.JobCondition, s conversion.Scope) error {
out.Type = batchv1.JobConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_batch_JobCondition_To_v1_JobCondition is an autogenerated conversion function.
func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *batchv1.JobCondition, s conversion.Scope) error {
return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s)
}
func autoConvert_v1_JobList_To_batch_JobList(in *batchv1.JobList, out *batch.JobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batch.Job, len(*in))
for i := range *in {
if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_JobList_To_batch_JobList is an autogenerated conversion function.
func Convert_v1_JobList_To_batch_JobList(in *batchv1.JobList, out *batch.JobList, s conversion.Scope) error {
return autoConvert_v1_JobList_To_batch_JobList(in, out, s)
}
func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *batchv1.JobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batchv1.Job, len(*in))
for i := range *in {
if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_batch_JobList_To_v1_JobList is an autogenerated conversion function.
func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *batchv1.JobList, s conversion.Scope) error {
return autoConvert_batch_JobList_To_v1_JobList(in, out, s)
}
func autoConvert_v1_JobSpec_To_batch_JobSpec(in *batchv1.JobSpec, out *batch.JobSpec, s conversion.Scope) error {
out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism))
out.Completions = (*int32)(unsafe.Pointer(in.Completions))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.PodFailurePolicy = (*batch.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy))
out.SuccessPolicy = (*batch.SuccessPolicy)(unsafe.Pointer(in.SuccessPolicy))
out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit))
out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex))
out.MaxFailedIndexes = (*int32)(unsafe.Pointer(in.MaxFailedIndexes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector))
if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.TTLSecondsAfterFinished = (*int32)(unsafe.Pointer(in.TTLSecondsAfterFinished))
out.CompletionMode = (*batch.CompletionMode)(unsafe.Pointer(in.CompletionMode))
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
out.PodReplacementPolicy = (*batch.PodReplacementPolicy)(unsafe.Pointer(in.PodReplacementPolicy))
out.ManagedBy = (*string)(unsafe.Pointer(in.ManagedBy))
return nil
}
func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *batchv1.JobSpec, s conversion.Scope) error {
out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism))
out.Completions = (*int32)(unsafe.Pointer(in.Completions))
out.PodFailurePolicy = (*batchv1.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy))
out.SuccessPolicy = (*batchv1.SuccessPolicy)(unsafe.Pointer(in.SuccessPolicy))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit))
out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex))
out.MaxFailedIndexes = (*int32)(unsafe.Pointer(in.MaxFailedIndexes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector))
if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
out.TTLSecondsAfterFinished = (*int32)(unsafe.Pointer(in.TTLSecondsAfterFinished))
out.CompletionMode = (*batchv1.CompletionMode)(unsafe.Pointer(in.CompletionMode))
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
out.PodReplacementPolicy = (*batchv1.PodReplacementPolicy)(unsafe.Pointer(in.PodReplacementPolicy))
out.ManagedBy = (*string)(unsafe.Pointer(in.ManagedBy))
return nil
}
func autoConvert_v1_JobStatus_To_batch_JobStatus(in *batchv1.JobStatus, out *batch.JobStatus, s conversion.Scope) error {
out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
out.CompletionTime = (*metav1.Time)(unsafe.Pointer(in.CompletionTime))
out.Active = in.Active
out.Succeeded = in.Succeeded
out.Failed = in.Failed
out.Terminating = (*int32)(unsafe.Pointer(in.Terminating))
out.CompletedIndexes = in.CompletedIndexes
out.FailedIndexes = (*string)(unsafe.Pointer(in.FailedIndexes))
out.UncountedTerminatedPods = (*batch.UncountedTerminatedPods)(unsafe.Pointer(in.UncountedTerminatedPods))
out.Ready = (*int32)(unsafe.Pointer(in.Ready))
return nil
}
// Convert_v1_JobStatus_To_batch_JobStatus is an autogenerated conversion function.
func Convert_v1_JobStatus_To_batch_JobStatus(in *batchv1.JobStatus, out *batch.JobStatus, s conversion.Scope) error {
return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s)
}
func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *batchv1.JobStatus, s conversion.Scope) error {
out.Conditions = *(*[]batchv1.JobCondition)(unsafe.Pointer(&in.Conditions))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
out.CompletionTime = (*metav1.Time)(unsafe.Pointer(in.CompletionTime))
out.Active = in.Active
out.Terminating = (*int32)(unsafe.Pointer(in.Terminating))
out.Ready = (*int32)(unsafe.Pointer(in.Ready))
out.Succeeded = in.Succeeded
out.Failed = in.Failed
out.CompletedIndexes = in.CompletedIndexes
out.FailedIndexes = (*string)(unsafe.Pointer(in.FailedIndexes))
out.UncountedTerminatedPods = (*batchv1.UncountedTerminatedPods)(unsafe.Pointer(in.UncountedTerminatedPods))
return nil
}
// Convert_batch_JobStatus_To_v1_JobStatus is an autogenerated conversion function.
func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *batchv1.JobStatus, s conversion.Scope) error {
return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s)
}
func autoConvert_v1_JobTemplateSpec_To_batch_JobTemplateSpec(in *batchv1.JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_JobTemplateSpec_To_batch_JobTemplateSpec is an autogenerated conversion function.
func Convert_v1_JobTemplateSpec_To_batch_JobTemplateSpec(in *batchv1.JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
return autoConvert_v1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s)
}
func autoConvert_batch_JobTemplateSpec_To_v1_JobTemplateSpec(in *batch.JobTemplateSpec, out *batchv1.JobTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_batch_JobTemplateSpec_To_v1_JobTemplateSpec is an autogenerated conversion function.
func Convert_batch_JobTemplateSpec_To_v1_JobTemplateSpec(in *batch.JobTemplateSpec, out *batchv1.JobTemplateSpec, s conversion.Scope) error {
return autoConvert_batch_JobTemplateSpec_To_v1_JobTemplateSpec(in, out, s)
}
func autoConvert_v1_PodFailurePolicy_To_batch_PodFailurePolicy(in *batchv1.PodFailurePolicy, out *batch.PodFailurePolicy, s conversion.Scope) error {
out.Rules = *(*[]batch.PodFailurePolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_PodFailurePolicy_To_batch_PodFailurePolicy is an autogenerated conversion function.
func Convert_v1_PodFailurePolicy_To_batch_PodFailurePolicy(in *batchv1.PodFailurePolicy, out *batch.PodFailurePolicy, s conversion.Scope) error {
return autoConvert_v1_PodFailurePolicy_To_batch_PodFailurePolicy(in, out, s)
}
func autoConvert_batch_PodFailurePolicy_To_v1_PodFailurePolicy(in *batch.PodFailurePolicy, out *batchv1.PodFailurePolicy, s conversion.Scope) error {
out.Rules = *(*[]batchv1.PodFailurePolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_batch_PodFailurePolicy_To_v1_PodFailurePolicy is an autogenerated conversion function.
func Convert_batch_PodFailurePolicy_To_v1_PodFailurePolicy(in *batch.PodFailurePolicy, out *batchv1.PodFailurePolicy, s conversion.Scope) error {
return autoConvert_batch_PodFailurePolicy_To_v1_PodFailurePolicy(in, out, s)
}
func autoConvert_v1_PodFailurePolicyOnExitCodesRequirement_To_batch_PodFailurePolicyOnExitCodesRequirement(in *batchv1.PodFailurePolicyOnExitCodesRequirement, out *batch.PodFailurePolicyOnExitCodesRequirement, s conversion.Scope) error {
out.ContainerName = (*string)(unsafe.Pointer(in.ContainerName))
out.Operator = batch.PodFailurePolicyOnExitCodesOperator(in.Operator)
out.Values = *(*[]int32)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_v1_PodFailurePolicyOnExitCodesRequirement_To_batch_PodFailurePolicyOnExitCodesRequirement is an autogenerated conversion function.
func Convert_v1_PodFailurePolicyOnExitCodesRequirement_To_batch_PodFailurePolicyOnExitCodesRequirement(in *batchv1.PodFailurePolicyOnExitCodesRequirement, out *batch.PodFailurePolicyOnExitCodesRequirement, s conversion.Scope) error {
return autoConvert_v1_PodFailurePolicyOnExitCodesRequirement_To_batch_PodFailurePolicyOnExitCodesRequirement(in, out, s)
}
func autoConvert_batch_PodFailurePolicyOnExitCodesRequirement_To_v1_PodFailurePolicyOnExitCodesRequirement(in *batch.PodFailurePolicyOnExitCodesRequirement, out *batchv1.PodFailurePolicyOnExitCodesRequirement, s conversion.Scope) error {
out.ContainerName = (*string)(unsafe.Pointer(in.ContainerName))
out.Operator = batchv1.PodFailurePolicyOnExitCodesOperator(in.Operator)
out.Values = *(*[]int32)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_batch_PodFailurePolicyOnExitCodesRequirement_To_v1_PodFailurePolicyOnExitCodesRequirement is an autogenerated conversion function.
func Convert_batch_PodFailurePolicyOnExitCodesRequirement_To_v1_PodFailurePolicyOnExitCodesRequirement(in *batch.PodFailurePolicyOnExitCodesRequirement, out *batchv1.PodFailurePolicyOnExitCodesRequirement, s conversion.Scope) error {
return autoConvert_batch_PodFailurePolicyOnExitCodesRequirement_To_v1_PodFailurePolicyOnExitCodesRequirement(in, out, s)
}
func autoConvert_v1_PodFailurePolicyOnPodConditionsPattern_To_batch_PodFailurePolicyOnPodConditionsPattern(in *batchv1.PodFailurePolicyOnPodConditionsPattern, out *batch.PodFailurePolicyOnPodConditionsPattern, s conversion.Scope) error {
out.Type = core.PodConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
return nil
}
// Convert_v1_PodFailurePolicyOnPodConditionsPattern_To_batch_PodFailurePolicyOnPodConditionsPattern is an autogenerated conversion function.
func Convert_v1_PodFailurePolicyOnPodConditionsPattern_To_batch_PodFailurePolicyOnPodConditionsPattern(in *batchv1.PodFailurePolicyOnPodConditionsPattern, out *batch.PodFailurePolicyOnPodConditionsPattern, s conversion.Scope) error {
return autoConvert_v1_PodFailurePolicyOnPodConditionsPattern_To_batch_PodFailurePolicyOnPodConditionsPattern(in, out, s)
}
func autoConvert_batch_PodFailurePolicyOnPodConditionsPattern_To_v1_PodFailurePolicyOnPodConditionsPattern(in *batch.PodFailurePolicyOnPodConditionsPattern, out *batchv1.PodFailurePolicyOnPodConditionsPattern, s conversion.Scope) error {
out.Type = corev1.PodConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
return nil
}
// Convert_batch_PodFailurePolicyOnPodConditionsPattern_To_v1_PodFailurePolicyOnPodConditionsPattern is an autogenerated conversion function.
func Convert_batch_PodFailurePolicyOnPodConditionsPattern_To_v1_PodFailurePolicyOnPodConditionsPattern(in *batch.PodFailurePolicyOnPodConditionsPattern, out *batchv1.PodFailurePolicyOnPodConditionsPattern, s conversion.Scope) error {
return autoConvert_batch_PodFailurePolicyOnPodConditionsPattern_To_v1_PodFailurePolicyOnPodConditionsPattern(in, out, s)
}
func autoConvert_v1_PodFailurePolicyRule_To_batch_PodFailurePolicyRule(in *batchv1.PodFailurePolicyRule, out *batch.PodFailurePolicyRule, s conversion.Scope) error {
out.Action = batch.PodFailurePolicyAction(in.Action)
out.OnExitCodes = (*batch.PodFailurePolicyOnExitCodesRequirement)(unsafe.Pointer(in.OnExitCodes))
out.OnPodConditions = *(*[]batch.PodFailurePolicyOnPodConditionsPattern)(unsafe.Pointer(&in.OnPodConditions))
return nil
}
// Convert_v1_PodFailurePolicyRule_To_batch_PodFailurePolicyRule is an autogenerated conversion function.
func Convert_v1_PodFailurePolicyRule_To_batch_PodFailurePolicyRule(in *batchv1.PodFailurePolicyRule, out *batch.PodFailurePolicyRule, s conversion.Scope) error {
return autoConvert_v1_PodFailurePolicyRule_To_batch_PodFailurePolicyRule(in, out, s)
}
func autoConvert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(in *batch.PodFailurePolicyRule, out *batchv1.PodFailurePolicyRule, s conversion.Scope) error {
out.Action = batchv1.PodFailurePolicyAction(in.Action)
out.OnExitCodes = (*batchv1.PodFailurePolicyOnExitCodesRequirement)(unsafe.Pointer(in.OnExitCodes))
out.OnPodConditions = *(*[]batchv1.PodFailurePolicyOnPodConditionsPattern)(unsafe.Pointer(&in.OnPodConditions))
return nil
}
// Convert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule is an autogenerated conversion function.
func Convert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(in *batch.PodFailurePolicyRule, out *batchv1.PodFailurePolicyRule, s conversion.Scope) error {
return autoConvert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(in, out, s)
}
func autoConvert_v1_SuccessPolicy_To_batch_SuccessPolicy(in *batchv1.SuccessPolicy, out *batch.SuccessPolicy, s conversion.Scope) error {
out.Rules = *(*[]batch.SuccessPolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_SuccessPolicy_To_batch_SuccessPolicy is an autogenerated conversion function.
func Convert_v1_SuccessPolicy_To_batch_SuccessPolicy(in *batchv1.SuccessPolicy, out *batch.SuccessPolicy, s conversion.Scope) error {
return autoConvert_v1_SuccessPolicy_To_batch_SuccessPolicy(in, out, s)
}
func autoConvert_batch_SuccessPolicy_To_v1_SuccessPolicy(in *batch.SuccessPolicy, out *batchv1.SuccessPolicy, s conversion.Scope) error {
out.Rules = *(*[]batchv1.SuccessPolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_batch_SuccessPolicy_To_v1_SuccessPolicy is an autogenerated conversion function.
func Convert_batch_SuccessPolicy_To_v1_SuccessPolicy(in *batch.SuccessPolicy, out *batchv1.SuccessPolicy, s conversion.Scope) error {
return autoConvert_batch_SuccessPolicy_To_v1_SuccessPolicy(in, out, s)
}
func autoConvert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in *batchv1.SuccessPolicyRule, out *batch.SuccessPolicyRule, s conversion.Scope) error {
out.SucceededIndexes = (*string)(unsafe.Pointer(in.SucceededIndexes))
out.SucceededCount = (*int32)(unsafe.Pointer(in.SucceededCount))
return nil
}
// Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule is an autogenerated conversion function.
func Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in *batchv1.SuccessPolicyRule, out *batch.SuccessPolicyRule, s conversion.Scope) error {
return autoConvert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in, out, s)
}
func autoConvert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in *batch.SuccessPolicyRule, out *batchv1.SuccessPolicyRule, s conversion.Scope) error {
out.SucceededIndexes = (*string)(unsafe.Pointer(in.SucceededIndexes))
out.SucceededCount = (*int32)(unsafe.Pointer(in.SucceededCount))
return nil
}
// Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule is an autogenerated conversion function.
func Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in *batch.SuccessPolicyRule, out *batchv1.SuccessPolicyRule, s conversion.Scope) error {
return autoConvert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in, out, s)
}
func autoConvert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(in *batchv1.UncountedTerminatedPods, out *batch.UncountedTerminatedPods, s conversion.Scope) error {
out.Succeeded = *(*[]types.UID)(unsafe.Pointer(&in.Succeeded))
out.Failed = *(*[]types.UID)(unsafe.Pointer(&in.Failed))
return nil
}
// Convert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods is an autogenerated conversion function.
func Convert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(in *batchv1.UncountedTerminatedPods, out *batch.UncountedTerminatedPods, s conversion.Scope) error {
return autoConvert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(in, out, s)
}
func autoConvert_batch_UncountedTerminatedPods_To_v1_UncountedTerminatedPods(in *batch.UncountedTerminatedPods, out *batchv1.UncountedTerminatedPods, s conversion.Scope) error {
out.Succeeded = *(*[]types.UID)(unsafe.Pointer(&in.Succeeded))
out.Failed = *(*[]types.UID)(unsafe.Pointer(&in.Failed))
return nil
}
// Convert_batch_UncountedTerminatedPods_To_v1_UncountedTerminatedPods is an autogenerated conversion function.
func Convert_batch_UncountedTerminatedPods_To_v1_UncountedTerminatedPods(in *batch.UncountedTerminatedPods, out *batchv1.UncountedTerminatedPods, s conversion.Scope) error {
return autoConvert_batch_UncountedTerminatedPods_To_v1_UncountedTerminatedPods(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&batchv1.CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*batchv1.CronJob)) })
scheme.AddTypeDefaultingFunc(&batchv1.CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*batchv1.CronJobList)) })
scheme.AddTypeDefaultingFunc(&batchv1.Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*batchv1.Job)) })
scheme.AddTypeDefaultingFunc(&batchv1.JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*batchv1.JobList)) })
return nil
}
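// A minimal sketch (assumed usage, not generated code) of how the
// registrations above are consumed: after RegisterDefaults, the scheme can
// default any registered object, dispatching by the object's dynamic type.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	job := &batchv1.Job{}
//	scheme.Default(job) // dispatches to SetObjectDefaults_Job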
func SetObjectDefaults_CronJob(in *batchv1.CronJob) {
SetDefaults_CronJob(in)
if in.Spec.JobTemplate.Spec.PodFailurePolicy != nil {
for i := range in.Spec.JobTemplate.Spec.PodFailurePolicy.Rules {
a := &in.Spec.JobTemplate.Spec.PodFailurePolicy.Rules[i]
for j := range a.OnPodConditions {
b := &a.OnPodConditions[j]
SetDefaults_PodFailurePolicyOnPodConditionsPattern(b)
}
}
}
apiscorev1.SetDefaults_PodSpec(&in.Spec.JobTemplate.Spec.Template.Spec)
for i := range in.Spec.JobTemplate.Spec.Template.Spec.Volumes {
a := &in.Spec.JobTemplate.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Overhead)
if in.Spec.JobTemplate.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_CronJobList(in *batchv1.CronJobList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_CronJob(a)
}
}
func SetObjectDefaults_Job(in *batchv1.Job) {
SetDefaults_Job(in)
if in.Spec.PodFailurePolicy != nil {
for i := range in.Spec.PodFailurePolicy.Rules {
a := &in.Spec.PodFailurePolicy.Rules[i]
for j := range a.OnPodConditions {
b := &a.OnPodConditions[j]
SetDefaults_PodFailurePolicyOnPodConditionsPattern(b)
}
}
}
apiscorev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
apiscorev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
apiscorev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
apiscorev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
apiscorev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
apiscorev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
apiscorev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
apiscorev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
apiscorev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
apiscorev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
apiscorev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
apiscorev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
apiscorev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
apiscorev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_JobList(in *batchv1.JobList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Job(a)
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
var err error
// Add field label conversions for kinds whose selectable fields are nothing
// beyond ObjectMeta fields and status.successful.
for _, k := range []string{"Job", "JobTemplate", "CronJob"} {
kind := k // don't close over range variables
err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind(kind),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "metadata.namespace", "status.successful":
return label, value, nil
default:
return "", "", fmt.Errorf("field label %q not supported for %q", label, kind)
}
})
if err != nil {
return err
}
}
return nil
}
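// A minimal sketch (assumed usage; scheme is a *runtime.Scheme on which
// addConversionFuncs has been registered) of what the field label
// conversion above enables: field selectors on these kinds are validated
// and normalized through the scheme.
//
//	gvk := SchemeGroupVersion.WithKind("Job")
//	label, value, err := scheme.ConvertFieldLabel(gvk, "status.successful", "1")
//	// label == "status.successful", value == "1", err == nil;
//	// an unsupported label such as "spec.foo" yields an error instead.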
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_CronJob(obj *batchv1beta1.CronJob) {
if obj.Spec.ConcurrencyPolicy == "" {
obj.Spec.ConcurrencyPolicy = batchv1beta1.AllowConcurrent
}
if obj.Spec.Suspend == nil {
obj.Spec.Suspend = new(bool)
}
if obj.Spec.SuccessfulJobsHistoryLimit == nil {
obj.Spec.SuccessfulJobsHistoryLimit = new(int32)
*obj.Spec.SuccessfulJobsHistoryLimit = 3
}
if obj.Spec.FailedJobsHistoryLimit == nil {
obj.Spec.FailedJobsHistoryLimit = new(int32)
*obj.Spec.FailedJobsHistoryLimit = 1
}
}
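// A minimal sketch (illustrative, not part of this file) of the effect of
// SetDefaults_CronJob on an otherwise empty spec:
//
//	cj := &batchv1beta1.CronJob{}
//	SetDefaults_CronJob(cj)
//	// cj.Spec.ConcurrencyPolicy == batchv1beta1.AllowConcurrent
//	// *cj.Spec.Suspend == false
//	// *cj.Spec.SuccessfulJobsHistoryLimit == 3
//	// *cj.Spec.FailedJobsHistoryLimit == 1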
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "batch"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &batchv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
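// Illustrative sketch (editorial addition, not part of the upstream file):
// a caller building a standalone scheme typically relies on AddToScheme,
// which replays every function registered on localSchemeBuilder, generated
// and manually written alike:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err) // hypothetical handling, for the example only
//	}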
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
batch "k8s.io/kubernetes/pkg/apis/batch"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*batchv1beta1.CronJob)(nil), (*batch.CronJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CronJob_To_batch_CronJob(a.(*batchv1beta1.CronJob), b.(*batch.CronJob), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJob)(nil), (*batchv1beta1.CronJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJob_To_v1beta1_CronJob(a.(*batch.CronJob), b.(*batchv1beta1.CronJob), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1beta1.CronJobList)(nil), (*batch.CronJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CronJobList_To_batch_CronJobList(a.(*batchv1beta1.CronJobList), b.(*batch.CronJobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobList)(nil), (*batchv1beta1.CronJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobList_To_v1beta1_CronJobList(a.(*batch.CronJobList), b.(*batchv1beta1.CronJobList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1beta1.CronJobSpec)(nil), (*batch.CronJobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CronJobSpec_To_batch_CronJobSpec(a.(*batchv1beta1.CronJobSpec), b.(*batch.CronJobSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobSpec)(nil), (*batchv1beta1.CronJobSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobSpec_To_v1beta1_CronJobSpec(a.(*batch.CronJobSpec), b.(*batchv1beta1.CronJobSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1beta1.CronJobStatus)(nil), (*batch.CronJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CronJobStatus_To_batch_CronJobStatus(a.(*batchv1beta1.CronJobStatus), b.(*batch.CronJobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.CronJobStatus)(nil), (*batchv1beta1.CronJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_CronJobStatus_To_v1beta1_CronJobStatus(a.(*batch.CronJobStatus), b.(*batchv1beta1.CronJobStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batchv1beta1.JobTemplateSpec)(nil), (*batch.JobTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec(a.(*batchv1beta1.JobTemplateSpec), b.(*batch.JobTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.JobTemplateSpec)(nil), (*batchv1beta1.JobTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec(a.(*batch.JobTemplateSpec), b.(*batchv1beta1.JobTemplateSpec), scope)
}); err != nil {
return err
}
return nil
}
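// Editorial sketch (not part of the generated output): with the conversions
// registered, a scheme can convert between the versioned and internal types
// directly. Assuming a scheme built via AddToScheme:
//
//	in := &batchv1beta1.CronJob{}
//	out := &batch.CronJob{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		// handle the conversion error
//	}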
func autoConvert_v1beta1_CronJob_To_batch_CronJob(in *batchv1beta1.CronJob, out *batch.CronJob, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_CronJobStatus_To_batch_CronJobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_CronJob_To_batch_CronJob is an autogenerated conversion function.
func Convert_v1beta1_CronJob_To_batch_CronJob(in *batchv1beta1.CronJob, out *batch.CronJob, s conversion.Scope) error {
return autoConvert_v1beta1_CronJob_To_batch_CronJob(in, out, s)
}
func autoConvert_batch_CronJob_To_v1beta1_CronJob(in *batch.CronJob, out *batchv1beta1.CronJob, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_batch_CronJobSpec_To_v1beta1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_batch_CronJobStatus_To_v1beta1_CronJobStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_batch_CronJob_To_v1beta1_CronJob is an autogenerated conversion function.
func Convert_batch_CronJob_To_v1beta1_CronJob(in *batch.CronJob, out *batchv1beta1.CronJob, s conversion.Scope) error {
return autoConvert_batch_CronJob_To_v1beta1_CronJob(in, out, s)
}
func autoConvert_v1beta1_CronJobList_To_batch_CronJobList(in *batchv1beta1.CronJobList, out *batch.CronJobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batch.CronJob, len(*in))
for i := range *in {
if err := Convert_v1beta1_CronJob_To_batch_CronJob(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_CronJobList_To_batch_CronJobList is an autogenerated conversion function.
func Convert_v1beta1_CronJobList_To_batch_CronJobList(in *batchv1beta1.CronJobList, out *batch.CronJobList, s conversion.Scope) error {
return autoConvert_v1beta1_CronJobList_To_batch_CronJobList(in, out, s)
}
func autoConvert_batch_CronJobList_To_v1beta1_CronJobList(in *batch.CronJobList, out *batchv1beta1.CronJobList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]batchv1beta1.CronJob, len(*in))
for i := range *in {
if err := Convert_batch_CronJob_To_v1beta1_CronJob(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_batch_CronJobList_To_v1beta1_CronJobList is an autogenerated conversion function.
func Convert_batch_CronJobList_To_v1beta1_CronJobList(in *batch.CronJobList, out *batchv1beta1.CronJobList, s conversion.Scope) error {
return autoConvert_batch_CronJobList_To_v1beta1_CronJobList(in, out, s)
}
func autoConvert_v1beta1_CronJobSpec_To_batch_CronJobSpec(in *batchv1beta1.CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error {
out.Schedule = in.Schedule
out.TimeZone = (*string)(unsafe.Pointer(in.TimeZone))
out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds))
out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy)
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
if err := Convert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}
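// Editorial note (not part of the generated output): the unsafe.Pointer casts
// above are a conversion-gen optimization. When the source and destination
// types are guaranteed to share an identical memory layout (pointer-to-int32
// history limits, enums that are both string-typed), the generator
// reinterprets the pointer instead of allocating and copying field by field;
// only fields that genuinely differ between versions (here, the JobTemplate)
// go through an explicit Convert_* call.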
// Convert_v1beta1_CronJobSpec_To_batch_CronJobSpec is an autogenerated conversion function.
func Convert_v1beta1_CronJobSpec_To_batch_CronJobSpec(in *batchv1beta1.CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error {
return autoConvert_v1beta1_CronJobSpec_To_batch_CronJobSpec(in, out, s)
}
func autoConvert_batch_CronJobSpec_To_v1beta1_CronJobSpec(in *batch.CronJobSpec, out *batchv1beta1.CronJobSpec, s conversion.Scope) error {
out.Schedule = in.Schedule
out.TimeZone = (*string)(unsafe.Pointer(in.TimeZone))
out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds))
out.ConcurrencyPolicy = batchv1beta1.ConcurrencyPolicy(in.ConcurrencyPolicy)
out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
if err := Convert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}
// Convert_batch_CronJobSpec_To_v1beta1_CronJobSpec is an autogenerated conversion function.
func Convert_batch_CronJobSpec_To_v1beta1_CronJobSpec(in *batch.CronJobSpec, out *batchv1beta1.CronJobSpec, s conversion.Scope) error {
return autoConvert_batch_CronJobSpec_To_v1beta1_CronJobSpec(in, out, s)
}
func autoConvert_v1beta1_CronJobStatus_To_batch_CronJobStatus(in *batchv1beta1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error {
out.Active = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Active))
out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime))
out.LastSuccessfulTime = (*v1.Time)(unsafe.Pointer(in.LastSuccessfulTime))
return nil
}
// Convert_v1beta1_CronJobStatus_To_batch_CronJobStatus is an autogenerated conversion function.
func Convert_v1beta1_CronJobStatus_To_batch_CronJobStatus(in *batchv1beta1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error {
return autoConvert_v1beta1_CronJobStatus_To_batch_CronJobStatus(in, out, s)
}
func autoConvert_batch_CronJobStatus_To_v1beta1_CronJobStatus(in *batch.CronJobStatus, out *batchv1beta1.CronJobStatus, s conversion.Scope) error {
out.Active = *(*[]corev1.ObjectReference)(unsafe.Pointer(&in.Active))
out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime))
out.LastSuccessfulTime = (*v1.Time)(unsafe.Pointer(in.LastSuccessfulTime))
return nil
}
// Convert_batch_CronJobStatus_To_v1beta1_CronJobStatus is an autogenerated conversion function.
func Convert_batch_CronJobStatus_To_v1beta1_CronJobStatus(in *batch.CronJobStatus, out *batchv1beta1.CronJobStatus, s conversion.Scope) error {
return autoConvert_batch_CronJobStatus_To_v1beta1_CronJobStatus(in, out, s)
}
func autoConvert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec(in *batchv1beta1.JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := batchv1.Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec is an autogenerated conversion function.
func Convert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec(in *batchv1beta1.JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error {
return autoConvert_v1beta1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s)
}
func autoConvert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec(in *batch.JobTemplateSpec, out *batchv1beta1.JobTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := batchv1.Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec is an autogenerated conversion function.
func Convert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec(in *batch.JobTemplateSpec, out *batchv1beta1.JobTemplateSpec, s conversion.Scope) error {
return autoConvert_batch_JobTemplateSpec_To_v1beta1_JobTemplateSpec(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&batchv1beta1.CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*batchv1beta1.CronJob)) })
scheme.AddTypeDefaultingFunc(&batchv1beta1.CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*batchv1beta1.CronJobList)) })
return nil
}
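// Editorial sketch (not part of the generated output): these registrations
// are exercised when a scheme defaults a matching object:
//
//	obj := &batchv1beta1.CronJob{}
//	scheme.Default(obj) // runs SetObjectDefaults_CronJob(obj)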
func SetObjectDefaults_CronJob(in *batchv1beta1.CronJob) {
SetDefaults_CronJob(in)
if in.Spec.JobTemplate.Spec.PodFailurePolicy != nil {
for i := range in.Spec.JobTemplate.Spec.PodFailurePolicy.Rules {
a := &in.Spec.JobTemplate.Spec.PodFailurePolicy.Rules[i]
for j := range a.OnPodConditions {
b := &a.OnPodConditions[j]
batchv1.SetDefaults_PodFailurePolicyOnPodConditionsPattern(b)
}
}
}
corev1.SetDefaults_PodSpec(&in.Spec.JobTemplate.Spec.Template.Spec)
for i := range in.Spec.JobTemplate.Spec.Template.Spec.Volumes {
a := &in.Spec.JobTemplate.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.JobTemplate.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.JobTemplate.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Overhead)
if in.Spec.JobTemplate.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.JobTemplate.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_CronJobList(in *batchv1beta1.CronJobList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_CronJob(a)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/robfig/cron/v3"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
apimachineryvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/utils/ptr"
)
// maxParallelismForIndexedJob is the maximum parallelism that an Indexed Job
// is allowed to have. This threshold caps the length of
// .status.completedIndexes.
const maxParallelismForIndexedJob = 100_000
// maxFailedIndexesForIndexedJob is the maximum number of failed indexes that
// an Indexed Job is allowed to have. This threshold caps the length of
// .status.completedIndexes and .status.failedIndexes.
const maxFailedIndexesForIndexedJob = 100_000
const (
completionsSoftLimit = 100_000
parallelismLimitForHighCompletions = 10_000
maxFailedIndexesLimitForHighCompletions = 10_000
// maximum number of rules in pod failure policy
maxPodFailurePolicyRules = 20
// maximum number of values for an OnExitCodes requirement in pod failure policy
maxPodFailurePolicyOnExitCodesValues = 255
// maximum number of patterns for an OnPodConditions requirement in pod failure policy
maxPodFailurePolicyOnPodConditionsPatterns = 20
// maximum length of the value of the managedBy field
maxManagedByLength = 63
// maximum length of succeededIndexes in JobSuccessPolicy.
maxJobSuccessPolicySucceededIndexesLimit = 64 * 1024
// maximum number of rules in successPolicy.
maxSuccessPolicyRule = 20
)
var (
supportedPodFailurePolicyActions = sets.New(
batch.PodFailurePolicyActionCount,
batch.PodFailurePolicyActionFailIndex,
batch.PodFailurePolicyActionFailJob,
batch.PodFailurePolicyActionIgnore)
supportedPodFailurePolicyOnExitCodesOperator = sets.New(
batch.PodFailurePolicyOnExitCodesOpIn,
batch.PodFailurePolicyOnExitCodesOpNotIn)
supportedPodFailurePolicyOnPodConditionsStatus = sets.New(
api.ConditionFalse,
api.ConditionTrue,
api.ConditionUnknown)
supportedPodReplacementPolicy = sets.New(
batch.Failed,
batch.TerminatingOrFailed)
)
// validateGeneratedSelector validates that the generated selector on a controller object matches the controller
// object's metadata, and that the labels on the pod template are as generated.
//
// TODO: generalize for other controller objects that will follow the same pattern, such as ReplicaSet and DaemonSet, and
// move to new location. Replace batch.Job with an interface.
func validateGeneratedSelector(obj *batch.Job, validateBatchLabels bool) field.ErrorList {
allErrs := field.ErrorList{}
if obj.Spec.ManualSelector != nil && *obj.Spec.ManualSelector {
return allErrs
}
if obj.Spec.Selector == nil {
return allErrs // This case should already have been checked in the caller. No need for more errors.
}
// If somehow uid was unset then we would get "controller-uid=" as the selector
// which is bad.
if obj.ObjectMeta.UID == "" {
allErrs = append(allErrs, field.Required(field.NewPath("metadata").Child("uid"), ""))
}
// If selector generation was requested, then expected labels must be
// present on pod template, and must match job's uid and name. The
// generated (not-manual) selectors/labels ensure no overlap with other
// controllers. The manual mode allows orphaning, adoption,
// backward-compatibility, and experimentation with new
// labeling/selection schemes. Automatic selector generation should
// have placed certain labels on the pod, but this could have failed if
// the user added conflicting labels. Validate that the expected
// generated ones are there.
allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), batch.LegacyControllerUidLabel, string(obj.UID))...)
allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), batch.LegacyJobNameLabel, string(obj.Name))...)
expectedLabels := make(map[string]string)
if validateBatchLabels {
allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), batch.ControllerUidLabel, string(obj.UID))...)
allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), batch.JobNameLabel, string(obj.Name))...)
expectedLabels[batch.ControllerUidLabel] = string(obj.UID)
expectedLabels[batch.JobNameLabel] = string(obj.Name)
}
// Labels created by the Kubernetes project should have a Kubernetes prefix.
// These labels are set for legacy reasons.
expectedLabels[batch.LegacyControllerUidLabel] = string(obj.UID)
expectedLabels[batch.LegacyJobNameLabel] = string(obj.Name)
// Whether manually or automatically generated, the selector of the job must match the pods it will produce.
if selector, err := metav1.LabelSelectorAsSelector(obj.Spec.Selector); err == nil {
if !selector.Matches(labels.Set(expectedLabels)) {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("selector"), obj.Spec.Selector, "`selector` not auto-generated"))
}
}
return allErrs
}
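// Illustrative sketch (editorial addition, not part of the upstream file):
// for a Job named "pi" with UID "1234", the generated selector is expected to
// match a label set along these lines (the batch.kubernetes.io-prefixed keys
// only when validateBatchLabels is true):
//
//	labels.Set{
//		"batch.kubernetes.io/controller-uid": "1234",
//		"batch.kubernetes.io/job-name":       "pi",
//		"controller-uid":                     "1234", // legacy
//		"job-name":                           "pi",   // legacy
//	}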
// ValidateJob validates a Job and returns an ErrorList with any errors.
func ValidateJob(job *batch.Job, opts JobValidationOptions) field.ErrorList {
// Jobs and replication controllers have the same name validation
allErrs := apivalidation.ValidateObjectMeta(&job.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata"))
allErrs = append(allErrs, validateGeneratedSelector(job, opts.RequirePrefixedLabels)...)
allErrs = append(allErrs, ValidateJobSpec(&job.Spec, field.NewPath("spec"), opts.PodValidationOptions)...)
if job.Spec.CompletionMode != nil && *job.Spec.CompletionMode == batch.IndexedCompletion && job.Spec.Completions != nil && *job.Spec.Completions > 0 {
// For an indexed job, the job controller appends a suffix (`-$INDEX`)
// to the pod hostname when it creates pods.
// The largest possible index is `.spec.completions-1`.
// If we don't validate this here, the indexed job will fail to create pods later.
maximumPodHostname := fmt.Sprintf("%s-%d", job.ObjectMeta.Name, *job.Spec.Completions-1)
if errs := apimachineryvalidation.IsDNS1123Label(maximumPodHostname); len(errs) > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), job.ObjectMeta.Name, fmt.Sprintf("will not be able to create pods with an invalid DNS label: %s", maximumPodHostname)))
}
}
return allErrs
}
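// Editorial note (not part of the upstream file): the hostname check above
// guards indexed Jobs whose name plus largest index suffix would exceed the
// 63-character DNS label limit. For example, a 60-character Job name with
// .spec.completions = 1000 implies a worst-case pod hostname of
// "<60 chars>-999", i.e. 60+1+3 = 64 characters, which IsDNS1123Label rejects.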
// ValidateJobSpec validates a JobSpec and returns an ErrorList with any errors.
func ValidateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := validateJobSpec(spec, fldPath, opts)
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...)
}
// Whether manually or automatically generated, the selector of the job must match the pods it will produce.
if selector, err := metav1.LabelSelectorAsSelector(spec.Selector); err == nil {
labels := labels.Set(spec.Template.Labels)
if !selector.Matches(labels) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
}
}
return allErrs
}
func validateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if spec.Parallelism != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Parallelism), fldPath.Child("parallelism"))...)
}
if spec.Completions != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Completions), fldPath.Child("completions"))...)
}
if spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ActiveDeadlineSeconds), fldPath.Child("activeDeadlineSeconds"))...)
}
if spec.BackoffLimit != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.BackoffLimit), fldPath.Child("backoffLimit"))...)
}
if spec.TTLSecondsAfterFinished != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.TTLSecondsAfterFinished), fldPath.Child("ttlSecondsAfterFinished"))...)
}
if spec.BackoffLimitPerIndex != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.BackoffLimitPerIndex), fldPath.Child("backoffLimitPerIndex"))...)
}
if spec.MaxFailedIndexes != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.MaxFailedIndexes), fldPath.Child("maxFailedIndexes"))...)
if spec.BackoffLimitPerIndex == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("backoffLimitPerIndex"), "when maxFailedIndexes is specified"))
}
}
if spec.ManagedBy != nil {
allErrs = append(allErrs, apimachineryvalidation.IsDomainPrefixedPath(fldPath.Child("managedBy"), *spec.ManagedBy)...)
if len(*spec.ManagedBy) > maxManagedByLength {
allErrs = append(allErrs, field.TooLong(fldPath.Child("managedBy"), "" /*unused*/, maxManagedByLength))
}
}
if spec.CompletionMode != nil {
if *spec.CompletionMode != batch.NonIndexedCompletion && *spec.CompletionMode != batch.IndexedCompletion {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("completionMode"), spec.CompletionMode, []batch.CompletionMode{batch.NonIndexedCompletion, batch.IndexedCompletion}))
}
if *spec.CompletionMode == batch.IndexedCompletion {
if spec.Completions == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("completions"), fmt.Sprintf("when completion mode is %s", batch.IndexedCompletion)))
}
if spec.Parallelism != nil && *spec.Parallelism > maxParallelismForIndexedJob {
allErrs = append(allErrs, field.Invalid(fldPath.Child("parallelism"), *spec.Parallelism, fmt.Sprintf("must be less than or equal to %d when completion mode is %s", maxParallelismForIndexedJob, batch.IndexedCompletion)))
}
if spec.Completions != nil && spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > *spec.Completions {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, "must be less than or equal to completions"))
}
if spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > maxFailedIndexesForIndexedJob {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, fmt.Sprintf("must be less than or equal to %d", maxFailedIndexesForIndexedJob)))
}
if spec.Completions != nil && *spec.Completions > completionsSoftLimit && spec.BackoffLimitPerIndex != nil {
if spec.MaxFailedIndexes == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("maxFailedIndexes"), fmt.Sprintf("must be specified when completions is above %d", completionsSoftLimit)))
}
if spec.Parallelism != nil && *spec.Parallelism > parallelismLimitForHighCompletions {
allErrs = append(allErrs, field.Invalid(fldPath.Child("parallelism"), *spec.Parallelism, fmt.Sprintf("must be less than or equal to %d when completions are above %d and used with backoff limit per index", parallelismLimitForHighCompletions, completionsSoftLimit)))
}
if spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > maxFailedIndexesLimitForHighCompletions {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, fmt.Sprintf("must be less than or equal to %d when completions are above %d and used with backoff limit per index", maxFailedIndexesLimitForHighCompletions, completionsSoftLimit)))
}
}
}
}
if spec.CompletionMode == nil || *spec.CompletionMode == batch.NonIndexedCompletion {
if spec.BackoffLimitPerIndex != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("backoffLimitPerIndex"), *spec.BackoffLimitPerIndex, "requires indexed completion mode"))
}
if spec.MaxFailedIndexes != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, "requires indexed completion mode"))
}
}
if spec.PodFailurePolicy != nil {
allErrs = append(allErrs, validatePodFailurePolicy(spec, fldPath.Child("podFailurePolicy"))...)
}
if spec.SuccessPolicy != nil {
if ptr.Deref(spec.CompletionMode, batch.NonIndexedCompletion) != batch.IndexedCompletion {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successPolicy"), *spec.SuccessPolicy, "requires indexed completion mode"))
} else {
allErrs = append(allErrs, validateSuccessPolicy(spec, fldPath.Child("successPolicy"))...)
}
}
allErrs = append(allErrs, validatePodReplacementPolicy(spec, fldPath.Child("podReplacementPolicy"))...)
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"), opts)...)
// spec.Template.Spec.RestartPolicy can be defaulted to RestartPolicyAlways
// by the SetDefaults_PodSpec function when the user does not explicitly specify a value for it,
// so we check both the empty and RestartPolicyAlways cases here
if spec.Template.Spec.RestartPolicy == api.RestartPolicyAlways || spec.Template.Spec.RestartPolicy == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("template", "spec", "restartPolicy"),
fmt.Sprintf("valid values: %q, %q", api.RestartPolicyOnFailure, api.RestartPolicyNever)))
} else if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure && spec.Template.Spec.RestartPolicy != api.RestartPolicyNever {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"),
spec.Template.Spec.RestartPolicy, []api.RestartPolicy{api.RestartPolicyOnFailure, api.RestartPolicyNever}))
} else if spec.PodFailurePolicy != nil && spec.Template.Spec.RestartPolicy != api.RestartPolicyNever {
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "restartPolicy"),
spec.Template.Spec.RestartPolicy, fmt.Sprintf("only %q is supported when podFailurePolicy is specified", api.RestartPolicyNever)))
}
return allErrs
}
func validatePodFailurePolicy(spec *batch.JobSpec, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
rulesPath := fldPath.Child("rules")
if len(spec.PodFailurePolicy.Rules) > maxPodFailurePolicyRules {
allErrs = append(allErrs, field.TooMany(rulesPath, len(spec.PodFailurePolicy.Rules), maxPodFailurePolicyRules))
}
containerNames := sets.NewString()
for _, containerSpec := range spec.Template.Spec.Containers {
containerNames.Insert(containerSpec.Name)
}
for _, containerSpec := range spec.Template.Spec.InitContainers {
containerNames.Insert(containerSpec.Name)
}
for i, rule := range spec.PodFailurePolicy.Rules {
allErrs = append(allErrs, validatePodFailurePolicyRule(spec, &rule, rulesPath.Index(i), containerNames)...)
}
return allErrs
}
func validatePodReplacementPolicy(spec *batch.JobSpec, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if spec.PodReplacementPolicy != nil {
// If PodFailurePolicy is specified then we only allow Failed.
if spec.PodFailurePolicy != nil {
if *spec.PodReplacementPolicy != batch.Failed {
allErrs = append(allErrs, field.NotSupported(fldPath, *spec.PodReplacementPolicy, []batch.PodReplacementPolicy{batch.Failed}))
}
// If PodFailurePolicy is not specified, we allow any value in supportedPodReplacementPolicy.
} else if !supportedPodReplacementPolicy.Has(*spec.PodReplacementPolicy) {
allErrs = append(allErrs, field.NotSupported(fldPath, *spec.PodReplacementPolicy, sets.List(supportedPodReplacementPolicy)))
}
}
return allErrs
}
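// Illustrative sketch (editorial addition, not part of the upstream file):
// the interaction above means that
//
//	spec.PodFailurePolicy = &batch.PodFailurePolicy{ /* ... */ }
//	spec.PodReplacementPolicy = ptr.To(batch.TerminatingOrFailed)
//
// is rejected; once a pod failure policy is set, only batch.Failed is allowed.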
func validatePodFailurePolicyRule(spec *batch.JobSpec, rule *batch.PodFailurePolicyRule, rulePath *field.Path, containerNames sets.String) field.ErrorList {
var allErrs field.ErrorList
actionPath := rulePath.Child("action")
if rule.Action == "" {
allErrs = append(allErrs, field.Required(actionPath, fmt.Sprintf("valid values: %q", sets.List(supportedPodFailurePolicyActions))))
} else if rule.Action == batch.PodFailurePolicyActionFailIndex {
if spec.BackoffLimitPerIndex == nil {
allErrs = append(allErrs, field.Invalid(actionPath, rule.Action, "requires the backoffLimitPerIndex to be set"))
}
} else if !supportedPodFailurePolicyActions.Has(rule.Action) {
allErrs = append(allErrs, field.NotSupported(actionPath, rule.Action, sets.List(supportedPodFailurePolicyActions)))
}
if rule.OnExitCodes != nil {
allErrs = append(allErrs, validatePodFailurePolicyRuleOnExitCodes(rule.OnExitCodes, rulePath.Child("onExitCodes"), containerNames)...)
}
if len(rule.OnPodConditions) > 0 {
allErrs = append(allErrs, validatePodFailurePolicyRuleOnPodConditions(rule.OnPodConditions, rulePath.Child("onPodConditions"))...)
}
if rule.OnExitCodes != nil && len(rule.OnPodConditions) > 0 {
allErrs = append(allErrs, field.Invalid(rulePath, field.OmitValueType{}, "specifying both OnExitCodes and OnPodConditions is not supported"))
}
if rule.OnExitCodes == nil && len(rule.OnPodConditions) == 0 {
allErrs = append(allErrs, field.Invalid(rulePath, field.OmitValueType{}, "specifying one of OnExitCodes and OnPodConditions is required"))
}
return allErrs
}
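// Illustrative sketch (editorial addition, not part of the upstream file):
// a rule that passes the checks above names a supported action and exactly
// one trigger, e.g.:
//
//	rule := batch.PodFailurePolicyRule{
//		Action: batch.PodFailurePolicyActionFailJob,
//		OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{
//			Operator: batch.PodFailurePolicyOnExitCodesOpIn,
//			Values:   []int32{1, 2, 42}, // ordered, unique, and no 0 with the In operator
//		},
//	}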
func validatePodFailurePolicyRuleOnPodConditions(onPodConditions []batch.PodFailurePolicyOnPodConditionsPattern, onPodConditionsPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(onPodConditions) > maxPodFailurePolicyOnPodConditionsPatterns {
allErrs = append(allErrs, field.TooMany(onPodConditionsPath, len(onPodConditions), maxPodFailurePolicyOnPodConditionsPatterns))
}
for j, pattern := range onPodConditions {
patternPath := onPodConditionsPath.Index(j)
statusPath := patternPath.Child("status")
allErrs = append(allErrs, apivalidation.ValidateQualifiedName(string(pattern.Type), patternPath.Child("type"))...)
if pattern.Status == "" {
allErrs = append(allErrs, field.Required(statusPath, fmt.Sprintf("valid values: %q", sets.List(supportedPodFailurePolicyOnPodConditionsStatus))))
} else if !supportedPodFailurePolicyOnPodConditionsStatus.Has(pattern.Status) {
allErrs = append(allErrs, field.NotSupported(statusPath, pattern.Status, sets.List(supportedPodFailurePolicyOnPodConditionsStatus)))
}
}
return allErrs
}
func validatePodFailurePolicyRuleOnExitCodes(onExitCode *batch.PodFailurePolicyOnExitCodesRequirement, onExitCodesPath *field.Path, containerNames sets.String) field.ErrorList {
var allErrs field.ErrorList
operatorPath := onExitCodesPath.Child("operator")
if onExitCode.Operator == "" {
allErrs = append(allErrs, field.Required(operatorPath, fmt.Sprintf("valid values: %q", sets.List(supportedPodFailurePolicyOnExitCodesOperator))))
} else if !supportedPodFailurePolicyOnExitCodesOperator.Has(onExitCode.Operator) {
allErrs = append(allErrs, field.NotSupported(operatorPath, onExitCode.Operator, sets.List(supportedPodFailurePolicyOnExitCodesOperator)))
}
if onExitCode.ContainerName != nil && !containerNames.Has(*onExitCode.ContainerName) {
allErrs = append(allErrs, field.Invalid(onExitCodesPath.Child("containerName"), *onExitCode.ContainerName, "must be one of the container or initContainer names in the pod template"))
}
valuesPath := onExitCodesPath.Child("values")
if len(onExitCode.Values) == 0 {
allErrs = append(allErrs, field.Invalid(valuesPath, onExitCode.Values, "at least one value is required"))
} else if len(onExitCode.Values) > maxPodFailurePolicyOnExitCodesValues {
allErrs = append(allErrs, field.TooMany(valuesPath, len(onExitCode.Values), maxPodFailurePolicyOnExitCodesValues))
}
isOrdered := true
uniqueValues := sets.NewInt32()
for j, exitCodeValue := range onExitCode.Values {
valuePath := valuesPath.Index(j)
if onExitCode.Operator == batch.PodFailurePolicyOnExitCodesOpIn && exitCodeValue == 0 {
allErrs = append(allErrs, field.Invalid(valuePath, exitCodeValue, "must not be 0 for the In operator"))
}
if uniqueValues.Has(exitCodeValue) {
allErrs = append(allErrs, field.Duplicate(valuePath, exitCodeValue))
} else {
uniqueValues.Insert(exitCodeValue)
}
if j > 0 && onExitCode.Values[j-1] > exitCodeValue {
isOrdered = false
}
}
if !isOrdered {
allErrs = append(allErrs, field.Invalid(valuesPath, onExitCode.Values, "must be ordered"))
}
return allErrs
}
func validateSuccessPolicy(spec *batch.JobSpec, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
rulesPath := fldPath.Child("rules")
if len(spec.SuccessPolicy.Rules) == 0 {
allErrs = append(allErrs, field.Required(rulesPath, "at least one rule must be specified when the successPolicy is specified"))
}
if len(spec.SuccessPolicy.Rules) > maxSuccessPolicyRule {
allErrs = append(allErrs, field.TooMany(rulesPath, len(spec.SuccessPolicy.Rules), maxSuccessPolicyRule))
}
for i, rule := range spec.SuccessPolicy.Rules {
allErrs = append(allErrs, validateSuccessPolicyRule(spec, &rule, rulesPath.Index(i))...)
}
return allErrs
}
func validateSuccessPolicyRule(spec *batch.JobSpec, rule *batch.SuccessPolicyRule, rulePath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if rule.SucceededCount == nil && rule.SucceededIndexes == nil {
allErrs = append(allErrs, field.Required(rulePath, "at least one of succeededCount or succeededIndexes must be specified"))
}
var totalIndexes int32
if rule.SucceededIndexes != nil {
succeededIndexes := rulePath.Child("succeededIndexes")
if len(*rule.SucceededIndexes) > maxJobSuccessPolicySucceededIndexesLimit {
allErrs = append(allErrs, field.TooLong(succeededIndexes, "" /*unused*/, maxJobSuccessPolicySucceededIndexesLimit))
}
var err error
if totalIndexes, err = validateIndexesFormat(*rule.SucceededIndexes, *spec.Completions); err != nil {
allErrs = append(allErrs, field.Invalid(succeededIndexes, *rule.SucceededIndexes, fmt.Sprintf("error parsing succeededIndexes: %s", err.Error())))
}
}
if rule.SucceededCount != nil {
succeededCountPath := rulePath.Child("succeededCount")
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*rule.SucceededCount), succeededCountPath)...)
if *rule.SucceededCount > *spec.Completions {
allErrs = append(allErrs, field.Invalid(succeededCountPath, *rule.SucceededCount, fmt.Sprintf("must be less than or equal to %d (the number of specified completions)", *spec.Completions)))
}
if rule.SucceededIndexes != nil && *rule.SucceededCount > totalIndexes {
allErrs = append(allErrs, field.Invalid(succeededCountPath, *rule.SucceededCount, fmt.Sprintf("must be less than or equal to %d (the number of indexes in the specified succeededIndexes field)", totalIndexes)))
}
}
return allErrs
}
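// Illustrative sketch (editorial addition, not part of the upstream file):
// with .spec.completions = 10, a rule satisfying the checks above could be:
//
//	rule := batch.SuccessPolicyRule{
//		SucceededIndexes: ptr.To("0-2,7"),  // 4 indexes, all within completions
//		SucceededCount:   ptr.To[int32](3), // <= 4 indexes and <= 10 completions
//	}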
// validateJobStatus validates a JobStatus and returns an ErrorList with any errors.
func validateJobStatus(job *batch.Job, fldPath *field.Path, opts JobStatusValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
status := job.Status
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Active), fldPath.Child("active"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Succeeded), fldPath.Child("succeeded"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Failed), fldPath.Child("failed"))...)
if status.Ready != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.Ready), fldPath.Child("ready"))...)
}
if status.Terminating != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.Terminating), fldPath.Child("terminating"))...)
}
if status.UncountedTerminatedPods != nil {
path := fldPath.Child("uncountedTerminatedPods")
seen := sets.New[types.UID]()
for i, k := range status.UncountedTerminatedPods.Succeeded {
p := path.Child("succeeded").Index(i)
if k == "" {
allErrs = append(allErrs, field.Invalid(p, k, "must not be empty"))
} else if seen.Has(k) {
allErrs = append(allErrs, field.Duplicate(p, k))
} else {
seen.Insert(k)
}
}
for i, k := range status.UncountedTerminatedPods.Failed {
p := path.Child("failed").Index(i)
if k == "" {
allErrs = append(allErrs, field.Invalid(p, k, "must not be empty"))
} else if seen.Has(k) {
allErrs = append(allErrs, field.Duplicate(p, k))
} else {
seen.Insert(k)
}
}
}
if opts.RejectCompleteJobWithFailedCondition {
if IsJobComplete(job) && IsJobFailed(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True and Failed=True conditions"))
}
}
if opts.RejectCompleteJobWithFailureTargetCondition {
if IsJobComplete(job) && IsConditionTrue(status.Conditions, batch.JobFailureTarget) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True and FailureTarget=True conditions"))
}
}
if opts.RejectNotCompleteJobWithCompletionTime {
if status.CompletionTime != nil && !IsJobComplete(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("completionTime"), status.CompletionTime, "cannot set completionTime when there is no Complete=True condition"))
}
}
if opts.RejectCompleteJobWithoutCompletionTime {
if status.CompletionTime == nil && IsJobComplete(job) {
allErrs = append(allErrs, field.Required(fldPath.Child("completionTime"), "completionTime is required for Complete jobs"))
}
}
if opts.RejectCompletionTimeBeforeStartTime {
if status.StartTime != nil && status.CompletionTime != nil && status.CompletionTime.Before(status.StartTime) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("completionTime"), status.CompletionTime, "must be equal to or after `startTime`"))
}
}
if opts.RejectFailedJobWithoutFailureTarget {
if IsJobFailed(job) && !isJobFailureTarget(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Failed=True condition without the FailureTarget=True condition"))
}
}
if opts.RejectCompleteJobWithoutSuccessCriteriaMet {
if IsJobComplete(job) && !isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True condition without the SuccessCriteriaMet=True condition"))
}
}
isJobFinished := IsJobFinished(job)
if opts.RejectFinishedJobWithActivePods {
if status.Active > 0 && isJobFinished {
allErrs = append(allErrs, field.Invalid(fldPath.Child("active"), status.Active, "active>0 is invalid for finished job"))
}
}
if opts.RejectFinishedJobWithoutStartTime {
if status.StartTime == nil && isJobFinished {
allErrs = append(allErrs, field.Required(fldPath.Child("startTime"), "startTime is required for finished job"))
}
}
if opts.RejectFinishedJobWithUncountedTerminatedPods {
if isJobFinished && status.UncountedTerminatedPods != nil && len(status.UncountedTerminatedPods.Failed)+len(status.UncountedTerminatedPods.Succeeded) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("uncountedTerminatedPods"), status.UncountedTerminatedPods, "must be empty for finished job"))
}
}
if opts.RejectInvalidCompletedIndexes {
if job.Spec.Completions != nil {
if _, err := validateIndexesFormat(status.CompletedIndexes, int32(*job.Spec.Completions)); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("completedIndexes"), status.CompletedIndexes, fmt.Sprintf("error parsing completedIndexes: %s", err.Error())))
}
}
}
if opts.RejectInvalidFailedIndexes {
if job.Spec.Completions != nil && job.Spec.BackoffLimitPerIndex != nil && status.FailedIndexes != nil {
if _, err := validateIndexesFormat(*status.FailedIndexes, int32(*job.Spec.Completions)); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), status.FailedIndexes, fmt.Sprintf("error parsing failedIndexes: %s", err.Error())))
}
}
}
isIndexed := ptr.Deref(job.Spec.CompletionMode, batch.NonIndexedCompletion) == batch.IndexedCompletion
if opts.RejectCompletedIndexesForNonIndexedJob {
if len(status.CompletedIndexes) != 0 && !isIndexed {
allErrs = append(allErrs, field.Invalid(fldPath.Child("completedIndexes"), status.CompletedIndexes, "cannot set non-empty completedIndexes when completion mode is non-indexed"))
}
}
if opts.RejectFailedIndexesForNoBackoffLimitPerIndex {
// Note that this check also verifies that FailedIndexes are not used for
// regular (non-indexed) jobs, because regular jobs have backoffLimitPerIndex = nil.
if job.Spec.BackoffLimitPerIndex == nil && status.FailedIndexes != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), *status.FailedIndexes, "cannot set non-null failedIndexes when backoffLimitPerIndex is null"))
}
}
if opts.RejectFailedIndexesOverlappingCompleted {
if job.Spec.Completions != nil && status.FailedIndexes != nil {
if err := validateFailedIndexesNotOverlapCompleted(status.CompletedIndexes, *status.FailedIndexes, int32(*job.Spec.Completions)); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), *status.FailedIndexes, err.Error()))
}
}
}
if opts.RejectFinishedJobWithTerminatingPods {
if status.Terminating != nil && *status.Terminating > 0 && isJobFinished {
allErrs = append(allErrs, field.Invalid(fldPath.Child("terminating"), status.Terminating, "terminating>0 is invalid for finished job"))
}
}
if opts.RejectMoreReadyThanActivePods {
if status.Ready != nil && *status.Ready > status.Active {
allErrs = append(allErrs, field.Invalid(fldPath.Child("ready"), *status.Ready, "cannot set more ready pods than active"))
}
}
if !opts.AllowForSuccessCriteriaMetInExtendedScope && ptr.Deref(job.Spec.CompletionMode, batch.NonIndexedCompletion) != batch.IndexedCompletion && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet for a NonIndexed Job"))
}
if isJobSuccessCriteriaMet(job) && IsJobFailed(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True and Failed=True conditions"))
}
if isJobSuccessCriteriaMet(job) && isJobFailureTarget(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True and FailureTarget=True conditions"))
}
if !opts.AllowForSuccessCriteriaMetInExtendedScope && job.Spec.SuccessPolicy == nil && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True for Job without SuccessPolicy"))
}
if job.Spec.SuccessPolicy != nil && !isJobSuccessCriteriaMet(job) && IsJobComplete(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True for Job with SuccessPolicy unless SuccessCriteriaMet=True"))
}
return allErrs
}
// ValidateJobUpdate validates an update to a Job and returns an ErrorList with any errors.
func ValidateJobUpdate(job, oldJob *batch.Job, opts JobValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateJobSpecUpdate(job.Spec, oldJob.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateJobUpdateStatus validates an update to the status of a Job and returns an ErrorList with any errors.
func ValidateJobUpdateStatus(job, oldJob *batch.Job, opts JobStatusValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateJobStatusUpdate(job, oldJob, opts)...)
return allErrs
}
// ValidateJobSpecUpdate validates an update to a JobSpec and returns an ErrorList with any errors.
func ValidateJobSpecUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, opts JobValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateJobSpec(&spec, fldPath, opts.PodValidationOptions)...)
allErrs = append(allErrs, validateCompletions(spec, oldSpec, fldPath.Child("completions"), opts)...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Selector, oldSpec.Selector, fldPath.Child("selector"))...)
allErrs = append(allErrs, validatePodTemplateUpdate(spec, oldSpec, fldPath, opts)...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.CompletionMode, oldSpec.CompletionMode, fldPath.Child("completionMode"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.PodFailurePolicy, oldSpec.PodFailurePolicy, fldPath.Child("podFailurePolicy"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.BackoffLimitPerIndex, oldSpec.BackoffLimitPerIndex, fldPath.Child("backoffLimitPerIndex"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.ManagedBy, oldSpec.ManagedBy, fldPath.Child("managedBy"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.SuccessPolicy, oldSpec.SuccessPolicy, fldPath.Child("successPolicy"))...)
return allErrs
}
func validatePodTemplateUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, opts JobValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
template := &spec.Template
oldTemplate := &oldSpec.Template
if opts.AllowMutableSchedulingDirectives {
oldTemplate = oldSpec.Template.DeepCopy() // +k8s:verify-mutation:reason=clone
switch {
case template.Spec.Affinity == nil && oldTemplate.Spec.Affinity != nil:
// allow the Affinity field to be cleared if the old template had no affinity directives other than NodeAffinity
oldTemplate.Spec.Affinity.NodeAffinity = nil // +k8s:verify-mutation:reason=clone
if (*oldTemplate.Spec.Affinity) == (api.Affinity{}) {
oldTemplate.Spec.Affinity = nil // +k8s:verify-mutation:reason=clone
}
case template.Spec.Affinity != nil && oldTemplate.Spec.Affinity == nil:
// allow the NodeAffinity field to skip immutability checking
oldTemplate.Spec.Affinity = &api.Affinity{NodeAffinity: template.Spec.Affinity.NodeAffinity} // +k8s:verify-mutation:reason=clone
case template.Spec.Affinity != nil && oldTemplate.Spec.Affinity != nil:
// allow the NodeAffinity field to skip immutability checking
oldTemplate.Spec.Affinity.NodeAffinity = template.Spec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
oldTemplate.Spec.NodeSelector = template.Spec.NodeSelector // +k8s:verify-mutation:reason=clone
oldTemplate.Spec.Tolerations = template.Spec.Tolerations // +k8s:verify-mutation:reason=clone
oldTemplate.Annotations = template.Annotations // +k8s:verify-mutation:reason=clone
oldTemplate.Labels = template.Labels // +k8s:verify-mutation:reason=clone
oldTemplate.Spec.SchedulingGates = template.Spec.SchedulingGates // +k8s:verify-mutation:reason=clone
}
allErrs = append(allErrs, apivalidation.ValidateImmutableField(template, oldTemplate, fldPath.Child("template"))...)
return allErrs
}
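// Illustrative sketch (not part of the original source): when
// AllowMutableSchedulingDirectives is set, the mutable fields are copied from the
// new template onto a clone of the old one before the immutability check, so only
// the remaining fields must still match. Assuming two otherwise identical specs:
//
//	updated := oldSpec // deep copy in practice
//	updated.Template.Spec.NodeSelector = map[string]string{"disktype": "ssd"}
//	errs := validatePodTemplateUpdate(updated, oldSpec, field.NewPath("spec"),
//		JobValidationOptions{AllowMutableSchedulingDirectives: true})
//	// len(errs) == 0: only a scheduling directive changed, so the update passes.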
// ValidateJobStatusUpdate validates an update to a JobStatus and returns an ErrorList with any errors.
func ValidateJobStatusUpdate(job, oldJob *batch.Job, opts JobStatusValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
statusFld := field.NewPath("status")
allErrs = append(allErrs, validateJobStatus(job, statusFld, opts)...)
if opts.RejectDisablingTerminalCondition {
for _, cType := range []batch.JobConditionType{batch.JobFailed, batch.JobComplete, batch.JobFailureTarget} {
if IsConditionTrue(oldJob.Status.Conditions, cType) && !IsConditionTrue(job.Status.Conditions, cType) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, fmt.Sprintf("cannot disable the terminal %s=True condition", string(cType))))
}
}
}
if opts.RejectDecreasingFailedCounter {
if job.Status.Failed < oldJob.Status.Failed {
allErrs = append(allErrs, field.Invalid(statusFld.Child("failed"), job.Status.Failed, "cannot decrease the failed counter"))
}
}
if opts.RejectDecreasingSucceededCounter {
if job.Status.Succeeded < oldJob.Status.Succeeded {
allErrs = append(allErrs, field.Invalid(statusFld.Child("succeeded"), job.Status.Succeeded, "cannot decrease the succeeded counter"))
}
}
if opts.RejectMutatingCompletionTime {
// Note that we check the condition only when `job.Status.CompletionTime != nil`, because
// we don't want to block transitions to completionTime = nil when the job is not finished yet.
// Setting completionTime = nil for finished jobs is prevented in RejectCompleteJobWithoutCompletionTime.
if job.Status.CompletionTime != nil && oldJob.Status.CompletionTime != nil && !ptr.Equal(job.Status.CompletionTime, oldJob.Status.CompletionTime) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("completionTime"), job.Status.CompletionTime, "field is immutable"))
}
}
if opts.RejectStartTimeUpdateForUnsuspendedJob {
// Note that we check `oldJob.Status.StartTime != nil` to allow transitioning from
// startTime = nil to startTime != nil for unsuspended jobs, which is a desired transition.
if oldJob.Status.StartTime != nil && !ptr.Equal(oldJob.Status.StartTime, job.Status.StartTime) && !ptr.Deref(job.Spec.Suspend, false) {
allErrs = append(allErrs, field.Required(statusFld.Child("startTime"), "startTime cannot be removed for unsuspended job"))
}
}
if isJobSuccessCriteriaMet(oldJob) && !isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, "cannot disable the SuccessCriteriaMet=True condition"))
}
if IsJobComplete(oldJob) && !isJobSuccessCriteriaMet(oldJob) && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True for Job already has Complete=true conditions"))
}
return allErrs
}
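// Illustrative sketch (not part of the original source): with
// RejectDecreasingSucceededCounter enabled, lowering status.succeeded between
// updates is rejected:
//
//	updated := oldJob.DeepCopy()
//	updated.Status.Succeeded = oldJob.Status.Succeeded - 1
//	errs := ValidateJobStatusUpdate(updated, oldJob,
//		JobStatusValidationOptions{RejectDecreasingSucceededCounter: true})
//	// errs includes status.succeeded: "cannot decrease the succeeded counter".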
// ValidateCronJobCreate validates a CronJob on creation and returns an ErrorList with any errors.
func ValidateCronJobCreate(cronJob *batch.CronJob, opts apivalidation.PodValidationOptions) field.ErrorList {
// CronJobs and ReplicationControllers share the same name validation
allErrs := apivalidation.ValidateObjectMeta(&cronJob.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata"))
allErrs = append(allErrs, validateCronJobSpec(&cronJob.Spec, nil, field.NewPath("spec"), opts)...)
if len(cronJob.ObjectMeta.Name) > apimachineryvalidation.DNS1035LabelMaxLength-11 {
// The cronjob controller appends an 11-character suffix (`-$TIMESTAMP`) to the cronjob name when
// creating a job. The job name length limit is 63 characters.
// Therefore cronjob names must have length <= 63-11=52. If we don't validate this here,
// then job creation will fail later.
allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), cronJob.ObjectMeta.Name, "must be no more than 52 characters"))
}
return allErrs
}
// ValidateCronJobUpdate validates an update to a CronJob and returns an ErrorList with any errors.
func ValidateCronJobUpdate(job, oldJob *batch.CronJob, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validateCronJobSpec(&job.Spec, &oldJob.Spec, field.NewPath("spec"), opts)...)
// skip the 52-character name validation limit on update validation
// to allow old cronjobs with names > 52 chars to be updated/deleted
return allErrs
}
// validateCronJobSpec validates a CronJobSpec and returns an ErrorList with any errors.
func validateCronJobSpec(spec, oldSpec *batch.CronJobSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(spec.Schedule) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("schedule"), ""))
} else {
allowTZInSchedule := false
if oldSpec != nil {
allowTZInSchedule = strings.Contains(oldSpec.Schedule, "TZ")
}
allErrs = append(allErrs, validateScheduleFormat(spec.Schedule, allowTZInSchedule, spec.TimeZone, fldPath.Child("schedule"))...)
}
if spec.StartingDeadlineSeconds != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.StartingDeadlineSeconds), fldPath.Child("startingDeadlineSeconds"))...)
}
if oldSpec == nil || !ptr.Equal(oldSpec.TimeZone, spec.TimeZone) {
allErrs = append(allErrs, validateTimeZone(spec.TimeZone, fldPath.Child("timeZone"))...)
}
allErrs = append(allErrs, validateConcurrencyPolicy(&spec.ConcurrencyPolicy, fldPath.Child("concurrencyPolicy"))...)
allErrs = append(allErrs, ValidateJobTemplateSpec(&spec.JobTemplate, fldPath.Child("jobTemplate"), opts)...)
if spec.SuccessfulJobsHistoryLimit != nil {
// zero is a valid SuccessfulJobsHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.SuccessfulJobsHistoryLimit), fldPath.Child("successfulJobsHistoryLimit"))...)
}
if spec.FailedJobsHistoryLimit != nil {
// zero is a valid FailedJobsHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.FailedJobsHistoryLimit), fldPath.Child("failedJobsHistoryLimit"))...)
}
return allErrs
}
func validateConcurrencyPolicy(concurrencyPolicy *batch.ConcurrencyPolicy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch *concurrencyPolicy {
case batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent:
break
case "":
allErrs = append(allErrs, field.Required(fldPath, ""))
default:
validValues := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
allErrs = append(allErrs, field.NotSupported(fldPath, *concurrencyPolicy, validValues))
}
return allErrs
}
func validateScheduleFormat(schedule string, allowTZInSchedule bool, timeZone *string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if _, err := cron.ParseStandard(schedule); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, schedule, err.Error()))
}
switch {
case allowTZInSchedule && strings.Contains(schedule, "TZ") && timeZone != nil:
allErrs = append(allErrs, field.Invalid(fldPath, schedule, "cannot use both timeZone field and TZ or CRON_TZ in schedule"))
case !allowTZInSchedule && strings.Contains(schedule, "TZ"):
allErrs = append(allErrs, field.Invalid(fldPath, schedule, "cannot use TZ or CRON_TZ in schedule, use timeZone field instead"))
}
return allErrs
}
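// Illustrative sketch (not part of the original source): a TZ or CRON_TZ prefix in
// the schedule is mutually exclusive with spec.timeZone, and is only tolerated at
// all when the old spec already used it:
//
//	fld := field.NewPath("spec", "schedule")
//	validateScheduleFormat("CRON_TZ=UTC */5 * * * *", false, nil, fld)
//	// -> cannot use TZ or CRON_TZ in schedule, use timeZone field instead
//	validateScheduleFormat("CRON_TZ=UTC */5 * * * *", true, ptr.To("Etc/UTC"), fld)
//	// -> cannot use both timeZone field and TZ or CRON_TZ in schedule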
// https://data.iana.org/time-zones/theory.html#naming
// * A name must not be empty, or contain '//', or start or end with '/'.
// * Do not use the file name components '.' and '..'.
// * Within a file name component, use only ASCII letters, '.', '-' and '_'.
// * Do not use digits, as that might create an ambiguity with POSIX TZ strings.
// * A file name component must not exceed 14 characters or start with '-'.
//
// 0-9 and '+' characters are tolerated to accommodate legacy compatibility names.
var validTimeZoneCharacters = regexp.MustCompile(`^[A-Za-z\.\-_0-9+]{1,14}$`)
func validateTimeZone(timeZone *string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if timeZone == nil {
return allErrs
}
if len(*timeZone) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath, timeZone, "timeZone must be nil or non-empty string"))
return allErrs
}
for _, part := range strings.Split(*timeZone, "/") {
if part == "." || part == ".." || strings.HasPrefix(part, "-") || !validTimeZoneCharacters.MatchString(part) {
allErrs = append(allErrs, field.Invalid(fldPath, timeZone, fmt.Sprintf("unknown time zone %s", *timeZone)))
return allErrs
}
}
if strings.EqualFold(*timeZone, "Local") {
allErrs = append(allErrs, field.Invalid(fldPath, timeZone, "timeZone must be an explicit time zone as defined in https://www.iana.org/time-zones"))
}
if _, err := time.LoadLocation(*timeZone); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, timeZone, err.Error()))
}
return allErrs
}
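// Illustrative sketch (not part of the original source): the character filter runs
// on each '/'-separated component before time.LoadLocation is consulted:
//
//	fld := field.NewPath("spec", "timeZone")
//	validateTimeZone(ptr.To("America/New_York"), fld) // -> no errors (with tzdata available)
//	validateTimeZone(ptr.To("Local"), fld)            // -> must be an explicit time zone
//	validateTimeZone(ptr.To("../zoneinfo/UTC"), fld)  // -> unknown time zone (".." component)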
// ValidateJobTemplateSpec validates a JobTemplateSpec and returns an ErrorList with any errors.
func ValidateJobTemplateSpec(spec *batch.JobTemplateSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
allErrs := validateJobSpec(&spec.Spec, fldPath.Child("spec"), opts)
// jobtemplate will always have the selector automatically generated
if spec.Spec.Selector != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "selector"), spec.Spec.Selector, "`selector` will be auto-generated"))
}
if spec.Spec.ManualSelector != nil && *spec.Spec.ManualSelector {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "manualSelector"), spec.Spec.ManualSelector, []string{"nil", "false"}))
}
return allErrs
}
func validateCompletions(spec, oldSpec batch.JobSpec, fldPath *field.Path, opts JobValidationOptions) field.ErrorList {
// Completions is immutable for non-indexed jobs, but mutable for Indexed Jobs.
isIndexedJob := spec.CompletionMode != nil && *spec.CompletionMode == batch.IndexedCompletion
if !isIndexedJob {
return apivalidation.ValidateImmutableField(spec.Completions, oldSpec.Completions, fldPath)
}
var allErrs field.ErrorList
if apiequality.Semantic.DeepEqual(spec.Completions, oldSpec.Completions) {
return allErrs
}
// Indexed Jobs cannot set completions to nil. The nil check
// is already performed in validateJobSpec; no need to add another error.
if spec.Completions == nil {
return allErrs
}
if *spec.Completions != *spec.Parallelism {
allErrs = append(allErrs, field.Invalid(fldPath, spec.Completions, fmt.Sprintf("can only be modified in tandem with %s", fldPath.Root().Child("parallelism").String())))
}
return allErrs
}
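// Illustrative sketch (not part of the original source): for an Indexed Job,
// spec.completions may change only together with spec.parallelism:
//
//	indexed := batch.IndexedCompletion
//	old := batch.JobSpec{CompletionMode: &indexed, Completions: ptr.To[int32](5), Parallelism: ptr.To[int32](5)}
//	updated := old
//	updated.Completions, updated.Parallelism = ptr.To[int32](10), ptr.To[int32](10)
//	errs := validateCompletions(updated, old, field.NewPath("spec", "completions"), JobValidationOptions{})
//	// len(errs) == 0; scaling completions without parallelism would be rejected.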
func IsJobFinished(job *batch.Job) bool {
for _, c := range job.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue {
return true
}
}
return false
}
func IsJobComplete(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobComplete)
}
func IsJobFailed(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobFailed)
}
func isJobSuccessCriteriaMet(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobSuccessCriteriaMet)
}
func isJobFailureTarget(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobFailureTarget)
}
func IsConditionTrue(list []batch.JobCondition, cType batch.JobConditionType) bool {
for _, c := range list {
if c.Type == cType && c.Status == api.ConditionTrue {
return true
}
}
return false
}
func validateFailedIndexesNotOverlapCompleted(completedIndexesStr string, failedIndexesStr string, completions int32) error {
if len(completedIndexesStr) == 0 || len(failedIndexesStr) == 0 {
return nil
}
completedIndexesIntervals := strings.Split(completedIndexesStr, ",")
failedIndexesIntervals := strings.Split(failedIndexesStr, ",")
var completedPos, failedPos int
cX, cY, cErr := parseIndexInterval(completedIndexesIntervals[completedPos], completions)
fX, fY, fErr := parseIndexInterval(failedIndexesIntervals[failedPos], completions)
for completedPos < len(completedIndexesIntervals) && failedPos < len(failedIndexesIntervals) {
if cErr != nil {
// Failure to parse "completed" interval. We go to the next interval,
// the error will be reported to the user when validating the format.
completedPos++
if completedPos < len(completedIndexesIntervals) {
cX, cY, cErr = parseIndexInterval(completedIndexesIntervals[completedPos], completions)
}
} else if fErr != nil {
// Failure to parse "failed" interval. We go to the next interval,
// the error will be reported to the user when validating the format.
failedPos++
if failedPos < len(failedIndexesIntervals) {
fX, fY, fErr = parseIndexInterval(failedIndexesIntervals[failedPos], completions)
}
} else {
// We have one failed and one completed interval parsed.
if cX <= fY && fX <= cY {
return fmt.Errorf("failedIndexes and completedIndexes overlap at index: %d", max(cX, fX))
}
// No overlap, let's move to the next one.
if cX <= fX {
completedPos++
if completedPos < len(completedIndexesIntervals) {
cX, cY, cErr = parseIndexInterval(completedIndexesIntervals[completedPos], completions)
}
} else {
failedPos++
if failedPos < len(failedIndexesIntervals) {
fX, fY, fErr = parseIndexInterval(failedIndexesIntervals[failedPos], completions)
}
}
}
}
return nil
}
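// Illustrative sketch (not part of the original source): the function performs a
// merge-style sweep over the two sorted interval lists, advancing whichever list
// has the lower start index, so the check is linear in the number of intervals:
//
//	validateFailedIndexesNotOverlapCompleted("0-2,6", "3-5,8", 10) // -> nil
//	validateFailedIndexesNotOverlapCompleted("0-2,5", "3,5", 10)   // -> overlap at index: 5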
func validateIndexesFormat(indexesStr string, completions int32) (int32, error) {
if len(indexesStr) == 0 {
return 0, nil
}
var lastIndex *int32
var total int32
for _, intervalStr := range strings.Split(indexesStr, ",") {
x, y, err := parseIndexInterval(intervalStr, completions)
if err != nil {
return 0, err
}
if lastIndex != nil && *lastIndex >= x {
return 0, fmt.Errorf("non-increasing order, previous: %d, current: %d", *lastIndex, x)
}
total += y - x + 1
lastIndex = &y
}
return total, nil
}
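// Illustrative sketch (not part of the original source): validateIndexesFormat
// returns the total number of indexes covered by a well-formed, strictly
// increasing interval list:
//
//	validateIndexesFormat("1,3-5,7", 10) // -> 5, nil (indexes 1, 3, 4, 5, 7)
//	validateIndexesFormat("5,3", 10)     // -> 0, "non-increasing order" error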
func parseIndexInterval(intervalStr string, completions int32) (int32, int32, error) {
limitsStr := strings.Split(intervalStr, "-")
if len(limitsStr) > 2 {
return 0, 0, fmt.Errorf("the fragment %q violates the requirement that an index interval can have at most two parts separated by '-'", intervalStr)
}
x, err := strconv.Atoi(limitsStr[0])
if err != nil {
return 0, 0, fmt.Errorf("cannot convert string to integer for index: %q", limitsStr[0])
}
if x >= int(completions) {
return 0, 0, fmt.Errorf("too large index: %q", limitsStr[0])
}
if len(limitsStr) > 1 {
y, err := strconv.Atoi(limitsStr[1])
if err != nil {
return 0, 0, fmt.Errorf("cannot convert string to integer for index: %q", limitsStr[1])
}
if y >= int(completions) {
return 0, 0, fmt.Errorf("too large index: %q", limitsStr[1])
}
if x >= y {
return 0, 0, fmt.Errorf("non-increasing order, previous: %d, current: %d", x, y)
}
return int32(x), int32(y), nil
}
return int32(x), int32(x), nil
}
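// Illustrative sketch (not part of the original source): a single index parses as
// a degenerate one-element interval, and both bounds are checked against completions:
//
//	parseIndexInterval("3-5", 10) // -> 3, 5, nil
//	parseIndexInterval("7", 10)   // -> 7, 7, nil
//	parseIndexInterval("5-3", 10) // -> "non-increasing order" error
//	parseIndexInterval("12", 10)  // -> "too large index" error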
type JobValidationOptions struct {
apivalidation.PodValidationOptions
// Allow mutation of the pod template's scheduling directives (node affinity,
// node selector, tolerations, scheduling gates, labels and annotations)
AllowMutableSchedulingDirectives bool
// Require Jobs to have the batch.kubernetes.io/job-name and batch.kubernetes.io/controller-uid labels
RequirePrefixedLabels bool
}
type JobStatusValidationOptions struct {
RejectDecreasingSucceededCounter bool
RejectDecreasingFailedCounter bool
RejectDisablingTerminalCondition bool
RejectInvalidCompletedIndexes bool
RejectInvalidFailedIndexes bool
RejectFailedIndexesOverlappingCompleted bool
RejectCompletedIndexesForNonIndexedJob bool
RejectFailedIndexesForNoBackoffLimitPerIndex bool
RejectFailedJobWithoutFailureTarget bool
RejectCompleteJobWithoutSuccessCriteriaMet bool
RejectFinishedJobWithActivePods bool
RejectFinishedJobWithoutStartTime bool
RejectFinishedJobWithUncountedTerminatedPods bool
RejectStartTimeUpdateForUnsuspendedJob bool
RejectCompletionTimeBeforeStartTime bool
RejectMutatingCompletionTime bool
RejectCompleteJobWithoutCompletionTime bool
RejectNotCompleteJobWithCompletionTime bool
RejectCompleteJobWithFailedCondition bool
RejectCompleteJobWithFailureTargetCondition bool
AllowForSuccessCriteriaMetInExtendedScope bool
RejectMoreReadyThanActivePods bool
RejectFinishedJobWithTerminatingPods bool
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package batch
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJob) DeepCopyInto(out *CronJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob.
func (in *CronJob) DeepCopy() *CronJob {
if in == nil {
return nil
}
out := new(CronJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobList) DeepCopyInto(out *CronJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CronJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList.
func (in *CronJobList) DeepCopy() *CronJobList {
if in == nil {
return nil
}
out := new(CronJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) {
*out = *in
if in.TimeZone != nil {
in, out := &in.TimeZone, &out.TimeZone
*out = new(string)
**out = **in
}
if in.StartingDeadlineSeconds != nil {
in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
in.JobTemplate.DeepCopyInto(&out.JobTemplate)
if in.SuccessfulJobsHistoryLimit != nil {
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedJobsHistoryLimit != nil {
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec.
func (in *CronJobSpec) DeepCopy() *CronJobSpec {
if in == nil {
return nil
}
out := new(CronJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) {
*out = *in
if in.Active != nil {
in, out := &in.Active, &out.Active
*out = make([]core.ObjectReference, len(*in))
copy(*out, *in)
}
if in.LastScheduleTime != nil {
in, out := &in.LastScheduleTime, &out.LastScheduleTime
*out = (*in).DeepCopy()
}
if in.LastSuccessfulTime != nil {
in, out := &in.LastSuccessfulTime, &out.LastSuccessfulTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus.
func (in *CronJobStatus) DeepCopy() *CronJobStatus {
if in == nil {
return nil
}
out := new(CronJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Job) DeepCopyInto(out *Job) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job.
func (in *Job) DeepCopy() *Job {
if in == nil {
return nil
}
out := new(Job)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Job) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobCondition) DeepCopyInto(out *JobCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition.
func (in *JobCondition) DeepCopy() *JobCondition {
if in == nil {
return nil
}
out := new(JobCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobList) DeepCopyInto(out *JobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Job, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList.
func (in *JobList) DeepCopy() *JobList {
if in == nil {
return nil
}
out := new(JobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = *in
if in.Parallelism != nil {
in, out := &in.Parallelism, &out.Parallelism
*out = new(int32)
**out = **in
}
if in.Completions != nil {
in, out := &in.Completions, &out.Completions
*out = new(int32)
**out = **in
}
if in.PodFailurePolicy != nil {
in, out := &in.PodFailurePolicy, &out.PodFailurePolicy
*out = new(PodFailurePolicy)
(*in).DeepCopyInto(*out)
}
if in.SuccessPolicy != nil {
in, out := &in.SuccessPolicy, &out.SuccessPolicy
*out = new(SuccessPolicy)
(*in).DeepCopyInto(*out)
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.BackoffLimit != nil {
in, out := &in.BackoffLimit, &out.BackoffLimit
*out = new(int32)
**out = **in
}
if in.BackoffLimitPerIndex != nil {
in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex
*out = new(int32)
**out = **in
}
if in.MaxFailedIndexes != nil {
in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ManualSelector != nil {
in, out := &in.ManualSelector, &out.ManualSelector
*out = new(bool)
**out = **in
}
in.Template.DeepCopyInto(&out.Template)
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
if in.CompletionMode != nil {
in, out := &in.CompletionMode, &out.CompletionMode
*out = new(CompletionMode)
**out = **in
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
if in.PodReplacementPolicy != nil {
in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy
*out = new(PodReplacementPolicy)
**out = **in
}
if in.ManagedBy != nil {
in, out := &in.ManagedBy, &out.ManagedBy
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec.
func (in *JobSpec) DeepCopy() *JobSpec {
if in == nil {
return nil
}
out := new(JobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobStatus) DeepCopyInto(out *JobStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]JobCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.Terminating != nil {
in, out := &in.Terminating, &out.Terminating
*out = new(int32)
**out = **in
}
if in.Ready != nil {
in, out := &in.Ready, &out.Ready
*out = new(int32)
**out = **in
}
if in.FailedIndexes != nil {
in, out := &in.FailedIndexes, &out.FailedIndexes
*out = new(string)
**out = **in
}
if in.UncountedTerminatedPods != nil {
in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
*out = new(UncountedTerminatedPods)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
func (in *JobStatus) DeepCopy() *JobStatus {
if in == nil {
return nil
}
out := new(JobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec.
func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec {
if in == nil {
return nil
}
out := new(JobTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicy) DeepCopyInto(out *PodFailurePolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PodFailurePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicy.
func (in *PodFailurePolicy) DeepCopy() *PodFailurePolicy {
if in == nil {
return nil
}
out := new(PodFailurePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopyInto(out *PodFailurePolicyOnExitCodesRequirement) {
*out = *in
if in.ContainerName != nil {
in, out := &in.ContainerName, &out.ContainerName
*out = new(string)
**out = **in
}
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]int32, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnExitCodesRequirement.
func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopy() *PodFailurePolicyOnExitCodesRequirement {
if in == nil {
return nil
}
out := new(PodFailurePolicyOnExitCodesRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopyInto(out *PodFailurePolicyOnPodConditionsPattern) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnPodConditionsPattern.
func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopy() *PodFailurePolicyOnPodConditionsPattern {
if in == nil {
return nil
}
out := new(PodFailurePolicyOnPodConditionsPattern)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodFailurePolicyRule) DeepCopyInto(out *PodFailurePolicyRule) {
*out = *in
if in.OnExitCodes != nil {
in, out := &in.OnExitCodes, &out.OnExitCodes
*out = new(PodFailurePolicyOnExitCodesRequirement)
(*in).DeepCopyInto(*out)
}
if in.OnPodConditions != nil {
in, out := &in.OnPodConditions, &out.OnPodConditions
*out = make([]PodFailurePolicyOnPodConditionsPattern, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyRule.
func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule {
if in == nil {
return nil
}
out := new(PodFailurePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]SuccessPolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy.
func (in *SuccessPolicy) DeepCopy() *SuccessPolicy {
if in == nil {
return nil
}
out := new(SuccessPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) {
*out = *in
if in.SucceededIndexes != nil {
in, out := &in.SucceededIndexes, &out.SucceededIndexes
*out = new(string)
**out = **in
}
if in.SucceededCount != nil {
in, out := &in.SucceededCount, &out.SucceededCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule.
func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule {
if in == nil {
return nil
}
out := new(SuccessPolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) {
*out = *in
if in.Succeeded != nil {
in, out := &in.Succeeded, &out.Succeeded
*out = make([]types.UID, len(*in))
copy(*out, *in)
}
if in.Failed != nil {
in, out := &in.Failed, &out.Failed
*out = make([]types.UID, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UncountedTerminatedPods.
func (in *UncountedTerminatedPods) DeepCopy() *UncountedTerminatedPods {
if in == nil {
return nil
}
out := new(UncountedTerminatedPods)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"time"
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/util/certificate/csr"
"k8s.io/kubernetes/pkg/apis/certificates"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/ptr"
)
// Funcs returns the fuzzer functions for the certificates API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *certificates.CertificateSigningRequestSpec, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
obj.Usages = []certificates.KeyUsage{certificates.UsageKeyEncipherment}
obj.SignerName = "example.com/custom-sample-signer"
obj.ExpirationSeconds = csr.DurationToExpirationSeconds(time.Hour + time.Minute + time.Second)
},
func(obj *certificates.CertificateSigningRequestCondition, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if len(obj.Status) == 0 {
obj.Status = api.ConditionTrue
}
},
func(obj *certificates.PodCertificateRequestSpec, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
// MaxExpirationSeconds has a field defaulter, so we should make
// sure it's non-nil. Otherwise,
// pkg/api/testing/serialization_test.go TestRoundTripTypes will
// fail with diffs due to the defaulting.
if obj.MaxExpirationSeconds == nil {
obj.MaxExpirationSeconds = ptr.To[int32](86400)
}
},
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
)
// ParseCSR extracts the CSR from the bytes and decodes it.
func ParseCSR(pemBytes []byte) (*x509.CertificateRequest, error) {
block, _ := pem.Decode(pemBytes)
if block == nil || block.Type != "CERTIFICATE REQUEST" {
return nil, errors.New("PEM block type must be CERTIFICATE REQUEST")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csr, nil
}
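// Illustrative sketch (not part of the original source; key is an assumed signer
// private key): ParseCSR expects a single PEM block of type CERTIFICATE REQUEST,
// as produced by x509.CreateCertificateRequest:
//
//	der, _ := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
//		Subject: pkix.Name{CommonName: "system:node:node-1", Organization: []string{"system:nodes"}},
//	}, key)
//	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})
//	csr, err := ParseCSR(pemBytes)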
var (
organizationNotSystemNodesErr = fmt.Errorf("subject organization is not system:nodes")
commonNameNotSystemNode = fmt.Errorf("subject common name does not begin with system:node:")
dnsOrIPSANRequiredErr = fmt.Errorf("DNS or IP subjectAltName is required")
dnsSANNotAllowedErr = fmt.Errorf("DNS subjectAltNames are not allowed")
emailSANNotAllowedErr = fmt.Errorf("Email subjectAltNames are not allowed")
ipSANNotAllowedErr = fmt.Errorf("IP subjectAltNames are not allowed")
uriSANNotAllowedErr = fmt.Errorf("URI subjectAltNames are not allowed")
)
var (
kubeletServingRequiredUsages = sets.NewString(
string(UsageDigitalSignature),
string(UsageKeyEncipherment),
string(UsageServerAuth),
)
kubeletServingRequiredUsagesNoRSA = sets.NewString(
string(UsageDigitalSignature),
string(UsageServerAuth),
)
)
func IsKubeletServingCSR(req *x509.CertificateRequest, usages sets.String) bool {
return ValidateKubeletServingCSR(req, usages) == nil
}
func ValidateKubeletServingCSR(req *x509.CertificateRequest, usages sets.String) error {
if !reflect.DeepEqual([]string{"system:nodes"}, req.Subject.Organization) {
return organizationNotSystemNodesErr
}
// at least one of dnsNames or ipAddresses must be specified
if len(req.DNSNames) == 0 && len(req.IPAddresses) == 0 {
return dnsOrIPSANRequiredErr
}
if len(req.EmailAddresses) > 0 {
return emailSANNotAllowedErr
}
if len(req.URIs) > 0 {
return uriSANNotAllowedErr
}
if !kubeletServingRequiredUsages.Equal(usages) && !kubeletServingRequiredUsagesNoRSA.Equal(usages) {
return fmt.Errorf("usages did not match %v", kubeletServingRequiredUsages.List())
}
if !strings.HasPrefix(req.Subject.CommonName, "system:node:") {
return commonNameNotSystemNode
}
return nil
}
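// Illustrative sketch (not part of the original source): a kubelet serving CSR must
// come from the system:nodes organization, use a system:node: common name, present
// at least one DNS or IP SAN, and request exactly the serving usages:
//
//	req := &x509.CertificateRequest{
//		Subject:  pkix.Name{CommonName: "system:node:node-1", Organization: []string{"system:nodes"}},
//		DNSNames: []string{"node-1"},
//	}
//	ValidateKubeletServingCSR(req, sets.NewString("digital signature", "server auth")) // -> nil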
var (
kubeletClientRequiredUsagesNoRSA = sets.NewString(
string(UsageDigitalSignature),
string(UsageClientAuth),
)
kubeletClientRequiredUsages = sets.NewString(
string(UsageDigitalSignature),
string(UsageKeyEncipherment),
string(UsageClientAuth),
)
)
func IsKubeletClientCSR(req *x509.CertificateRequest, usages sets.String) bool {
return ValidateKubeletClientCSR(req, usages) == nil
}
func ValidateKubeletClientCSR(req *x509.CertificateRequest, usages sets.String) error {
if !reflect.DeepEqual([]string{"system:nodes"}, req.Subject.Organization) {
return organizationNotSystemNodesErr
}
if len(req.DNSNames) > 0 {
return dnsSANNotAllowedErr
}
if len(req.EmailAddresses) > 0 {
return emailSANNotAllowedErr
}
if len(req.IPAddresses) > 0 {
return ipSANNotAllowedErr
}
if len(req.URIs) > 0 {
return uriSANNotAllowedErr
}
if !strings.HasPrefix(req.Subject.CommonName, "system:node:") {
return commonNameNotSystemNode
}
if !kubeletClientRequiredUsages.Equal(usages) && !kubeletClientRequiredUsagesNoRSA.Equal(usages) {
return fmt.Errorf("usages did not match %v", kubeletClientRequiredUsages.List())
}
return nil
}
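// Illustrative sketch (not part of the original source): client CSRs are the mirror
// image of serving CSRs: no SANs of any kind are permitted, and the required usages
// are "digital signature" and "client auth" (plus "key encipherment" for RSA keys):
//
//	req := &x509.CertificateRequest{
//		Subject: pkix.Name{CommonName: "system:node:node-1", Organization: []string{"system:nodes"}},
//	}
//	ValidateKubeletClientCSR(req, sets.NewString("digital signature", "client auth")) // -> nil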
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the certificates API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/certificates"
v1 "k8s.io/kubernetes/pkg/apis/certificates/v1"
"k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
"k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(certificates.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const GroupName = "certificates.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CertificateSigningRequest{},
&CertificateSigningRequestList{},
&ClusterTrustBundle{},
&ClusterTrustBundleList{},
&PodCertificateRequest{},
&PodCertificateRequestList{},
)
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field conversion funcs.
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("CertificateSigningRequest"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
}
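// Illustrative note (not part of the original source): these field label conversions
// are what back field selectors on CertificateSigningRequests, e.g.
//
//	kubectl get csr --field-selector spec.signerName=kubernetes.io/kube-apiserver-client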
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"crypto/x509"
"encoding/pem"
"errors"
)
// ParseCSR decodes a PEM-encoded CSR.
func ParseCSR(pemBytes []byte) (*x509.CertificateRequest, error) {
// extract PEM from request object
block, _ := pem.Decode(pemBytes)
if block == nil || block.Type != "CERTIFICATE REQUEST" {
return nil, errors.New("PEM block type must be CERTIFICATE REQUEST")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csr, nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
certificatesv1 "k8s.io/api/certificates/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "certificates.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &certificatesv1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
certificates "k8s.io/kubernetes/pkg/apis/certificates"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*certificatesv1.CertificateSigningRequest)(nil), (*certificates.CertificateSigningRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(a.(*certificatesv1.CertificateSigningRequest), b.(*certificates.CertificateSigningRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequest)(nil), (*certificatesv1.CertificateSigningRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequest_To_v1_CertificateSigningRequest(a.(*certificates.CertificateSigningRequest), b.(*certificatesv1.CertificateSigningRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1.CertificateSigningRequestCondition)(nil), (*certificates.CertificateSigningRequestCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(a.(*certificatesv1.CertificateSigningRequestCondition), b.(*certificates.CertificateSigningRequestCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestCondition)(nil), (*certificatesv1.CertificateSigningRequestCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestCondition_To_v1_CertificateSigningRequestCondition(a.(*certificates.CertificateSigningRequestCondition), b.(*certificatesv1.CertificateSigningRequestCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1.CertificateSigningRequestList)(nil), (*certificates.CertificateSigningRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(a.(*certificatesv1.CertificateSigningRequestList), b.(*certificates.CertificateSigningRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestList)(nil), (*certificatesv1.CertificateSigningRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestList_To_v1_CertificateSigningRequestList(a.(*certificates.CertificateSigningRequestList), b.(*certificatesv1.CertificateSigningRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1.CertificateSigningRequestSpec)(nil), (*certificates.CertificateSigningRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(a.(*certificatesv1.CertificateSigningRequestSpec), b.(*certificates.CertificateSigningRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestSpec)(nil), (*certificatesv1.CertificateSigningRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec(a.(*certificates.CertificateSigningRequestSpec), b.(*certificatesv1.CertificateSigningRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1.CertificateSigningRequestStatus)(nil), (*certificates.CertificateSigningRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(a.(*certificatesv1.CertificateSigningRequestStatus), b.(*certificates.CertificateSigningRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestStatus)(nil), (*certificatesv1.CertificateSigningRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus(a.(*certificates.CertificateSigningRequestStatus), b.(*certificatesv1.CertificateSigningRequestStatus), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *certificatesv1.CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_CertificateSigningRequest_To_certificates_CertificateSigningRequest is an autogenerated conversion function.
func Convert_v1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *certificatesv1.CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
return autoConvert_v1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequest_To_v1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *certificatesv1.CertificateSigningRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_certificates_CertificateSigningRequest_To_v1_CertificateSigningRequest is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequest_To_v1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *certificatesv1.CertificateSigningRequest, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequest_To_v1_CertificateSigningRequest(in, out, s)
}
func autoConvert_v1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *certificatesv1.CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
out.Type = certificates.RequestConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.Reason = in.Reason
out.Message = in.Message
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
return nil
}
// Convert_v1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition is an autogenerated conversion function.
func Convert_v1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *certificatesv1.CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
return autoConvert_v1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestCondition_To_v1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *certificatesv1.CertificateSigningRequestCondition, s conversion.Scope) error {
out.Type = certificatesv1.RequestConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.Reason = in.Reason
out.Message = in.Message
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
return nil
}
// Convert_certificates_CertificateSigningRequestCondition_To_v1_CertificateSigningRequestCondition is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestCondition_To_v1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *certificatesv1.CertificateSigningRequestCondition, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestCondition_To_v1_CertificateSigningRequestCondition(in, out, s)
}
func autoConvert_v1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *certificatesv1.CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList is an autogenerated conversion function.
func Convert_v1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *certificatesv1.CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
return autoConvert_v1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestList_To_v1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *certificatesv1.CertificateSigningRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificatesv1.CertificateSigningRequest)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_certificates_CertificateSigningRequestList_To_v1_CertificateSigningRequestList is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestList_To_v1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *certificatesv1.CertificateSigningRequestList, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestList_To_v1_CertificateSigningRequestList(in, out, s)
}
func autoConvert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *certificatesv1.CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
out.Request = *(*[]byte)(unsafe.Pointer(&in.Request))
out.SignerName = in.SignerName
out.ExpirationSeconds = (*int32)(unsafe.Pointer(in.ExpirationSeconds))
out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages))
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec is an autogenerated conversion function.
func Convert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *certificatesv1.CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
return autoConvert_v1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *certificatesv1.CertificateSigningRequestSpec, s conversion.Scope) error {
out.Request = *(*[]byte)(unsafe.Pointer(&in.Request))
out.SignerName = in.SignerName
out.ExpirationSeconds = (*int32)(unsafe.Pointer(in.ExpirationSeconds))
out.Usages = *(*[]certificatesv1.KeyUsage)(unsafe.Pointer(&in.Usages))
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]certificatesv1.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *certificatesv1.CertificateSigningRequestSpec, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestSpec_To_v1_CertificateSigningRequestSpec(in, out, s)
}
func autoConvert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *certificatesv1.CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions))
out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate))
return nil
}
// Convert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus is an autogenerated conversion function.
func Convert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *certificatesv1.CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
return autoConvert_v1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *certificatesv1.CertificateSigningRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]certificatesv1.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions))
out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate))
return nil
}
// Convert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *certificatesv1.CertificateSigningRequestStatus, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestStatus_To_v1_CertificateSigningRequestStatus(in, out, s)
}
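// exampleConvertCSRToInternal is an editorial usage sketch, not part of the
// generated output: once RegisterConversions has populated a scheme, the
// versioned and internal CertificateSigningRequest types convert through
// runtime.Scheme.Convert.
func exampleConvertCSRToInternal(in *certificatesv1.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	out := &certificates.CertificateSigningRequest{}
	// Convert dispatches to the generated func registered above.
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}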
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by validation-gen. DO NOT EDIT.
package v1
import (
context "context"
fmt "fmt"
certificatesv1 "k8s.io/api/certificates/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*certificatesv1.CertificateSigningRequest)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/", "/approval", "/status":
return Validate_CertificateSigningRequest(ctx, op, nil /* fldPath */, obj.(*certificatesv1.CertificateSigningRequest), safe.Cast[*certificatesv1.CertificateSigningRequest](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
scheme.AddValidationFunc((*certificatesv1.CertificateSigningRequestList)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/":
return Validate_CertificateSigningRequestList(ctx, op, nil /* fldPath */, obj.(*certificatesv1.CertificateSigningRequestList), safe.Cast[*certificatesv1.CertificateSigningRequestList](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
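// exampleValidateCSRCreate is an editorial sketch, not generated code: it
// calls the generated validator directly for a create-style operation. The
// operation.Operation literal is an assumption about that struct's fields;
// real callers reach these validators through the scheme registration above.
func exampleValidateCSRCreate(ctx context.Context, csr *certificatesv1.CertificateSigningRequest) field.ErrorList {
	op := operation.Operation{Type: operation.Create}
	// oldObj is nil because nothing precedes a create.
	return Validate_CertificateSigningRequest(ctx, op, nil, csr, nil)
}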
func Validate_CertificateSigningRequest(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1.CertificateSigningRequest) (errs field.ErrorList) {
// field certificatesv1.CertificateSigningRequest.TypeMeta has no validation
// field certificatesv1.CertificateSigningRequest.ObjectMeta has no validation
// field certificatesv1.CertificateSigningRequest.Spec has no validation
// field certificatesv1.CertificateSigningRequest.Status
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *certificatesv1.CertificateSigningRequestStatus) (errs field.ErrorList) {
errs = append(errs, Validate_CertificateSigningRequestStatus(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("status"), &obj.Status, safe.Field(oldObj, func(oldObj *certificatesv1.CertificateSigningRequest) *certificatesv1.CertificateSigningRequestStatus {
return &oldObj.Status
}))...)
return errs
}
func Validate_CertificateSigningRequestList(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1.CertificateSigningRequestList) (errs field.ErrorList) {
// field certificatesv1.CertificateSigningRequestList.TypeMeta has no validation
// field certificatesv1.CertificateSigningRequestList.ListMeta has no validation
// field certificatesv1.CertificateSigningRequestList.Items
errs = append(errs,
func(fldPath *field.Path, obj, oldObj []certificatesv1.CertificateSigningRequest) (errs field.ErrorList) {
if op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
return nil // no changes
}
errs = append(errs, validate.EachSliceVal(ctx, op, fldPath, obj, oldObj, nil, nil, Validate_CertificateSigningRequest)...)
return
}(fldPath.Child("items"), obj.Items, safe.Field(oldObj, func(oldObj *certificatesv1.CertificateSigningRequestList) []certificatesv1.CertificateSigningRequest {
return oldObj.Items
}))...)
return errs
}
var zeroOrOneOfMembershipFor_k8s_io_api_certificates_v1_CertificateSigningRequestStatus_Conditions_ = validate.NewUnionMembership([2]string{"Conditions[{\"type\": \"Approved\"}]", ""}, [2]string{"Conditions[{\"type\": \"Denied\"}]", ""})
func Validate_CertificateSigningRequestStatus(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1.CertificateSigningRequestStatus) (errs field.ErrorList) {
// field certificatesv1.CertificateSigningRequestStatus.Conditions
errs = append(errs,
func(fldPath *field.Path, obj, oldObj []certificatesv1.CertificateSigningRequestCondition) (errs field.ErrorList) {
if op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
return nil // no changes
}
if e := validate.OptionalSlice(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
return // do not proceed
}
errs = append(errs, validate.ZeroOrOneOfUnion(ctx, op, fldPath, obj, oldObj, zeroOrOneOfMembershipFor_k8s_io_api_certificates_v1_CertificateSigningRequestStatus_Conditions_, func(list []certificatesv1.CertificateSigningRequestCondition) bool {
for i := range list {
if list[i].Type == "Approved" {
return true
}
}
return false
}, func(list []certificatesv1.CertificateSigningRequestCondition) bool {
for i := range list {
if list[i].Type == "Denied" {
return true
}
}
return false
})...)
return
}(fldPath.Child("conditions"), obj.Conditions, safe.Field(oldObj, func(oldObj *certificatesv1.CertificateSigningRequestStatus) []certificatesv1.CertificateSigningRequestCondition {
return oldObj.Conditions
}))...)
// field certificatesv1.CertificateSigningRequestStatus.Certificate has no validation
return errs
}
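// invalidDualStatus is an editorial illustration, not generated code: a status
// that carries both an Approved and a Denied condition violates the
// zero-or-one-of union enforced above, so validating it yields errors.
var invalidDualStatus = certificatesv1.CertificateSigningRequestStatus{
	Conditions: []certificatesv1.CertificateSigningRequestCondition{
		{Type: certificatesv1.CertificateApproved, Status: "True"},
		{Type: certificatesv1.CertificateDenied, Status: "True"},
	},
}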
package v1alpha1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
err := scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("ClusterTrustBundle"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return fmt.Errorf("while adding ClusterTrustBundle field label conversion func: %w", err)
}
err = scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("PodCertificateRequest"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "spec.signerName", "spec.podName", "spec.nodeName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return fmt.Errorf("while adding PodCertificateRequest field label conversion func: %w", err)
}
return nil
}
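// exampleFieldLabelConversion is an editorial sketch showing the effect of the
// registration above: supported field selectors pass through
// scheme.ConvertFieldLabel unchanged, while any other label is rejected with
// the "field label not supported" error.
func exampleFieldLabelConversion(scheme *runtime.Scheme) error {
	_, _, err := scheme.ConvertFieldLabel(
		SchemeGroupVersion.WithKind("ClusterTrustBundle"),
		"spec.signerName", "example.com/my-signer", // hypothetical selector value
	)
	return err // nil, because spec.signerName is in the supported set
}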
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
package v1alpha1
import (
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package.
const GroupName = "certificates.k8s.io"
// SchemeGroupVersion is the group and version used in this package.
var SchemeGroupVersion = schema.GroupVersion{
Group: GroupName,
Version: "v1alpha1",
}
var (
localSchemeBuilder = &certificatesv1alpha1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
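// exampleBuildScheme is an editorial sketch of the resulting wiring; it
// assumes an additional import of k8s.io/apimachinery/pkg/runtime. AddToScheme
// installs both the manually written funcs registered above and the generated
// conversions and defaulters.
func exampleBuildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}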
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
certificates "k8s.io/kubernetes/pkg/apis/certificates"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.ClusterTrustBundle)(nil), (*certificates.ClusterTrustBundle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(a.(*certificatesv1alpha1.ClusterTrustBundle), b.(*certificates.ClusterTrustBundle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundle)(nil), (*certificatesv1alpha1.ClusterTrustBundle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundle_To_v1alpha1_ClusterTrustBundle(a.(*certificates.ClusterTrustBundle), b.(*certificatesv1alpha1.ClusterTrustBundle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.ClusterTrustBundleList)(nil), (*certificates.ClusterTrustBundleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(a.(*certificatesv1alpha1.ClusterTrustBundleList), b.(*certificates.ClusterTrustBundleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundleList)(nil), (*certificatesv1alpha1.ClusterTrustBundleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundleList_To_v1alpha1_ClusterTrustBundleList(a.(*certificates.ClusterTrustBundleList), b.(*certificatesv1alpha1.ClusterTrustBundleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.ClusterTrustBundleSpec)(nil), (*certificates.ClusterTrustBundleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(a.(*certificatesv1alpha1.ClusterTrustBundleSpec), b.(*certificates.ClusterTrustBundleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundleSpec)(nil), (*certificatesv1alpha1.ClusterTrustBundleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec(a.(*certificates.ClusterTrustBundleSpec), b.(*certificatesv1alpha1.ClusterTrustBundleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.PodCertificateRequest)(nil), (*certificates.PodCertificateRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodCertificateRequest_To_certificates_PodCertificateRequest(a.(*certificatesv1alpha1.PodCertificateRequest), b.(*certificates.PodCertificateRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.PodCertificateRequest)(nil), (*certificatesv1alpha1.PodCertificateRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_PodCertificateRequest_To_v1alpha1_PodCertificateRequest(a.(*certificates.PodCertificateRequest), b.(*certificatesv1alpha1.PodCertificateRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.PodCertificateRequestList)(nil), (*certificates.PodCertificateRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodCertificateRequestList_To_certificates_PodCertificateRequestList(a.(*certificatesv1alpha1.PodCertificateRequestList), b.(*certificates.PodCertificateRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.PodCertificateRequestList)(nil), (*certificatesv1alpha1.PodCertificateRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_PodCertificateRequestList_To_v1alpha1_PodCertificateRequestList(a.(*certificates.PodCertificateRequestList), b.(*certificatesv1alpha1.PodCertificateRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.PodCertificateRequestSpec)(nil), (*certificates.PodCertificateRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec(a.(*certificatesv1alpha1.PodCertificateRequestSpec), b.(*certificates.PodCertificateRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.PodCertificateRequestSpec)(nil), (*certificatesv1alpha1.PodCertificateRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec(a.(*certificates.PodCertificateRequestSpec), b.(*certificatesv1alpha1.PodCertificateRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1alpha1.PodCertificateRequestStatus)(nil), (*certificates.PodCertificateRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus(a.(*certificatesv1alpha1.PodCertificateRequestStatus), b.(*certificates.PodCertificateRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.PodCertificateRequestStatus)(nil), (*certificatesv1alpha1.PodCertificateRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus(a.(*certificates.PodCertificateRequestStatus), b.(*certificatesv1alpha1.PodCertificateRequestStatus), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in *certificatesv1alpha1.ClusterTrustBundle, out *certificates.ClusterTrustBundle, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ClusterTrustBundle_To_certificates_ClusterTrustBundle is an autogenerated conversion function.
func Convert_v1alpha1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in *certificatesv1alpha1.ClusterTrustBundle, out *certificates.ClusterTrustBundle, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundle_To_v1alpha1_ClusterTrustBundle(in *certificates.ClusterTrustBundle, out *certificatesv1alpha1.ClusterTrustBundle, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_certificates_ClusterTrustBundle_To_v1alpha1_ClusterTrustBundle is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundle_To_v1alpha1_ClusterTrustBundle(in *certificates.ClusterTrustBundle, out *certificatesv1alpha1.ClusterTrustBundle, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundle_To_v1alpha1_ClusterTrustBundle(in, out, s)
}
func autoConvert_v1alpha1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in *certificatesv1alpha1.ClusterTrustBundleList, out *certificates.ClusterTrustBundleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificates.ClusterTrustBundle)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList is an autogenerated conversion function.
func Convert_v1alpha1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in *certificatesv1alpha1.ClusterTrustBundleList, out *certificates.ClusterTrustBundleList, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundleList_To_v1alpha1_ClusterTrustBundleList(in *certificates.ClusterTrustBundleList, out *certificatesv1alpha1.ClusterTrustBundleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificatesv1alpha1.ClusterTrustBundle)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_certificates_ClusterTrustBundleList_To_v1alpha1_ClusterTrustBundleList is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundleList_To_v1alpha1_ClusterTrustBundleList(in *certificates.ClusterTrustBundleList, out *certificatesv1alpha1.ClusterTrustBundleList, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundleList_To_v1alpha1_ClusterTrustBundleList(in, out, s)
}
func autoConvert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in *certificatesv1alpha1.ClusterTrustBundleSpec, out *certificates.ClusterTrustBundleSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.TrustBundle = in.TrustBundle
return nil
}
// Convert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec is an autogenerated conversion function.
func Convert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in *certificatesv1alpha1.ClusterTrustBundleSpec, out *certificates.ClusterTrustBundleSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec(in *certificates.ClusterTrustBundleSpec, out *certificatesv1alpha1.ClusterTrustBundleSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.TrustBundle = in.TrustBundle
return nil
}
// Convert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec(in *certificates.ClusterTrustBundleSpec, out *certificatesv1alpha1.ClusterTrustBundleSpec, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundleSpec_To_v1alpha1_ClusterTrustBundleSpec(in, out, s)
}
func autoConvert_v1alpha1_PodCertificateRequest_To_certificates_PodCertificateRequest(in *certificatesv1alpha1.PodCertificateRequest, out *certificates.PodCertificateRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_PodCertificateRequest_To_certificates_PodCertificateRequest is an autogenerated conversion function.
func Convert_v1alpha1_PodCertificateRequest_To_certificates_PodCertificateRequest(in *certificatesv1alpha1.PodCertificateRequest, out *certificates.PodCertificateRequest, s conversion.Scope) error {
return autoConvert_v1alpha1_PodCertificateRequest_To_certificates_PodCertificateRequest(in, out, s)
}
func autoConvert_certificates_PodCertificateRequest_To_v1alpha1_PodCertificateRequest(in *certificates.PodCertificateRequest, out *certificatesv1alpha1.PodCertificateRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_certificates_PodCertificateRequest_To_v1alpha1_PodCertificateRequest is an autogenerated conversion function.
func Convert_certificates_PodCertificateRequest_To_v1alpha1_PodCertificateRequest(in *certificates.PodCertificateRequest, out *certificatesv1alpha1.PodCertificateRequest, s conversion.Scope) error {
return autoConvert_certificates_PodCertificateRequest_To_v1alpha1_PodCertificateRequest(in, out, s)
}
func autoConvert_v1alpha1_PodCertificateRequestList_To_certificates_PodCertificateRequestList(in *certificatesv1alpha1.PodCertificateRequestList, out *certificates.PodCertificateRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificates.PodCertificateRequest)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_PodCertificateRequestList_To_certificates_PodCertificateRequestList is an autogenerated conversion function.
func Convert_v1alpha1_PodCertificateRequestList_To_certificates_PodCertificateRequestList(in *certificatesv1alpha1.PodCertificateRequestList, out *certificates.PodCertificateRequestList, s conversion.Scope) error {
return autoConvert_v1alpha1_PodCertificateRequestList_To_certificates_PodCertificateRequestList(in, out, s)
}
func autoConvert_certificates_PodCertificateRequestList_To_v1alpha1_PodCertificateRequestList(in *certificates.PodCertificateRequestList, out *certificatesv1alpha1.PodCertificateRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificatesv1alpha1.PodCertificateRequest)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_certificates_PodCertificateRequestList_To_v1alpha1_PodCertificateRequestList is an autogenerated conversion function.
func Convert_certificates_PodCertificateRequestList_To_v1alpha1_PodCertificateRequestList(in *certificates.PodCertificateRequestList, out *certificatesv1alpha1.PodCertificateRequestList, s conversion.Scope) error {
return autoConvert_certificates_PodCertificateRequestList_To_v1alpha1_PodCertificateRequestList(in, out, s)
}
func autoConvert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec(in *certificatesv1alpha1.PodCertificateRequestSpec, out *certificates.PodCertificateRequestSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.PodName = in.PodName
out.PodUID = types.UID(in.PodUID)
out.ServiceAccountName = in.ServiceAccountName
out.ServiceAccountUID = types.UID(in.ServiceAccountUID)
out.NodeName = types.NodeName(in.NodeName)
out.NodeUID = types.UID(in.NodeUID)
out.MaxExpirationSeconds = (*int32)(unsafe.Pointer(in.MaxExpirationSeconds))
out.PKIXPublicKey = *(*[]byte)(unsafe.Pointer(&in.PKIXPublicKey))
out.ProofOfPossession = *(*[]byte)(unsafe.Pointer(&in.ProofOfPossession))
return nil
}
// Convert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec is an autogenerated conversion function.
func Convert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec(in *certificatesv1alpha1.PodCertificateRequestSpec, out *certificates.PodCertificateRequestSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_PodCertificateRequestSpec_To_certificates_PodCertificateRequestSpec(in, out, s)
}
func autoConvert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec(in *certificates.PodCertificateRequestSpec, out *certificatesv1alpha1.PodCertificateRequestSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.PodName = in.PodName
out.PodUID = types.UID(in.PodUID)
out.ServiceAccountName = in.ServiceAccountName
out.ServiceAccountUID = types.UID(in.ServiceAccountUID)
out.NodeName = types.NodeName(in.NodeName)
out.NodeUID = types.UID(in.NodeUID)
out.MaxExpirationSeconds = (*int32)(unsafe.Pointer(in.MaxExpirationSeconds))
out.PKIXPublicKey = *(*[]byte)(unsafe.Pointer(&in.PKIXPublicKey))
out.ProofOfPossession = *(*[]byte)(unsafe.Pointer(&in.ProofOfPossession))
return nil
}
// Convert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec is an autogenerated conversion function.
func Convert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec(in *certificates.PodCertificateRequestSpec, out *certificatesv1alpha1.PodCertificateRequestSpec, s conversion.Scope) error {
return autoConvert_certificates_PodCertificateRequestSpec_To_v1alpha1_PodCertificateRequestSpec(in, out, s)
}
func autoConvert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus(in *certificatesv1alpha1.PodCertificateRequestStatus, out *certificates.PodCertificateRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.CertificateChain = in.CertificateChain
out.NotBefore = (*v1.Time)(unsafe.Pointer(in.NotBefore))
out.BeginRefreshAt = (*v1.Time)(unsafe.Pointer(in.BeginRefreshAt))
out.NotAfter = (*v1.Time)(unsafe.Pointer(in.NotAfter))
return nil
}
// Convert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus is an autogenerated conversion function.
func Convert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus(in *certificatesv1alpha1.PodCertificateRequestStatus, out *certificates.PodCertificateRequestStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_PodCertificateRequestStatus_To_certificates_PodCertificateRequestStatus(in, out, s)
}
func autoConvert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus(in *certificates.PodCertificateRequestStatus, out *certificatesv1alpha1.PodCertificateRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.CertificateChain = in.CertificateChain
out.NotBefore = (*v1.Time)(unsafe.Pointer(in.NotBefore))
out.BeginRefreshAt = (*v1.Time)(unsafe.Pointer(in.BeginRefreshAt))
out.NotAfter = (*v1.Time)(unsafe.Pointer(in.NotAfter))
return nil
}
// Convert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus is an autogenerated conversion function.
func Convert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus(in *certificates.PodCertificateRequestStatus, out *certificatesv1alpha1.PodCertificateRequestStatus, s conversion.Scope) error {
return autoConvert_certificates_PodCertificateRequestStatus_To_v1alpha1_PodCertificateRequestStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&certificatesv1alpha1.PodCertificateRequest{}, func(obj interface{}) {
SetObjectDefaults_PodCertificateRequest(obj.(*certificatesv1alpha1.PodCertificateRequest))
})
scheme.AddTypeDefaultingFunc(&certificatesv1alpha1.PodCertificateRequestList{}, func(obj interface{}) {
SetObjectDefaults_PodCertificateRequestList(obj.(*certificatesv1alpha1.PodCertificateRequestList))
})
return nil
}
func SetObjectDefaults_PodCertificateRequest(in *certificatesv1alpha1.PodCertificateRequest) {
if in.Spec.MaxExpirationSeconds == nil {
var ptrVar1 int32 = 86400
in.Spec.MaxExpirationSeconds = &ptrVar1
}
}
func SetObjectDefaults_PodCertificateRequestList(in *certificatesv1alpha1.PodCertificateRequestList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PodCertificateRequest(a)
}
}
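// exampleDefaultPCR is an editorial sketch, not generated code: running the
// defaulters registered above fills in MaxExpirationSeconds with 86400
// seconds (24 hours) when the field is unset.
func exampleDefaultPCR() *certificatesv1alpha1.PodCertificateRequest {
	scheme := runtime.NewScheme()
	if err := RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	pcr := &certificatesv1alpha1.PodCertificateRequest{}
	scheme.Default(pcr)
	// pcr.Spec.MaxExpirationSeconds now points at 86400.
	return pcr
}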
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by validation-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
return nil
}
package v1beta1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field conversion funcs.
err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("CertificateSigningRequest"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ClusterTrustBundle"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
}
package v1beta1
import (
"crypto/x509"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
certificates "k8s.io/kubernetes/pkg/apis/certificates"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_CertificateSigningRequestSpec(obj *certificatesv1beta1.CertificateSigningRequestSpec) {
if obj.Usages == nil {
obj.Usages = []certificatesv1beta1.KeyUsage{certificatesv1beta1.UsageDigitalSignature, certificatesv1beta1.UsageKeyEncipherment}
}
if obj.SignerName == nil {
signerName := DefaultSignerNameFromSpec(obj)
obj.SignerName = &signerName
}
}
func SetDefaults_CertificateSigningRequestCondition(obj *certificatesv1beta1.CertificateSigningRequestCondition) {
if len(obj.Status) == 0 {
obj.Status = v1.ConditionTrue
}
}
// DefaultSignerNameFromSpec determines the signerName that should be set
// by inspecting the 'request' content and the spec options.
func DefaultSignerNameFromSpec(obj *certificatesv1beta1.CertificateSigningRequestSpec) string {
csr, err := ParseCSR(obj.Request)
switch {
case err != nil:
// Set the signerName to 'legacy-unknown' as the CSR could not be
// recognised.
return certificatesv1beta1.LegacyUnknownSignerName
case IsKubeletClientCSR(csr, obj.Usages):
return certificatesv1beta1.KubeAPIServerClientKubeletSignerName
case IsKubeletServingCSR(csr, obj.Usages):
return certificatesv1beta1.KubeletServingSignerName
default:
return certificatesv1beta1.LegacyUnknownSignerName
}
}
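// exampleLegacyUnknownFallback is an editorial sketch: request bytes that do
// not parse as a CSR fall through to the legacy-unknown signer name.
func exampleLegacyUnknownFallback() string {
	spec := &certificatesv1beta1.CertificateSigningRequestSpec{
		Request: []byte("not a CSR"), // hypothetical malformed request
	}
	return DefaultSignerNameFromSpec(spec) // certificatesv1beta1.LegacyUnknownSignerName
}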
func IsKubeletServingCSR(req *x509.CertificateRequest, usages []certificatesv1beta1.KeyUsage) bool {
return certificates.IsKubeletServingCSR(req, usagesToSet(usages))
}
func IsKubeletClientCSR(req *x509.CertificateRequest, usages []certificatesv1beta1.KeyUsage) bool {
return certificates.IsKubeletClientCSR(req, usagesToSet(usages))
}
func usagesToSet(usages []certificatesv1beta1.KeyUsage) sets.String {
result := sets.NewString()
for _, usage := range usages {
result.Insert(string(usage))
}
return result
}
package v1beta1
import (
"crypto/x509"
"encoding/pem"
"errors"
)
// ParseCSR decodes a PEM-encoded CSR
func ParseCSR(pemBytes []byte) (*x509.CertificateRequest, error) {
// extract PEM from request object
block, _ := pem.Decode(pemBytes)
if block == nil || block.Type != "CERTIFICATE REQUEST" {
return nil, errors.New("PEM block type must be CERTIFICATE REQUEST")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csr, nil
}
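// exampleParseCSRRoundTrip is an editorial sketch, not part of this file's
// API: it generates a key, builds a DER-encoded CSR, wraps it in the PEM
// block type ParseCSR expects, and parses it back. It assumes additional
// imports of crypto/ecdsa, crypto/elliptic, crypto/rand, and crypto/x509/pkix;
// the subject name is an arbitrary placeholder.
func exampleParseCSRRoundTrip() (*x509.CertificateRequest, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	der, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
		Subject: pkix.Name{CommonName: "example"},
	}, key)
	if err != nil {
		return nil, err
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})
	return ParseCSR(pemBytes)
}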
package v1beta1
import (
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "certificates.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &certificatesv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
certificates "k8s.io/kubernetes/pkg/apis/certificates"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.CertificateSigningRequest)(nil), (*certificates.CertificateSigningRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(a.(*certificatesv1beta1.CertificateSigningRequest), b.(*certificates.CertificateSigningRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequest)(nil), (*certificatesv1beta1.CertificateSigningRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(a.(*certificates.CertificateSigningRequest), b.(*certificatesv1beta1.CertificateSigningRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.CertificateSigningRequestCondition)(nil), (*certificates.CertificateSigningRequestCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(a.(*certificatesv1beta1.CertificateSigningRequestCondition), b.(*certificates.CertificateSigningRequestCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestCondition)(nil), (*certificatesv1beta1.CertificateSigningRequestCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(a.(*certificates.CertificateSigningRequestCondition), b.(*certificatesv1beta1.CertificateSigningRequestCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.CertificateSigningRequestList)(nil), (*certificates.CertificateSigningRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(a.(*certificatesv1beta1.CertificateSigningRequestList), b.(*certificates.CertificateSigningRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestList)(nil), (*certificatesv1beta1.CertificateSigningRequestList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(a.(*certificates.CertificateSigningRequestList), b.(*certificatesv1beta1.CertificateSigningRequestList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.CertificateSigningRequestSpec)(nil), (*certificates.CertificateSigningRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(a.(*certificatesv1beta1.CertificateSigningRequestSpec), b.(*certificates.CertificateSigningRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestSpec)(nil), (*certificatesv1beta1.CertificateSigningRequestSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(a.(*certificates.CertificateSigningRequestSpec), b.(*certificatesv1beta1.CertificateSigningRequestSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.CertificateSigningRequestStatus)(nil), (*certificates.CertificateSigningRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(a.(*certificatesv1beta1.CertificateSigningRequestStatus), b.(*certificates.CertificateSigningRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.CertificateSigningRequestStatus)(nil), (*certificatesv1beta1.CertificateSigningRequestStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(a.(*certificates.CertificateSigningRequestStatus), b.(*certificatesv1beta1.CertificateSigningRequestStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.ClusterTrustBundle)(nil), (*certificates.ClusterTrustBundle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(a.(*certificatesv1beta1.ClusterTrustBundle), b.(*certificates.ClusterTrustBundle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundle)(nil), (*certificatesv1beta1.ClusterTrustBundle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundle_To_v1beta1_ClusterTrustBundle(a.(*certificates.ClusterTrustBundle), b.(*certificatesv1beta1.ClusterTrustBundle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.ClusterTrustBundleList)(nil), (*certificates.ClusterTrustBundleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(a.(*certificatesv1beta1.ClusterTrustBundleList), b.(*certificates.ClusterTrustBundleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundleList)(nil), (*certificatesv1beta1.ClusterTrustBundleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundleList_To_v1beta1_ClusterTrustBundleList(a.(*certificates.ClusterTrustBundleList), b.(*certificatesv1beta1.ClusterTrustBundleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificatesv1beta1.ClusterTrustBundleSpec)(nil), (*certificates.ClusterTrustBundleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(a.(*certificatesv1beta1.ClusterTrustBundleSpec), b.(*certificates.ClusterTrustBundleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*certificates.ClusterTrustBundleSpec)(nil), (*certificatesv1beta1.ClusterTrustBundleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec(a.(*certificates.ClusterTrustBundleSpec), b.(*certificatesv1beta1.ClusterTrustBundleSpec), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *certificatesv1beta1.CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest is an autogenerated conversion function.
func Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *certificatesv1beta1.CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error {
return autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *certificatesv1beta1.CertificateSigningRequest, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *certificatesv1beta1.CertificateSigningRequest, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in, out, s)
}
func autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *certificatesv1beta1.CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
out.Type = certificates.RequestConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.Reason = in.Reason
out.Message = in.Message
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
return nil
}
// Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition is an autogenerated conversion function.
func Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *certificatesv1beta1.CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error {
return autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *certificatesv1beta1.CertificateSigningRequestCondition, s conversion.Scope) error {
out.Type = certificatesv1beta1.RequestConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.Reason = in.Reason
out.Message = in.Message
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
return nil
}
// Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *certificatesv1beta1.CertificateSigningRequestCondition, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in, out, s)
}
func autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *certificatesv1beta1.CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]certificates.CertificateSigningRequest, len(*in))
for i := range *in {
if err := Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList is an autogenerated conversion function.
func Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *certificatesv1beta1.CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error {
return autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *certificatesv1beta1.CertificateSigningRequestList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]certificatesv1beta1.CertificateSigningRequest, len(*in))
for i := range *in {
if err := Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *certificatesv1beta1.CertificateSigningRequestList, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in, out, s)
}
func autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *certificatesv1beta1.CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
out.Request = *(*[]byte)(unsafe.Pointer(&in.Request))
if err := metav1.Convert_Pointer_string_To_string(&in.SignerName, &out.SignerName, s); err != nil {
return err
}
out.ExpirationSeconds = (*int32)(unsafe.Pointer(in.ExpirationSeconds))
out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages))
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec is an autogenerated conversion function.
func Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *certificatesv1beta1.CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error {
return autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *certificatesv1beta1.CertificateSigningRequestSpec, s conversion.Scope) error {
out.Request = *(*[]byte)(unsafe.Pointer(&in.Request))
if err := metav1.Convert_string_To_Pointer_string(&in.SignerName, &out.SignerName, s); err != nil {
return err
}
out.ExpirationSeconds = (*int32)(unsafe.Pointer(in.ExpirationSeconds))
out.Usages = *(*[]certificatesv1beta1.KeyUsage)(unsafe.Pointer(&in.Usages))
out.Username = in.Username
out.UID = in.UID
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
out.Extra = *(*map[string]certificatesv1beta1.ExtraValue)(unsafe.Pointer(&in.Extra))
return nil
}
// Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *certificatesv1beta1.CertificateSigningRequestSpec, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in, out, s)
}
func autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *certificatesv1beta1.CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions))
out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate))
return nil
}
// Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus is an autogenerated conversion function.
func Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *certificatesv1beta1.CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error {
return autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s)
}
func autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *certificatesv1beta1.CertificateSigningRequestStatus, s conversion.Scope) error {
out.Conditions = *(*[]certificatesv1beta1.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions))
out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate))
return nil
}
// Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus is an autogenerated conversion function.
func Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *certificatesv1beta1.CertificateSigningRequestStatus, s conversion.Scope) error {
return autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in, out, s)
}
func autoConvert_v1beta1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in *certificatesv1beta1.ClusterTrustBundle, out *certificates.ClusterTrustBundle, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ClusterTrustBundle_To_certificates_ClusterTrustBundle is an autogenerated conversion function.
func Convert_v1beta1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in *certificatesv1beta1.ClusterTrustBundle, out *certificates.ClusterTrustBundle, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterTrustBundle_To_certificates_ClusterTrustBundle(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundle_To_v1beta1_ClusterTrustBundle(in *certificates.ClusterTrustBundle, out *certificatesv1beta1.ClusterTrustBundle, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_certificates_ClusterTrustBundle_To_v1beta1_ClusterTrustBundle is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundle_To_v1beta1_ClusterTrustBundle(in *certificates.ClusterTrustBundle, out *certificatesv1beta1.ClusterTrustBundle, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundle_To_v1beta1_ClusterTrustBundle(in, out, s)
}
func autoConvert_v1beta1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in *certificatesv1beta1.ClusterTrustBundleList, out *certificates.ClusterTrustBundleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificates.ClusterTrustBundle)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList is an autogenerated conversion function.
func Convert_v1beta1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in *certificatesv1beta1.ClusterTrustBundleList, out *certificates.ClusterTrustBundleList, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterTrustBundleList_To_certificates_ClusterTrustBundleList(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundleList_To_v1beta1_ClusterTrustBundleList(in *certificates.ClusterTrustBundleList, out *certificatesv1beta1.ClusterTrustBundleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]certificatesv1beta1.ClusterTrustBundle)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_certificates_ClusterTrustBundleList_To_v1beta1_ClusterTrustBundleList is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundleList_To_v1beta1_ClusterTrustBundleList(in *certificates.ClusterTrustBundleList, out *certificatesv1beta1.ClusterTrustBundleList, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundleList_To_v1beta1_ClusterTrustBundleList(in, out, s)
}
func autoConvert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in *certificatesv1beta1.ClusterTrustBundleSpec, out *certificates.ClusterTrustBundleSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.TrustBundle = in.TrustBundle
return nil
}
// Convert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec is an autogenerated conversion function.
func Convert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in *certificatesv1beta1.ClusterTrustBundleSpec, out *certificates.ClusterTrustBundleSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterTrustBundleSpec_To_certificates_ClusterTrustBundleSpec(in, out, s)
}
func autoConvert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec(in *certificates.ClusterTrustBundleSpec, out *certificatesv1beta1.ClusterTrustBundleSpec, s conversion.Scope) error {
out.SignerName = in.SignerName
out.TrustBundle = in.TrustBundle
return nil
}
// Convert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec is an autogenerated conversion function.
func Convert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec(in *certificates.ClusterTrustBundleSpec, out *certificatesv1beta1.ClusterTrustBundleSpec, s conversion.Scope) error {
return autoConvert_certificates_ClusterTrustBundleSpec_To_v1beta1_ClusterTrustBundleSpec(in, out, s)
}
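// Illustrative sketch (not generated code): the generated conversion functions
// can also be called directly. Passing a nil conversion.Scope is an assumption
// that holds for this type because none of its nested conversions consult the
// scope; the helper name is hypothetical.
func exampleConvertCSR(in *certificatesv1beta1.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
out := &certificates.CertificateSigningRequest{}
if err := Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, nil); err != nil {
return nil, err
}
return out, nil
}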
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&certificatesv1beta1.CertificateSigningRequest{}, func(obj interface{}) {
SetObjectDefaults_CertificateSigningRequest(obj.(*certificatesv1beta1.CertificateSigningRequest))
})
scheme.AddTypeDefaultingFunc(&certificatesv1beta1.CertificateSigningRequestList{}, func(obj interface{}) {
SetObjectDefaults_CertificateSigningRequestList(obj.(*certificatesv1beta1.CertificateSigningRequestList))
})
return nil
}
func SetObjectDefaults_CertificateSigningRequest(in *certificatesv1beta1.CertificateSigningRequest) {
SetDefaults_CertificateSigningRequestSpec(&in.Spec)
for i := range in.Status.Conditions {
a := &in.Status.Conditions[i]
SetDefaults_CertificateSigningRequestCondition(a)
}
}
func SetObjectDefaults_CertificateSigningRequestList(in *certificatesv1beta1.CertificateSigningRequestList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_CertificateSigningRequest(a)
}
}
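// Illustrative sketch (not generated code; the helper name is hypothetical): a
// scheme built with RegisterDefaults dispatches defaulting through
// runtime.Scheme.Default.
func exampleApplyDefaults() {
scheme := runtime.NewScheme()
if err := RegisterDefaults(scheme); err != nil {
panic(err)
}
csr := &certificatesv1beta1.CertificateSigningRequest{}
// Invokes SetObjectDefaults_CertificateSigningRequest registered above.
scheme.Default(csr)
}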
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1beta1
import (
context "context"
fmt "fmt"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
equality "k8s.io/apimachinery/pkg/api/equality"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*certificatesv1beta1.CertificateSigningRequest)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/", "/approval", "/status":
return Validate_CertificateSigningRequest(ctx, op, nil /* fldPath */, obj.(*certificatesv1beta1.CertificateSigningRequest), safe.Cast[*certificatesv1beta1.CertificateSigningRequest](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
scheme.AddValidationFunc((*certificatesv1beta1.CertificateSigningRequestList)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/":
return Validate_CertificateSigningRequestList(ctx, op, nil /* fldPath */, obj.(*certificatesv1beta1.CertificateSigningRequestList), safe.Cast[*certificatesv1beta1.CertificateSigningRequestList](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
func Validate_CertificateSigningRequest(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1beta1.CertificateSigningRequest) (errs field.ErrorList) {
// field certificatesv1beta1.CertificateSigningRequest.TypeMeta has no validation
// field certificatesv1beta1.CertificateSigningRequest.ObjectMeta has no validation
// field certificatesv1beta1.CertificateSigningRequest.Spec has no validation
// field certificatesv1beta1.CertificateSigningRequest.Status
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *certificatesv1beta1.CertificateSigningRequestStatus) (errs field.ErrorList) {
errs = append(errs, Validate_CertificateSigningRequestStatus(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("status"), &obj.Status, safe.Field(oldObj, func(oldObj *certificatesv1beta1.CertificateSigningRequest) *certificatesv1beta1.CertificateSigningRequestStatus {
return &oldObj.Status
}))...)
return errs
}
func Validate_CertificateSigningRequestList(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1beta1.CertificateSigningRequestList) (errs field.ErrorList) {
// field certificatesv1beta1.CertificateSigningRequestList.TypeMeta has no validation
// field certificatesv1beta1.CertificateSigningRequestList.ListMeta has no validation
// field certificatesv1beta1.CertificateSigningRequestList.Items
errs = append(errs,
func(fldPath *field.Path, obj, oldObj []certificatesv1beta1.CertificateSigningRequest) (errs field.ErrorList) {
if op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
return nil // no changes
}
errs = append(errs, validate.EachSliceVal(ctx, op, fldPath, obj, oldObj, nil, nil, Validate_CertificateSigningRequest)...)
return
}(fldPath.Child("items"), obj.Items, safe.Field(oldObj, func(oldObj *certificatesv1beta1.CertificateSigningRequestList) []certificatesv1beta1.CertificateSigningRequest {
return oldObj.Items
}))...)
return errs
}
var zeroOrOneOfMembershipFor_k8s_io_api_certificates_v1beta1_CertificateSigningRequestStatus_Conditions_ = validate.NewUnionMembership([2]string{"Conditions[{\"type\": \"Approved\"}]", ""}, [2]string{"Conditions[{\"type\": \"Denied\"}]", ""})
func Validate_CertificateSigningRequestStatus(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *certificatesv1beta1.CertificateSigningRequestStatus) (errs field.ErrorList) {
// field certificatesv1beta1.CertificateSigningRequestStatus.Conditions
errs = append(errs,
func(fldPath *field.Path, obj, oldObj []certificatesv1beta1.CertificateSigningRequestCondition) (errs field.ErrorList) {
if op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
return nil // no changes
}
if e := validate.OptionalSlice(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
return // do not proceed
}
errs = append(errs, validate.ZeroOrOneOfUnion(ctx, op, fldPath, obj, oldObj, zeroOrOneOfMembershipFor_k8s_io_api_certificates_v1beta1_CertificateSigningRequestStatus_Conditions_, func(list []certificatesv1beta1.CertificateSigningRequestCondition) bool {
for i := range list {
if list[i].Type == "Approved" {
return true
}
}
return false
}, func(list []certificatesv1beta1.CertificateSigningRequestCondition) bool {
for i := range list {
if list[i].Type == "Denied" {
return true
}
}
return false
})...)
return
}(fldPath.Child("conditions"), obj.Conditions, safe.Field(oldObj, func(oldObj *certificatesv1beta1.CertificateSigningRequestStatus) []certificatesv1beta1.CertificateSigningRequestCondition {
return oldObj.Conditions
}))...)
// field certificatesv1beta1.CertificateSigningRequestStatus.Certificate has no validation
return errs
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"fmt"
"strconv"
"time"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
utilcert "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/pkg/apis/certificates"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/utils/clock"
)
var (
// trueConditionTypes is the set of condition types which may only have a status of True if present
trueConditionTypes = sets.NewString(
string(certificates.CertificateApproved),
string(certificates.CertificateDenied),
string(certificates.CertificateFailed),
)
trueStatusOnly = sets.NewString(string(v1.ConditionTrue))
allStatusValues = sets.NewString(string(v1.ConditionTrue), string(v1.ConditionFalse), string(v1.ConditionUnknown))
)
type certificateValidationOptions struct {
// The following allow modifications only permitted via certain update paths
// allow populating/modifying Approved/Denied conditions
allowSettingApprovalConditions bool
// allow populating status.certificate
allowSettingCertificate bool
// allow both Approved and Denied conditions to exist.
// we tolerate this when the problem is already present in the persisted object for compatibility.
allowBothApprovedAndDenied bool
// The following are bad things we tolerate for compatibility reasons:
// * in requests made via the v1beta1 API
// * in update requests where the problem is already present in the persisted object
// allow modifying status.certificate on an update where the old object has a different certificate
allowResettingCertificate bool
// allow the legacy-unknown signerName
allowLegacySignerName bool
// allow conditions with duplicate types
allowDuplicateConditionTypes bool
// allow conditions with "" types
allowEmptyConditionType bool
// allow arbitrary content in status.certificate
allowArbitraryCertificate bool
// allow usages values outside the known set
allowUnknownUsages bool
// allow duplicate usages values
allowDuplicateUsages bool
}
// validateCSR validates the signature and formatting of a base64-wrapped,
// PEM-encoded PKCS#10 certificate signing request. If this is invalid, we must
// not accept the CSR for further processing.
func validateCSR(obj *certificates.CertificateSigningRequest) error {
csr, err := certificates.ParseCSR(obj.Spec.Request)
if err != nil {
return err
}
// check that the signature is valid
return csr.CheckSignature()
}
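// Illustrative sketch (the helper name is hypothetical): the same
// parse-then-check flow as validateCSR, applied to raw request bytes rather
// than an API object.
func exampleCheckRawCSR(request []byte) error {
csr, err := certificates.ParseCSR(request)
if err != nil {
return err
}
// Rejects requests whose self-signature does not verify.
return csr.CheckSignature()
}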
func validateCertificate(pemData []byte) error {
if len(pemData) == 0 {
return nil
}
blocks := 0
for {
block, remainingData := pem.Decode(pemData)
if block == nil {
break
}
if block.Type != utilcert.CertificateBlockType {
return fmt.Errorf("only CERTIFICATE PEM blocks are allowed, found %q", block.Type)
}
if len(block.Headers) != 0 {
return fmt.Errorf("no PEM block headers are permitted")
}
blocks++
certs, err := x509.ParseCertificates(block.Bytes)
if err != nil {
return err
}
if len(certs) == 0 {
return fmt.Errorf("found CERTIFICATE PEM block containing 0 certificates")
}
pemData = remainingData
}
if blocks == 0 {
return fmt.Errorf("must contain at least one CERTIFICATE PEM block")
}
return nil
}
// We don't care what you call your certificate requests.
func ValidateCertificateRequestName(name string, prefix bool) []string {
return nil
}
func ValidateCertificateSigningRequestCreate(csr *certificates.CertificateSigningRequest) field.ErrorList {
opts := getValidationOptions(csr, nil)
return validateCertificateSigningRequest(csr, opts)
}
var (
allValidUsages = sets.NewString(
string(certificates.UsageSigning),
string(certificates.UsageDigitalSignature),
string(certificates.UsageContentCommitment),
string(certificates.UsageKeyEncipherment),
string(certificates.UsageKeyAgreement),
string(certificates.UsageDataEncipherment),
string(certificates.UsageCertSign),
string(certificates.UsageCRLSign),
string(certificates.UsageEncipherOnly),
string(certificates.UsageDecipherOnly),
string(certificates.UsageAny),
string(certificates.UsageServerAuth),
string(certificates.UsageClientAuth),
string(certificates.UsageCodeSigning),
string(certificates.UsageEmailProtection),
string(certificates.UsageSMIME),
string(certificates.UsageIPsecEndSystem),
string(certificates.UsageIPsecTunnel),
string(certificates.UsageIPsecUser),
string(certificates.UsageTimestamping),
string(certificates.UsageOCSPSigning),
string(certificates.UsageMicrosoftSGC),
string(certificates.UsageNetscapeSGC),
)
)
func validateCertificateSigningRequest(csr *certificates.CertificateSigningRequest, opts certificateValidationOptions) field.ErrorList {
isNamespaced := false
allErrs := apivalidation.ValidateObjectMeta(&csr.ObjectMeta, isNamespaced, ValidateCertificateRequestName, field.NewPath("metadata"))
specPath := field.NewPath("spec")
err := validateCSR(csr)
if err != nil {
allErrs = append(allErrs, field.Invalid(specPath.Child("request"), csr.Spec.Request, fmt.Sprintf("%v", err)))
}
if len(csr.Spec.Usages) == 0 {
allErrs = append(allErrs, field.Required(specPath.Child("usages"), ""))
}
if !opts.allowUnknownUsages {
for i, usage := range csr.Spec.Usages {
if !allValidUsages.Has(string(usage)) {
allErrs = append(allErrs, field.NotSupported(specPath.Child("usages").Index(i), usage, allValidUsages.List()))
}
}
}
if !opts.allowDuplicateUsages {
seen := make(map[certificates.KeyUsage]bool, len(csr.Spec.Usages))
for i, usage := range csr.Spec.Usages {
if seen[usage] {
allErrs = append(allErrs, field.Duplicate(specPath.Child("usages").Index(i), usage))
}
seen[usage] = true
}
}
if !opts.allowLegacySignerName && csr.Spec.SignerName == certificates.LegacyUnknownSignerName {
allErrs = append(allErrs, field.Invalid(specPath.Child("signerName"), csr.Spec.SignerName, "the legacy signerName is not allowed via this API version"))
} else {
allErrs = append(allErrs, apivalidation.ValidateSignerName(specPath.Child("signerName"), csr.Spec.SignerName)...)
}
if csr.Spec.ExpirationSeconds != nil && *csr.Spec.ExpirationSeconds < 600 {
allErrs = append(allErrs, field.Invalid(specPath.Child("expirationSeconds"), *csr.Spec.ExpirationSeconds, "may not specify a duration less than 600 seconds (10 minutes)"))
}
allErrs = append(allErrs, validateConditions(field.NewPath("status", "conditions"), csr, opts)...)
if !opts.allowArbitraryCertificate {
if err := validateCertificate(csr.Status.Certificate); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("status", "certificate"), "<certificate data>", err.Error()))
}
}
return allErrs
}
func validateConditions(fldPath *field.Path, csr *certificates.CertificateSigningRequest, opts certificateValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
seenTypes := map[certificates.RequestConditionType]bool{}
hasApproved := false
hasDenied := false
for i, c := range csr.Status.Conditions {
if !opts.allowEmptyConditionType {
if len(c.Type) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("type"), ""))
}
}
allowedStatusValues := allStatusValues
if trueConditionTypes.Has(string(c.Type)) {
allowedStatusValues = trueStatusOnly
}
switch {
case c.Status == "":
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("status"), ""))
case !allowedStatusValues.Has(string(c.Status)):
allErrs = append(allErrs, field.NotSupported(fldPath.Index(i).Child("status"), c.Status, allowedStatusValues.List()))
}
if !opts.allowBothApprovedAndDenied {
switch c.Type {
case certificates.CertificateApproved:
hasApproved = true
if hasDenied {
allErrs = append(allErrs, field.Invalid(fldPath, c.Type, "Approved and Denied conditions are mutually exclusive").WithOrigin("zeroOrOneOf").MarkCoveredByDeclarative())
}
case certificates.CertificateDenied:
hasDenied = true
if hasApproved {
allErrs = append(allErrs, field.Invalid(fldPath, c.Type, "Approved and Denied conditions are mutually exclusive").WithOrigin("zeroOrOneOf").MarkCoveredByDeclarative())
}
}
}
if !opts.allowDuplicateConditionTypes {
if seenTypes[c.Type] {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), c.Type))
}
seenTypes[c.Type] = true
}
}
return allErrs
}
func ValidateCertificateSigningRequestUpdate(newCSR, oldCSR *certificates.CertificateSigningRequest) field.ErrorList {
opts := getValidationOptions(newCSR, oldCSR)
return validateCertificateSigningRequestUpdate(newCSR, oldCSR, opts)
}
func ValidateCertificateSigningRequestStatusUpdate(newCSR, oldCSR *certificates.CertificateSigningRequest) field.ErrorList {
opts := getValidationOptions(newCSR, oldCSR)
opts.allowSettingCertificate = true
return validateCertificateSigningRequestUpdate(newCSR, oldCSR, opts)
}
func ValidateCertificateSigningRequestApprovalUpdate(newCSR, oldCSR *certificates.CertificateSigningRequest) field.ErrorList {
opts := getValidationOptions(newCSR, oldCSR)
opts.allowSettingApprovalConditions = true
return validateCertificateSigningRequestUpdate(newCSR, oldCSR, opts)
}
func validateCertificateSigningRequestUpdate(newCSR, oldCSR *certificates.CertificateSigningRequest, opts certificateValidationOptions) field.ErrorList {
validationErrorList := validateCertificateSigningRequest(newCSR, opts)
metaUpdateErrorList := apivalidation.ValidateObjectMetaUpdate(&newCSR.ObjectMeta, &oldCSR.ObjectMeta, field.NewPath("metadata"))
// prevent removal of existing Approved/Denied/Failed conditions
for _, t := range []certificates.RequestConditionType{certificates.CertificateApproved, certificates.CertificateDenied, certificates.CertificateFailed} {
oldConditions := findConditions(oldCSR, t)
newConditions := findConditions(newCSR, t)
if len(newConditions) < len(oldConditions) {
validationErrorList = append(validationErrorList, field.Forbidden(field.NewPath("status", "conditions"), fmt.Sprintf("updates may not remove a condition of type %q", t)))
}
}
if !opts.allowSettingApprovalConditions {
// prevent addition/removal/modification of Approved/Denied conditions
for _, t := range []certificates.RequestConditionType{certificates.CertificateApproved, certificates.CertificateDenied} {
oldConditions := findConditions(oldCSR, t)
newConditions := findConditions(newCSR, t)
switch {
case len(newConditions) < len(oldConditions):
// removals are prevented above
case len(newConditions) > len(oldConditions):
validationErrorList = append(validationErrorList, field.Forbidden(field.NewPath("status", "conditions"), fmt.Sprintf("updates may not add a condition of type %q", t)))
case !apiequality.Semantic.DeepEqual(oldConditions, newConditions):
conditionDiff := diff.Diff(oldConditions, newConditions)
validationErrorList = append(validationErrorList, field.Forbidden(field.NewPath("status", "conditions"), fmt.Sprintf("updates may not modify a condition of type %q\n%v", t, conditionDiff)))
}
}
}
if !bytes.Equal(newCSR.Status.Certificate, oldCSR.Status.Certificate) {
if !opts.allowSettingCertificate {
validationErrorList = append(validationErrorList, field.Forbidden(field.NewPath("status", "certificate"), "updates may not set certificate content"))
} else if !opts.allowResettingCertificate && len(oldCSR.Status.Certificate) > 0 {
validationErrorList = append(validationErrorList, field.Forbidden(field.NewPath("status", "certificate"), "updates may not modify existing certificate content"))
}
}
return append(validationErrorList, metaUpdateErrorList...)
}
// findConditions returns all instances of conditions of the specified type
func findConditions(csr *certificates.CertificateSigningRequest, conditionType certificates.RequestConditionType) []certificates.CertificateSigningRequestCondition {
var retval []certificates.CertificateSigningRequestCondition
for i, c := range csr.Status.Conditions {
if c.Type == conditionType {
retval = append(retval, csr.Status.Conditions[i])
}
}
return retval
}
// getValidationOptions returns the validation options used to stay
// compatible with the existing CSR.
// oldCSR may be nil if this is a create request.
// Validation options related to subresource-specific capabilities are set to false.
func getValidationOptions(newCSR, oldCSR *certificates.CertificateSigningRequest) certificateValidationOptions {
return certificateValidationOptions{
allowResettingCertificate: false,
allowBothApprovedAndDenied: allowBothApprovedAndDenied(oldCSR),
allowLegacySignerName: allowLegacySignerName(oldCSR),
allowDuplicateConditionTypes: allowDuplicateConditionTypes(oldCSR),
allowEmptyConditionType: allowEmptyConditionType(oldCSR),
allowArbitraryCertificate: allowArbitraryCertificate(newCSR, oldCSR),
allowDuplicateUsages: allowDuplicateUsages(oldCSR),
allowUnknownUsages: allowUnknownUsages(oldCSR),
}
}
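// Illustrative sketch (not part of the package API; the helper name is
// hypothetical): on create there is no persisted object, so every
// compatibility option below resolves to false and full validation applies.
// The relaxations only take effect on updates to objects that already
// exhibit the problem.
func exampleCreateOptionsAreStrict(newCSR *certificates.CertificateSigningRequest) bool {
opts := getValidationOptions(newCSR, nil)
return opts.allowLegacySignerName || opts.allowDuplicateUsages // false on create
}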
func allowBothApprovedAndDenied(oldCSR *certificates.CertificateSigningRequest) bool {
if oldCSR == nil {
return false
}
approved := false
denied := false
for _, c := range oldCSR.Status.Conditions {
if c.Type == certificates.CertificateApproved {
approved = true
} else if c.Type == certificates.CertificateDenied {
denied = true
}
}
// compatibility with existing data
return approved && denied
}
func allowLegacySignerName(oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case oldCSR != nil && oldCSR.Spec.SignerName == certificates.LegacyUnknownSignerName:
return true // compatibility with existing data
default:
return false
}
}
func allowDuplicateConditionTypes(oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case oldCSR != nil && hasDuplicateConditionTypes(oldCSR):
return true // compatibility with existing data
default:
return false
}
}
func hasDuplicateConditionTypes(csr *certificates.CertificateSigningRequest) bool {
seen := map[certificates.RequestConditionType]bool{}
for _, c := range csr.Status.Conditions {
if seen[c.Type] {
return true
}
seen[c.Type] = true
}
return false
}
func allowEmptyConditionType(oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case oldCSR != nil && hasEmptyConditionType(oldCSR):
return true // compatibility with existing data
default:
return false
}
}
func hasEmptyConditionType(csr *certificates.CertificateSigningRequest) bool {
for _, c := range csr.Status.Conditions {
if len(c.Type) == 0 {
return true
}
}
return false
}
func allowArbitraryCertificate(newCSR, oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case newCSR != nil && oldCSR != nil && bytes.Equal(newCSR.Status.Certificate, oldCSR.Status.Certificate):
return true // tolerate updates that don't touch status.certificate
case oldCSR != nil && validateCertificate(oldCSR.Status.Certificate) != nil:
return true // compatibility with existing data
default:
return false
}
}
func allowUnknownUsages(oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case oldCSR != nil && hasUnknownUsage(oldCSR.Spec.Usages):
return true // compatibility with existing data
default:
return false
}
}
func hasUnknownUsage(usages []certificates.KeyUsage) bool {
for _, usage := range usages {
if !allValidUsages.Has(string(usage)) {
return true
}
}
return false
}
func allowDuplicateUsages(oldCSR *certificates.CertificateSigningRequest) bool {
switch {
case oldCSR != nil && hasDuplicateUsage(oldCSR.Spec.Usages):
return true // compatibility with existing data
default:
return false
}
}
func hasDuplicateUsage(usages []certificates.KeyUsage) bool {
seen := make(map[certificates.KeyUsage]bool, len(usages))
for _, usage := range usages {
if seen[usage] {
return true
}
seen[usage] = true
}
return false
}
type ValidateClusterTrustBundleOptions struct {
SuppressBundleParsing bool
}
// ValidateClusterTrustBundle runs all validation checks on bundle.
func ValidateClusterTrustBundle(bundle *certificates.ClusterTrustBundle, opts ValidateClusterTrustBundleOptions) field.ErrorList {
var allErrors field.ErrorList
metaErrors := apivalidation.ValidateObjectMeta(&bundle.ObjectMeta, false, apivalidation.ValidateClusterTrustBundleName(bundle.Spec.SignerName), field.NewPath("metadata"))
allErrors = append(allErrors, metaErrors...)
if bundle.Spec.SignerName != "" {
signerNameErrors := apivalidation.ValidateSignerName(field.NewPath("spec", "signerName"), bundle.Spec.SignerName)
allErrors = append(allErrors, signerNameErrors...)
}
if !opts.SuppressBundleParsing {
pemErrors := validateTrustBundle(field.NewPath("spec", "trustBundle"), bundle.Spec.TrustBundle)
allErrors = append(allErrors, pemErrors...)
}
return allErrors
}
// ValidateClusterTrustBundleUpdate runs all update validation checks on an
// update.
func ValidateClusterTrustBundleUpdate(newBundle, oldBundle *certificates.ClusterTrustBundle) field.ErrorList {
// If the caller isn't changing the TrustBundle field, don't parse it.
// This helps smoothly handle changes in Go's PEM or X.509 parsing
// libraries.
opts := ValidateClusterTrustBundleOptions{}
if newBundle.Spec.TrustBundle == oldBundle.Spec.TrustBundle {
opts.SuppressBundleParsing = true
}
var allErrors field.ErrorList
allErrors = append(allErrors, ValidateClusterTrustBundle(newBundle, opts)...)
allErrors = append(allErrors, apivalidation.ValidateObjectMetaUpdate(&newBundle.ObjectMeta, &oldBundle.ObjectMeta, field.NewPath("metadata"))...)
allErrors = append(allErrors, apivalidation.ValidateImmutableField(newBundle.Spec.SignerName, oldBundle.Spec.SignerName, field.NewPath("spec", "signerName"))...)
return allErrors
}
// validateTrustBundle rejects intra-block headers, blocks
// that don't parse as X.509 CA certificates, and duplicate trust anchors. It
// requires that at least one trust anchor is provided.
func validateTrustBundle(path *field.Path, in string) field.ErrorList {
var allErrors field.ErrorList
if len(in) > certificates.MaxTrustBundleSize {
allErrors = append(allErrors, field.TooLong(path, "" /*unused*/, certificates.MaxTrustBundleSize))
return allErrors
}
blockDedupe := map[string][]int{}
rest := []byte(in)
var b *pem.Block
i := -1
for {
b, rest = pem.Decode(rest)
if b == nil {
break
}
i++
if b.Type != "CERTIFICATE" {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("entry %d has bad block type: %v", i, b.Type)))
continue
}
if len(b.Headers) != 0 {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("entry %d has PEM block headers", i)))
continue
}
cert, err := x509.ParseCertificate(b.Bytes)
if err != nil {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("entry %d does not parse as X.509", i)))
continue
}
if !cert.IsCA {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("entry %d does not have the CA bit set", i)))
continue
}
if !cert.BasicConstraintsValid {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("entry %d has invalid basic constraints", i)))
continue
}
blockDedupe[string(b.Bytes)] = append(blockDedupe[string(b.Bytes)], i)
}
// If we had a malformed block, don't also output potentially-redundant
// errors about duplicate or missing trust anchors.
if len(allErrors) != 0 {
return allErrors
}
if len(blockDedupe) == 0 {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", "at least one trust anchor must be provided"))
}
for _, indices := range blockDedupe {
if len(indices) > 1 {
allErrors = append(allErrors, field.Invalid(path, "<value omitted>", fmt.Sprintf("duplicate trust anchor (indices %v)", indices)))
}
}
return allErrors
}
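// Illustrative sketch (the helper name is hypothetical): collapsing the
// field.ErrorList produced by validateTrustBundle into a single error value.
func exampleCheckTrustBundle(pemBundle string) error {
errs := validateTrustBundle(field.NewPath("spec", "trustBundle"), pemBundle)
return errs.ToAggregate() // nil when the bundle is valid
}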
// ValidatePodCertificateRequestCreate runs all validation checks on a pod certificate request create.
func ValidatePodCertificateRequestCreate(req *certificates.PodCertificateRequest) field.ErrorList {
var allErrors field.ErrorList
metaErrors := apivalidation.ValidateObjectMeta(&req.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
allErrors = append(allErrors, metaErrors...)
signerNameErrors := apivalidation.ValidateSignerName(field.NewPath("spec", "signerName"), req.Spec.SignerName)
allErrors = append(allErrors, signerNameErrors...)
for _, msg := range apivalidation.ValidatePodName(req.Spec.PodName, false) {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "podName"), req.Spec.PodName, msg))
}
if len(req.Spec.PodUID) == 0 {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "podUID"), req.Spec.PodUID, "must not be empty"))
}
if len(req.Spec.PodUID) > 128 {
allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "podUID"), req.Spec.PodUID, 128))
}
for _, msg := range apivalidation.ValidateServiceAccountName(req.Spec.ServiceAccountName, false) {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "serviceAccountName"), req.Spec.ServiceAccountName, msg))
}
if len(req.Spec.ServiceAccountUID) == 0 {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "serviceAccountUID"), req.Spec.ServiceAccountUID, "must not be empty"))
}
if len(req.Spec.ServiceAccountUID) > 128 {
allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "serviceAccountUID"), req.Spec.ServiceAccountUID, 128))
}
for _, msg := range apivalidation.ValidateNodeName(string(req.Spec.NodeName), false) {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "nodeName"), req.Spec.NodeName, msg))
}
if len(req.Spec.NodeUID) == 0 {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "nodeUID"), req.Spec.NodeUID, "must not be empty"))
}
if len(req.Spec.NodeUID) > 128 {
allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "nodeUID"), req.Spec.NodeUID, 128))
}
if req.Spec.MaxExpirationSeconds == nil {
allErrors = append(allErrors, field.Required(field.NewPath("spec", "maxExpirationSeconds"), "must be set"))
return allErrors
}
if apivalidation.IsKubernetesSignerName(req.Spec.SignerName) {
// Kubernetes signers are restricted to max 24 hour certs
if !(certificates.MinMaxExpirationSeconds <= *req.Spec.MaxExpirationSeconds && *req.Spec.MaxExpirationSeconds <= certificates.KubernetesMaxMaxExpirationSeconds) {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), req.Spec.MaxExpirationSeconds, fmt.Sprintf("must be in the range [%d, %d]", certificates.MinMaxExpirationSeconds, certificates.KubernetesMaxMaxExpirationSeconds)))
}
} else {
// All other signers are restricted to max 91 day certs.
if !(certificates.MinMaxExpirationSeconds <= *req.Spec.MaxExpirationSeconds && *req.Spec.MaxExpirationSeconds <= certificates.MaxMaxExpirationSeconds) {
allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), req.Spec.MaxExpirationSeconds, fmt.Sprintf("must be in the range [%d, %d]", certificates.MinMaxExpirationSeconds, certificates.MaxMaxExpirationSeconds)))
}
}
if len(req.Spec.PKIXPublicKey) > certificates.MaxPKIXPublicKeySize {
allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "pkixPublicKey"), req.Spec.PKIXPublicKey, certificates.MaxPKIXPublicKeySize))
return allErrors
}
if len(req.Spec.ProofOfPossession) > certificates.MaxProofOfPossessionSize {
allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "proofOfPossession"), req.Spec.ProofOfPossession, certificates.MaxProofOfPossessionSize))
return allErrors
}
pubAny, err := x509.ParsePKIXPublicKey(req.Spec.PKIXPublicKey)
if err != nil {
allErrors = append(allErrors, field.Invalid(pkixPath, req.Spec.PKIXPublicKey, "must be a valid PKIX-serialized public key"))
return allErrors
}
// Verify public key properties and the proof-of-possession signature.
switch pub := pubAny.(type) {
case ed25519.PublicKey:
// ed25519 has no key configuration to check
if !ed25519.Verify(pub, []byte(req.Spec.PodUID), req.Spec.ProofOfPossession) {
allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
return allErrors
}
case *ecdsa.PublicKey:
if pub.Curve != elliptic.P256() && pub.Curve != elliptic.P384() && pub.Curve != elliptic.P521() {
allErrors = append(allErrors, field.Invalid(pkixPath, "curve "+pub.Curve.Params().Name, "elliptic public keys must use curve P256, P384, or P521"))
return allErrors
}
if !ecdsa.VerifyASN1(pub, hashBytes([]byte(req.Spec.PodUID)), req.Spec.ProofOfPossession) {
allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
return allErrors
}
case *rsa.PublicKey:
if pub.Size()*8 != 3072 && pub.Size()*8 != 4096 {
allErrors = append(allErrors, field.Invalid(pkixPath, fmt.Sprintf("%d-bit modulus", pub.Size()*8), "RSA keys must have modulus size 3072 or 4096"))
return allErrors
}
if err := rsa.VerifyPSS(pub, crypto.SHA256, hashBytes([]byte(req.Spec.PodUID)), req.Spec.ProofOfPossession, nil); err != nil {
allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
return allErrors
}
default:
allErrors = append(allErrors, field.Invalid(pkixPath, req.Spec.PKIXPublicKey, "unknown public key type; supported types are Ed25519, ECDSA, and RSA"))
return allErrors
}
return allErrors
}
func hashBytes(in []byte) []byte {
out := sha256.Sum256(in)
return out[:]
}
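// Illustrative sketch (assumption: this mirrors the client side, such as a
// kubelet; it is not part of this package's API, and the helper name is
// hypothetical): producing an Ed25519 proof of possession that the switch in
// ValidatePodCertificateRequestCreate accepts.
func exampleEd25519Proof(priv ed25519.PrivateKey, podUID string) []byte {
// Ed25519 signs the raw pod UID bytes directly; the ECDSA and RSA-PSS
// branches sign hashBytes(podUID) instead.
return ed25519.Sign(priv, []byte(podUID))
}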
var (
pkixPath = field.NewPath("spec", "pkixPublicKey")
popPath = field.NewPath("spec", "proofOfPossession")
certChainPath = field.NewPath("status", "certificateChain")
notBeforePath = field.NewPath("status", "notBefore")
notAfterPath = field.NewPath("status", "notAfter")
beginRefreshPath = field.NewPath("status", "beginRefreshAt")
)
// ValidatePodCertificateRequestUpdate runs all update validation checks on a
// non-status update.
//
// All spec fields are immutable after creation, and status updates must go
// through the dedicated status update verb, so only metadata updates are
// allowed.
func ValidatePodCertificateRequestUpdate(newReq, oldReq *certificates.PodCertificateRequest) field.ErrorList {
var allErrors field.ErrorList
allErrors = append(allErrors, apivalidation.ValidateObjectMetaUpdate(&newReq.ObjectMeta, &oldReq.ObjectMeta, field.NewPath("metadata"))...)
// All spec fields are immutable.
allErrors = append(allErrors, apivalidation.ValidateImmutableField(newReq.Spec, oldReq.Spec, field.NewPath("spec"))...)
return allErrors
}
// ValidatePodCertificateRequestStatusUpdate validates a status update for a
// PodCertificateRequest.
func ValidatePodCertificateRequestStatusUpdate(newReq, oldReq *certificates.PodCertificateRequest, clock clock.PassiveClock) field.ErrorList {
var allErrors field.ErrorList
// Metadata is *mostly* immutable... ManagedFields is allowed to change. We
// are reliant on the strategy that's calling us to have patched
// newReq.ObjectMeta using metav1.ResetObjectMetaForStatus.
allErrors = append(allErrors, apivalidation.ValidateObjectMetaUpdate(&newReq.ObjectMeta, &oldReq.ObjectMeta, field.NewPath("metadata"))...)
if len(allErrors) > 0 {
return allErrors
}
// Don't validate spec. Strategy has stomped it.
// There can be at most one of the known conditions, and it must have status "True"
numKnownConditions := 0
for i, cond := range newReq.Status.Conditions {
switch cond.Type {
case certificates.PodCertificateRequestConditionTypeIssued, certificates.PodCertificateRequestConditionTypeDenied, certificates.PodCertificateRequestConditionTypeFailed:
numKnownConditions++
if numKnownConditions > 1 {
allErrors = append(allErrors, field.Invalid(field.NewPath("status", "conditions", formatIndex(i), "type"), cond.Type, `There may be at most one condition with type "Issued", "Denied", or "Failed"`))
}
if cond.Status != metav1.ConditionTrue {
allErrors = append(allErrors, field.NotSupported(field.NewPath("status", "conditions", formatIndex(i), "status"), cond.Status, []metav1.ConditionStatus{metav1.ConditionTrue}))
}
default:
allErrors = append(allErrors, field.NotSupported(field.NewPath("status", "conditions", formatIndex(i), "type"), cond.Type, []string{certificates.PodCertificateRequestConditionTypeIssued, certificates.PodCertificateRequestConditionTypeDenied, certificates.PodCertificateRequestConditionTypeFailed}))
}
}
allErrors = append(allErrors, metav1validation.ValidateConditions(newReq.Status.Conditions, field.NewPath("status", "conditions"))...)
// Bail if something seems wrong with the conditions --- we use the
// conditions to drive validation of the remainder of the status fields.
if len(allErrors) > 0 {
return allErrors
}
// Is the original PCR in a terminal condition? If so, the entire status
// field (including conditions) is immutable. No more changes are
// permitted.
if pcrIsIssued(oldReq) || pcrIsDenied(oldReq) || pcrIsFailed(oldReq) {
allErrors = append(allErrors, validateSemanticEquality(newReq.Status, oldReq.Status, field.NewPath("status"), "immutable after PodCertificateRequest is issued, denied, or failed")...)
return allErrors
}
// Are we transitioning to the "denied" or "failed" terminal conditions?
if pcrIsDenied(newReq) || pcrIsFailed(newReq) {
// No other status fields may change besides conditions.
wantStatus := certificates.PodCertificateRequestStatus{
Conditions: newReq.Status.Conditions,
}
allErrors = append(allErrors, validateSemanticEquality(newReq.Status, wantStatus, field.NewPath("status"), "non-condition status fields must be empty when denying or failing the PodCertificateRequest")...)
return allErrors
}
// Are we transitioning to the "issued" terminal condition?
if pcrIsIssued(newReq) {
if len(newReq.Status.CertificateChain) > certificates.MaxCertificateChainSize {
allErrors = append(allErrors, field.TooLong(field.NewPath("status", "certificateChain"), newReq.Status.CertificateChain, certificates.MaxCertificateChainSize))
return allErrors
}
leafBlock, rest := pem.Decode([]byte(newReq.Status.CertificateChain))
if leafBlock == nil {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must contain at least one certificate"))
return allErrors
}
if leafBlock.Type != "CERTIFICATE" {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must consist entirely of CERTIFICATE PEM blocks"))
return allErrors
}
leafCert, err := x509.ParseCertificate(leafBlock.Bytes)
if err != nil {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate does not parse as valid X.509"))
return allErrors
}
// Was the certificate issued to the public key in the spec?
wantPKAny, err := x509.ParsePKIXPublicKey(oldReq.Spec.PKIXPublicKey)
if err != nil {
allErrors = append(allErrors, field.Invalid(pkixPath, oldReq.Spec.PKIXPublicKey, "must be a valid PKIX-serialized public key"))
return allErrors
}
switch wantPK := wantPKAny.(type) {
case ed25519.PublicKey:
if !wantPK.Equal(leafCert.PublicKey) {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
return allErrors
}
case *rsa.PublicKey:
if !wantPK.Equal(leafCert.PublicKey) {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
return allErrors
}
case *ecdsa.PublicKey:
if !wantPK.Equal(leafCert.PublicKey) {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
return allErrors
}
}
// All timestamps must be set.
if newReq.Status.NotBefore == nil {
allErrors = append(allErrors, field.Required(notBeforePath, "must be present and consistent with the issued certificate"))
}
if newReq.Status.NotAfter == nil {
allErrors = append(allErrors, field.Required(notAfterPath, "must be present and consistent with the issued certificate"))
}
if newReq.Status.BeginRefreshAt == nil {
allErrors = append(allErrors, field.Required(beginRefreshPath, "must be present and in the range [notbefore+10min, notafter-10min]"))
}
if len(allErrors) > 0 {
return allErrors
}
// Validate that status.notBefore matches the leaf certificate, and is
// within 5 minutes of the current time.
if !newReq.Status.NotBefore.Time.Equal(leafCert.NotBefore) {
allErrors = append(allErrors, field.Invalid(notBeforePath, newReq.Status.NotBefore.Time, "must be set to the NotBefore time encoded in the leaf certificate"))
return allErrors
}
if !timeNear(newReq.Status.NotBefore.Time, clock.Now(), 5*time.Minute) {
allErrors = append(allErrors, field.Invalid(notBeforePath, newReq.Status.NotBefore.Time, "must be set to within 5 minutes of kube-apiserver's current time"))
return allErrors
}
// Validate that status.notAfter matches the leaf certificate.
if !newReq.Status.NotAfter.Time.Equal(leafCert.NotAfter) {
allErrors = append(allErrors, field.Invalid(notAfterPath, newReq.Status.NotAfter.Time, "must be set to the NotAfter time encoded in the leaf certificate"))
return allErrors
}
// Validate the leaf cert lifetime against minimum and maximum constraints.
lifetime := leafCert.NotAfter.Sub(leafCert.NotBefore)
if lifetime < 1*time.Hour {
allErrors = append(allErrors, field.Invalid(certChainPath, lifetime, "leaf certificate lifetime must be >= 1 hour"))
return allErrors
}
if lifetime > time.Duration(*newReq.Spec.MaxExpirationSeconds)*time.Second {
allErrors = append(allErrors, field.Invalid(certChainPath, lifetime, fmt.Sprintf("leaf certificate lifetime must be <= spec.maxExpirationSeconds (%v)", *newReq.Spec.MaxExpirationSeconds)))
return allErrors
}
// Validate that BeginRefreshAt is within limits.
if newReq.Status.BeginRefreshAt.Time.Before(newReq.Status.NotBefore.Time.Add(10 * time.Minute)) {
allErrors = append(allErrors, field.Invalid(beginRefreshPath, newReq.Status.BeginRefreshAt.Time, "must be at least 10 minutes after status.notBefore"))
return allErrors
}
if newReq.Status.BeginRefreshAt.Time.After(newReq.Status.NotAfter.Time.Add(-10 * time.Minute)) {
allErrors = append(allErrors, field.Invalid(beginRefreshPath, newReq.Status.BeginRefreshAt.Time, "must be at least 10 minutes before status.notAfter"))
return allErrors
}
// Check the remainder of the certificates in the chain, if any. We cannot
// easily verify the chain, because the Golang X.509 libraries are wisely
// written to prevent us from doing stupid things like verifying a partial
// chain, but we can at least check that they are valid certificates.
for {
var nextBlock *pem.Block
nextBlock, rest = pem.Decode(rest)
if nextBlock == nil {
break
}
if nextBlock.Type != "CERTIFICATE" {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must consist entirely of CERTIFICATE PEM blocks"))
return allErrors
}
_, err := x509.ParseCertificate(nextBlock.Bytes)
if err != nil {
allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "intermediate certificate does not parse as valid X.509"))
return allErrors
}
}
return allErrors
}
// We are not transitioning to any terminal state. The whole status object
// is immutable.
allErrors = append(allErrors, validateSemanticEquality(newReq.Status, oldReq.Status, field.NewPath("status"), `status is immutable unless transitioning to "Issued", "Denied", or "Failed"`)...)
return allErrors
}
func pcrIsIssued(pcr *certificates.PodCertificateRequest) bool {
for _, cond := range pcr.Status.Conditions {
if cond.Type == certificates.PodCertificateRequestConditionTypeIssued && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func pcrIsDenied(pcr *certificates.PodCertificateRequest) bool {
for _, cond := range pcr.Status.Conditions {
if cond.Type == certificates.PodCertificateRequestConditionTypeDenied && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func pcrIsFailed(pcr *certificates.PodCertificateRequest) bool {
for _, cond := range pcr.Status.Conditions {
if cond.Type == certificates.PodCertificateRequestConditionTypeFailed && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
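// The three predicates above repeat the same scan over status.conditions; a
// hypothetical generic helper (illustrative only, not part of this package)
// could express the pattern once:
//
//	func pcrHasTrueCondition(pcr *certificates.PodCertificateRequest, condType string) bool {
//		for _, cond := range pcr.Status.Conditions {
//			if cond.Type == condType && cond.Status == metav1.ConditionTrue {
//				return true
//			}
//		}
//		return false
//	}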
func formatIndex(i int) string {
return "[" + strconv.Itoa(i) + "]"
}
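// For example, formatIndex(3) returns "[3]", matching the index syntax used by field.Path.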
// validateSemanticEquality is similar to apivalidation.ValidateImmutableField, but lets us supply our own detail string.
func validateSemanticEquality(oldVal, newVal any, fldPath *field.Path, detail string) field.ErrorList {
allErrs := field.ErrorList{}
if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
allErrs = append(allErrs, field.Invalid(fldPath, field.OmitValueType{}, detail))
}
return allErrs
}
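// timeNear reports whether a falls strictly within skew of b; both bounds are
// exclusive. For example (illustrative):
//
//	now := time.Now()
//	timeNear(now.Add(4*time.Minute), now, 5*time.Minute) // true
//	timeNear(now.Add(5*time.Minute), now, 5*time.Minute) // false: exactly at the bound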
func timeNear(a, b time.Time, skew time.Duration) bool {
return a.After(b.Add(-skew)) && a.Before(b.Add(skew))
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package certificates
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest.
func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest {
if in == nil {
return nil
}
out := new(CertificateSigningRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition.
func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition {
if in == nil {
return nil
}
out := new(CertificateSigningRequestCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CertificateSigningRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList.
func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList {
if in == nil {
return nil
}
out := new(CertificateSigningRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) {
*out = *in
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.ExpirationSeconds != nil {
in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
*out = new(int32)
**out = **in
}
if in.Usages != nil {
in, out := &in.Usages, &out.Usages
*out = make([]KeyUsage, len(*in))
copy(*out, *in)
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(ExtraValue, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec.
func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec {
if in == nil {
return nil
}
out := new(CertificateSigningRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]CertificateSigningRequestCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Certificate != nil {
in, out := &in.Certificate, &out.Certificate
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus.
func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus {
if in == nil {
return nil
}
out := new(CertificateSigningRequestStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle.
func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle {
if in == nil {
return nil
}
out := new(ClusterTrustBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterTrustBundle, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList.
func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList {
if in == nil {
return nil
}
out := new(ClusterTrustBundleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec.
func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
if in == nil {
return nil
}
out := new(ClusterTrustBundleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
if in == nil {
return nil
}
out := new(PodCertificateRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodCertificateRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
if in == nil {
return nil
}
out := new(PodCertificateRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
*out = *in
if in.MaxExpirationSeconds != nil {
in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
*out = new(int32)
**out = **in
}
if in.PKIXPublicKey != nil {
in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.ProofOfPossession != nil {
in, out := &in.ProofOfPossession, &out.ProofOfPossession
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
if in == nil {
return nil
}
out := new(PodCertificateRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NotBefore != nil {
in, out := &in.NotBefore, &out.NotBefore
*out = (*in).DeepCopy()
}
if in.BeginRefreshAt != nil {
in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
*out = (*in).DeepCopy()
}
if in.NotAfter != nil {
in, out := &in.NotAfter, &out.NotAfter
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
if in == nil {
return nil
}
out := new(PodCertificateRequestStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the coordination API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/coordination"
v1 "k8s.io/kubernetes/pkg/apis/coordination/v1"
"k8s.io/kubernetes/pkg/apis/coordination/v1alpha2"
"k8s.io/kubernetes/pkg/apis/coordination/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(coordination.AddToScheme(scheme))
utilruntime.Must(v1alpha2.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha2.SchemeGroupVersion))
}
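// A minimal sketch of standalone use, for callers that do not want the
// legacyscheme side effect of init (assumes only this package and apimachinery):
//
//	scheme := runtime.NewScheme()
//	install.Install(scheme)
//	// scheme now encodes/decodes coordination objects, preferring v1 over
//	// v1beta1 and v1alpha2.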
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package coordination
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "coordination.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
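// Illustrative results of the helpers above:
//
//	Kind("Lease")      // schema.GroupKind{Group: "coordination.k8s.io", Kind: "Lease"}
//	Resource("leases") // schema.GroupResource{Group: "coordination.k8s.io", Resource: "leases"}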
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&Lease{},
&LeaseList{},
&LeaseCandidate{},
&LeaseCandidateList{},
)
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
coordinationv1 "k8s.io/api/coordination/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "coordination.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &coordinationv1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
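// For context, SchemeBuilder.Register only queues functions; they run when
// AddToScheme is invoked. A minimal sketch (assuming only apimachinery):
//
//	sb := runtime.NewSchemeBuilder()
//	sb.Register(func(s *runtime.Scheme) error {
//		// add types, defaults, or conversions to s here
//		return nil
//	})
//	_ = sb.AddToScheme(scheme) // runs every registered function in order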
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
coordinationv1 "k8s.io/api/coordination/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
coordination "k8s.io/kubernetes/pkg/apis/coordination"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*coordinationv1.Lease)(nil), (*coordination.Lease)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Lease_To_coordination_Lease(a.(*coordinationv1.Lease), b.(*coordination.Lease), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.Lease)(nil), (*coordinationv1.Lease)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_Lease_To_v1_Lease(a.(*coordination.Lease), b.(*coordinationv1.Lease), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1.LeaseList)(nil), (*coordination.LeaseList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LeaseList_To_coordination_LeaseList(a.(*coordinationv1.LeaseList), b.(*coordination.LeaseList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseList)(nil), (*coordinationv1.LeaseList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseList_To_v1_LeaseList(a.(*coordination.LeaseList), b.(*coordinationv1.LeaseList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1.LeaseSpec)(nil), (*coordination.LeaseSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LeaseSpec_To_coordination_LeaseSpec(a.(*coordinationv1.LeaseSpec), b.(*coordination.LeaseSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseSpec)(nil), (*coordinationv1.LeaseSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseSpec_To_v1_LeaseSpec(a.(*coordination.LeaseSpec), b.(*coordinationv1.LeaseSpec), scope)
}); err != nil {
return err
}
return nil
}
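// Once registered, these functions are reached through the scheme's generic
// conversion entry point; a hedged sketch, assuming a *runtime.Scheme s that
// RegisterConversions has been applied to:
//
//	var internal coordination.Lease
//	external := &coordinationv1.Lease{}
//	_ = s.Convert(external, &internal, nil) // dispatches to Convert_v1_Lease_To_coordination_Lease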
func autoConvert_v1_Lease_To_coordination_Lease(in *coordinationv1.Lease, out *coordination.Lease, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_LeaseSpec_To_coordination_LeaseSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_Lease_To_coordination_Lease is an autogenerated conversion function.
func Convert_v1_Lease_To_coordination_Lease(in *coordinationv1.Lease, out *coordination.Lease, s conversion.Scope) error {
return autoConvert_v1_Lease_To_coordination_Lease(in, out, s)
}
func autoConvert_coordination_Lease_To_v1_Lease(in *coordination.Lease, out *coordinationv1.Lease, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_coordination_LeaseSpec_To_v1_LeaseSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_coordination_Lease_To_v1_Lease is an autogenerated conversion function.
func Convert_coordination_Lease_To_v1_Lease(in *coordination.Lease, out *coordinationv1.Lease, s conversion.Scope) error {
return autoConvert_coordination_Lease_To_v1_Lease(in, out, s)
}
func autoConvert_v1_LeaseList_To_coordination_LeaseList(in *coordinationv1.LeaseList, out *coordination.LeaseList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordination.Lease)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_LeaseList_To_coordination_LeaseList is an autogenerated conversion function.
func Convert_v1_LeaseList_To_coordination_LeaseList(in *coordinationv1.LeaseList, out *coordination.LeaseList, s conversion.Scope) error {
return autoConvert_v1_LeaseList_To_coordination_LeaseList(in, out, s)
}
func autoConvert_coordination_LeaseList_To_v1_LeaseList(in *coordination.LeaseList, out *coordinationv1.LeaseList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordinationv1.Lease)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_coordination_LeaseList_To_v1_LeaseList is an autogenerated conversion function.
func Convert_coordination_LeaseList_To_v1_LeaseList(in *coordination.LeaseList, out *coordinationv1.LeaseList, s conversion.Scope) error {
return autoConvert_coordination_LeaseList_To_v1_LeaseList(in, out, s)
}
func autoConvert_v1_LeaseSpec_To_coordination_LeaseSpec(in *coordinationv1.LeaseSpec, out *coordination.LeaseSpec, s conversion.Scope) error {
out.HolderIdentity = (*string)(unsafe.Pointer(in.HolderIdentity))
out.LeaseDurationSeconds = (*int32)(unsafe.Pointer(in.LeaseDurationSeconds))
out.AcquireTime = (*metav1.MicroTime)(unsafe.Pointer(in.AcquireTime))
out.RenewTime = (*metav1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.LeaseTransitions = (*int32)(unsafe.Pointer(in.LeaseTransitions))
out.Strategy = (*coordination.CoordinatedLeaseStrategy)(unsafe.Pointer(in.Strategy))
out.PreferredHolder = (*string)(unsafe.Pointer(in.PreferredHolder))
return nil
}
// Convert_v1_LeaseSpec_To_coordination_LeaseSpec is an autogenerated conversion function.
func Convert_v1_LeaseSpec_To_coordination_LeaseSpec(in *coordinationv1.LeaseSpec, out *coordination.LeaseSpec, s conversion.Scope) error {
return autoConvert_v1_LeaseSpec_To_coordination_LeaseSpec(in, out, s)
}
func autoConvert_coordination_LeaseSpec_To_v1_LeaseSpec(in *coordination.LeaseSpec, out *coordinationv1.LeaseSpec, s conversion.Scope) error {
out.HolderIdentity = (*string)(unsafe.Pointer(in.HolderIdentity))
out.LeaseDurationSeconds = (*int32)(unsafe.Pointer(in.LeaseDurationSeconds))
out.AcquireTime = (*metav1.MicroTime)(unsafe.Pointer(in.AcquireTime))
out.RenewTime = (*metav1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.LeaseTransitions = (*int32)(unsafe.Pointer(in.LeaseTransitions))
out.Strategy = (*coordinationv1.CoordinatedLeaseStrategy)(unsafe.Pointer(in.Strategy))
out.PreferredHolder = (*string)(unsafe.Pointer(in.PreferredHolder))
return nil
}
// Convert_coordination_LeaseSpec_To_v1_LeaseSpec is an autogenerated conversion function.
func Convert_coordination_LeaseSpec_To_v1_LeaseSpec(in *coordination.LeaseSpec, out *coordinationv1.LeaseSpec, s conversion.Scope) error {
return autoConvert_coordination_LeaseSpec_To_v1_LeaseSpec(in, out, s)
}
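// Note on the unsafe.Pointer casts above: conversion-gen emits them only when
// the external and internal types have identical memory layouts, so slices and
// pointers can be reinterpreted without a per-element copy. A generic sketch of
// the idiom, on illustrative types:
//
//	type A struct{ X int32 }
//	type B struct{ X int32 } // identical layout to A
//	as := []A{{1}, {2}}
//	bs := *(*[]B)(unsafe.Pointer(&as)) // zero-copy reinterpretation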
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "coordination.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &coordinationv1alpha2.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha2
import (
unsafe "unsafe"
coordinationv1 "k8s.io/api/coordination/v1"
coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
coordination "k8s.io/kubernetes/pkg/apis/coordination"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*coordinationv1alpha2.LeaseCandidate)(nil), (*coordination.LeaseCandidate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_LeaseCandidate_To_coordination_LeaseCandidate(a.(*coordinationv1alpha2.LeaseCandidate), b.(*coordination.LeaseCandidate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidate)(nil), (*coordinationv1alpha2.LeaseCandidate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidate_To_v1alpha2_LeaseCandidate(a.(*coordination.LeaseCandidate), b.(*coordinationv1alpha2.LeaseCandidate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1alpha2.LeaseCandidateList)(nil), (*coordination.LeaseCandidateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_LeaseCandidateList_To_coordination_LeaseCandidateList(a.(*coordinationv1alpha2.LeaseCandidateList), b.(*coordination.LeaseCandidateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidateList)(nil), (*coordinationv1alpha2.LeaseCandidateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidateList_To_v1alpha2_LeaseCandidateList(a.(*coordination.LeaseCandidateList), b.(*coordinationv1alpha2.LeaseCandidateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1alpha2.LeaseCandidateSpec)(nil), (*coordination.LeaseCandidateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(a.(*coordinationv1alpha2.LeaseCandidateSpec), b.(*coordination.LeaseCandidateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidateSpec)(nil), (*coordinationv1alpha2.LeaseCandidateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec(a.(*coordination.LeaseCandidateSpec), b.(*coordinationv1alpha2.LeaseCandidateSpec), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha2_LeaseCandidate_To_coordination_LeaseCandidate(in *coordinationv1alpha2.LeaseCandidate, out *coordination.LeaseCandidate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_LeaseCandidate_To_coordination_LeaseCandidate is an autogenerated conversion function.
func Convert_v1alpha2_LeaseCandidate_To_coordination_LeaseCandidate(in *coordinationv1alpha2.LeaseCandidate, out *coordination.LeaseCandidate, s conversion.Scope) error {
return autoConvert_v1alpha2_LeaseCandidate_To_coordination_LeaseCandidate(in, out, s)
}
func autoConvert_coordination_LeaseCandidate_To_v1alpha2_LeaseCandidate(in *coordination.LeaseCandidate, out *coordinationv1alpha2.LeaseCandidate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_coordination_LeaseCandidate_To_v1alpha2_LeaseCandidate is an autogenerated conversion function.
func Convert_coordination_LeaseCandidate_To_v1alpha2_LeaseCandidate(in *coordination.LeaseCandidate, out *coordinationv1alpha2.LeaseCandidate, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidate_To_v1alpha2_LeaseCandidate(in, out, s)
}
func autoConvert_v1alpha2_LeaseCandidateList_To_coordination_LeaseCandidateList(in *coordinationv1alpha2.LeaseCandidateList, out *coordination.LeaseCandidateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordination.LeaseCandidate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha2_LeaseCandidateList_To_coordination_LeaseCandidateList is an autogenerated conversion function.
func Convert_v1alpha2_LeaseCandidateList_To_coordination_LeaseCandidateList(in *coordinationv1alpha2.LeaseCandidateList, out *coordination.LeaseCandidateList, s conversion.Scope) error {
return autoConvert_v1alpha2_LeaseCandidateList_To_coordination_LeaseCandidateList(in, out, s)
}
func autoConvert_coordination_LeaseCandidateList_To_v1alpha2_LeaseCandidateList(in *coordination.LeaseCandidateList, out *coordinationv1alpha2.LeaseCandidateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordinationv1alpha2.LeaseCandidate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_coordination_LeaseCandidateList_To_v1alpha2_LeaseCandidateList is an autogenerated conversion function.
func Convert_coordination_LeaseCandidateList_To_v1alpha2_LeaseCandidateList(in *coordination.LeaseCandidateList, out *coordinationv1alpha2.LeaseCandidateList, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidateList_To_v1alpha2_LeaseCandidateList(in, out, s)
}
func autoConvert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in *coordinationv1alpha2.LeaseCandidateSpec, out *coordination.LeaseCandidateSpec, s conversion.Scope) error {
out.LeaseName = in.LeaseName
out.PingTime = (*v1.MicroTime)(unsafe.Pointer(in.PingTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.BinaryVersion = in.BinaryVersion
out.EmulationVersion = in.EmulationVersion
out.Strategy = coordination.CoordinatedLeaseStrategy(in.Strategy)
return nil
}
// Convert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec is an autogenerated conversion function.
func Convert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in *coordinationv1alpha2.LeaseCandidateSpec, out *coordination.LeaseCandidateSpec, s conversion.Scope) error {
return autoConvert_v1alpha2_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in, out, s)
}
func autoConvert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec(in *coordination.LeaseCandidateSpec, out *coordinationv1alpha2.LeaseCandidateSpec, s conversion.Scope) error {
out.LeaseName = in.LeaseName
out.PingTime = (*v1.MicroTime)(unsafe.Pointer(in.PingTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.BinaryVersion = in.BinaryVersion
out.EmulationVersion = in.EmulationVersion
out.Strategy = coordinationv1.CoordinatedLeaseStrategy(in.Strategy)
return nil
}
// Convert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec is an autogenerated conversion function.
func Convert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec(in *coordination.LeaseCandidateSpec, out *coordinationv1alpha2.LeaseCandidateSpec, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidateSpec_To_v1alpha2_LeaseCandidateSpec(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "coordination.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &coordinationv1beta1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
coordinationv1 "k8s.io/api/coordination/v1"
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
coordination "k8s.io/kubernetes/pkg/apis/coordination"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.Lease)(nil), (*coordination.Lease)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Lease_To_coordination_Lease(a.(*coordinationv1beta1.Lease), b.(*coordination.Lease), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.Lease)(nil), (*coordinationv1beta1.Lease)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_Lease_To_v1beta1_Lease(a.(*coordination.Lease), b.(*coordinationv1beta1.Lease), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.LeaseCandidate)(nil), (*coordination.LeaseCandidate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LeaseCandidate_To_coordination_LeaseCandidate(a.(*coordinationv1beta1.LeaseCandidate), b.(*coordination.LeaseCandidate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidate)(nil), (*coordinationv1beta1.LeaseCandidate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidate_To_v1beta1_LeaseCandidate(a.(*coordination.LeaseCandidate), b.(*coordinationv1beta1.LeaseCandidate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.LeaseCandidateList)(nil), (*coordination.LeaseCandidateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LeaseCandidateList_To_coordination_LeaseCandidateList(a.(*coordinationv1beta1.LeaseCandidateList), b.(*coordination.LeaseCandidateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidateList)(nil), (*coordinationv1beta1.LeaseCandidateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidateList_To_v1beta1_LeaseCandidateList(a.(*coordination.LeaseCandidateList), b.(*coordinationv1beta1.LeaseCandidateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.LeaseCandidateSpec)(nil), (*coordination.LeaseCandidateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(a.(*coordinationv1beta1.LeaseCandidateSpec), b.(*coordination.LeaseCandidateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseCandidateSpec)(nil), (*coordinationv1beta1.LeaseCandidateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec(a.(*coordination.LeaseCandidateSpec), b.(*coordinationv1beta1.LeaseCandidateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.LeaseList)(nil), (*coordination.LeaseList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LeaseList_To_coordination_LeaseList(a.(*coordinationv1beta1.LeaseList), b.(*coordination.LeaseList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseList)(nil), (*coordinationv1beta1.LeaseList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseList_To_v1beta1_LeaseList(a.(*coordination.LeaseList), b.(*coordinationv1beta1.LeaseList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordinationv1beta1.LeaseSpec)(nil), (*coordination.LeaseSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LeaseSpec_To_coordination_LeaseSpec(a.(*coordinationv1beta1.LeaseSpec), b.(*coordination.LeaseSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*coordination.LeaseSpec)(nil), (*coordinationv1beta1.LeaseSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_coordination_LeaseSpec_To_v1beta1_LeaseSpec(a.(*coordination.LeaseSpec), b.(*coordinationv1beta1.LeaseSpec), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_Lease_To_coordination_Lease(in *coordinationv1beta1.Lease, out *coordination.Lease, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_LeaseSpec_To_coordination_LeaseSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Lease_To_coordination_Lease is an autogenerated conversion function.
func Convert_v1beta1_Lease_To_coordination_Lease(in *coordinationv1beta1.Lease, out *coordination.Lease, s conversion.Scope) error {
return autoConvert_v1beta1_Lease_To_coordination_Lease(in, out, s)
}
func autoConvert_coordination_Lease_To_v1beta1_Lease(in *coordination.Lease, out *coordinationv1beta1.Lease, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_coordination_LeaseSpec_To_v1beta1_LeaseSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_coordination_Lease_To_v1beta1_Lease is an autogenerated conversion function.
func Convert_coordination_Lease_To_v1beta1_Lease(in *coordination.Lease, out *coordinationv1beta1.Lease, s conversion.Scope) error {
return autoConvert_coordination_Lease_To_v1beta1_Lease(in, out, s)
}
func autoConvert_v1beta1_LeaseCandidate_To_coordination_LeaseCandidate(in *coordinationv1beta1.LeaseCandidate, out *coordination.LeaseCandidate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_LeaseCandidate_To_coordination_LeaseCandidate is an autogenerated conversion function.
func Convert_v1beta1_LeaseCandidate_To_coordination_LeaseCandidate(in *coordinationv1beta1.LeaseCandidate, out *coordination.LeaseCandidate, s conversion.Scope) error {
return autoConvert_v1beta1_LeaseCandidate_To_coordination_LeaseCandidate(in, out, s)
}
func autoConvert_coordination_LeaseCandidate_To_v1beta1_LeaseCandidate(in *coordination.LeaseCandidate, out *coordinationv1beta1.LeaseCandidate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_coordination_LeaseCandidate_To_v1beta1_LeaseCandidate is an autogenerated conversion function.
func Convert_coordination_LeaseCandidate_To_v1beta1_LeaseCandidate(in *coordination.LeaseCandidate, out *coordinationv1beta1.LeaseCandidate, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidate_To_v1beta1_LeaseCandidate(in, out, s)
}
func autoConvert_v1beta1_LeaseCandidateList_To_coordination_LeaseCandidateList(in *coordinationv1beta1.LeaseCandidateList, out *coordination.LeaseCandidateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordination.LeaseCandidate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_LeaseCandidateList_To_coordination_LeaseCandidateList is an autogenerated conversion function.
func Convert_v1beta1_LeaseCandidateList_To_coordination_LeaseCandidateList(in *coordinationv1beta1.LeaseCandidateList, out *coordination.LeaseCandidateList, s conversion.Scope) error {
return autoConvert_v1beta1_LeaseCandidateList_To_coordination_LeaseCandidateList(in, out, s)
}
func autoConvert_coordination_LeaseCandidateList_To_v1beta1_LeaseCandidateList(in *coordination.LeaseCandidateList, out *coordinationv1beta1.LeaseCandidateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordinationv1beta1.LeaseCandidate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_coordination_LeaseCandidateList_To_v1beta1_LeaseCandidateList is an autogenerated conversion function.
func Convert_coordination_LeaseCandidateList_To_v1beta1_LeaseCandidateList(in *coordination.LeaseCandidateList, out *coordinationv1beta1.LeaseCandidateList, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidateList_To_v1beta1_LeaseCandidateList(in, out, s)
}
func autoConvert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in *coordinationv1beta1.LeaseCandidateSpec, out *coordination.LeaseCandidateSpec, s conversion.Scope) error {
out.LeaseName = in.LeaseName
out.PingTime = (*v1.MicroTime)(unsafe.Pointer(in.PingTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.BinaryVersion = in.BinaryVersion
out.EmulationVersion = in.EmulationVersion
out.Strategy = coordination.CoordinatedLeaseStrategy(in.Strategy)
return nil
}
// Convert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec is an autogenerated conversion function.
func Convert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in *coordinationv1beta1.LeaseCandidateSpec, out *coordination.LeaseCandidateSpec, s conversion.Scope) error {
return autoConvert_v1beta1_LeaseCandidateSpec_To_coordination_LeaseCandidateSpec(in, out, s)
}
func autoConvert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec(in *coordination.LeaseCandidateSpec, out *coordinationv1beta1.LeaseCandidateSpec, s conversion.Scope) error {
out.LeaseName = in.LeaseName
out.PingTime = (*v1.MicroTime)(unsafe.Pointer(in.PingTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.BinaryVersion = in.BinaryVersion
out.EmulationVersion = in.EmulationVersion
out.Strategy = coordinationv1.CoordinatedLeaseStrategy(in.Strategy)
return nil
}
// Convert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec is an autogenerated conversion function.
func Convert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec(in *coordination.LeaseCandidateSpec, out *coordinationv1beta1.LeaseCandidateSpec, s conversion.Scope) error {
return autoConvert_coordination_LeaseCandidateSpec_To_v1beta1_LeaseCandidateSpec(in, out, s)
}
func autoConvert_v1beta1_LeaseList_To_coordination_LeaseList(in *coordinationv1beta1.LeaseList, out *coordination.LeaseList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordination.Lease)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_LeaseList_To_coordination_LeaseList is an autogenerated conversion function.
func Convert_v1beta1_LeaseList_To_coordination_LeaseList(in *coordinationv1beta1.LeaseList, out *coordination.LeaseList, s conversion.Scope) error {
return autoConvert_v1beta1_LeaseList_To_coordination_LeaseList(in, out, s)
}
func autoConvert_coordination_LeaseList_To_v1beta1_LeaseList(in *coordination.LeaseList, out *coordinationv1beta1.LeaseList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]coordinationv1beta1.Lease)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_coordination_LeaseList_To_v1beta1_LeaseList is an autogenerated conversion function.
func Convert_coordination_LeaseList_To_v1beta1_LeaseList(in *coordination.LeaseList, out *coordinationv1beta1.LeaseList, s conversion.Scope) error {
return autoConvert_coordination_LeaseList_To_v1beta1_LeaseList(in, out, s)
}
func autoConvert_v1beta1_LeaseSpec_To_coordination_LeaseSpec(in *coordinationv1beta1.LeaseSpec, out *coordination.LeaseSpec, s conversion.Scope) error {
out.HolderIdentity = (*string)(unsafe.Pointer(in.HolderIdentity))
out.LeaseDurationSeconds = (*int32)(unsafe.Pointer(in.LeaseDurationSeconds))
out.AcquireTime = (*v1.MicroTime)(unsafe.Pointer(in.AcquireTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.LeaseTransitions = (*int32)(unsafe.Pointer(in.LeaseTransitions))
out.Strategy = (*coordination.CoordinatedLeaseStrategy)(unsafe.Pointer(in.Strategy))
out.PreferredHolder = (*string)(unsafe.Pointer(in.PreferredHolder))
return nil
}
// Convert_v1beta1_LeaseSpec_To_coordination_LeaseSpec is an autogenerated conversion function.
func Convert_v1beta1_LeaseSpec_To_coordination_LeaseSpec(in *coordinationv1beta1.LeaseSpec, out *coordination.LeaseSpec, s conversion.Scope) error {
return autoConvert_v1beta1_LeaseSpec_To_coordination_LeaseSpec(in, out, s)
}
func autoConvert_coordination_LeaseSpec_To_v1beta1_LeaseSpec(in *coordination.LeaseSpec, out *coordinationv1beta1.LeaseSpec, s conversion.Scope) error {
out.HolderIdentity = (*string)(unsafe.Pointer(in.HolderIdentity))
out.LeaseDurationSeconds = (*int32)(unsafe.Pointer(in.LeaseDurationSeconds))
out.AcquireTime = (*v1.MicroTime)(unsafe.Pointer(in.AcquireTime))
out.RenewTime = (*v1.MicroTime)(unsafe.Pointer(in.RenewTime))
out.LeaseTransitions = (*int32)(unsafe.Pointer(in.LeaseTransitions))
out.Strategy = (*coordinationv1.CoordinatedLeaseStrategy)(unsafe.Pointer(in.Strategy))
out.PreferredHolder = (*string)(unsafe.Pointer(in.PreferredHolder))
return nil
}
// Convert_coordination_LeaseSpec_To_v1beta1_LeaseSpec is an autogenerated conversion function.
func Convert_coordination_LeaseSpec_To_v1beta1_LeaseSpec(in *coordination.LeaseSpec, out *coordinationv1beta1.LeaseSpec, s conversion.Scope) error {
return autoConvert_coordination_LeaseSpec_To_v1beta1_LeaseSpec(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package coordination
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Lease) DeepCopyInto(out *Lease) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease.
func (in *Lease) DeepCopy() *Lease {
if in == nil {
return nil
}
out := new(Lease)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Lease) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate.
func (in *LeaseCandidate) DeepCopy() *LeaseCandidate {
if in == nil {
return nil
}
out := new(LeaseCandidate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseCandidate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]LeaseCandidate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList.
func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList {
if in == nil {
return nil
}
out := new(LeaseCandidateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseCandidateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
*out = *in
if in.PingTime != nil {
in, out := &in.PingTime, &out.PingTime
*out = (*in).DeepCopy()
}
if in.RenewTime != nil {
in, out := &in.RenewTime, &out.RenewTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec.
func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec {
if in == nil {
return nil
}
out := new(LeaseCandidateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseList) DeepCopyInto(out *LeaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Lease, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList.
func (in *LeaseList) DeepCopy() *LeaseList {
if in == nil {
return nil
}
out := new(LeaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LeaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) {
*out = *in
if in.HolderIdentity != nil {
in, out := &in.HolderIdentity, &out.HolderIdentity
*out = new(string)
**out = **in
}
if in.LeaseDurationSeconds != nil {
in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds
*out = new(int32)
**out = **in
}
if in.AcquireTime != nil {
in, out := &in.AcquireTime, &out.AcquireTime
*out = (*in).DeepCopy()
}
if in.RenewTime != nil {
in, out := &in.RenewTime, &out.RenewTime
*out = (*in).DeepCopy()
}
if in.LeaseTransitions != nil {
in, out := &in.LeaseTransitions, &out.LeaseTransitions
*out = new(int32)
**out = **in
}
if in.Strategy != nil {
in, out := &in.Strategy, &out.Strategy
*out = new(CoordinatedLeaseStrategy)
**out = **in
}
if in.PreferredHolder != nil {
in, out := &in.PreferredHolder, &out.PreferredHolder
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec.
func (in *LeaseSpec) DeepCopy() *LeaseSpec {
if in == nil {
return nil
}
out := new(LeaseSpec)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"reflect"
"strconv"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/ptr"
"sigs.k8s.io/randfill"
)
// Funcs returns the fuzzer functions for the core group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(q *resource.Quantity, c randfill.Continue) {
*q = *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
},
func(j *core.ObjectReference, c randfill.Continue) {
// We have to customize the randomization of TypeMetas because their
// APIVersion and Kind must remain blank in memory.
j.APIVersion = c.String(0)
j.Kind = c.String(0)
j.Namespace = c.String(0)
j.Name = c.String(0)
j.ResourceVersion = strconv.FormatUint(c.Uint64(), 10)
j.FieldPath = c.String(0)
},
func(j *core.PodExecOptions, c randfill.Continue) {
j.Stdout = true
j.Stderr = true
},
func(j *core.PodAttachOptions, c randfill.Continue) {
j.Stdout = true
j.Stderr = true
},
func(j *core.PodPortForwardOptions, c randfill.Continue) {
if c.Bool() {
j.Ports = make([]int32, c.Intn(10))
for i := range j.Ports {
j.Ports[i] = c.Int31n(65535)
}
}
},
func(s *core.PodSpec, c randfill.Continue) {
c.FillNoCustom(s)
// has a default value
ttl := int64(30)
if c.Bool() {
ttl = int64(c.Uint32())
}
s.TerminationGracePeriodSeconds = &ttl
c.Fill(s.SecurityContext)
if s.SecurityContext == nil {
s.SecurityContext = new(core.PodSecurityContext)
}
if s.Affinity == nil {
s.Affinity = new(core.Affinity)
}
if s.SchedulerName == "" {
s.SchedulerName = v1.DefaultSchedulerName
}
if s.EnableServiceLinks == nil {
enableServiceLinks := v1.DefaultEnableServiceLinks
s.EnableServiceLinks = &enableServiceLinks
}
},
func(s *core.PodStatus, c randfill.Continue) {
c.Fill(&s)
s.HostIPs = []core.HostIP{{IP: s.HostIP}}
},
func(j *core.PodPhase, c randfill.Continue) {
statuses := []core.PodPhase{core.PodPending, core.PodRunning, core.PodFailed, core.PodUnknown}
*j = statuses[c.Rand.Intn(len(statuses))]
},
func(j *core.Binding, c randfill.Continue) {
c.Fill(&j.ObjectMeta)
j.Target.Name = c.String(0)
},
func(j *core.ReplicationController, c randfill.Continue) {
c.FillNoCustom(j)
// match defaulting
if j.Spec.Template != nil {
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
if len(j.Spec.Selector) == 0 {
j.Spec.Selector = j.Spec.Template.Labels
}
}
},
func(j *core.ReplicationControllerSpec, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
//j.TemplateRef = nil // this is required for round trip
// match defaulting
if j.Replicas == nil {
replicas := int32(0)
j.Replicas = &replicas
}
},
func(j *core.List, c randfill.Continue) {
c.FillNoCustom(j) // fuzz self without calling this function again
// TODO: uncomment when round trip starts from a versioned object
if false { //j.Items == nil {
j.Items = []runtime.Object{}
}
},
func(q *core.ResourceRequirements, c randfill.Continue) {
randomQuantity := func() resource.Quantity {
var q resource.Quantity
c.Fill(&q)
// precalc the string for benchmarking purposes
_ = q.String()
return q
}
q.Limits = make(core.ResourceList)
q.Requests = make(core.ResourceList)
cpuLimit := randomQuantity()
q.Limits[core.ResourceCPU] = cpuLimit.DeepCopy()
q.Requests[core.ResourceCPU] = cpuLimit.DeepCopy()
memoryLimit := randomQuantity()
q.Limits[core.ResourceMemory] = memoryLimit.DeepCopy()
q.Requests[core.ResourceMemory] = memoryLimit.DeepCopy()
storageLimit := randomQuantity()
q.Limits[core.ResourceStorage] = storageLimit.DeepCopy()
q.Requests[core.ResourceStorage] = storageLimit.DeepCopy()
},
func(q *core.LimitRangeItem, c randfill.Continue) {
var cpuLimit resource.Quantity
c.Fill(&cpuLimit)
q.Type = core.LimitTypeContainer
q.Default = make(core.ResourceList)
q.Default[core.ResourceCPU] = cpuLimit.DeepCopy()
q.DefaultRequest = make(core.ResourceList)
q.DefaultRequest[core.ResourceCPU] = cpuLimit.DeepCopy()
q.Max = make(core.ResourceList)
q.Max[core.ResourceCPU] = cpuLimit.DeepCopy()
q.Min = make(core.ResourceList)
q.Min[core.ResourceCPU] = cpuLimit.DeepCopy()
q.MaxLimitRequestRatio = make(core.ResourceList)
q.MaxLimitRequestRatio[core.ResourceCPU] = resource.MustParse("10")
},
func(p *core.PullPolicy, c randfill.Continue) {
policies := []core.PullPolicy{core.PullAlways, core.PullNever, core.PullIfNotPresent}
*p = policies[c.Rand.Intn(len(policies))]
},
func(rp *core.RestartPolicy, c randfill.Continue) {
policies := []core.RestartPolicy{core.RestartPolicyAlways, core.RestartPolicyNever, core.RestartPolicyOnFailure}
*rp = policies[c.Rand.Intn(len(policies))]
},
// core.DownwardAPIVolumeFile needs to have a specific func since FieldRef has to be
// defaulted to a version; otherwise the roundtrip will fail
func(m *core.DownwardAPIVolumeFile, c randfill.Continue) {
m.Path = c.String(0)
versions := []string{"v1"}
m.FieldRef = &core.ObjectFieldSelector{}
m.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
m.FieldRef.FieldPath = c.String(0)
c.Fill(m.Mode)
if m.Mode != nil {
*m.Mode &= 0777
}
},
func(s *core.SecretVolumeSource, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
if c.Bool() {
opt := c.Bool()
s.Optional = &opt
}
// DefaultMode should always be set; it has a default
// value and is expected to be between 0 and 0777
var mode int32
c.Fill(&mode)
mode &= 0777
s.DefaultMode = &mode
},
func(cm *core.ConfigMapVolumeSource, c randfill.Continue) {
c.FillNoCustom(cm) // fuzz self without calling this function again
if c.Bool() {
opt := c.Bool()
cm.Optional = &opt
}
// DefaultMode should always be set; it has a default
// value and is expected to be between 0 and 0777
var mode int32
c.Fill(&mode)
mode &= 0777
cm.DefaultMode = &mode
},
func(d *core.DownwardAPIVolumeSource, c randfill.Continue) {
c.FillNoCustom(d) // fuzz self without calling this function again
// DefaultMode should always be set; it has a default
// value and is expected to be between 0 and 0777
var mode int32
c.Fill(&mode)
mode &= 0777
d.DefaultMode = &mode
},
func(s *core.ProjectedVolumeSource, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
// DefaultMode should always be set; it has a default
// value and is expected to be between 0 and 0777
var mode int32
c.Fill(&mode)
mode &= 0777
s.DefaultMode = &mode
},
func(k *core.KeyToPath, c randfill.Continue) {
c.FillNoCustom(k) // fuzz self without calling this function again
k.Key = c.String(0)
k.Path = c.String(0)
// Mode is not mandatory, but if it is set, it should be
// a value between 0 and 0777
if k.Mode != nil {
*k.Mode &= 0777
}
},
func(vs *core.VolumeSource, c randfill.Continue) {
// Exactly one of the fields must be set.
v := reflect.ValueOf(vs).Elem()
i := int(c.Uint64() % uint64(v.NumField()))
t := v.Field(i).Addr()
for v.Field(i).IsNil() {
c.Fill(t.Interface())
}
},
func(i *core.ISCSIVolumeSource, c randfill.Continue) {
i.ISCSIInterface = c.String(0)
if i.ISCSIInterface == "" {
i.ISCSIInterface = "default"
}
},
func(i *core.ISCSIPersistentVolumeSource, c randfill.Continue) {
i.ISCSIInterface = c.String(0)
if i.ISCSIInterface == "" {
i.ISCSIInterface = "default"
}
},
func(i *core.PersistentVolumeClaimSpec, c randfill.Continue) {
// Match defaulting in pkg/apis/core/v1/defaults.go.
volumeMode := core.PersistentVolumeMode(c.String(0))
if volumeMode == "" {
volumeMode = core.PersistentVolumeFilesystem
}
i.VolumeMode = &volumeMode
},
func(d *core.DNSPolicy, c randfill.Continue) {
policies := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault}
*d = policies[c.Rand.Intn(len(policies))]
},
func(p *core.Protocol, c randfill.Continue) {
protocols := []core.Protocol{core.ProtocolTCP, core.ProtocolUDP, core.ProtocolSCTP}
*p = protocols[c.Rand.Intn(len(protocols))]
},
func(p *core.ServiceAffinity, c randfill.Continue) {
types := []core.ServiceAffinity{core.ServiceAffinityClientIP, core.ServiceAffinityNone}
*p = types[c.Rand.Intn(len(types))]
},
func(p *core.ServiceType, c randfill.Continue) {
types := []core.ServiceType{core.ServiceTypeClusterIP, core.ServiceTypeNodePort, core.ServiceTypeLoadBalancer}
*p = types[c.Rand.Intn(len(types))]
},
func(p *core.IPFamily, c randfill.Continue) {
types := []core.IPFamily{core.IPv4Protocol, core.IPv6Protocol}
selected := types[c.Rand.Intn(len(types))]
*p = selected
},
func(p *core.ServiceExternalTrafficPolicy, c randfill.Continue) {
types := []core.ServiceExternalTrafficPolicy{core.ServiceExternalTrafficPolicyCluster, core.ServiceExternalTrafficPolicyLocal}
*p = types[c.Rand.Intn(len(types))]
},
func(p *core.ServiceInternalTrafficPolicy, c randfill.Continue) {
types := []core.ServiceInternalTrafficPolicy{core.ServiceInternalTrafficPolicyCluster, core.ServiceInternalTrafficPolicyLocal}
*p = types[c.Rand.Intn(len(types))]
},
func(ct *core.Container, c randfill.Continue) {
c.FillNoCustom(ct) // fuzz self without calling this function again
ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
ct.TerminationMessagePolicy = "File"
},
func(ep *core.EphemeralContainer, c randfill.Continue) {
c.FillNoCustom(ep) // fuzz self without calling this function again
ep.EphemeralContainerCommon.TerminationMessagePath = "/" + ep.TerminationMessagePath // Must be non-empty
ep.EphemeralContainerCommon.TerminationMessagePolicy = "File"
},
func(p *core.Probe, c randfill.Continue) {
c.FillNoCustom(p)
// These fields have default values.
intFieldsWithDefaults := [...]string{"TimeoutSeconds", "PeriodSeconds", "SuccessThreshold", "FailureThreshold"}
v := reflect.ValueOf(p).Elem()
for _, field := range intFieldsWithDefaults {
f := v.FieldByName(field)
if f.Int() == 0 {
f.SetInt(1)
}
}
},
func(ev *core.EnvVar, c randfill.Continue) {
ev.Name = c.String(0)
if c.Bool() {
ev.Value = c.String(0)
} else {
ev.ValueFrom = &core.EnvVarSource{}
ev.ValueFrom.FieldRef = &core.ObjectFieldSelector{}
versions := []schema.GroupVersion{
{Group: "admission.k8s.io", Version: "v1alpha1"},
{Group: "apps", Version: "v1beta1"},
{Group: "apps", Version: "v1beta2"},
{Group: "foo", Version: "v42"},
}
ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))].String()
ev.ValueFrom.FieldRef.FieldPath = c.String(0)
}
},
func(ev *core.EnvFromSource, c randfill.Continue) {
if c.Bool() {
ev.Prefix = "p_"
}
if c.Bool() {
c.Fill(&ev.ConfigMapRef)
} else {
c.Fill(&ev.SecretRef)
}
},
func(cm *core.ConfigMapEnvSource, c randfill.Continue) {
c.FillNoCustom(cm) // fuzz self without calling this function again
if c.Bool() {
opt := c.Bool()
cm.Optional = &opt
}
},
func(s *core.SecretEnvSource, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
},
func(sc *core.SecurityContext, c randfill.Continue) {
c.FillNoCustom(sc) // fuzz self without calling this function again
if c.Bool() {
priv := c.Bool()
sc.Privileged = &priv
}
if c.Bool() {
sc.Capabilities = &core.Capabilities{
Add: make([]core.Capability, 0),
Drop: make([]core.Capability, 0),
}
c.Fill(&sc.Capabilities.Add)
c.Fill(&sc.Capabilities.Drop)
}
},
func(s *core.Secret, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
s.Type = core.SecretTypeOpaque
},
func(r *core.RBDVolumeSource, c randfill.Continue) {
r.RBDPool = c.String(0)
if r.RBDPool == "" {
r.RBDPool = "rbd"
}
r.RadosUser = c.String(0)
if r.RadosUser == "" {
r.RadosUser = "admin"
}
r.Keyring = c.String(0)
if r.Keyring == "" {
r.Keyring = "/etc/ceph/keyring"
}
},
func(r *core.RBDPersistentVolumeSource, c randfill.Continue) {
r.RBDPool = c.String(0)
if r.RBDPool == "" {
r.RBDPool = "rbd"
}
r.RadosUser = c.String(0)
if r.RadosUser == "" {
r.RadosUser = "admin"
}
r.Keyring = c.String(0)
if r.Keyring == "" {
r.Keyring = "/etc/ceph/keyring"
}
},
func(obj *core.HostPathVolumeSource, c randfill.Continue) {
c.FillNoCustom(obj)
types := []core.HostPathType{core.HostPathUnset, core.HostPathDirectoryOrCreate, core.HostPathDirectory,
core.HostPathFileOrCreate, core.HostPathFile, core.HostPathSocket, core.HostPathCharDev, core.HostPathBlockDev}
typeVol := types[c.Rand.Intn(len(types))]
if obj.Type == nil {
obj.Type = &typeVol
}
},
func(pv *core.PersistentVolume, c randfill.Continue) {
c.FillNoCustom(pv) // fuzz self without calling this function again
types := []core.PersistentVolumePhase{core.VolumeAvailable, core.VolumePending, core.VolumeBound, core.VolumeReleased, core.VolumeFailed}
pv.Status.Phase = types[c.Rand.Intn(len(types))]
pv.Status.Message = c.String(0)
reclamationPolicies := []core.PersistentVolumeReclaimPolicy{core.PersistentVolumeReclaimRecycle, core.PersistentVolumeReclaimRetain}
pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
volumeModes := []core.PersistentVolumeMode{core.PersistentVolumeFilesystem, core.PersistentVolumeBlock}
pv.Spec.VolumeMode = &volumeModes[c.Rand.Intn(len(volumeModes))]
},
func(pvc *core.PersistentVolumeClaim, c randfill.Continue) {
c.FillNoCustom(pvc) // fuzz self without calling this function again
types := []core.PersistentVolumeClaimPhase{core.ClaimBound, core.ClaimPending, core.ClaimLost}
pvc.Status.Phase = types[c.Rand.Intn(len(types))]
volumeModes := []core.PersistentVolumeMode{core.PersistentVolumeFilesystem, core.PersistentVolumeBlock}
pvc.Spec.VolumeMode = &volumeModes[c.Rand.Intn(len(volumeModes))]
},
func(obj *core.AzureDiskVolumeSource, c randfill.Continue) {
if obj.CachingMode == nil {
obj.CachingMode = new(core.AzureDataDiskCachingMode)
*obj.CachingMode = core.AzureDataDiskCachingReadWrite
}
if obj.Kind == nil {
obj.Kind = new(core.AzureDataDiskKind)
*obj.Kind = core.AzureSharedBlobDisk
}
if obj.FSType == nil {
obj.FSType = new(string)
*obj.FSType = "ext4"
}
if obj.ReadOnly == nil {
obj.ReadOnly = new(bool)
*obj.ReadOnly = false
}
},
func(sio *core.ScaleIOVolumeSource, c randfill.Continue) {
sio.StorageMode = c.String(0)
if sio.StorageMode == "" {
sio.StorageMode = "ThinProvisioned"
}
sio.FSType = c.String(0)
if sio.FSType == "" {
sio.FSType = "xfs"
}
},
func(sio *core.ScaleIOPersistentVolumeSource, c randfill.Continue) {
sio.StorageMode = c.String(0)
if sio.StorageMode == "" {
sio.StorageMode = "ThinProvisioned"
}
sio.FSType = c.String(0)
if sio.FSType == "" {
sio.FSType = "xfs"
}
},
func(s *core.NamespaceSpec, c randfill.Continue) {
s.Finalizers = []core.FinalizerName{core.FinalizerKubernetes}
},
func(s *core.Namespace, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
// Match name --> label defaulting
if len(s.Name) > 0 {
if s.Labels == nil {
s.Labels = map[string]string{}
}
s.Labels["kubernetes.io/metadata.name"] = s.Name
}
},
func(s *core.NamespaceStatus, c randfill.Continue) {
s.Phase = core.NamespaceActive
},
func(http *core.HTTPGetAction, c randfill.Continue) {
c.FillNoCustom(http) // fuzz self without calling this function again
http.Path = "/" + http.Path // can't be blank
http.Scheme = "x" + http.Scheme // can't be blank
},
func(ss *core.ServiceSpec, c randfill.Continue) {
c.FillNoCustom(ss) // fuzz self without calling this function again
if len(ss.Ports) == 0 {
// There must be at least 1 port.
ss.Ports = append(ss.Ports, core.ServicePort{})
c.Fill(&ss.Ports[0])
}
for i := range ss.Ports {
switch ss.Ports[i].TargetPort.Type {
case intstr.Int:
ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
case intstr.String:
ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
}
}
types := []core.ServiceAffinity{core.ServiceAffinityNone, core.ServiceAffinityClientIP}
ss.SessionAffinity = types[c.Rand.Intn(len(types))]
switch ss.SessionAffinity {
case core.ServiceAffinityClientIP:
timeoutSeconds := int32(c.Rand.Intn(int(core.MaxClientIPServiceAffinitySeconds)))
ss.SessionAffinityConfig = &core.SessionAffinityConfig{
ClientIP: &core.ClientIPConfig{
TimeoutSeconds: &timeoutSeconds,
},
}
case core.ServiceAffinityNone:
ss.SessionAffinityConfig = nil
}
if ss.AllocateLoadBalancerNodePorts == nil {
ss.AllocateLoadBalancerNodePorts = ptr.To(true)
}
},
func(s *core.NodeStatus, c randfill.Continue) {
c.FillNoCustom(s)
s.Allocatable = s.Capacity
},
func(e *core.Event, c randfill.Continue) {
c.FillNoCustom(e)
e.EventTime = metav1.MicroTime{Time: time.Unix(1, 1000)}
if e.Series != nil {
e.Series.LastObservedTime = metav1.MicroTime{Time: time.Unix(3, 3000)}
}
},
func(j *core.GRPCAction, c randfill.Continue) {
empty := ""
if j.Service == nil {
j.Service = &empty
}
},
func(j *core.LoadBalancerStatus, c randfill.Continue) {
ipMode := core.LoadBalancerIPModeVIP
for i := range j.Ingress {
if j.Ingress[i].IPMode == nil {
j.Ingress[i].IPMode = &ipMode
}
}
},
}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core"
)
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix)
}
// IsHugePageResourceValueDivisible returns true if the resource value of storage is
// an integer multiple of the page size.
func IsHugePageResourceValueDivisible(name core.ResourceName, quantity resource.Quantity) bool {
pageSize, err := HugePageSizeFromResourceName(name)
if err != nil {
return false
}
if pageSize.Sign() <= 0 || pageSize.MilliValue()%int64(1000) != int64(0) {
return false
}
return quantity.Value()%pageSize.Value() == 0
}
// IsQuotaHugePageResourceName returns true if the resource name has the quota
// related huge page resource prefix.
func IsQuotaHugePageResourceName(name core.ResourceName) bool {
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix)
}
// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) core.ResourceName {
return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String()))
}
// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name,
// an error is returned.
func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) {
if !IsHugePageResourceName(name) {
return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
}
pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix)
return resource.ParseQuantity(pageSize)
}
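// exampleHugePageRoundTrip is an editorial sketch, not part of the upstream
// source: it assumes only the helpers defined above and shows how a page size
// becomes a canonical resource name and parses back to the same size.
func exampleHugePageRoundTrip() bool {
	name := HugePageResourceName(resource.MustParse("2Mi")) // "hugepages-2Mi"
	size, err := HugePageSizeFromResourceName(name)
	return err == nil && size.Value() == 2*1024*1024
}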
// NonConvertibleFields iterates over the provided map and filters out all but
// the keys with the "non-convertible.kubernetes.io" prefix.
func NonConvertibleFields(annotations map[string]string) map[string]string {
nonConvertibleKeys := map[string]string{}
for key, value := range annotations {
if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) {
nonConvertibleKeys[key] = value
}
}
return nonConvertibleKeys
}
// Semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
var Semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
return a.UTC() == b.UTC()
},
func(a, b metav1.Time) bool {
return a.UTC() == b.UTC()
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
)
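// exampleSemanticQuantityEquality is an editorial sketch, not upstream code:
// Semantic compares quantities numerically, so differently formatted but
// numerically identical values are equal.
func exampleSemanticQuantityEquality() bool {
	a := resource.MustParse("1")
	b := resource.MustParse("1000m")
	return Semantic.DeepEqual(a, b) // true: formats differ, values match
}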
var standardResourceQuotaScopes = sets.New(
core.ResourceQuotaScopeTerminating,
core.ResourceQuotaScopeNotTerminating,
core.ResourceQuotaScopeBestEffort,
core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass,
core.ResourceQuotaScopeVolumeAttributesClass,
)
// IsStandardResourceQuotaScope returns true if the scope is a standard value
func IsStandardResourceQuotaScope(scope core.ResourceQuotaScope) bool {
return standardResourceQuotaScopes.Has(scope) || scope == core.ResourceQuotaScopeCrossNamespacePodAffinity
}
var podObjectCountQuotaResources = sets.New(
core.ResourcePods,
)
var podComputeQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
)
var pvcObjectCountQuotaResources = sets.New(
core.ResourcePersistentVolumeClaims,
)
var pvcStorageQuotaResources = sets.New(
core.ResourceRequestsStorage,
)
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource core.ResourceName) bool {
switch scope {
case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass, core.ResourceQuotaScopeCrossNamespacePodAffinity:
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
case core.ResourceQuotaScopeBestEffort:
return podObjectCountQuotaResources.Has(resource)
case core.ResourceQuotaScopeVolumeAttributesClass:
return pvcObjectCountQuotaResources.Has(resource) || pvcStorageQuotaResources.Has(resource)
default:
return true
}
}
var standardContainerResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(name core.ResourceName) bool {
return standardContainerResources.Has(name) || IsHugePageResourceName(name)
}
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
//    to avoid confusion with the convention in quota; and
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name core.ResourceName) bool {
if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name
nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true
}
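// exampleExtendedResource is an editorial sketch, not upstream code:
// vendor-namespaced names qualify as extended resources, while native
// kubernetes.io names do not.
func exampleExtendedResource() (bool, bool) {
	return IsExtendedResourceName(core.ResourceName("example.com/gpu")), // true
		IsExtendedResourceName(core.ResourceCPU) // false: native resource
}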
// IsNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsNativeResource(name core.ResourceName) bool {
return !strings.Contains(string(name), "/") ||
strings.Contains(string(name), core.ResourceDefaultNamespacePrefix)
}
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and is not hugepages.
func IsOvercommitAllowed(name core.ResourceName) bool {
return IsNativeResource(name) &&
!IsHugePageResourceName(name)
}
var standardLimitRangeTypes = sets.New(
core.LimitTypePod,
core.LimitTypeContainer,
core.LimitTypePersistentVolumeClaim,
)
// IsStandardLimitRangeType returns true if the type is Pod, Container, or PersistentVolumeClaim
func IsStandardLimitRangeType(value core.LimitType) bool {
return standardLimitRangeTypes.Has(value)
}
var standardQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsStorage,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourcePersistentVolumeClaims,
core.ResourceConfigMaps,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(name core.ResourceName) bool {
return standardQuotaResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var standardResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceStorage,
core.ResourceRequestsStorage,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardResourceName returns true if the resource is known to the system
func IsStandardResourceName(name core.ResourceName) bool {
return standardResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var integerResources = sets.New(
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(name core.ResourceName) bool {
return integerResources.Has(name) || IsExtendedResourceName(name)
}
// IsServiceIPSet aims to check if the service's ClusterIP is set or not;
// the objective is not to perform validation here
func IsServiceIPSet(service *core.Service) bool {
// This function assumes that the service is semantically validated
// it does not test if the IP is valid, just makes sure that it is set.
return len(service.Spec.ClusterIP) > 0 &&
service.Spec.ClusterIP != core.ClusterIPNone
}
var standardFinalizers = sets.New(
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,
)
// IsStandardFinalizerName checks if the input string is a standard finalizer name
func IsStandardFinalizerName(str string) bool {
return standardFinalizers.Has(str)
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP.
func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if ContainsAccessMode(modes, core.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if ContainsAccessMode(modes, core.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if ContainsAccessMode(modes, core.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
if ContainsAccessMode(modes, core.ReadWriteOncePod) {
modesStr = append(modesStr, "RWOP")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []core.PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, core.ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, core.ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, core.ReadWriteMany)
case s == "RWOP":
accessModes = append(accessModes, core.ReadWriteOncePod)
}
}
return accessModes
}
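// exampleAccessModeRoundTrip is an editorial sketch, not upstream code:
// GetAccessModesAsString de-duplicates and canonically orders the modes,
// and GetAccessModesFromString parses that form back.
func exampleAccessModeRoundTrip() []core.PersistentVolumeAccessMode {
	modes := []core.PersistentVolumeAccessMode{core.ReadOnlyMany, core.ReadWriteOnce, core.ReadWriteOnce}
	s := GetAccessModesAsString(modes) // "RWO,ROX": duplicate dropped, canonical order
	return GetAccessModesFromString(s) // [ReadWriteOnce ReadOnlyMany]
}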
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode {
accessModes := []core.PersistentVolumeAccessMode{}
for _, m := range modes {
if !ContainsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
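// ContainsAccessMode returns true if modes contains the given access mode.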
func ContainsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
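// ClaimContainsAllocatedResources returns true if the given PVC has its status.allocatedResources field set.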
func ClaimContainsAllocatedResources(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}
if pvc.Status.AllocatedResources != nil {
return true
}
return false
}
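// ClaimContainsAllocatedResourceStatus returns true if the given PVC has its status.allocatedResourceStatuses field set.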
func ClaimContainsAllocatedResourceStatus(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}
if pvc.Status.AllocatedResourceStatuses != nil {
return true
}
return false
}
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in core.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) {
var tolerations []core.Toleration
if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations)
if err != nil {
return tolerations, err
}
}
return tolerations, nil
}
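// exampleTolerationsFromAnnotations is an editorial sketch, not upstream code,
// showing the JSON annotation form that GetTolerationsFromPodAnnotations expects.
func exampleTolerationsFromAnnotations() ([]core.Toleration, error) {
	annotations := map[string]string{
		core.TolerationsAnnotationKey: `[{"key":"dedicated","operator":"Equal","value":"gpu","effect":"NoSchedule"}]`,
	}
	return GetTolerationsFromPodAnnotations(annotations)
}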
// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool {
podTolerations := pod.Spec.Tolerations
var newTolerations []core.Toleration
updated := false
for i := range podTolerations {
if toleration.MatchToleration(&podTolerations[i]) {
if Semantic.DeepEqual(toleration, podTolerations[i]) {
return false
}
newTolerations = append(newTolerations, *toleration)
updated = true
continue
}
newTolerations = append(newTolerations, podTolerations[i])
}
if !updated {
newTolerations = append(newTolerations, *toleration)
}
pod.Spec.Tolerations = newTolerations
return true
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
// and converts it to the []Taint type in core.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {
var taints []core.Taint
if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints)
if err != nil {
return []core.Taint{}, err
}
}
return taints, nil
}
// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *core.PersistentVolume) string {
// Use beta annotation first
if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
return volume.Spec.StorageClassName
}
// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
// requested, it returns "".
func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string {
// Use beta annotation first
if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return class
}
if claim.Spec.StorageClassName != nil {
return *claim.Spec.StorageClassName
}
return ""
}
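// examplePVCClassPrecedence is an editorial sketch, not upstream code: when
// both are present, the beta annotation wins over spec.storageClassName.
func examplePVCClassPrecedence() string {
	gold := "gold"
	claim := &core.PersistentVolumeClaim{}
	claim.Annotations = map[string]string{core.BetaStorageClassAnnotation: "silver"}
	claim.Spec.StorageClassName = &gold
	return GetPersistentVolumeClaimClass(claim) // "silver": annotation takes precedence
}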
// PersistentVolumeClaimHasClass returns true if the given claim has the StorageClassName field set.
func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
// Use beta annotation first
if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
return true
}
if claim.Spec.StorageClassName != nil {
return true
}
return false
}
// GetDeletionCostFromPodAnnotations returns the integer value of pod-deletion-cost. Returns 0
// if not set or the value is invalid.
func GetDeletionCostFromPodAnnotations(annotations map[string]string) (int32, error) {
if value, exist := annotations[core.PodDeletionCost]; exist {
// values that start with a plus sign (e.g., "+10") or leading zeros (e.g., "008") are not valid.
if !validFirstDigit(value) {
return 0, fmt.Errorf("invalid value %q", value)
}
i, err := strconv.ParseInt(value, 10, 32)
if err != nil {
// make sure we default to 0 on error.
return 0, err
}
return int32(i), nil
}
return 0, nil
}
func validFirstDigit(str string) bool {
if len(str) == 0 {
return false
}
return str[0] == '-' || (str[0] == '0' && str == "0") || (str[0] >= '1' && str[0] <= '9')
}
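// exampleDeletionCost is an editorial sketch, not upstream code, of the
// accepted and rejected forms: plain integers parse, while values such as
// "+10" or "008" fail validFirstDigit and return an error.
func exampleDeletionCost() (int32, error) {
	return GetDeletionCostFromPodAnnotations(map[string]string{
		core.PodDeletionCost: "-5", // valid; "+10" or "008" would be rejected
	})
}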
// HasInvalidLabelValueInNodeSelectorTerms checks if any NodeSelectorTerm
// contains an invalid label value in its MatchExpressions values
func HasInvalidLabelValueInNodeSelectorTerms(terms []core.NodeSelectorTerm) bool {
for _, term := range terms {
for _, expression := range term.MatchExpressions {
for _, value := range expression.Values {
if len(validation.IsValidLabelValue(value)) > 0 {
return true
}
}
}
}
return false
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// NOTE: DO NOT use these helper functions through client-go; the
// package path will be changed in the future.
package qos
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
// If PodStatus.QOSClass is empty, it returns the value of ComputePodQOS(), which evaluates the pod's QoS class.
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
if pod.Status.QOSClass != "" {
return pod.Status.QOSClass
}
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute
// resources from newList to list.
func processResourceList(list, newList core.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list core.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS, which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return core.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return core.PodQOSGuaranteed
}
return core.PodQOSBurstable
}
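// exampleGuaranteedPod is an editorial sketch, not upstream code: with equal,
// fully specified CPU and memory requests and limits on every container,
// ComputePodQOS classifies the pod as Guaranteed.
func exampleGuaranteedPod() core.PodQOSClass {
	rl := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("100m"),
		core.ResourceMemory: resource.MustParse("128Mi"),
	}
	pod := &core.Pod{
		Spec: core.PodSpec{
			Containers: []core.Container{{
				Name:      "app",
				Resources: core.ResourceRequirements{Requests: rl, Limits: rl},
			}},
		},
	}
	return ComputePodQOS(pod) // core.PodQOSGuaranteed
}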
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the v1 monolithic api, making it available as an
// option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(core.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion))
}
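// exampleNewScheme is an editorial sketch, not upstream code: Install can also
// be applied to a standalone scheme instead of the global legacyscheme.Scheme.
func exampleNewScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	Install(s)
	return s
}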
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import "encoding/json"
// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations
// to prevent anyone from marshaling these internal structs.
var _ = json.Marshaler(&AvoidPods{})
var _ = json.Unmarshaler(&AvoidPods{})
// MarshalJSON panics to prevent marshalling of internal structs
func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") }
// UnmarshalJSON panics to prevent unmarshalling of internal structs
func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") }
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SetGroupVersionKind sets the API version and kind of the object reference
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind returns the API version and kind of the object reference
func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
// GetObjectKind returns the kind of object reference
func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pods
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/fieldpath"
)
// ContainerVisitorWithPath is called with each container and the field.Path to that container,
// and returns true if visiting should continue.
type ContainerVisitorWithPath func(container *api.Container, path *field.Path) bool
// VisitContainersWithPath invokes the visitor function with a pointer to the spec
// of every container in the given pod spec and the field.Path to that container.
// If visitor returns false, visiting is short-circuited. VisitContainersWithPath returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainersWithPath(podSpec *api.PodSpec, specPath *field.Path, visitor ContainerVisitorWithPath) bool {
fldPath := specPath.Child("initContainers")
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], fldPath.Index(i)) {
return false
}
}
fldPath = specPath.Child("containers")
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], fldPath.Index(i)) {
return false
}
}
fldPath = specPath.Child("ephemeralContainers")
for i := range podSpec.EphemeralContainers {
if !visitor((*api.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), fldPath.Index(i)) {
return false
}
}
return true
}
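// exampleCountContainers is an editorial sketch, not upstream code: a visitor
// that never short-circuits, counting every init, regular, and ephemeral
// container in a pod spec.
func exampleCountContainers(spec *api.PodSpec) int {
	count := 0
	VisitContainersWithPath(spec, field.NewPath("spec"), func(c *api.Container, p *field.Path) bool {
		count++
		return true // keep visiting
	})
	return count
}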
// ConvertDownwardAPIFieldLabel converts the specified downward API field label
// and its value in the pod of the specified version to the internal version,
// and returns the converted label and value. This function returns an error if
// the conversion fails.
func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) {
if version != "v1" {
return "", "", fmt.Errorf("unsupported pod version: %s", version)
}
if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok {
switch path {
case "metadata.annotations", "metadata.labels":
return label, value, nil
default:
return "", "", fmt.Errorf("field label does not support subscript: %s", label)
}
}
switch label {
case "metadata.annotations",
"metadata.labels",
"metadata.name",
"metadata.namespace",
"metadata.uid",
"spec.nodeName",
"spec.restartPolicy",
"spec.serviceAccountName",
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.hostIPs",
"status.podIP",
"status.podIPs":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
}
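// exampleLegacyHostLabel is an editorial sketch, not upstream code: the legacy
// spec.host label sent by old v1 clients is rewritten to spec.nodeName.
func exampleLegacyHostLabel() (string, string, error) {
	// returns ("spec.nodeName", "node-1", nil)
	return ConvertDownwardAPIFieldLabel("v1", "spec.host", "node-1")
}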
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = ""
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder used to register the various known types
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme represents a func that can be used to apply all the registered
// funcs in a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&Pod{},
&PodList{},
&PodStatusResult{},
&PodTemplate{},
&PodTemplateList{},
&ReplicationControllerList{},
&ReplicationController{},
&ServiceList{},
&Service{},
&ServiceProxyOptions{},
&NodeList{},
&Node{},
&NodeProxyOptions{},
&Endpoints{},
&EndpointsList{},
&Binding{},
&Event{},
&EventList{},
&List{},
&LimitRange{},
&LimitRangeList{},
&ResourceQuota{},
&ResourceQuotaList{},
&Namespace{},
&NamespaceList{},
&ServiceAccount{},
&ServiceAccountList{},
&Secret{},
&SecretList{},
&PersistentVolume{},
&PersistentVolumeList{},
&PersistentVolumeClaim{},
&PersistentVolumeClaimList{},
&PodAttachOptions{},
&PodLogOptions{},
&PodExecOptions{},
&PodPortForwardOptions{},
&PodProxyOptions{},
&ComponentStatus{},
&ComponentStatusList{},
&SerializedReference{},
&RangeAllocation{},
&ConfigMap{},
&ConfigMapList{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"k8s.io/apimachinery/pkg/api/resource"
)
func (rn ResourceName) String() string {
return string(rn)
}
// CPU returns the CPU limit if specified.
func (rl *ResourceList) CPU() *resource.Quantity {
return rl.Name(ResourceCPU, resource.DecimalSI)
}
// Memory returns the Memory limit if specified.
func (rl *ResourceList) Memory() *resource.Quantity {
return rl.Name(ResourceMemory, resource.BinarySI)
}
// Storage returns the Storage limit if specified.
func (rl *ResourceList) Storage() *resource.Quantity {
return rl.Name(ResourceStorage, resource.BinarySI)
}
// Pods returns the list of pods
func (rl *ResourceList) Pods() *resource.Quantity {
return rl.Name(ResourcePods, resource.DecimalSI)
}
// StorageEphemeral returns the list of ephemeral storage volumes, if any
func (rl *ResourceList) StorageEphemeral() *resource.Quantity {
return rl.Name(ResourceEphemeralStorage, resource.BinarySI)
}
// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format.
func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity {
if val, ok := (*rl)[name]; ok {
return &val
}
return &resource.Quantity{Format: defaultFormat}
}
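// exampleResourceListDefaults is an editorial sketch, not upstream code:
// Name returns the stored quantity when present, and an empty quantity in the
// requested default format when absent.
func exampleResourceListDefaults() (*resource.Quantity, *resource.Quantity) {
	rl := ResourceList{ResourceCPU: resource.MustParse("500m")}
	return rl.CPU(), rl.Memory() // 500m, and an empty BinarySI quantity
}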
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
import "fmt"
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect;
// if two taints have the same key:effect, they are regarded as matching.
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
}
// ToString converts a taint struct to a string in one of the formats '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
func (t *Taint) ToString() string {
if len(t.Effect) == 0 {
if len(t.Value) == 0 {
return fmt.Sprintf("%v", t.Key)
}
return fmt.Sprintf("%v=%v:", t.Key, t.Value)
}
if len(t.Value) == 0 {
return fmt.Sprintf("%v:%v", t.Key, t.Effect)
}
return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
}
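// exampleTaintFormats is an editorial sketch, not upstream code, listing the
// four string forms that ToString can produce.
func exampleTaintFormats() []string {
	return []string{
		(&Taint{Key: "k"}).ToString(),                                            // "k"
		(&Taint{Key: "k", Value: "v"}).ToString(),                                // "k=v:"
		(&Taint{Key: "k", Effect: TaintEffectNoSchedule}).ToString(),             // "k:NoSchedule"
		(&Taint{Key: "k", Value: "v", Effect: TaintEffectNoSchedule}).ToString(), // "k=v:NoSchedule"
	}
}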
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//TODO: consider making these methods functions, because we don't want helper
//functions in the k8s.io/api repo.
package core
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
// if two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
// TODO: uniqueness check for tolerations in api validations.
func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
return t.Key == tolerationToMatch.Key &&
t.Effect == tolerationToMatch.Effect &&
t.Operator == tolerationToMatch.Operator &&
t.Value == tolerationToMatch.Value
}
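// exampleTolerationMatch is an editorial sketch, not upstream code: two
// tolerations match when key, operator, value, and effect all agree;
// TolerationSeconds is deliberately ignored by MatchToleration.
func exampleTolerationMatch() bool {
	a := &Toleration{Key: "k", Operator: TolerationOpEqual, Value: "v", Effect: TaintEffectNoSchedule}
	b := &Toleration{Key: "k", Operator: TolerationOpEqual, Value: "v", Effect: TaintEffectNoSchedule}
	return a.MatchToleration(b) // true
}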
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"reflect"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field conversion funcs.
err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"spec.nodeName",
"spec.restartPolicy",
"spec.schedulerName",
"spec.serviceAccountName",
"spec.hostNetwork",
"status.phase",
"status.podIP",
"status.podIPs",
"status.nominatedNodeName":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":
return "spec.nodeName", value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Node"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name":
return label, value, nil
case "spec.unschedulable":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ReplicationController"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"metadata.namespace",
"status.replicas":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
if err != nil {
return err
}
if err := AddFieldLabelConversionsForEvent(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForNamespace(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForSecret(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForService(scheme); err != nil {
return err
}
return nil
}
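// exampleConvertPodFieldLabel is an illustrative sketch (not part of the
// original source) of how a registered field label conversion is exercised.
// Given a scheme populated by addConversionFuncs, the legacy "spec.host" Pod
// selector is rewritten to "spec.nodeName" before being matched against
// objects. The node name is hypothetical.
func exampleConvertPodFieldLabel(scheme *runtime.Scheme) (string, string, error) {
// Returns ("spec.nodeName", "node-1", nil) once addConversionFuncs has run.
return scheme.ConvertFieldLabel(SchemeGroupVersion.WithKind("Pod"), "spec.host", "node-1")
}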
func Convert_v1_ReplicationController_To_apps_ReplicaSet(in *v1.ReplicationController, out *apps.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
out.Replicas = *in.Replicas
out.MinReadySeconds = in.MinReadySeconds
if in.Selector != nil {
out.Selector = new(metav1.LabelSelector)
if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&in.Selector, out.Selector, s); err != nil {
return err
}
}
if in.Template != nil {
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil {
return err
}
}
return nil
}
func Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, apps.ReplicaSetCondition{
Type: apps.ReplicaSetConditionType(cond.Type),
Status: core.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}
func Convert_apps_ReplicaSet_To_v1_ReplicationController(in *apps.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
fieldErr, ok := err.(*field.Error)
if !ok {
return err
}
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String()
}
if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *apps.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = new(int32)
*out.Replicas = in.Replicas
out.MinReadySeconds = in.MinReadySeconds
var invalidErr error
if in.Selector != nil {
invalidErr = metav1.Convert_v1_LabelSelector_To_Map_string_To_string(in.Selector, &out.Selector, s)
}
out.Template = new(v1.PodTemplateSpec)
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil {
return err
}
return invalidErr
}
func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
for _, cond := range in.Conditions {
out.Conditions = append(out.Conditions, v1.ReplicationControllerCondition{
Type: v1.ReplicationControllerConditionType(cond.Type),
Status: v1.ConditionStatus(cond.Status),
LastTransitionTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
})
}
return nil
}
func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
if err := autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in, out, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(v1.PodTemplateSpec)
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
if err := autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in, out, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if in.Template != nil {
out.Template = new(core.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
return nil
}
func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
if err := autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s); err != nil {
return err
}
// If both fields (v1.PodIPs and v1.PodIP) are provided and differ, then PodIP is authoritative for compatibility with older kubelets
if (len(in.PodIP) > 0 && len(in.PodIPs) > 0) && (in.PodIP != in.PodIPs[0].IP) {
out.PodIPs = []core.PodIP{
{
IP: in.PodIP,
},
}
}
// at this point, autoConvert copied v1.PodIPs -> core.PodIPs
// if v1.PodIPs was empty but v1.PodIP is not, then set core.PodIPs[0] with v1.PodIP
if len(in.PodIP) > 0 && len(in.PodIPs) == 0 {
out.PodIPs = []core.PodIP{
{
IP: in.PodIP,
},
}
}
return nil
}
func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error {
if err := autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s); err != nil {
return err
}
// at this point autoConvert copied core.PodIPs -> v1.PodIPs
// v1.PodIP (singular value field, which does not exist in core) needs to
// be set with core.PodIPs[0]
if len(in.PodIPs) > 0 {
out.PodIP = in.PodIPs[0].IP
}
return nil
}
// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount
// as an alias for ServiceAccountName.
func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
return err
}
// DeprecatedServiceAccount is an alias for ServiceAccountName.
out.DeprecatedServiceAccount = in.ServiceAccountName
if in.SecurityContext != nil {
// the host namespace fields have to be handled here for backward compatibility
// with v1.0.0
out.HostPID = in.SecurityContext.HostPID
out.HostNetwork = in.SecurityContext.HostNetwork
out.HostIPC = in.SecurityContext.HostIPC
out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace
out.HostUsers = in.SecurityContext.HostUsers
}
return nil
}
func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error {
if err := autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s); err != nil {
return err
}
// at this point autoConvert copied core.PodCIDRs -> v1.PodCIDRs
// v1.PodCIDR (singular value field, which does not exist in core) needs to
// be set with core.PodCIDRs[0]
if len(in.PodCIDRs) > 0 {
out.PodCIDR = in.PodCIDRs[0]
}
return nil
}
func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error {
if err := autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s); err != nil {
return err
}
// If both fields (v1.PodCIDRs and v1.PodCIDR) are provided and differ, then PodCIDR is authoritative for compatibility with older clients
if (len(in.PodCIDR) > 0 && len(in.PodCIDRs) > 0) && (in.PodCIDR != in.PodCIDRs[0]) {
out.PodCIDRs = []string{in.PodCIDR}
}
// at this point, autoConvert copied v1.PodCIDRs -> core.PodCIDRs
// if v1.PodCIDRs was empty but v1.PodCIDR is not, then set core.PodCIDRs[0] with v1.PodCIDR
if len(in.PodCIDR) > 0 && len(in.PodCIDRs) == 0 {
out.PodCIDRs = []string{in.PodCIDR}
}
return nil
}
func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error {
if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil {
return err
}
// We support DeprecatedServiceAccount as an alias for ServiceAccountName.
// If both are specified, ServiceAccountName (the new field) wins.
if in.ServiceAccountName == "" {
out.ServiceAccountName = in.DeprecatedServiceAccount
}
// the host namespace fields have to be handled specially for backward compatibility
// with v1.0.0
if out.SecurityContext == nil {
out.SecurityContext = new(core.PodSecurityContext)
}
out.SecurityContext.HostNetwork = in.HostNetwork
out.SecurityContext.HostPID = in.HostPID
out.SecurityContext.HostIPC = in.HostIPC
out.SecurityContext.ShareProcessNamespace = in.ShareProcessNamespace
out.SecurityContext.HostUsers = in.HostUsers
return nil
}
func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error {
if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1)
}
return nil
}
func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error {
if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil {
return err
}
// drop init container annotations so they don't take effect on legacy kubelets.
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = ptr.To[int64](1)
}
return nil
}
func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error {
if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil {
return err
}
// StringData overwrites Data
if len(in.StringData) > 0 {
if out.Data == nil {
out.Data = map[string][]byte{}
}
for k, v := range in.StringData {
out.Data[k] = []byte(v)
}
}
return nil
}
// +k8s:conversion-fn=copy-only
func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error {
if *in == nil {
return nil
}
if *out == nil {
*out = make(core.ResourceList, len(*in))
}
for key, val := range *in {
// Moved to defaults
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
// const milliScale = -3
// val.RoundUp(milliScale)
(*out)[core.ResourceName(key)] = val
}
return nil
}
func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"),
func(label, value string) (string, string, error) {
switch label {
case "involvedObject.kind",
"involvedObject.namespace",
"involvedObject.name",
"involvedObject.uid",
"involvedObject.apiVersion",
"involvedObject.resourceVersion",
"involvedObject.fieldPath",
"reason",
"reportingComponent",
"source",
"type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Namespace"),
func(label, value string) (string, string, error) {
switch label {
case "status.phase",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Secret"),
func(label, value string) (string, string, error) {
switch label {
case "type",
"metadata.namespace",
"metadata.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
func AddFieldLabelConversionsForService(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Service"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.namespace",
"metadata.name",
"spec.clusterIP",
"spec.type":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
var initContainerAnnotations = map[string]bool{
"pod.beta.kubernetes.io/init-containers": true,
"pod.alpha.kubernetes.io/init-containers": true,
"pod.beta.kubernetes.io/init-container-statuses": true,
"pod.alpha.kubernetes.io/init-container-statuses": true,
}
// dropInitContainerAnnotations returns a copy of the annotations with init container annotations removed,
// or the original annotations if no init container annotations were present.
//
// this can be removed once no clients prior to 1.8 are supported, and no kubelets prior to 1.8 can be run
// (we don't support kubelets older than 2 versions skewed from the apiserver, but we don't prevent them, either)
func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]string {
if len(oldAnnotations) == 0 {
return oldAnnotations
}
found := false
for k := range initContainerAnnotations {
if _, ok := oldAnnotations[k]; ok {
found = true
break
}
}
if !found {
return oldAnnotations
}
newAnnotations := make(map[string]string, len(oldAnnotations))
for k, v := range oldAnnotations {
if !initContainerAnnotations[k] {
newAnnotations[k] = v
}
}
return newAnnotations
}
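// exampleDropInitContainerAnnotations is an illustrative sketch (not part of
// the original source): unrelated annotations survive, init-container keys
// are stripped, and a map without any of those keys is returned unchanged
// (no copy is allocated). The annotation values are hypothetical.
func exampleDropInitContainerAnnotations() map[string]string {
anns := map[string]string{
"pod.beta.kubernetes.io/init-containers": "[...]",
"app": "web",
}
return dropInitContainerAnnotations(anns) // map[app:web]
}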
// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s)
}
// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s)
}
// Convert_Slice_string_To_Pointer_string is needed because decoding URL parameters requires manual assistance.
func Convert_Slice_string_To_Pointer_string(in *[]string, out **string, s conversion.Scope) error {
if len(*in) == 0 {
return nil
}
temp := (*in)[0]
*out = &temp
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/api/v1/service"
corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ResourceList(obj *v1.ResourceList) {
for key, val := range *obj {
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
const milliScale = -3
val.RoundUp(milliScale)
(*obj)[v1.ResourceName(key)] = val
}
}
func SetDefaults_ReplicationController(obj *v1.ReplicationController) {
var labels map[string]string
if obj.Spec.Template != nil {
labels = obj.Spec.Template.Labels
}
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if len(obj.Spec.Selector) == 0 {
obj.Spec.Selector = labels
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
// obj.Spec.Replicas is defaulted declaratively
}
func SetDefaults_Volume(obj *v1.Volume) {
if ptr.AllPtrFieldsNil(&obj.VolumeSource) {
obj.VolumeSource = v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) && obj.Image != nil && obj.Image.PullPolicy == "" {
// PullPolicy defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
_, tag, _, _ := parsers.ParseImageName(obj.Image.Reference)
if tag == "latest" {
obj.Image.PullPolicy = v1.PullAlways
} else {
obj.Image.PullPolicy = v1.PullIfNotPresent
}
}
}
func SetDefaults_Container(obj *v1.Container) {
if obj.ImagePullPolicy == "" {
// Ignore error and assume it has been validated elsewhere
_, tag, _, _ := parsers.ParseImageName(obj.Image)
// Check image tag
if tag == "latest" {
obj.ImagePullPolicy = v1.PullAlways
} else {
obj.ImagePullPolicy = v1.PullIfNotPresent
}
}
if obj.TerminationMessagePath == "" {
obj.TerminationMessagePath = v1.TerminationMessagePathDefault
}
if obj.TerminationMessagePolicy == "" {
obj.TerminationMessagePolicy = v1.TerminationMessageReadFile
}
}
func SetDefaults_EphemeralContainer(obj *v1.EphemeralContainer) {
SetDefaults_Container((*v1.Container)(&obj.EphemeralContainerCommon))
}
func SetDefaults_Service(obj *v1.Service) {
if obj.Spec.SessionAffinity == "" {
obj.Spec.SessionAffinity = v1.ServiceAffinityNone
}
if obj.Spec.SessionAffinity == v1.ServiceAffinityNone {
obj.Spec.SessionAffinityConfig = nil
}
if obj.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
if obj.Spec.SessionAffinityConfig == nil || obj.Spec.SessionAffinityConfig.ClientIP == nil || obj.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == nil {
timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds
obj.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
ClientIP: &v1.ClientIPConfig{
TimeoutSeconds: &timeoutSeconds,
},
}
}
}
if obj.Spec.Type == "" {
obj.Spec.Type = v1.ServiceTypeClusterIP
}
for i := range obj.Spec.Ports {
sp := &obj.Spec.Ports[i]
if sp.Protocol == "" {
sp.Protocol = v1.ProtocolTCP
}
if sp.TargetPort == intstr.FromInt32(0) || sp.TargetPort == intstr.FromString("") {
sp.TargetPort = intstr.FromInt32(sp.Port)
}
}
// Defaults ExternalTrafficPolicy field for externally-accessible service
// to Cluster for consistency.
if service.ExternallyAccessible(obj) && obj.Spec.ExternalTrafficPolicy == "" {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
}
if obj.Spec.InternalTrafficPolicy == nil {
if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if obj.Spec.AllocateLoadBalancerNodePorts == nil {
obj.Spec.AllocateLoadBalancerNodePorts = ptr.To(true)
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) {
ipMode := v1.LoadBalancerIPModeVIP
for i, ing := range obj.Status.LoadBalancer.Ingress {
if ing.IP != "" && ing.IPMode == nil {
obj.Status.LoadBalancer.Ingress[i].IPMode = &ipMode
}
}
}
}
}
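// exampleDefaultService is an illustrative sketch (not part of the original
// source) of the defaults SetDefaults_Service applies to a minimal spec.
func exampleDefaultService() *v1.Service {
svc := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}}}
SetDefaults_Service(svc)
// svc.Spec.Type == v1.ServiceTypeClusterIP
// svc.Spec.SessionAffinity == v1.ServiceAffinityNone
// svc.Spec.Ports[0].Protocol == v1.ProtocolTCP
// svc.Spec.Ports[0].TargetPort == intstr.FromInt32(80)
return svc
}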
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits
// This is done here rather than a more specific defaulting pass on v1.ResourceRequirements
// because we only want this defaulting semantic to take place on a v1.Pod and not a v1.PodTemplate
for i := range obj.Spec.Containers {
// set requests to limits if requests are not specified, but limits are
if obj.Spec.Containers[i].Resources.Limits != nil {
if obj.Spec.Containers[i].Resources.Requests == nil {
obj.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
}
for key, value := range obj.Spec.Containers[i].Resources.Limits {
if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
obj.Spec.Containers[i].Resources.Requests[key] = value.DeepCopy()
}
}
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {
if obj.Spec.InitContainers[i].Resources.Requests == nil {
obj.Spec.InitContainers[i].Resources.Requests = make(v1.ResourceList)
}
for key, value := range obj.Spec.InitContainers[i].Resources.Limits {
if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists {
obj.Spec.InitContainers[i].Resources.Requests[key] = value.DeepCopy()
}
}
}
}
// Pod Requests default values must be applied after container-level default values
// have been populated.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) {
defaultHugePagePodLimits(obj)
defaultPodRequests(obj)
}
if obj.Spec.EnableServiceLinks == nil {
enableServiceLinks := v1.DefaultEnableServiceLinks
obj.Spec.EnableServiceLinks = &enableServiceLinks
}
if obj.Spec.HostNetwork {
defaultHostNetworkPorts(&obj.Spec.Containers)
defaultHostNetworkPorts(&obj.Spec.InitContainers)
}
}
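// exampleDefaultPodRequests is an illustrative sketch (not part of the
// original source): container requests defaulted from limits by
// SetDefaults_Pod. The quantity is hypothetical, and resource.MustParse
// assumes an extra import of k8s.io/apimachinery/pkg/api/resource.
func exampleDefaultPodRequests() *v1.Pod {
pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
Name: "app",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
},
}}}}
SetDefaults_Pod(pod)
// pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] now equals "500m".
return pod
}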
func SetDefaults_PodSpec(obj *v1.PodSpec) {
// New fields added here will break upgrade tests:
// https://github.com/kubernetes/kubernetes/issues/69445
// In most cases the new defaulted field can be added to SetDefaults_Pod instead of here, so
// that it only materializes in the Pod object and not all objects with a PodSpec field.
if obj.DNSPolicy == "" {
obj.DNSPolicy = v1.DNSClusterFirst
}
if obj.RestartPolicy == "" {
obj.RestartPolicy = v1.RestartPolicyAlways
}
if obj.SecurityContext == nil {
obj.SecurityContext = &v1.PodSecurityContext{}
}
if obj.TerminationGracePeriodSeconds == nil {
period := int64(v1.DefaultTerminationGracePeriodSeconds)
obj.TerminationGracePeriodSeconds = &period
}
if obj.SchedulerName == "" {
obj.SchedulerName = v1.DefaultSchedulerName
}
}
func SetDefaults_Probe(obj *v1.Probe) {
if obj.TimeoutSeconds == 0 {
obj.TimeoutSeconds = 1
}
if obj.PeriodSeconds == 0 {
obj.PeriodSeconds = 10
}
if obj.SuccessThreshold == 0 {
obj.SuccessThreshold = 1
}
if obj.FailureThreshold == 0 {
obj.FailureThreshold = 3
}
}
func SetDefaults_SecretVolumeSource(obj *v1.SecretVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.SecretVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_ConfigMapVolumeSource(obj *v1.ConfigMapVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.ConfigMapVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_DownwardAPIVolumeSource(obj *v1.DownwardAPIVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.DownwardAPIVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_Secret(obj *v1.Secret) {
if obj.Type == "" {
obj.Type = v1.SecretTypeOpaque
}
}
func SetDefaults_ProjectedVolumeSource(obj *v1.ProjectedVolumeSource) {
if obj.DefaultMode == nil {
perm := int32(v1.ProjectedVolumeSourceDefaultMode)
obj.DefaultMode = &perm
}
}
func SetDefaults_ServiceAccountTokenProjection(obj *v1.ServiceAccountTokenProjection) {
hour := int64(time.Hour.Seconds())
if obj.ExpirationSeconds == nil {
obj.ExpirationSeconds = &hour
}
}
func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.VolumePending
}
if obj.Spec.PersistentVolumeReclaimPolicy == "" {
obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if obj.Spec.VolumeMode == nil {
obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
if obj.Status.Phase == "" {
obj.Status.Phase = v1.ClaimPending
}
}
func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
if obj.VolumeMode == nil {
obj.VolumeMode = new(v1.PersistentVolumeMode)
*obj.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_Endpoints(obj *v1.Endpoints) {
for i := range obj.Subsets {
ss := &obj.Subsets[i]
for i := range ss.Ports {
ep := &ss.Ports[i]
if ep.Protocol == "" {
ep.Protocol = v1.ProtocolTCP
}
}
}
}
func SetDefaults_HTTPGetAction(obj *v1.HTTPGetAction) {
if obj.Path == "" {
obj.Path = "/"
}
if obj.Scheme == "" {
obj.Scheme = v1.URISchemeHTTP
}
}
// SetDefaults_Namespace adds a default label for all namespaces
func SetDefaults_Namespace(obj *v1.Namespace) {
// we can't SetDefaults for nameless namespaces (generateName).
// This code needs to be kept in sync with the implementation that exists
// in Namespace Canonicalize strategy (pkg/registry/core/namespace)
// note that this can result in many calls to feature enablement in some cases, but
// we assume that there's no real cost there.
if len(obj.Name) > 0 {
if obj.Labels == nil {
obj.Labels = map[string]string{}
}
obj.Labels[v1.LabelMetadataName] = obj.Name
}
}
func SetDefaults_NamespaceStatus(obj *v1.NamespaceStatus) {
if obj.Phase == "" {
obj.Phase = v1.NamespaceActive
}
}
func SetDefaults_NodeStatus(obj *v1.NodeStatus) {
if obj.Allocatable == nil && obj.Capacity != nil {
obj.Allocatable = make(v1.ResourceList, len(obj.Capacity))
for key, value := range obj.Capacity {
obj.Allocatable[key] = value.DeepCopy()
}
}
}
func SetDefaults_ObjectFieldSelector(obj *v1.ObjectFieldSelector) {
if obj.APIVersion == "" {
obj.APIVersion = "v1"
}
}
func SetDefaults_LimitRangeItem(obj *v1.LimitRangeItem) {
// for container limits, we apply default values
if obj.Type == v1.LimitTypeContainer {
if obj.Default == nil {
obj.Default = make(v1.ResourceList)
}
if obj.DefaultRequest == nil {
obj.DefaultRequest = make(v1.ResourceList)
}
// If a default limit is unspecified, but the max is specified, default the limit to the max
for key, value := range obj.Max {
if _, exists := obj.Default[key]; !exists {
obj.Default[key] = value.DeepCopy()
}
}
// If a default limit is specified, but the default request is not, default request to limit
for key, value := range obj.Default {
if _, exists := obj.DefaultRequest[key]; !exists {
obj.DefaultRequest[key] = value.DeepCopy()
}
}
// If a default request is not specified, but the min is provided, default request to the min
for key, value := range obj.Min {
if _, exists := obj.DefaultRequest[key]; !exists {
obj.DefaultRequest[key] = value.DeepCopy()
}
}
}
}
func SetDefaults_ConfigMap(obj *v1.ConfigMap) {
if obj.Data == nil {
obj.Data = make(map[string]string)
}
}
// With host networking, default all container ports to host ports.
func defaultHostNetworkPorts(containers *[]v1.Container) {
for i := range *containers {
for j := range (*containers)[i].Ports {
if (*containers)[i].Ports[j].HostPort == 0 {
(*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort
}
}
}
}
func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) {
typeVol := v1.HostPathUnset
if obj.Type == nil {
obj.Type = &typeVol
}
}
func SetDefaults_PodLogOptions(obj *v1.PodLogOptions) {
if utilfeature.DefaultFeatureGate.Enabled(features.PodLogsQuerySplitStreams) {
if obj.Stream == nil {
obj.Stream = ptr.To(v1.LogStreamAll)
}
}
}
// defaultPodRequests applies default values for pod-level requests, only when
// pod-level limits are set, in the following scenarios:
// 1. When at least one container (regular, init or sidecar) has requests set:
// The pod-level requests become equal to the effective requests of all containers
// in the pod.
// 2. When no containers have requests set: The pod-level requests become equal to
// pod-level limits.
// This defaulting behavior ensures consistent resource accounting at the pod-level
// while maintaining compatibility with the container-level specifications, as detailed
// in KEP-2837: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#proposed-validation--defaulting-rules
func defaultPodRequests(obj *v1.Pod) {
// We only populate defaults when the pod-level resources are partly specified already.
if obj.Spec.Resources == nil {
return
}
if len(obj.Spec.Resources.Limits) == 0 {
return
}
podReqs := obj.Spec.Resources.Requests
if podReqs == nil {
podReqs = make(v1.ResourceList)
}
aggrCtrReqs := resourcehelper.AggregateContainerRequests(obj, resourcehelper.PodResourcesOptions{})
// When containers specify requests for a resource (supported by
// PodLevelResources feature) and pod-level requests are not set, the pod-level requests
// default to the effective requests of all the containers for that resource.
for key, aggrCtrReq := range aggrCtrReqs {
// Default pod-level requests for overcommittable resources from the aggregated container requests.
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) && corev1helper.IsOvercommitAllowed(key) {
podReqs[key] = aggrCtrReq.DeepCopy()
}
}
}
// When no containers specify requests for a resource, the pod-level requests
// will default to match the pod-level limits, if pod-level
// limits exist for that resource.
// Defaulting of pod-level hugepages requests depends on defaultHugePagePodLimits:
// if defaultHugePagePodLimits defined the limit, the request is set here.
for key, podLim := range obj.Spec.Resources.Limits {
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
podReqs[key] = podLim.DeepCopy()
}
}
// Only set pod-level resource requests in the PodSpec if the requirements map
// contains entries after collecting container-level requests and pod-level limits.
if len(podReqs) > 0 {
obj.Spec.Resources.Requests = podReqs
}
}
// defaultHugePagePodLimits applies default values for pod-level limits, only when
// container hugepage limits are set, but not at the pod level, in the following
// scenario:
// 1. When at least one container (regular, init or sidecar) has hugepage
// limits set:
// The pod-level limit becomes equal to the aggregated hugepages limit of all
// the containers in the pod.
func defaultHugePagePodLimits(pod *v1.Pod) {
// We only populate hugepage limit defaults when the pod-level resources are partly specified.
if pod.Spec.Resources == nil {
return
}
if len(pod.Spec.Resources.Limits) == 0 && len(pod.Spec.Resources.Requests) == 0 {
return
}
podLims := pod.Spec.Resources.Limits
if podLims == nil {
podLims = make(v1.ResourceList)
}
aggrCtrLims := resourcehelper.AggregateContainerLimits(pod, resourcehelper.PodResourcesOptions{})
// When containers specify limits for hugepages and pod-level limits are not
// set for that resource, the pod-level limit will default to the aggregated
// hugepages limit of all the containers.
for key, aggrCtrLim := range aggrCtrLims {
if !resourcehelper.IsSupportedPodLevelResource(key) || !corev1helper.IsHugePageResourceName(key) {
continue
}
// We do not default pod-level hugepage limits if there is a hugepage request.
if _, exists := pod.Spec.Resources.Requests[key]; exists {
continue
}
if _, exists := podLims[key]; !exists {
podLims[key] = aggrCtrLim.DeepCopy()
}
}
// Only set pod-level resource limits in the PodSpec if the requirements map
// contains entries after collecting container-level limits and pod-level limits for hugepages.
if len(podLims) > 0 {
pod.Spec.Resources.Limits = podLims
}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. the resource name does not have the "requests." prefix,
// to avoid confusion with the convention in quota;
// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
func IsExtendedResourceName(name v1.ResourceName) bool {
if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
return false
}
// Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name
nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true
}
// IsPrefixedNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace.
func IsPrefixedNativeResource(name v1.ResourceName) bool {
return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix)
}
// IsNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsNativeResource(name v1.ResourceName) bool {
return !strings.Contains(string(name), "/") ||
IsPrefixedNativeResource(name)
}
// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}
// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName {
return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String()))
}
// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name
// an error is returned.
func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) {
if !IsHugePageResourceName(name) {
return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
}
pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix)
return resource.ParseQuantity(pageSize)
}
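// exampleHugePageResourceName is an illustrative sketch (not part of the
// original source): the hugepage resource name round-trip using the two
// helpers above. The page size is hypothetical.
func exampleHugePageResourceName() (v1.ResourceName, resource.Quantity) {
name := HugePageResourceName(resource.MustParse("2Mi")) // "hugepages-2Mi"
size, _ := HugePageSizeFromResourceName(name)           // 2Mi
return name, size
}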
// HugePageUnitSizeFromByteSize returns the hugepage size in the format
// <size><unit-prefix>B (1024 = "1KB", 1048576 = "1MB", etc).
// `size` must be divisible into the largest unit that can be expressed.
func HugePageUnitSizeFromByteSize(size int64) (string, error) {
// hugePageSizeUnitList is borrowed from opencontainers/runc/libcontainer/cgroups/utils.go
var hugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
idx := 0
maxIdx := len(hugePageSizeUnitList) - 1
for size%1024 == 0 && idx < maxIdx {
size /= 1024
idx++
}
if size > 1024 && idx < maxIdx {
return "", fmt.Errorf("size: %d%s must be divisible into the largest expressible unit", size, hugePageSizeUnitList[idx])
}
return fmt.Sprintf("%d%s", size, hugePageSizeUnitList[idx]), nil
}
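// exampleHugePageUnitSize is an illustrative sketch (not part of the original
// source): byte counts reduced to their largest exact unit.
func exampleHugePageUnitSize() {
two, _ := HugePageUnitSizeFromByteSize(2 * 1024 * 1024)    // "2MB"
one, _ := HugePageUnitSizeFromByteSize(1024 * 1024 * 1024) // "1GB"
fmt.Println(two, one)
}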
// IsHugePageMedium returns true if the volume medium is in 'HugePages[-size]' format
func IsHugePageMedium(medium v1.StorageMedium) bool {
if medium == v1.StorageMediumHugePages {
return true
}
return strings.HasPrefix(string(medium), string(v1.StorageMediumHugePagesPrefix))
}
// HugePageSizeFromMedium returns the page size for the specified huge page medium.
// If the specified input is not a valid huge page medium an error is returned.
func HugePageSizeFromMedium(medium v1.StorageMedium) (resource.Quantity, error) {
if !IsHugePageMedium(medium) {
return resource.Quantity{}, fmt.Errorf("medium: %s is not a hugepage medium", medium)
}
if medium == v1.StorageMediumHugePages {
return resource.Quantity{}, fmt.Errorf("medium: %s doesn't have size information", medium)
}
pageSize := strings.TrimPrefix(string(medium), string(v1.StorageMediumHugePagesPrefix))
return resource.ParseQuantity(pageSize)
}
// IsOvercommitAllowed returns true if the resource is in the default
// namespace and is not hugepages.
func IsOvercommitAllowed(name v1.ResourceName) bool {
return IsNativeResource(name) &&
!IsHugePageResourceName(name)
}
// IsAttachableVolumeResourceName returns true when the resource name is prefixed in attachable volume
func IsAttachableVolumeResourceName(name v1.ResourceName) bool {
return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix)
}
// IsServiceIPSet checks whether the service's ClusterIP is set;
// the objective is not to perform validation here.
func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// Modes, when present, are always in the same order: RWO,ROX,RWX,RWOP.
func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if ContainsAccessMode(modes, v1.ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if ContainsAccessMode(modes, v1.ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if ContainsAccessMode(modes, v1.ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
if ContainsAccessMode(modes, v1.ReadWriteOncePod) {
modesStr = append(modesStr, "RWOP")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []v1.PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, v1.ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, v1.ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, v1.ReadWriteMany)
case s == "RWOP":
accessModes = append(accessModes, v1.ReadWriteOncePod)
}
}
return accessModes
}
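// exampleAccessModesRoundTrip is an illustrative sketch (not part of the
// original source): the access-mode round-trip. Duplicates are dropped and
// the canonical RWO,ROX,RWX,RWOP order is applied regardless of input order.
func exampleAccessModesRoundTrip() string {
modes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadWriteOnce, v1.ReadWriteOnce}
s := GetAccessModesAsString(modes) // "RWO,RWX"
_ = GetAccessModesFromString(s)    // [ReadWriteOnce ReadWriteMany]
return s
}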
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode {
accessModes := []v1.PersistentVolumeAccessMode{}
for _, m := range modes {
if !ContainsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if any of the given requirement keys is already specified in the terms' match expressions
func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
for _, req := range reqs {
for _, term := range terms {
for _, r := range term.MatchExpressions {
if r.Key == req.Key {
return true
}
}
}
}
return false
}
// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct
// that implements labels.Selector.
func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
if len(tsm) == 0 {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range tsm {
r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}
// MatchTopologySelectorTerms checks whether the given labels match any of the topology selector terms (ORed);
// a nil or empty term matches no objects, while an empty term list matches all objects.
func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool {
if len(topologySelectorTerms) == 0 {
// empty term list matches all objects
return true
}
for _, req := range topologySelectorTerms {
// nil or empty term selects no objects
if len(req.MatchLabelExpressions) == 0 {
continue
}
labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions)
if err != nil || !labelSelector.Matches(lbls) {
continue
}
return true
}
return false
}
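// exampleMatchTopologyTerms is an illustrative sketch (not part of the
// original source): a single topology term matched against a node's labels.
// The zone key and values are hypothetical.
func exampleMatchTopologyTerms() bool {
terms := []v1.TopologySelectorTerm{{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
Key:    "topology.kubernetes.io/zone",
Values: []string{"us-east-1a", "us-east-1b"},
}},
}}
lbls := labels.Set{"topology.kubernetes.io/zone": "us-east-1a"}
return MatchTopologySelectorTerms(terms, lbls) // true
}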
// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool {
podTolerations := spec.Tolerations
var newTolerations []v1.Toleration
updated := false
for i := range podTolerations {
if toleration.MatchToleration(&podTolerations[i]) {
if helper.Semantic.DeepEqual(toleration, podTolerations[i]) {
return false
}
newTolerations = append(newTolerations, *toleration)
updated = true
continue
}
newTolerations = append(newTolerations, podTolerations[i])
}
if !updated {
newTolerations = append(newTolerations, *toleration)
}
spec.Tolerations = newTolerations
return true
}
// GetMatchingTolerations returns true and the list of Tolerations matching all Taints if all taints are tolerated, and false otherwise.
func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) {
if len(taints) == 0 {
return true, []v1.Toleration{}
}
if len(tolerations) == 0 {
return false, []v1.Toleration{}
}
result := []v1.Toleration{}
for i := range taints {
tolerated := false
for j := range tolerations {
if tolerations[j].ToleratesTaint(&taints[i]) {
result = append(result, tolerations[j])
tolerated = true
break
}
}
if !tolerated {
return false, []v1.Toleration{}
}
}
return true, result
}
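// exampleGetMatchingTolerations is an illustrative sketch (not part of the
// original source): a single taint tolerated by an Equal-operator toleration.
// The key/values are hypothetical.
func exampleGetMatchingTolerations() bool {
taints := []v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}}
tols := []v1.Toleration{{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "gpu", Effect: v1.TaintEffectNoSchedule}}
ok, matching := GetMatchingTolerations(taints, tols)
return ok && len(matching) == 1 // true
}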
// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements
// labels.Selector.
func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) {
selector := labels.NewSelector()
var op selection.Operator
switch ssr.Operator {
case v1.ScopeSelectorOpIn:
op = selection.In
case v1.ScopeSelectorOpNotIn:
op = selection.NotIn
case v1.ScopeSelectorOpExists:
op = selection.Exists
case v1.ScopeSelectorOpDoesNotExist:
op = selection.DoesNotExist
default:
return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator)
}
r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
return selector, nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[v1.ResourceName]v1.PodQOSClass
func isSupportedQoSComputeResource(name v1.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
// If PodStatus.QOSClass is empty, it returns the value of ComputePodQOS(), which evaluates the pod's QoS class.
func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
if pod.Status.QOSClass != "" {
return pod.Status.QOSClass
}
return ComputePodQOS(pod)
}
// zeroQuantity represents a resource.Quantity with value "0", used as a baseline
// for resource comparisons.
var zeroQuantity = resource.MustParse("0")
// processResourceList adds the non-zero quantities of supported QoS compute
// resources from newList to list.
func processResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := list[name]; !exists {
list[name] = delta
} else {
delta.Add(list[name])
list[name] = delta
}
}
}
}
// getQOSResources returns a set of resource names from the provided resource list that:
// 1. Are supported QoS compute resources
// 2. Have quantities greater than zero
func getQOSResources(list v1.ResourceList) sets.Set[string] {
qosResources := sets.New[string]()
for name, quantity := range list {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosResources.Insert(string(name))
}
}
return qosResources
}
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS, which should be preferred for pods with a non-empty .Status.QOSClass.
// A pod is BestEffort if none of its containers specify any requests or limits.
// A pod is Guaranteed only when requests and limits are specified for all containers and are equal.
// A pod is Burstable if limits and requests do not match across all containers.
// TODO(ndixita): Refactor ComputePodQOS into smaller functions to make it more
// readable and maintainable.
func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass {
requests := v1.ResourceList{}
limits := v1.ResourceList{}
isGuaranteed := true
// When pod-level resources are specified, we use them to determine QoS class.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
pod.Spec.Resources != nil {
if len(pod.Spec.Resources.Requests) > 0 {
// process requests
processResourceList(requests, pod.Spec.Resources.Requests)
}
if len(pod.Spec.Resources.Limits) > 0 {
// process limits
processResourceList(limits, pod.Spec.Resources.Limits)
qosLimitResources := getQOSResources(pod.Spec.Resources.Limits)
if !qosLimitResources.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
} else {
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []v1.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) {
isGuaranteed = false
}
}
}
if len(requests) == 0 && len(limits) == 0 {
return v1.PodQOSBestEffort
}
// Check if requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return v1.PodQOSGuaranteed
}
return v1.PodQOSBurstable
}
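// exampleComputePodQOS is an illustrative sketch (not part of the original
// source): a container with requests below limits and no memory limit is
// classified Burstable. The quantities are hypothetical.
func exampleComputePodQOS() v1.PodQOSClass {
pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
},
}}}}
return ComputePodQOS(pod) // v1.PodQOSBurstable
}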
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = ""
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
)
const isNegativeErrorMsg string = `must be greater than or equal to 0`
const isNotIntegerErrorMsg string = `must be an integer`
// ValidateResourceRequirements checks that the resource
// Limits/Requests have valid values. Any incorrect value will be added to
// the ErrorList.
func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
for resourceName, quantity := range requirements.Limits {
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, ValidateContainerResourceName(core.ResourceName(resourceName), fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceName(resourceName), quantity, fldPath)...)
}
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, ValidateContainerResourceName(core.ResourceName(resourceName), fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceName(resourceName), quantity, fldPath)...)
// Check that request <= limit.
limitQuantity, exists := requirements.Limits[resourceName]
if exists {
// For non-overcommittable resources such as GPUs, requests can neither exceed limits nor be lower, i.e. they must be equal.
if quantity.Cmp(limitQuantity) != 0 && !v1helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String())))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String())))
}
}
}
return allErrs
}
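// exampleValidateResourceRequirements is an illustrative sketch (not part of
// the original source): a CPU request that exceeds its limit produces a
// single Invalid error on the requests path. The quantities are hypothetical.
func exampleValidateResourceRequirements() field.ErrorList {
req := &v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
}
return ValidateResourceRequirements(req, field.NewPath("resources"))
}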
// ValidateContainerResourceName checks the name of resource specified for a container
func ValidateContainerResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(value, fldPath)
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardContainerResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
}
} else if !v1helper.IsNativeResource(v1.ResourceName(value)) {
if !v1helper.IsExtendedResourceName(v1.ResourceName(value)) {
return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard"))
}
}
return allErrs
}
// ValidateResourceQuantityValue enforces that the specified quantity is valid for the specified resource
func ValidateResourceQuantityValue(resource core.ResourceName, value resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
if helper.IsIntegerResourceName(resource) {
if value.MilliValue()%int64(1000) != int64(0) {
allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
}
}
return allErrs
}
// ValidateNonnegativeQuantity checks that a Quantity is not negative.
func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if value.Cmp(resource.Quantity{}) < 0 {
allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
}
return allErrs
}
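// nonnegativeQuantityExample is an illustrative sketch (not part of the
// original file): a negative quantity such as the hypothetical "-100m"
// compares below the zero-value Quantity and yields an isNegativeErrorMsg
// error.
func nonnegativeQuantityExample() field.ErrorList {
	return ValidateNonnegativeQuantity(resource.MustParse("-100m"), field.NewPath("requests"))
}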
// validateResourceName validates a compute resource type name.
// Refer to docs/design/resources.md for more details.
func validateResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := apivalidation.ValidateQualifiedName(string(value), fldPath)
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
}
}
return allErrs
}
var validLogStreams = sets.New[string](
v1.LogStreamStdout,
v1.LogStreamStderr,
v1.LogStreamAll,
)
// ValidatePodLogOptions checks that any options that are set hold valid
// values. Any invalid value is reported in the returned ErrorList.
func ValidatePodLogOptions(opts *v1.PodLogOptions) field.ErrorList {
allErrs := field.ErrorList{}
if opts.TailLines != nil && *opts.TailLines < 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
}
if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
}
switch {
case opts.SinceSeconds != nil && opts.SinceTime != nil:
allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
case opts.SinceSeconds != nil:
if *opts.SinceSeconds < 1 {
allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
}
}
// opts.Stream can be nil because defaulting might not apply if no URL params are provided.
if opts.Stream != nil {
if !validLogStreams.Has(*opts.Stream) {
allErrs = append(allErrs, field.NotSupported(field.NewPath("stream"), *opts.Stream, validLogStreams.UnsortedList()))
}
if *opts.Stream != v1.LogStreamAll && opts.TailLines != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "`tailLines` and specific `stream` are mutually exclusive for now"))
}
}
return allErrs
}
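// podLogOptionsExample is an illustrative sketch (not part of the original
// file): a negative tailLines value is rejected, independently of the other
// option checks above.
func podLogOptionsExample() field.ErrorList {
	tail := int64(-1)
	// Returns an Invalid error on tailLines because -1 < 0.
	return ValidatePodLogOptions(&v1.PodLogOptions{TailLines: &tail})
}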
// AccumulateUniqueHostPorts checks all the containers for duplicate host
// ports. Any duplicate port is reported in the returned ErrorList.
func AccumulateUniqueHostPorts(containers []v1.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for ci, ctr := range containers {
idxPath := fldPath.Index(ci)
portsPath := idxPath.Child("ports")
for pi := range ctr.Ports {
idxPath := portsPath.Index(pi)
port := ctr.Ports[pi].HostPort
if port == 0 {
continue
}
str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol)
if accumulator.Has(str) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
} else {
accumulator.Insert(str)
}
}
}
return allErrs
}
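// hostPortExample is an illustrative sketch (not part of the original file):
// two containers publishing the same hostPort/protocol pair trigger a
// Duplicate error on the second occurrence; hostPort 0 (unset) is skipped.
func hostPortExample() field.ErrorList {
	accumulator := sets.NewString()
	containers := []v1.Container{
		{Ports: []v1.ContainerPort{{HostPort: 8080, Protocol: v1.ProtocolTCP}}},
		{Ports: []v1.ContainerPort{{HostPort: 8080, Protocol: v1.ProtocolTCP}}},
	}
	return AccumulateUniqueHostPorts(containers, &accumulator, field.NewPath("containers"))
}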
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
url "net/url"
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
apps "k8s.io/kubernetes/pkg/apis/apps"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
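// Illustrative sketch (not part of the generated file): once the conversions
// below are registered, a scheme can convert between the external (corev1)
// and internal (core) types. The variable names here are hypothetical.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	internal := &core.Pod{}
//	// Convert an external v1 Pod into its internal representation.
//	if err := scheme.Convert(&corev1.Pod{}, internal, nil); err != nil {
//		panic(err)
//	}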
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*corev1.AWSElasticBlockStoreVolumeSource)(nil), (*core.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(a.(*corev1.AWSElasticBlockStoreVolumeSource), b.(*core.AWSElasticBlockStoreVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AWSElasticBlockStoreVolumeSource)(nil), (*corev1.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(a.(*core.AWSElasticBlockStoreVolumeSource), b.(*corev1.AWSElasticBlockStoreVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Affinity)(nil), (*core.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Affinity_To_core_Affinity(a.(*corev1.Affinity), b.(*core.Affinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Affinity)(nil), (*corev1.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Affinity_To_v1_Affinity(a.(*core.Affinity), b.(*corev1.Affinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AppArmorProfile)(nil), (*core.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AppArmorProfile_To_core_AppArmorProfile(a.(*corev1.AppArmorProfile), b.(*core.AppArmorProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AppArmorProfile)(nil), (*corev1.AppArmorProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AppArmorProfile_To_v1_AppArmorProfile(a.(*core.AppArmorProfile), b.(*corev1.AppArmorProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AttachedVolume)(nil), (*core.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AttachedVolume_To_core_AttachedVolume(a.(*corev1.AttachedVolume), b.(*core.AttachedVolume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AttachedVolume)(nil), (*corev1.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AttachedVolume_To_v1_AttachedVolume(a.(*core.AttachedVolume), b.(*corev1.AttachedVolume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AvoidPods)(nil), (*core.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AvoidPods_To_core_AvoidPods(a.(*corev1.AvoidPods), b.(*core.AvoidPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AvoidPods)(nil), (*corev1.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AvoidPods_To_v1_AvoidPods(a.(*core.AvoidPods), b.(*corev1.AvoidPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AzureDiskVolumeSource)(nil), (*core.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(a.(*corev1.AzureDiskVolumeSource), b.(*core.AzureDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AzureDiskVolumeSource)(nil), (*corev1.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(a.(*core.AzureDiskVolumeSource), b.(*corev1.AzureDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AzureFilePersistentVolumeSource)(nil), (*core.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(a.(*corev1.AzureFilePersistentVolumeSource), b.(*core.AzureFilePersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AzureFilePersistentVolumeSource)(nil), (*corev1.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(a.(*core.AzureFilePersistentVolumeSource), b.(*corev1.AzureFilePersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.AzureFileVolumeSource)(nil), (*core.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(a.(*corev1.AzureFileVolumeSource), b.(*core.AzureFileVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.AzureFileVolumeSource)(nil), (*corev1.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(a.(*core.AzureFileVolumeSource), b.(*corev1.AzureFileVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Binding)(nil), (*core.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Binding_To_core_Binding(a.(*corev1.Binding), b.(*core.Binding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Binding)(nil), (*corev1.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Binding_To_v1_Binding(a.(*core.Binding), b.(*corev1.Binding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CSIPersistentVolumeSource)(nil), (*core.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(a.(*corev1.CSIPersistentVolumeSource), b.(*core.CSIPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CSIPersistentVolumeSource)(nil), (*corev1.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(a.(*core.CSIPersistentVolumeSource), b.(*corev1.CSIPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CSIVolumeSource)(nil), (*core.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(a.(*corev1.CSIVolumeSource), b.(*core.CSIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CSIVolumeSource)(nil), (*corev1.CSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(a.(*core.CSIVolumeSource), b.(*corev1.CSIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Capabilities)(nil), (*core.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Capabilities_To_core_Capabilities(a.(*corev1.Capabilities), b.(*core.Capabilities), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Capabilities)(nil), (*corev1.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Capabilities_To_v1_Capabilities(a.(*core.Capabilities), b.(*corev1.Capabilities), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CephFSPersistentVolumeSource)(nil), (*core.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(a.(*corev1.CephFSPersistentVolumeSource), b.(*core.CephFSPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CephFSPersistentVolumeSource)(nil), (*corev1.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(a.(*core.CephFSPersistentVolumeSource), b.(*corev1.CephFSPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CephFSVolumeSource)(nil), (*core.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(a.(*corev1.CephFSVolumeSource), b.(*core.CephFSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CephFSVolumeSource)(nil), (*corev1.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(a.(*core.CephFSVolumeSource), b.(*corev1.CephFSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CinderPersistentVolumeSource)(nil), (*core.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(a.(*corev1.CinderPersistentVolumeSource), b.(*core.CinderPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CinderPersistentVolumeSource)(nil), (*corev1.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(a.(*core.CinderPersistentVolumeSource), b.(*corev1.CinderPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.CinderVolumeSource)(nil), (*core.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(a.(*corev1.CinderVolumeSource), b.(*core.CinderVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.CinderVolumeSource)(nil), (*corev1.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(a.(*core.CinderVolumeSource), b.(*corev1.CinderVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*corev1.ClientIPConfig), b.(*core.ClientIPConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClientIPConfig)(nil), (*corev1.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClientIPConfig_To_v1_ClientIPConfig(a.(*core.ClientIPConfig), b.(*corev1.ClientIPConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ClusterTrustBundleProjection)(nil), (*core.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(a.(*corev1.ClusterTrustBundleProjection), b.(*core.ClusterTrustBundleProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClusterTrustBundleProjection)(nil), (*corev1.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(a.(*core.ClusterTrustBundleProjection), b.(*corev1.ClusterTrustBundleProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*corev1.ComponentCondition), b.(*core.ComponentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ComponentCondition)(nil), (*corev1.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ComponentCondition_To_v1_ComponentCondition(a.(*core.ComponentCondition), b.(*corev1.ComponentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ComponentStatus)(nil), (*core.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ComponentStatus_To_core_ComponentStatus(a.(*corev1.ComponentStatus), b.(*core.ComponentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ComponentStatus)(nil), (*corev1.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ComponentStatus_To_v1_ComponentStatus(a.(*core.ComponentStatus), b.(*corev1.ComponentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ComponentStatusList)(nil), (*core.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ComponentStatusList_To_core_ComponentStatusList(a.(*corev1.ComponentStatusList), b.(*core.ComponentStatusList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ComponentStatusList)(nil), (*corev1.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ComponentStatusList_To_v1_ComponentStatusList(a.(*core.ComponentStatusList), b.(*corev1.ComponentStatusList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMap)(nil), (*core.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMap_To_core_ConfigMap(a.(*corev1.ConfigMap), b.(*core.ConfigMap), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMap)(nil), (*corev1.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMap_To_v1_ConfigMap(a.(*core.ConfigMap), b.(*corev1.ConfigMap), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapEnvSource)(nil), (*core.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(a.(*corev1.ConfigMapEnvSource), b.(*core.ConfigMapEnvSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapEnvSource)(nil), (*corev1.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(a.(*core.ConfigMapEnvSource), b.(*corev1.ConfigMapEnvSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapKeySelector)(nil), (*core.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(a.(*corev1.ConfigMapKeySelector), b.(*core.ConfigMapKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapKeySelector)(nil), (*corev1.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(a.(*core.ConfigMapKeySelector), b.(*corev1.ConfigMapKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapList)(nil), (*core.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapList_To_core_ConfigMapList(a.(*corev1.ConfigMapList), b.(*core.ConfigMapList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapList)(nil), (*corev1.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapList_To_v1_ConfigMapList(a.(*core.ConfigMapList), b.(*corev1.ConfigMapList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapNodeConfigSource)(nil), (*core.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(a.(*corev1.ConfigMapNodeConfigSource), b.(*core.ConfigMapNodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapNodeConfigSource)(nil), (*corev1.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(a.(*core.ConfigMapNodeConfigSource), b.(*corev1.ConfigMapNodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapProjection)(nil), (*core.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(a.(*corev1.ConfigMapProjection), b.(*core.ConfigMapProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapProjection)(nil), (*corev1.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(a.(*core.ConfigMapProjection), b.(*corev1.ConfigMapProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ConfigMapVolumeSource)(nil), (*core.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(a.(*corev1.ConfigMapVolumeSource), b.(*core.ConfigMapVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ConfigMapVolumeSource)(nil), (*corev1.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(a.(*core.ConfigMapVolumeSource), b.(*corev1.ConfigMapVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Container)(nil), (*core.Container)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Container_To_core_Container(a.(*corev1.Container), b.(*core.Container), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Container)(nil), (*corev1.Container)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Container_To_v1_Container(a.(*core.Container), b.(*corev1.Container), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerExtendedResourceRequest)(nil), (*core.ContainerExtendedResourceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerExtendedResourceRequest_To_core_ContainerExtendedResourceRequest(a.(*corev1.ContainerExtendedResourceRequest), b.(*core.ContainerExtendedResourceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerExtendedResourceRequest)(nil), (*corev1.ContainerExtendedResourceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerExtendedResourceRequest_To_v1_ContainerExtendedResourceRequest(a.(*core.ContainerExtendedResourceRequest), b.(*corev1.ContainerExtendedResourceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerImage)(nil), (*core.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerImage_To_core_ContainerImage(a.(*corev1.ContainerImage), b.(*core.ContainerImage), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerImage)(nil), (*corev1.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerImage_To_v1_ContainerImage(a.(*core.ContainerImage), b.(*corev1.ContainerImage), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerPort)(nil), (*core.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerPort_To_core_ContainerPort(a.(*corev1.ContainerPort), b.(*core.ContainerPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerPort)(nil), (*corev1.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerPort_To_v1_ContainerPort(a.(*core.ContainerPort), b.(*corev1.ContainerPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerResizePolicy)(nil), (*core.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(a.(*corev1.ContainerResizePolicy), b.(*core.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerResizePolicy)(nil), (*corev1.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(a.(*core.ContainerResizePolicy), b.(*corev1.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerRestartRule)(nil), (*core.ContainerRestartRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerRestartRule_To_core_ContainerRestartRule(a.(*corev1.ContainerRestartRule), b.(*core.ContainerRestartRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerRestartRule)(nil), (*corev1.ContainerRestartRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerRestartRule_To_v1_ContainerRestartRule(a.(*core.ContainerRestartRule), b.(*corev1.ContainerRestartRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerRestartRuleOnExitCodes)(nil), (*core.ContainerRestartRuleOnExitCodes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerRestartRuleOnExitCodes_To_core_ContainerRestartRuleOnExitCodes(a.(*corev1.ContainerRestartRuleOnExitCodes), b.(*core.ContainerRestartRuleOnExitCodes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerRestartRuleOnExitCodes)(nil), (*corev1.ContainerRestartRuleOnExitCodes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerRestartRuleOnExitCodes_To_v1_ContainerRestartRuleOnExitCodes(a.(*core.ContainerRestartRuleOnExitCodes), b.(*corev1.ContainerRestartRuleOnExitCodes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerState_To_core_ContainerState(a.(*corev1.ContainerState), b.(*core.ContainerState), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerState)(nil), (*corev1.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerState_To_v1_ContainerState(a.(*core.ContainerState), b.(*corev1.ContainerState), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateRunning)(nil), (*core.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(a.(*corev1.ContainerStateRunning), b.(*core.ContainerStateRunning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerStateRunning)(nil), (*corev1.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(a.(*core.ContainerStateRunning), b.(*corev1.ContainerStateRunning), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateTerminated)(nil), (*core.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(a.(*corev1.ContainerStateTerminated), b.(*core.ContainerStateTerminated), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerStateTerminated)(nil), (*corev1.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(a.(*core.ContainerStateTerminated), b.(*corev1.ContainerStateTerminated), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerStateWaiting)(nil), (*core.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(a.(*corev1.ContainerStateWaiting), b.(*core.ContainerStateWaiting), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerStateWaiting)(nil), (*corev1.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(a.(*core.ContainerStateWaiting), b.(*corev1.ContainerStateWaiting), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerStatus)(nil), (*core.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerStatus_To_core_ContainerStatus(a.(*corev1.ContainerStatus), b.(*core.ContainerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerStatus)(nil), (*corev1.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerStatus_To_v1_ContainerStatus(a.(*core.ContainerStatus), b.(*corev1.ContainerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ContainerUser)(nil), (*core.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerUser_To_core_ContainerUser(a.(*corev1.ContainerUser), b.(*core.ContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerUser)(nil), (*corev1.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerUser_To_v1_ContainerUser(a.(*core.ContainerUser), b.(*corev1.ContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.DaemonEndpoint)(nil), (*core.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(a.(*corev1.DaemonEndpoint), b.(*core.DaemonEndpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.DaemonEndpoint)(nil), (*corev1.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(a.(*core.DaemonEndpoint), b.(*corev1.DaemonEndpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIProjection)(nil), (*core.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(a.(*corev1.DownwardAPIProjection), b.(*core.DownwardAPIProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.DownwardAPIProjection)(nil), (*corev1.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(a.(*core.DownwardAPIProjection), b.(*corev1.DownwardAPIProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIVolumeFile)(nil), (*core.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(a.(*corev1.DownwardAPIVolumeFile), b.(*core.DownwardAPIVolumeFile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeFile)(nil), (*corev1.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(a.(*core.DownwardAPIVolumeFile), b.(*corev1.DownwardAPIVolumeFile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.DownwardAPIVolumeSource)(nil), (*core.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(a.(*corev1.DownwardAPIVolumeSource), b.(*core.DownwardAPIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeSource)(nil), (*corev1.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(a.(*core.DownwardAPIVolumeSource), b.(*corev1.DownwardAPIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EmptyDirVolumeSource)(nil), (*core.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(a.(*corev1.EmptyDirVolumeSource), b.(*core.EmptyDirVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EmptyDirVolumeSource)(nil), (*corev1.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(a.(*core.EmptyDirVolumeSource), b.(*corev1.EmptyDirVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EndpointAddress)(nil), (*core.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointAddress_To_core_EndpointAddress(a.(*corev1.EndpointAddress), b.(*core.EndpointAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EndpointAddress)(nil), (*corev1.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EndpointAddress_To_v1_EndpointAddress(a.(*core.EndpointAddress), b.(*corev1.EndpointAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EndpointPort)(nil), (*core.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointPort_To_core_EndpointPort(a.(*corev1.EndpointPort), b.(*core.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EndpointPort)(nil), (*corev1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EndpointPort_To_v1_EndpointPort(a.(*core.EndpointPort), b.(*corev1.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EndpointSubset)(nil), (*core.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointSubset_To_core_EndpointSubset(a.(*corev1.EndpointSubset), b.(*core.EndpointSubset), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EndpointSubset)(nil), (*corev1.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EndpointSubset_To_v1_EndpointSubset(a.(*core.EndpointSubset), b.(*corev1.EndpointSubset), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Endpoints)(nil), (*core.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Endpoints_To_core_Endpoints(a.(*corev1.Endpoints), b.(*core.Endpoints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Endpoints)(nil), (*corev1.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Endpoints_To_v1_Endpoints(a.(*core.Endpoints), b.(*corev1.Endpoints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EndpointsList)(nil), (*core.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointsList_To_core_EndpointsList(a.(*corev1.EndpointsList), b.(*core.EndpointsList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EndpointsList)(nil), (*corev1.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EndpointsList_To_v1_EndpointsList(a.(*core.EndpointsList), b.(*corev1.EndpointsList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EnvFromSource)(nil), (*core.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EnvFromSource_To_core_EnvFromSource(a.(*corev1.EnvFromSource), b.(*core.EnvFromSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EnvFromSource)(nil), (*corev1.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EnvFromSource_To_v1_EnvFromSource(a.(*core.EnvFromSource), b.(*corev1.EnvFromSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EnvVar)(nil), (*core.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EnvVar_To_core_EnvVar(a.(*corev1.EnvVar), b.(*core.EnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EnvVar)(nil), (*corev1.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EnvVar_To_v1_EnvVar(a.(*core.EnvVar), b.(*corev1.EnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EnvVarSource)(nil), (*core.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EnvVarSource_To_core_EnvVarSource(a.(*corev1.EnvVarSource), b.(*core.EnvVarSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EnvVarSource)(nil), (*corev1.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EnvVarSource_To_v1_EnvVarSource(a.(*core.EnvVarSource), b.(*corev1.EnvVarSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EphemeralContainer)(nil), (*core.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EphemeralContainer_To_core_EphemeralContainer(a.(*corev1.EphemeralContainer), b.(*core.EphemeralContainer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EphemeralContainer)(nil), (*corev1.EphemeralContainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EphemeralContainer_To_v1_EphemeralContainer(a.(*core.EphemeralContainer), b.(*corev1.EphemeralContainer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EphemeralContainerCommon)(nil), (*core.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(a.(*corev1.EphemeralContainerCommon), b.(*core.EphemeralContainerCommon), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EphemeralContainerCommon)(nil), (*corev1.EphemeralContainerCommon)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(a.(*core.EphemeralContainerCommon), b.(*corev1.EphemeralContainerCommon), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EphemeralVolumeSource)(nil), (*core.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(a.(*corev1.EphemeralVolumeSource), b.(*core.EphemeralVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EphemeralVolumeSource)(nil), (*corev1.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(a.(*core.EphemeralVolumeSource), b.(*corev1.EphemeralVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Event_To_core_Event(a.(*corev1.Event), b.(*core.Event), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Event)(nil), (*corev1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Event_To_v1_Event(a.(*core.Event), b.(*corev1.Event), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EventList_To_core_EventList(a.(*corev1.EventList), b.(*core.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*corev1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventList_To_v1_EventList(a.(*core.EventList), b.(*corev1.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EventSeries_To_core_EventSeries(a.(*corev1.EventSeries), b.(*core.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*corev1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventSeries_To_v1_EventSeries(a.(*core.EventSeries), b.(*corev1.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.EventSource)(nil), (*core.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EventSource_To_core_EventSource(a.(*corev1.EventSource), b.(*core.EventSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventSource)(nil), (*corev1.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventSource_To_v1_EventSource(a.(*core.EventSource), b.(*corev1.EventSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ExecAction)(nil), (*core.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExecAction_To_core_ExecAction(a.(*corev1.ExecAction), b.(*core.ExecAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ExecAction)(nil), (*corev1.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ExecAction_To_v1_ExecAction(a.(*core.ExecAction), b.(*corev1.ExecAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.FCVolumeSource)(nil), (*core.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FCVolumeSource_To_core_FCVolumeSource(a.(*corev1.FCVolumeSource), b.(*core.FCVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.FCVolumeSource)(nil), (*corev1.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_FCVolumeSource_To_v1_FCVolumeSource(a.(*core.FCVolumeSource), b.(*corev1.FCVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.FileKeySelector)(nil), (*core.FileKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FileKeySelector_To_core_FileKeySelector(a.(*corev1.FileKeySelector), b.(*core.FileKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.FileKeySelector)(nil), (*corev1.FileKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_FileKeySelector_To_v1_FileKeySelector(a.(*core.FileKeySelector), b.(*corev1.FileKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.FlexPersistentVolumeSource)(nil), (*core.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(a.(*corev1.FlexPersistentVolumeSource), b.(*core.FlexPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.FlexPersistentVolumeSource)(nil), (*corev1.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(a.(*core.FlexPersistentVolumeSource), b.(*corev1.FlexPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.FlexVolumeSource)(nil), (*core.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(a.(*corev1.FlexVolumeSource), b.(*core.FlexVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.FlexVolumeSource)(nil), (*corev1.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(a.(*core.FlexVolumeSource), b.(*corev1.FlexVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.FlockerVolumeSource)(nil), (*core.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(a.(*corev1.FlockerVolumeSource), b.(*core.FlockerVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.FlockerVolumeSource)(nil), (*corev1.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(a.(*core.FlockerVolumeSource), b.(*corev1.FlockerVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.GCEPersistentDiskVolumeSource)(nil), (*core.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(a.(*corev1.GCEPersistentDiskVolumeSource), b.(*core.GCEPersistentDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.GCEPersistentDiskVolumeSource)(nil), (*corev1.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(a.(*core.GCEPersistentDiskVolumeSource), b.(*corev1.GCEPersistentDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.GRPCAction)(nil), (*core.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GRPCAction_To_core_GRPCAction(a.(*corev1.GRPCAction), b.(*core.GRPCAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.GRPCAction)(nil), (*corev1.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_GRPCAction_To_v1_GRPCAction(a.(*core.GRPCAction), b.(*corev1.GRPCAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.GitRepoVolumeSource)(nil), (*core.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(a.(*corev1.GitRepoVolumeSource), b.(*core.GitRepoVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.GitRepoVolumeSource)(nil), (*corev1.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(a.(*core.GitRepoVolumeSource), b.(*corev1.GitRepoVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*corev1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*corev1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*corev1.GlusterfsPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*corev1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.GlusterfsVolumeSource)(nil), (*corev1.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(a.(*core.GlusterfsVolumeSource), b.(*corev1.GlusterfsVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.HTTPGetAction)(nil), (*core.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HTTPGetAction_To_core_HTTPGetAction(a.(*corev1.HTTPGetAction), b.(*core.HTTPGetAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HTTPGetAction)(nil), (*corev1.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HTTPGetAction_To_v1_HTTPGetAction(a.(*core.HTTPGetAction), b.(*corev1.HTTPGetAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.HTTPHeader)(nil), (*core.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HTTPHeader_To_core_HTTPHeader(a.(*corev1.HTTPHeader), b.(*core.HTTPHeader), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HTTPHeader)(nil), (*corev1.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HTTPHeader_To_v1_HTTPHeader(a.(*core.HTTPHeader), b.(*corev1.HTTPHeader), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.HostAlias)(nil), (*core.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HostAlias_To_core_HostAlias(a.(*corev1.HostAlias), b.(*core.HostAlias), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HostAlias)(nil), (*corev1.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HostAlias_To_v1_HostAlias(a.(*core.HostAlias), b.(*corev1.HostAlias), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.HostIP)(nil), (*core.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HostIP_To_core_HostIP(a.(*corev1.HostIP), b.(*core.HostIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HostIP)(nil), (*corev1.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HostIP_To_v1_HostIP(a.(*core.HostIP), b.(*corev1.HostIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.HostPathVolumeSource)(nil), (*core.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(a.(*corev1.HostPathVolumeSource), b.(*core.HostPathVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HostPathVolumeSource)(nil), (*corev1.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(a.(*core.HostPathVolumeSource), b.(*corev1.HostPathVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ISCSIPersistentVolumeSource)(nil), (*core.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(a.(*corev1.ISCSIPersistentVolumeSource), b.(*core.ISCSIPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ISCSIPersistentVolumeSource)(nil), (*corev1.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(a.(*core.ISCSIPersistentVolumeSource), b.(*corev1.ISCSIPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ISCSIVolumeSource)(nil), (*core.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(a.(*corev1.ISCSIVolumeSource), b.(*core.ISCSIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ISCSIVolumeSource)(nil), (*corev1.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(a.(*core.ISCSIVolumeSource), b.(*corev1.ISCSIVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ImageVolumeSource)(nil), (*core.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(a.(*corev1.ImageVolumeSource), b.(*core.ImageVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ImageVolumeSource)(nil), (*corev1.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(a.(*core.ImageVolumeSource), b.(*corev1.ImageVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.KeyToPath)(nil), (*core.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_KeyToPath_To_core_KeyToPath(a.(*corev1.KeyToPath), b.(*core.KeyToPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.KeyToPath)(nil), (*corev1.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_KeyToPath_To_v1_KeyToPath(a.(*core.KeyToPath), b.(*corev1.KeyToPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Lifecycle)(nil), (*core.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Lifecycle_To_core_Lifecycle(a.(*corev1.Lifecycle), b.(*core.Lifecycle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Lifecycle)(nil), (*corev1.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Lifecycle_To_v1_Lifecycle(a.(*core.Lifecycle), b.(*corev1.Lifecycle), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LifecycleHandler)(nil), (*core.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LifecycleHandler_To_core_LifecycleHandler(a.(*corev1.LifecycleHandler), b.(*core.LifecycleHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LifecycleHandler)(nil), (*corev1.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LifecycleHandler_To_v1_LifecycleHandler(a.(*core.LifecycleHandler), b.(*corev1.LifecycleHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LimitRange)(nil), (*core.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitRange_To_core_LimitRange(a.(*corev1.LimitRange), b.(*core.LimitRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LimitRange)(nil), (*corev1.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LimitRange_To_v1_LimitRange(a.(*core.LimitRange), b.(*corev1.LimitRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeItem)(nil), (*core.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitRangeItem_To_core_LimitRangeItem(a.(*corev1.LimitRangeItem), b.(*core.LimitRangeItem), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LimitRangeItem)(nil), (*corev1.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LimitRangeItem_To_v1_LimitRangeItem(a.(*core.LimitRangeItem), b.(*corev1.LimitRangeItem), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeList)(nil), (*core.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitRangeList_To_core_LimitRangeList(a.(*corev1.LimitRangeList), b.(*core.LimitRangeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LimitRangeList)(nil), (*corev1.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LimitRangeList_To_v1_LimitRangeList(a.(*core.LimitRangeList), b.(*corev1.LimitRangeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LimitRangeSpec)(nil), (*core.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(a.(*corev1.LimitRangeSpec), b.(*core.LimitRangeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LimitRangeSpec)(nil), (*corev1.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(a.(*core.LimitRangeSpec), b.(*corev1.LimitRangeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LinuxContainerUser)(nil), (*core.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(a.(*corev1.LinuxContainerUser), b.(*core.LinuxContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LinuxContainerUser)(nil), (*corev1.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(a.(*core.LinuxContainerUser), b.(*corev1.LinuxContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.List)(nil), (*core.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_List_To_core_List(a.(*corev1.List), b.(*core.List), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.List)(nil), (*corev1.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_List_To_v1_List(a.(*core.List), b.(*corev1.List), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LoadBalancerIngress)(nil), (*core.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(a.(*corev1.LoadBalancerIngress), b.(*core.LoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LoadBalancerIngress)(nil), (*corev1.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(a.(*core.LoadBalancerIngress), b.(*corev1.LoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*corev1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*corev1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*corev1.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*corev1.LocalObjectReference), b.(*core.LocalObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LocalObjectReference)(nil), (*corev1.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LocalObjectReference_To_v1_LocalObjectReference(a.(*core.LocalObjectReference), b.(*corev1.LocalObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.LocalVolumeSource)(nil), (*core.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(a.(*corev1.LocalVolumeSource), b.(*core.LocalVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LocalVolumeSource)(nil), (*corev1.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(a.(*core.LocalVolumeSource), b.(*corev1.LocalVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ModifyVolumeStatus)(nil), (*core.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(a.(*corev1.ModifyVolumeStatus), b.(*core.ModifyVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ModifyVolumeStatus)(nil), (*corev1.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(a.(*core.ModifyVolumeStatus), b.(*corev1.ModifyVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*corev1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NFSVolumeSource)(nil), (*corev1.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(a.(*core.NFSVolumeSource), b.(*corev1.NFSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Namespace)(nil), (*core.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Namespace_To_core_Namespace(a.(*corev1.Namespace), b.(*core.Namespace), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Namespace)(nil), (*corev1.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Namespace_To_v1_Namespace(a.(*core.Namespace), b.(*corev1.Namespace), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NamespaceCondition)(nil), (*core.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NamespaceCondition_To_core_NamespaceCondition(a.(*corev1.NamespaceCondition), b.(*core.NamespaceCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NamespaceCondition)(nil), (*corev1.NamespaceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NamespaceCondition_To_v1_NamespaceCondition(a.(*core.NamespaceCondition), b.(*corev1.NamespaceCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NamespaceList)(nil), (*core.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NamespaceList_To_core_NamespaceList(a.(*corev1.NamespaceList), b.(*core.NamespaceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NamespaceList)(nil), (*corev1.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NamespaceList_To_v1_NamespaceList(a.(*core.NamespaceList), b.(*corev1.NamespaceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NamespaceSpec)(nil), (*core.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NamespaceSpec_To_core_NamespaceSpec(a.(*corev1.NamespaceSpec), b.(*core.NamespaceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NamespaceSpec)(nil), (*corev1.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NamespaceSpec_To_v1_NamespaceSpec(a.(*core.NamespaceSpec), b.(*corev1.NamespaceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NamespaceStatus)(nil), (*core.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NamespaceStatus_To_core_NamespaceStatus(a.(*corev1.NamespaceStatus), b.(*core.NamespaceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NamespaceStatus)(nil), (*corev1.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NamespaceStatus_To_v1_NamespaceStatus(a.(*core.NamespaceStatus), b.(*corev1.NamespaceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Node)(nil), (*core.Node)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Node_To_core_Node(a.(*corev1.Node), b.(*core.Node), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Node)(nil), (*corev1.Node)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Node_To_v1_Node(a.(*core.Node), b.(*corev1.Node), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeAddress)(nil), (*core.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeAddress_To_core_NodeAddress(a.(*corev1.NodeAddress), b.(*core.NodeAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeAddress)(nil), (*corev1.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeAddress_To_v1_NodeAddress(a.(*core.NodeAddress), b.(*corev1.NodeAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeAffinity)(nil), (*core.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeAffinity_To_core_NodeAffinity(a.(*corev1.NodeAffinity), b.(*core.NodeAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeAffinity)(nil), (*corev1.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeAffinity_To_v1_NodeAffinity(a.(*core.NodeAffinity), b.(*corev1.NodeAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeCondition)(nil), (*core.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeCondition_To_core_NodeCondition(a.(*corev1.NodeCondition), b.(*core.NodeCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeCondition)(nil), (*corev1.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeCondition_To_v1_NodeCondition(a.(*core.NodeCondition), b.(*corev1.NodeCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeConfigSource)(nil), (*core.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeConfigSource_To_core_NodeConfigSource(a.(*corev1.NodeConfigSource), b.(*core.NodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeConfigSource)(nil), (*corev1.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeConfigSource_To_v1_NodeConfigSource(a.(*core.NodeConfigSource), b.(*corev1.NodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeConfigStatus)(nil), (*core.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(a.(*corev1.NodeConfigStatus), b.(*core.NodeConfigStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeConfigStatus)(nil), (*corev1.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(a.(*core.NodeConfigStatus), b.(*corev1.NodeConfigStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeDaemonEndpoints)(nil), (*core.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(a.(*corev1.NodeDaemonEndpoints), b.(*core.NodeDaemonEndpoints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeDaemonEndpoints)(nil), (*corev1.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(a.(*core.NodeDaemonEndpoints), b.(*corev1.NodeDaemonEndpoints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeFeatures)(nil), (*core.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeFeatures_To_core_NodeFeatures(a.(*corev1.NodeFeatures), b.(*core.NodeFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeFeatures)(nil), (*corev1.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeFeatures_To_v1_NodeFeatures(a.(*core.NodeFeatures), b.(*corev1.NodeFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeList_To_core_NodeList(a.(*corev1.NodeList), b.(*core.NodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeList)(nil), (*corev1.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeList_To_v1_NodeList(a.(*core.NodeList), b.(*corev1.NodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeProxyOptions)(nil), (*core.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(a.(*corev1.NodeProxyOptions), b.(*core.NodeProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeProxyOptions)(nil), (*corev1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(a.(*core.NodeProxyOptions), b.(*corev1.NodeProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeRuntimeHandler)(nil), (*core.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(a.(*corev1.NodeRuntimeHandler), b.(*core.NodeRuntimeHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandler)(nil), (*corev1.NodeRuntimeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(a.(*core.NodeRuntimeHandler), b.(*corev1.NodeRuntimeHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeRuntimeHandlerFeatures)(nil), (*core.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(a.(*corev1.NodeRuntimeHandlerFeatures), b.(*core.NodeRuntimeHandlerFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeRuntimeHandlerFeatures)(nil), (*corev1.NodeRuntimeHandlerFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(a.(*core.NodeRuntimeHandlerFeatures), b.(*corev1.NodeRuntimeHandlerFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeSelector)(nil), (*core.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSelector_To_core_NodeSelector(a.(*corev1.NodeSelector), b.(*core.NodeSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeSelector)(nil), (*corev1.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSelector_To_v1_NodeSelector(a.(*core.NodeSelector), b.(*corev1.NodeSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeSelectorRequirement)(nil), (*core.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(a.(*corev1.NodeSelectorRequirement), b.(*core.NodeSelectorRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeSelectorRequirement)(nil), (*corev1.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(a.(*core.NodeSelectorRequirement), b.(*corev1.NodeSelectorRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeSelectorTerm)(nil), (*core.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(a.(*corev1.NodeSelectorTerm), b.(*core.NodeSelectorTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeSelectorTerm)(nil), (*corev1.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(a.(*core.NodeSelectorTerm), b.(*corev1.NodeSelectorTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeStatus)(nil), (*core.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeStatus_To_core_NodeStatus(a.(*corev1.NodeStatus), b.(*core.NodeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeStatus)(nil), (*corev1.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeStatus_To_v1_NodeStatus(a.(*core.NodeStatus), b.(*corev1.NodeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeSwapStatus)(nil), (*core.NodeSwapStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus(a.(*corev1.NodeSwapStatus), b.(*core.NodeSwapStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeSwapStatus)(nil), (*corev1.NodeSwapStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus(a.(*core.NodeSwapStatus), b.(*corev1.NodeSwapStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.NodeSystemInfo)(nil), (*core.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(a.(*corev1.NodeSystemInfo), b.(*core.NodeSystemInfo), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeSystemInfo)(nil), (*corev1.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(a.(*core.NodeSystemInfo), b.(*corev1.NodeSystemInfo), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ObjectFieldSelector)(nil), (*core.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(a.(*corev1.ObjectFieldSelector), b.(*core.ObjectFieldSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ObjectFieldSelector)(nil), (*corev1.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(a.(*core.ObjectFieldSelector), b.(*corev1.ObjectFieldSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ObjectReference)(nil), (*core.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ObjectReference_To_core_ObjectReference(a.(*corev1.ObjectReference), b.(*core.ObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ObjectReference)(nil), (*corev1.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ObjectReference_To_v1_ObjectReference(a.(*core.ObjectReference), b.(*corev1.ObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolume)(nil), (*core.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolume_To_core_PersistentVolume(a.(*corev1.PersistentVolume), b.(*core.PersistentVolume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolume)(nil), (*corev1.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolume_To_v1_PersistentVolume(a.(*core.PersistentVolume), b.(*corev1.PersistentVolume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaim)(nil), (*core.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(a.(*corev1.PersistentVolumeClaim), b.(*core.PersistentVolumeClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaim)(nil), (*corev1.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(a.(*core.PersistentVolumeClaim), b.(*corev1.PersistentVolumeClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimCondition)(nil), (*core.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(a.(*corev1.PersistentVolumeClaimCondition), b.(*core.PersistentVolumeClaimCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimCondition)(nil), (*corev1.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(a.(*core.PersistentVolumeClaimCondition), b.(*corev1.PersistentVolumeClaimCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimList)(nil), (*core.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(a.(*corev1.PersistentVolumeClaimList), b.(*core.PersistentVolumeClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimList)(nil), (*corev1.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(a.(*core.PersistentVolumeClaimList), b.(*corev1.PersistentVolumeClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimSpec)(nil), (*core.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(a.(*corev1.PersistentVolumeClaimSpec), b.(*core.PersistentVolumeClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimSpec)(nil), (*corev1.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(a.(*core.PersistentVolumeClaimSpec), b.(*corev1.PersistentVolumeClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimStatus)(nil), (*core.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(a.(*corev1.PersistentVolumeClaimStatus), b.(*core.PersistentVolumeClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimStatus)(nil), (*corev1.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(a.(*core.PersistentVolumeClaimStatus), b.(*corev1.PersistentVolumeClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimTemplate)(nil), (*core.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(a.(*corev1.PersistentVolumeClaimTemplate), b.(*core.PersistentVolumeClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimTemplate)(nil), (*corev1.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(a.(*core.PersistentVolumeClaimTemplate), b.(*corev1.PersistentVolumeClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*corev1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimVolumeSource)(nil), (*corev1.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(a.(*core.PersistentVolumeClaimVolumeSource), b.(*corev1.PersistentVolumeClaimVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeList)(nil), (*core.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(a.(*corev1.PersistentVolumeList), b.(*core.PersistentVolumeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeList)(nil), (*corev1.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(a.(*core.PersistentVolumeList), b.(*corev1.PersistentVolumeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeSource)(nil), (*core.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(a.(*corev1.PersistentVolumeSource), b.(*core.PersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSource)(nil), (*corev1.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(a.(*core.PersistentVolumeSource), b.(*corev1.PersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PersistentVolumeStatus)(nil), (*core.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(a.(*corev1.PersistentVolumeStatus), b.(*core.PersistentVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeStatus)(nil), (*corev1.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(a.(*core.PersistentVolumeStatus), b.(*corev1.PersistentVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PhotonPersistentDiskVolumeSource)(nil), (*core.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(a.(*corev1.PhotonPersistentDiskVolumeSource), b.(*core.PhotonPersistentDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PhotonPersistentDiskVolumeSource)(nil), (*corev1.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(a.(*core.PhotonPersistentDiskVolumeSource), b.(*corev1.PhotonPersistentDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodAffinity)(nil), (*core.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodAffinity_To_core_PodAffinity(a.(*corev1.PodAffinity), b.(*core.PodAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodAffinity)(nil), (*corev1.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodAffinity_To_v1_PodAffinity(a.(*core.PodAffinity), b.(*corev1.PodAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodAffinityTerm)(nil), (*core.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(a.(*corev1.PodAffinityTerm), b.(*core.PodAffinityTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodAffinityTerm)(nil), (*corev1.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(a.(*core.PodAffinityTerm), b.(*corev1.PodAffinityTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodAntiAffinity)(nil), (*core.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(a.(*corev1.PodAntiAffinity), b.(*core.PodAntiAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodAntiAffinity)(nil), (*corev1.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(a.(*core.PodAntiAffinity), b.(*corev1.PodAntiAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodAttachOptions)(nil), (*core.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodAttachOptions_To_core_PodAttachOptions(a.(*corev1.PodAttachOptions), b.(*core.PodAttachOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodAttachOptions)(nil), (*corev1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodAttachOptions_To_v1_PodAttachOptions(a.(*core.PodAttachOptions), b.(*corev1.PodAttachOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodCertificateProjection)(nil), (*core.PodCertificateProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodCertificateProjection_To_core_PodCertificateProjection(a.(*corev1.PodCertificateProjection), b.(*core.PodCertificateProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodCertificateProjection)(nil), (*corev1.PodCertificateProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodCertificateProjection_To_v1_PodCertificateProjection(a.(*core.PodCertificateProjection), b.(*corev1.PodCertificateProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodCondition)(nil), (*core.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodCondition_To_core_PodCondition(a.(*corev1.PodCondition), b.(*core.PodCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodCondition)(nil), (*corev1.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodCondition_To_v1_PodCondition(a.(*core.PodCondition), b.(*corev1.PodCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodDNSConfig)(nil), (*core.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDNSConfig_To_core_PodDNSConfig(a.(*corev1.PodDNSConfig), b.(*core.PodDNSConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodDNSConfig)(nil), (*corev1.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodDNSConfig_To_v1_PodDNSConfig(a.(*core.PodDNSConfig), b.(*corev1.PodDNSConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodDNSConfigOption)(nil), (*core.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(a.(*corev1.PodDNSConfigOption), b.(*core.PodDNSConfigOption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodDNSConfigOption)(nil), (*corev1.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(a.(*core.PodDNSConfigOption), b.(*corev1.PodDNSConfigOption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodExecOptions)(nil), (*core.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodExecOptions_To_core_PodExecOptions(a.(*corev1.PodExecOptions), b.(*core.PodExecOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodExecOptions)(nil), (*corev1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodExecOptions_To_v1_PodExecOptions(a.(*core.PodExecOptions), b.(*corev1.PodExecOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodExtendedResourceClaimStatus)(nil), (*core.PodExtendedResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodExtendedResourceClaimStatus_To_core_PodExtendedResourceClaimStatus(a.(*corev1.PodExtendedResourceClaimStatus), b.(*core.PodExtendedResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodExtendedResourceClaimStatus)(nil), (*corev1.PodExtendedResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodExtendedResourceClaimStatus_To_v1_PodExtendedResourceClaimStatus(a.(*core.PodExtendedResourceClaimStatus), b.(*corev1.PodExtendedResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodIP)(nil), (*core.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodIP_To_core_PodIP(a.(*corev1.PodIP), b.(*core.PodIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodIP)(nil), (*corev1.PodIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodIP_To_v1_PodIP(a.(*core.PodIP), b.(*corev1.PodIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodList)(nil), (*core.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodList_To_core_PodList(a.(*corev1.PodList), b.(*core.PodList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodList)(nil), (*corev1.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodList_To_v1_PodList(a.(*core.PodList), b.(*corev1.PodList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodLogOptions)(nil), (*core.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodLogOptions_To_core_PodLogOptions(a.(*corev1.PodLogOptions), b.(*core.PodLogOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodLogOptions)(nil), (*corev1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodLogOptions_To_v1_PodLogOptions(a.(*core.PodLogOptions), b.(*corev1.PodLogOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodOS)(nil), (*core.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodOS_To_core_PodOS(a.(*corev1.PodOS), b.(*core.PodOS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodOS)(nil), (*corev1.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodOS_To_v1_PodOS(a.(*core.PodOS), b.(*corev1.PodOS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodPortForwardOptions)(nil), (*core.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(a.(*corev1.PodPortForwardOptions), b.(*core.PodPortForwardOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodPortForwardOptions)(nil), (*corev1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(a.(*core.PodPortForwardOptions), b.(*corev1.PodPortForwardOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodProxyOptions)(nil), (*core.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodProxyOptions_To_core_PodProxyOptions(a.(*corev1.PodProxyOptions), b.(*core.PodProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodProxyOptions)(nil), (*corev1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodProxyOptions_To_v1_PodProxyOptions(a.(*core.PodProxyOptions), b.(*corev1.PodProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodReadinessGate)(nil), (*core.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodReadinessGate_To_core_PodReadinessGate(a.(*corev1.PodReadinessGate), b.(*core.PodReadinessGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodReadinessGate)(nil), (*corev1.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodReadinessGate_To_v1_PodReadinessGate(a.(*core.PodReadinessGate), b.(*corev1.PodReadinessGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*corev1.PodResourceClaim), b.(*core.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*corev1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*corev1.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodResourceClaimStatus)(nil), (*core.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(a.(*corev1.PodResourceClaimStatus), b.(*core.PodResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodResourceClaimStatus)(nil), (*corev1.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(a.(*core.PodResourceClaimStatus), b.(*corev1.PodResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*corev1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodSchedulingGate)(nil), (*corev1.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(a.(*core.PodSchedulingGate), b.(*corev1.PodSchedulingGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*corev1.PodSecurityContext), b.(*core.PodSecurityContext), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodSecurityContext)(nil), (*corev1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*corev1.PodSecurityContext), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodSignature)(nil), (*core.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSignature_To_core_PodSignature(a.(*corev1.PodSignature), b.(*core.PodSignature), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodSignature)(nil), (*corev1.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodSignature_To_v1_PodSignature(a.(*core.PodSignature), b.(*corev1.PodSignature), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodStatusResult)(nil), (*core.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodStatusResult_To_core_PodStatusResult(a.(*corev1.PodStatusResult), b.(*core.PodStatusResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodStatusResult)(nil), (*corev1.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodStatusResult_To_v1_PodStatusResult(a.(*core.PodStatusResult), b.(*corev1.PodStatusResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodTemplate)(nil), (*core.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodTemplate_To_core_PodTemplate(a.(*corev1.PodTemplate), b.(*core.PodTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodTemplate)(nil), (*corev1.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodTemplate_To_v1_PodTemplate(a.(*core.PodTemplate), b.(*corev1.PodTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PodTemplateList)(nil), (*core.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodTemplateList_To_core_PodTemplateList(a.(*corev1.PodTemplateList), b.(*core.PodTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodTemplateList)(nil), (*corev1.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodTemplateList_To_v1_PodTemplateList(a.(*core.PodTemplateList), b.(*corev1.PodTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PortStatus)(nil), (*core.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PortStatus_To_core_PortStatus(a.(*corev1.PortStatus), b.(*core.PortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PortStatus)(nil), (*corev1.PortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PortStatus_To_v1_PortStatus(a.(*core.PortStatus), b.(*corev1.PortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*corev1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PortworxVolumeSource)(nil), (*corev1.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(a.(*core.PortworxVolumeSource), b.(*corev1.PortworxVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Preconditions)(nil), (*core.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Preconditions_To_core_Preconditions(a.(*corev1.Preconditions), b.(*core.Preconditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Preconditions)(nil), (*corev1.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Preconditions_To_v1_Preconditions(a.(*core.Preconditions), b.(*corev1.Preconditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PreferAvoidPodsEntry)(nil), (*core.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(a.(*corev1.PreferAvoidPodsEntry), b.(*core.PreferAvoidPodsEntry), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PreferAvoidPodsEntry)(nil), (*corev1.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(a.(*core.PreferAvoidPodsEntry), b.(*corev1.PreferAvoidPodsEntry), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.PreferredSchedulingTerm)(nil), (*core.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(a.(*corev1.PreferredSchedulingTerm), b.(*core.PreferredSchedulingTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PreferredSchedulingTerm)(nil), (*corev1.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(a.(*core.PreferredSchedulingTerm), b.(*corev1.PreferredSchedulingTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Probe)(nil), (*core.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Probe_To_core_Probe(a.(*corev1.Probe), b.(*core.Probe), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Probe)(nil), (*corev1.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Probe_To_v1_Probe(a.(*core.Probe), b.(*corev1.Probe), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ProbeHandler)(nil), (*core.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ProbeHandler_To_core_ProbeHandler(a.(*corev1.ProbeHandler), b.(*core.ProbeHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ProbeHandler)(nil), (*corev1.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ProbeHandler_To_v1_ProbeHandler(a.(*core.ProbeHandler), b.(*corev1.ProbeHandler), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ProjectedVolumeSource)(nil), (*core.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(a.(*corev1.ProjectedVolumeSource), b.(*core.ProjectedVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ProjectedVolumeSource)(nil), (*corev1.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(a.(*core.ProjectedVolumeSource), b.(*corev1.ProjectedVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.QuobyteVolumeSource)(nil), (*core.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(a.(*corev1.QuobyteVolumeSource), b.(*core.QuobyteVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.QuobyteVolumeSource)(nil), (*corev1.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(a.(*core.QuobyteVolumeSource), b.(*corev1.QuobyteVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.RBDPersistentVolumeSource)(nil), (*core.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(a.(*corev1.RBDPersistentVolumeSource), b.(*core.RBDPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.RBDPersistentVolumeSource)(nil), (*corev1.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(a.(*core.RBDPersistentVolumeSource), b.(*corev1.RBDPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.RBDVolumeSource)(nil), (*core.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(a.(*corev1.RBDVolumeSource), b.(*core.RBDVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.RBDVolumeSource)(nil), (*corev1.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(a.(*core.RBDVolumeSource), b.(*corev1.RBDVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.RangeAllocation)(nil), (*core.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RangeAllocation_To_core_RangeAllocation(a.(*corev1.RangeAllocation), b.(*core.RangeAllocation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.RangeAllocation)(nil), (*corev1.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_RangeAllocation_To_v1_RangeAllocation(a.(*core.RangeAllocation), b.(*corev1.RangeAllocation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ReplicationController)(nil), (*core.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationController_To_core_ReplicationController(a.(*corev1.ReplicationController), b.(*core.ReplicationController), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ReplicationController)(nil), (*corev1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ReplicationController_To_v1_ReplicationController(a.(*core.ReplicationController), b.(*corev1.ReplicationController), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerCondition)(nil), (*core.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(a.(*corev1.ReplicationControllerCondition), b.(*core.ReplicationControllerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerCondition)(nil), (*corev1.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(a.(*core.ReplicationControllerCondition), b.(*corev1.ReplicationControllerCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerList)(nil), (*core.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(a.(*corev1.ReplicationControllerList), b.(*core.ReplicationControllerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerList)(nil), (*corev1.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(a.(*core.ReplicationControllerList), b.(*corev1.ReplicationControllerList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ReplicationControllerStatus)(nil), (*core.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(a.(*corev1.ReplicationControllerStatus), b.(*core.ReplicationControllerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerStatus)(nil), (*corev1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(a.(*core.ReplicationControllerStatus), b.(*corev1.ReplicationControllerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*corev1.ResourceClaim), b.(*core.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*corev1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*corev1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*corev1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceFieldSelector)(nil), (*corev1.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(a.(*core.ResourceFieldSelector), b.(*corev1.ResourceFieldSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceHealth)(nil), (*core.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceHealth_To_core_ResourceHealth(a.(*corev1.ResourceHealth), b.(*core.ResourceHealth), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceHealth)(nil), (*corev1.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceHealth_To_v1_ResourceHealth(a.(*core.ResourceHealth), b.(*corev1.ResourceHealth), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuota)(nil), (*core.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceQuota_To_core_ResourceQuota(a.(*corev1.ResourceQuota), b.(*core.ResourceQuota), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceQuota)(nil), (*corev1.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceQuota_To_v1_ResourceQuota(a.(*core.ResourceQuota), b.(*corev1.ResourceQuota), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaList)(nil), (*core.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(a.(*corev1.ResourceQuotaList), b.(*core.ResourceQuotaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaList)(nil), (*corev1.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(a.(*core.ResourceQuotaList), b.(*corev1.ResourceQuotaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaSpec)(nil), (*core.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(a.(*corev1.ResourceQuotaSpec), b.(*core.ResourceQuotaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaSpec)(nil), (*corev1.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(a.(*core.ResourceQuotaSpec), b.(*corev1.ResourceQuotaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceQuotaStatus)(nil), (*core.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(a.(*corev1.ResourceQuotaStatus), b.(*core.ResourceQuotaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaStatus)(nil), (*corev1.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(a.(*core.ResourceQuotaStatus), b.(*corev1.ResourceQuotaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceRequirements)(nil), (*core.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceRequirements_To_core_ResourceRequirements(a.(*corev1.ResourceRequirements), b.(*core.ResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceRequirements)(nil), (*corev1.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceRequirements_To_v1_ResourceRequirements(a.(*core.ResourceRequirements), b.(*corev1.ResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ResourceStatus)(nil), (*core.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceStatus_To_core_ResourceStatus(a.(*corev1.ResourceStatus), b.(*core.ResourceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceStatus)(nil), (*corev1.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceStatus_To_v1_ResourceStatus(a.(*core.ResourceStatus), b.(*corev1.ResourceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SELinuxOptions)(nil), (*core.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SELinuxOptions_To_core_SELinuxOptions(a.(*corev1.SELinuxOptions), b.(*core.SELinuxOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SELinuxOptions)(nil), (*corev1.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SELinuxOptions_To_v1_SELinuxOptions(a.(*core.SELinuxOptions), b.(*corev1.SELinuxOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ScaleIOPersistentVolumeSource)(nil), (*core.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(a.(*corev1.ScaleIOPersistentVolumeSource), b.(*core.ScaleIOPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ScaleIOPersistentVolumeSource)(nil), (*corev1.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(a.(*core.ScaleIOPersistentVolumeSource), b.(*corev1.ScaleIOPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ScaleIOVolumeSource)(nil), (*core.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(a.(*corev1.ScaleIOVolumeSource), b.(*core.ScaleIOVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ScaleIOVolumeSource)(nil), (*corev1.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(a.(*core.ScaleIOVolumeSource), b.(*corev1.ScaleIOVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ScopeSelector)(nil), (*core.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScopeSelector_To_core_ScopeSelector(a.(*corev1.ScopeSelector), b.(*core.ScopeSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ScopeSelector)(nil), (*corev1.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ScopeSelector_To_v1_ScopeSelector(a.(*core.ScopeSelector), b.(*corev1.ScopeSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ScopedResourceSelectorRequirement)(nil), (*core.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(a.(*corev1.ScopedResourceSelectorRequirement), b.(*core.ScopedResourceSelectorRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ScopedResourceSelectorRequirement)(nil), (*corev1.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(a.(*core.ScopedResourceSelectorRequirement), b.(*corev1.ScopedResourceSelectorRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SeccompProfile)(nil), (*core.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SeccompProfile_To_core_SeccompProfile(a.(*corev1.SeccompProfile), b.(*core.SeccompProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SeccompProfile)(nil), (*corev1.SeccompProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SeccompProfile_To_v1_SeccompProfile(a.(*core.SeccompProfile), b.(*corev1.SeccompProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*corev1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*corev1.Secret), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretEnvSource)(nil), (*core.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretEnvSource_To_core_SecretEnvSource(a.(*corev1.SecretEnvSource), b.(*core.SecretEnvSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretEnvSource)(nil), (*corev1.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretEnvSource_To_v1_SecretEnvSource(a.(*core.SecretEnvSource), b.(*corev1.SecretEnvSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretKeySelector)(nil), (*core.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretKeySelector_To_core_SecretKeySelector(a.(*corev1.SecretKeySelector), b.(*core.SecretKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretKeySelector)(nil), (*corev1.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretKeySelector_To_v1_SecretKeySelector(a.(*core.SecretKeySelector), b.(*corev1.SecretKeySelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretList)(nil), (*core.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretList_To_core_SecretList(a.(*corev1.SecretList), b.(*core.SecretList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretList)(nil), (*corev1.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretList_To_v1_SecretList(a.(*core.SecretList), b.(*corev1.SecretList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretProjection)(nil), (*core.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretProjection_To_core_SecretProjection(a.(*corev1.SecretProjection), b.(*core.SecretProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretProjection)(nil), (*corev1.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretProjection_To_v1_SecretProjection(a.(*core.SecretProjection), b.(*corev1.SecretProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretReference)(nil), (*core.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretReference_To_core_SecretReference(a.(*corev1.SecretReference), b.(*core.SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretReference)(nil), (*corev1.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretReference_To_v1_SecretReference(a.(*core.SecretReference), b.(*corev1.SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecretVolumeSource)(nil), (*core.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(a.(*corev1.SecretVolumeSource), b.(*core.SecretVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecretVolumeSource)(nil), (*corev1.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(a.(*core.SecretVolumeSource), b.(*corev1.SecretVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SecurityContext)(nil), (*core.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SecurityContext_To_core_SecurityContext(a.(*corev1.SecurityContext), b.(*core.SecurityContext), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SecurityContext)(nil), (*corev1.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SecurityContext_To_v1_SecurityContext(a.(*core.SecurityContext), b.(*corev1.SecurityContext), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SerializedReference)(nil), (*core.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SerializedReference_To_core_SerializedReference(a.(*corev1.SerializedReference), b.(*core.SerializedReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SerializedReference)(nil), (*corev1.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SerializedReference_To_v1_SerializedReference(a.(*core.SerializedReference), b.(*corev1.SerializedReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Service)(nil), (*core.Service)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Service_To_core_Service(a.(*corev1.Service), b.(*core.Service), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Service)(nil), (*corev1.Service)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Service_To_v1_Service(a.(*core.Service), b.(*corev1.Service), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccount)(nil), (*core.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceAccount_To_core_ServiceAccount(a.(*corev1.ServiceAccount), b.(*core.ServiceAccount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceAccount)(nil), (*corev1.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceAccount_To_v1_ServiceAccount(a.(*core.ServiceAccount), b.(*corev1.ServiceAccount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccountList)(nil), (*core.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceAccountList_To_core_ServiceAccountList(a.(*corev1.ServiceAccountList), b.(*core.ServiceAccountList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceAccountList)(nil), (*corev1.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceAccountList_To_v1_ServiceAccountList(a.(*core.ServiceAccountList), b.(*corev1.ServiceAccountList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceAccountTokenProjection)(nil), (*core.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(a.(*corev1.ServiceAccountTokenProjection), b.(*core.ServiceAccountTokenProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceAccountTokenProjection)(nil), (*corev1.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(a.(*core.ServiceAccountTokenProjection), b.(*corev1.ServiceAccountTokenProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceList)(nil), (*core.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceList_To_core_ServiceList(a.(*corev1.ServiceList), b.(*core.ServiceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceList)(nil), (*corev1.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceList_To_v1_ServiceList(a.(*core.ServiceList), b.(*corev1.ServiceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServicePort)(nil), (*core.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServicePort_To_core_ServicePort(a.(*corev1.ServicePort), b.(*core.ServicePort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServicePort)(nil), (*corev1.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServicePort_To_v1_ServicePort(a.(*core.ServicePort), b.(*corev1.ServicePort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceProxyOptions)(nil), (*core.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(a.(*corev1.ServiceProxyOptions), b.(*core.ServiceProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceProxyOptions)(nil), (*corev1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(a.(*core.ServiceProxyOptions), b.(*corev1.ServiceProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceSpec)(nil), (*core.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceSpec_To_core_ServiceSpec(a.(*corev1.ServiceSpec), b.(*core.ServiceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceSpec)(nil), (*corev1.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceSpec_To_v1_ServiceSpec(a.(*core.ServiceSpec), b.(*corev1.ServiceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.ServiceStatus)(nil), (*core.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceStatus_To_core_ServiceStatus(a.(*corev1.ServiceStatus), b.(*core.ServiceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ServiceStatus)(nil), (*corev1.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ServiceStatus_To_v1_ServiceStatus(a.(*core.ServiceStatus), b.(*corev1.ServiceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SessionAffinityConfig)(nil), (*core.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(a.(*corev1.SessionAffinityConfig), b.(*core.SessionAffinityConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SessionAffinityConfig)(nil), (*corev1.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(a.(*core.SessionAffinityConfig), b.(*corev1.SessionAffinityConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SleepAction_To_core_SleepAction(a.(*corev1.SleepAction), b.(*core.SleepAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*corev1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*corev1.SleepAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*corev1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.StorageOSPersistentVolumeSource)(nil), (*corev1.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(a.(*core.StorageOSPersistentVolumeSource), b.(*corev1.StorageOSPersistentVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.StorageOSVolumeSource)(nil), (*core.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(a.(*corev1.StorageOSVolumeSource), b.(*core.StorageOSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.StorageOSVolumeSource)(nil), (*corev1.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(a.(*core.StorageOSVolumeSource), b.(*corev1.StorageOSVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Sysctl)(nil), (*core.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Sysctl_To_core_Sysctl(a.(*corev1.Sysctl), b.(*core.Sysctl), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Sysctl)(nil), (*corev1.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Sysctl_To_v1_Sysctl(a.(*core.Sysctl), b.(*corev1.Sysctl), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TCPSocketAction)(nil), (*core.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TCPSocketAction_To_core_TCPSocketAction(a.(*corev1.TCPSocketAction), b.(*core.TCPSocketAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TCPSocketAction)(nil), (*corev1.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TCPSocketAction_To_v1_TCPSocketAction(a.(*core.TCPSocketAction), b.(*corev1.TCPSocketAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Taint)(nil), (*core.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Taint_To_core_Taint(a.(*corev1.Taint), b.(*core.Taint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Taint)(nil), (*corev1.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Taint_To_v1_Taint(a.(*core.Taint), b.(*corev1.Taint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Toleration_To_core_Toleration(a.(*corev1.Toleration), b.(*core.Toleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*corev1.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Toleration_To_v1_Toleration(a.(*core.Toleration), b.(*corev1.Toleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TopologySelectorLabelRequirement)(nil), (*core.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(a.(*corev1.TopologySelectorLabelRequirement), b.(*core.TopologySelectorLabelRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TopologySelectorLabelRequirement)(nil), (*corev1.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(a.(*core.TopologySelectorLabelRequirement), b.(*corev1.TopologySelectorLabelRequirement), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TopologySelectorTerm)(nil), (*core.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(a.(*corev1.TopologySelectorTerm), b.(*core.TopologySelectorTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TopologySelectorTerm)(nil), (*corev1.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(a.(*core.TopologySelectorTerm), b.(*corev1.TopologySelectorTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TopologySpreadConstraint)(nil), (*core.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(a.(*corev1.TopologySpreadConstraint), b.(*core.TopologySpreadConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TopologySpreadConstraint)(nil), (*corev1.TopologySpreadConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(a.(*core.TopologySpreadConstraint), b.(*corev1.TopologySpreadConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TypedLocalObjectReference)(nil), (*core.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(a.(*corev1.TypedLocalObjectReference), b.(*core.TypedLocalObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TypedLocalObjectReference)(nil), (*corev1.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(a.(*core.TypedLocalObjectReference), b.(*corev1.TypedLocalObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.TypedObjectReference)(nil), (*core.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TypedObjectReference_To_core_TypedObjectReference(a.(*corev1.TypedObjectReference), b.(*core.TypedObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TypedObjectReference)(nil), (*corev1.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TypedObjectReference_To_v1_TypedObjectReference(a.(*core.TypedObjectReference), b.(*corev1.TypedObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Volume_To_core_Volume(a.(*corev1.Volume), b.(*core.Volume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*corev1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*corev1.Volume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*corev1.VolumeDevice), b.(*core.VolumeDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeDevice)(nil), (*corev1.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeDevice_To_v1_VolumeDevice(a.(*core.VolumeDevice), b.(*corev1.VolumeDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeMount)(nil), (*core.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeMount_To_core_VolumeMount(a.(*corev1.VolumeMount), b.(*core.VolumeMount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeMount)(nil), (*corev1.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeMount_To_v1_VolumeMount(a.(*core.VolumeMount), b.(*corev1.VolumeMount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeMountStatus)(nil), (*core.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(a.(*corev1.VolumeMountStatus), b.(*core.VolumeMountStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeMountStatus)(nil), (*corev1.VolumeMountStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(a.(*core.VolumeMountStatus), b.(*corev1.VolumeMountStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeNodeAffinity)(nil), (*core.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(a.(*corev1.VolumeNodeAffinity), b.(*core.VolumeNodeAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeNodeAffinity)(nil), (*corev1.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(a.(*core.VolumeNodeAffinity), b.(*corev1.VolumeNodeAffinity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeProjection)(nil), (*core.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeProjection_To_core_VolumeProjection(a.(*corev1.VolumeProjection), b.(*core.VolumeProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeProjection)(nil), (*corev1.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeProjection_To_v1_VolumeProjection(a.(*core.VolumeProjection), b.(*corev1.VolumeProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeResourceRequirements)(nil), (*core.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(a.(*corev1.VolumeResourceRequirements), b.(*core.VolumeResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeResourceRequirements)(nil), (*corev1.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(a.(*core.VolumeResourceRequirements), b.(*corev1.VolumeResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeSource_To_core_VolumeSource(a.(*corev1.VolumeSource), b.(*core.VolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeSource)(nil), (*corev1.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeSource_To_v1_VolumeSource(a.(*core.VolumeSource), b.(*corev1.VolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.VsphereVirtualDiskVolumeSource)(nil), (*core.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(a.(*corev1.VsphereVirtualDiskVolumeSource), b.(*core.VsphereVirtualDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VsphereVirtualDiskVolumeSource)(nil), (*corev1.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(a.(*core.VsphereVirtualDiskVolumeSource), b.(*corev1.VsphereVirtualDiskVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.WeightedPodAffinityTerm)(nil), (*core.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(a.(*corev1.WeightedPodAffinityTerm), b.(*core.WeightedPodAffinityTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.WeightedPodAffinityTerm)(nil), (*corev1.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(a.(*core.WeightedPodAffinityTerm), b.(*corev1.WeightedPodAffinityTerm), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*corev1.WindowsSecurityContextOptions)(nil), (*core.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(a.(*corev1.WindowsSecurityContextOptions), b.(*core.WindowsSecurityContextOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.WindowsSecurityContextOptions)(nil), (*corev1.WindowsSecurityContextOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(a.(*core.WindowsSecurityContextOptions), b.(*corev1.WindowsSecurityContextOptions), scope)
}); err != nil {
return err
}
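	// Editorial note (not generated): the registrations below decode URL
	// query parameters (url.Values) into typed options objects; they are
	// one-directional because the API server only ever parses options from
	// incoming request URLs, never serializes them back.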
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_NodeProxyOptions(a.(*url.Values), b.(*corev1.NodeProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_PodAttachOptions(a.(*url.Values), b.(*corev1.PodAttachOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_PodExecOptions(a.(*url.Values), b.(*corev1.PodExecOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_PodLogOptions(a.(*url.Values), b.(*corev1.PodLogOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_PodPortForwardOptions(a.(*url.Values), b.(*corev1.PodPortForwardOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_PodProxyOptions(a.(*url.Values), b.(*corev1.PodProxyOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*corev1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_url_Values_To_v1_ServiceProxyOptions(a.(*url.Values), b.(*corev1.ServiceProxyOptions), scope)
}); err != nil {
return err
}
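	// Editorial note (not generated): unlike the AddGeneratedConversionFunc
	// calls above, the AddConversionFunc registrations below wire up
	// hand-written conversions for types whose fields need custom logic that
	// conversion-gen cannot produce automatically (for example Pod, PodSpec,
	// and PersistentVolumeSpec).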
if err := s.AddConversionFunc((*[]string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_Pointer_string(a.(*[]string), b.(**string), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*corev1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*corev1.ReplicationControllerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*corev1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*corev1.ReplicationControllerStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*corev1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*corev1.ReplicationController), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.NodeSpec)(nil), (*corev1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*corev1.NodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.PersistentVolumeSpec)(nil), (*corev1.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(a.(*core.PersistentVolumeSpec), b.(*corev1.PersistentVolumeSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.PodSpec)(nil), (*corev1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*corev1.PodSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.PodStatus)(nil), (*corev1.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodStatus_To_v1_PodStatus(a.(*core.PodStatus), b.(*corev1.PodStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.PodTemplateSpec)(nil), (*corev1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*corev1.PodTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.Pod)(nil), (*corev1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Pod_To_v1_Pod(a.(*core.Pod), b.(*corev1.Pod), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.ReplicationControllerSpec)(nil), (*corev1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*corev1.ReplicationControllerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*corev1.NodeSpec), b.(*core.NodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.PersistentVolumeSpec)(nil), (*core.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(a.(*corev1.PersistentVolumeSpec), b.(*core.PersistentVolumeSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSpec_To_core_PodSpec(a.(*corev1.PodSpec), b.(*core.PodSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.PodStatus)(nil), (*core.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodStatus_To_core_PodStatus(a.(*corev1.PodStatus), b.(*core.PodStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*corev1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Pod_To_core_Pod(a.(*corev1.Pod), b.(*core.Pod), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*corev1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*corev1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*corev1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*corev1.ReplicationController), b.(*apps.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.ResourceList)(nil), (*core.ResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceList_To_core_ResourceList(a.(*corev1.ResourceList), b.(*core.ResourceList), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*corev1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Secret_To_core_Secret(a.(*corev1.Secret), b.(*core.Secret), scope)
}); err != nil {
return err
}
return nil
}
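// convertPodExample is an illustrative sketch, not generated code: it shows
// how a caller typically reaches the conversions registered above, going
// through a runtime.Scheme on which RegisterConversions has been run. The
// function name is hypothetical and the function is not referenced elsewhere
// in this package.
func convertPodExample(s *runtime.Scheme, in *corev1.Pod) (*core.Pod, error) {
	out := &core.Pod{}
	// Convert looks up the registered pair for these two types (here
	// Convert_v1_Pod_To_core_Pod, added via AddConversionFunc above) and
	// applies it.
	if err := s.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}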
func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *corev1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *corev1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s)
}
func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *corev1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *corev1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s)
}
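// Editorial note (not generated): many of the conversions that follow use
// unsafe.Pointer casts rather than field-by-field copies. conversion-gen
// emits a direct cast whenever the versioned and internal types share an
// identical memory layout, so the conversion is a pure reinterpretation with
// no allocation; the casts remain safe only while the two struct definitions
// stay field-for-field identical.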
func autoConvert_v1_Affinity_To_core_Affinity(in *corev1.Affinity, out *core.Affinity, s conversion.Scope) error {
out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity))
out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
return nil
}
// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function.
func Convert_v1_Affinity_To_core_Affinity(in *corev1.Affinity, out *core.Affinity, s conversion.Scope) error {
return autoConvert_v1_Affinity_To_core_Affinity(in, out, s)
}
func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *corev1.Affinity, s conversion.Scope) error {
out.NodeAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.PodAffinity = (*corev1.PodAffinity)(unsafe.Pointer(in.PodAffinity))
out.PodAntiAffinity = (*corev1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
return nil
}
// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function.
func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *corev1.Affinity, s conversion.Scope) error {
return autoConvert_core_Affinity_To_v1_Affinity(in, out, s)
}
func autoConvert_v1_AppArmorProfile_To_core_AppArmorProfile(in *corev1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
out.Type = core.AppArmorProfileType(in.Type)
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
return nil
}
// Convert_v1_AppArmorProfile_To_core_AppArmorProfile is an autogenerated conversion function.
func Convert_v1_AppArmorProfile_To_core_AppArmorProfile(in *corev1.AppArmorProfile, out *core.AppArmorProfile, s conversion.Scope) error {
return autoConvert_v1_AppArmorProfile_To_core_AppArmorProfile(in, out, s)
}
func autoConvert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *corev1.AppArmorProfile, s conversion.Scope) error {
out.Type = corev1.AppArmorProfileType(in.Type)
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
return nil
}
// Convert_core_AppArmorProfile_To_v1_AppArmorProfile is an autogenerated conversion function.
func Convert_core_AppArmorProfile_To_v1_AppArmorProfile(in *core.AppArmorProfile, out *corev1.AppArmorProfile, s conversion.Scope) error {
return autoConvert_core_AppArmorProfile_To_v1_AppArmorProfile(in, out, s)
}
func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *corev1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
out.Name = core.UniqueVolumeName(in.Name)
out.DevicePath = in.DevicePath
return nil
}
// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function.
func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *corev1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s)
}
func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *corev1.AttachedVolume, s conversion.Scope) error {
out.Name = corev1.UniqueVolumeName(in.Name)
out.DevicePath = in.DevicePath
return nil
}
// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function.
func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *corev1.AttachedVolume, s conversion.Scope) error {
return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s)
}
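// Editorial note (not generated): the *(*[]core.T)(unsafe.Pointer(&in.Field))
// pattern below reinterprets the slice header in place, sharing the backing
// array instead of copying elements; like the pointer casts above, it relies
// on the element types having the same memory layout.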
func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *corev1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
return nil
}
// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function.
func Convert_v1_AvoidPods_To_core_AvoidPods(in *corev1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s)
}
func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *corev1.AvoidPods, s conversion.Scope) error {
out.PreferAvoidPods = *(*[]corev1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
return nil
}
// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function.
func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *corev1.AvoidPods, s conversion.Scope) error {
return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s)
}
func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *corev1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
out.DiskName = in.DiskName
out.DataDiskURI = in.DataDiskURI
out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.Kind = (*core.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
return nil
}
// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function.
func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *corev1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s)
}
func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *corev1.AzureDiskVolumeSource, s conversion.Scope) error {
out.DiskName = in.DiskName
out.DataDiskURI = in.DataDiskURI
out.CachingMode = (*corev1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.Kind = (*corev1.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
return nil
}
// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function.
func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *corev1.AzureDiskVolumeSource, s conversion.Scope) error {
return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s)
}
func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *corev1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
out.ReadOnly = in.ReadOnly
out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace))
return nil
}
// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *corev1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s)
}
func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *corev1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
out.ReadOnly = in.ReadOnly
out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace))
return nil
}
// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function.
func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *corev1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s)
}
func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *corev1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function.
func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *corev1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s)
}
func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *corev1.AzureFileVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.ShareName = in.ShareName
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function.
func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *corev1.AzureFileVolumeSource, s conversion.Scope) error {
return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s)
}
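// Editorial note (not generated): where a field cannot be converted by plain
// assignment or cast, the generated code delegates to that field's own
// Convert function, as the Binding conversions below do for their Target
// ObjectReference.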
func autoConvert_v1_Binding_To_core_Binding(in *corev1.Binding, out *core.Binding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_v1_Binding_To_core_Binding is an autogenerated conversion function.
func Convert_v1_Binding_To_core_Binding(in *corev1.Binding, out *core.Binding, s conversion.Scope) error {
return autoConvert_v1_Binding_To_core_Binding(in, out, s)
}
func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *corev1.Binding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil {
return err
}
return nil
}
// Convert_core_Binding_To_v1_Binding is an autogenerated conversion function.
func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *corev1.Binding, s conversion.Scope) error {
return autoConvert_core_Binding_To_v1_Binding(in, out, s)
}
func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *corev1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
out.ControllerExpandSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef))
out.NodeExpandSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeExpandSecretRef))
return nil
}
// Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *corev1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s)
}
func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *corev1.CSIPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.VolumeHandle = in.VolumeHandle
out.ReadOnly = in.ReadOnly
out.FSType = in.FSType
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.ControllerPublishSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
out.NodeStageSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
out.NodePublishSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
out.ControllerExpandSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.ControllerExpandSecretRef))
out.NodeExpandSecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.NodeExpandSecretRef))
return nil
}
// Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *corev1.CSIPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s)
}
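// Note: casts of the form *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
// reinterpret the map header in place instead of allocating a new map and
// copying entries, so in and out share the same backing storage afterwards.
// This only holds while the key and value types are identical in both packages.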
func autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *corev1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.NodePublishSecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
// Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource is an autogenerated conversion function.
func Convert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in *corev1.CSIVolumeSource, out *core.CSIVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CSIVolumeSource_To_core_CSIVolumeSource(in, out, s)
}
func autoConvert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *corev1.CSIVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
out.NodePublishSecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.NodePublishSecretRef))
return nil
}
// Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource is an autogenerated conversion function.
func Convert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in *core.CSIVolumeSource, out *corev1.CSIVolumeSource, s conversion.Scope) error {
return autoConvert_core_CSIVolumeSource_To_v1_CSIVolumeSource(in, out, s)
}
func autoConvert_v1_Capabilities_To_core_Capabilities(in *corev1.Capabilities, out *core.Capabilities, s conversion.Scope) error {
out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add))
out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop))
return nil
}
// Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function.
func Convert_v1_Capabilities_To_core_Capabilities(in *corev1.Capabilities, out *core.Capabilities, s conversion.Scope) error {
return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s)
}
func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *corev1.Capabilities, s conversion.Scope) error {
out.Add = *(*[]corev1.Capability)(unsafe.Pointer(&in.Add))
out.Drop = *(*[]corev1.Capability)(unsafe.Pointer(&in.Drop))
return nil
}
// Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function.
func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *corev1.Capabilities, s conversion.Scope) error {
return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s)
}
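// Note: the slice casts above work because core.Capability and
// corev1.Capability are both defined as string, so the slice headers and
// element layouts match exactly; the converted slice aliases the original
// backing array rather than copying it.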
func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *corev1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error {
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
out.Path = in.Path
out.User = in.User
out.SecretFile = in.SecretFile
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *corev1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s)
}
func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *corev1.CephFSPersistentVolumeSource, s conversion.Scope) error {
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
out.Path = in.Path
out.User = in.User
out.SecretFile = in.SecretFile
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *corev1.CephFSPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *corev1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error {
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
out.Path = in.Path
out.User = in.User
out.SecretFile = in.SecretFile
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function.
func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *corev1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s)
}
func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *corev1.CephFSVolumeSource, s conversion.Scope) error {
out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
out.Path = in.Path
out.User = in.User
out.SecretFile = in.SecretFile
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function.
func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *corev1.CephFSVolumeSource, s conversion.Scope) error {
return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s)
}
func autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *corev1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *corev1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in, out, s)
}
func autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *corev1.CinderPersistentVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *corev1.CinderPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *corev1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function.
func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *corev1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error {
return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s)
}
func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *corev1.CinderVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function.
func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *corev1.CinderVolumeSource, s conversion.Scope) error {
return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
}
func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *corev1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
return nil
}
// Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function.
func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *corev1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s)
}
func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *corev1.ClientIPConfig, s conversion.Scope) error {
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
return nil
}
// Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function.
func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *corev1.ClientIPConfig, s conversion.Scope) error {
return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s)
}
func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *corev1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.SignerName = (*string)(unsafe.Pointer(in.SignerName))
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
out.Path = in.Path
return nil
}
// Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection is an autogenerated conversion function.
func Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *corev1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error {
return autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in, out, s)
}
func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *corev1.ClusterTrustBundleProjection, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.SignerName = (*string)(unsafe.Pointer(in.SignerName))
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
out.Path = in.Path
return nil
}
// Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection is an autogenerated conversion function.
func Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *corev1.ClusterTrustBundleProjection, s conversion.Scope) error {
return autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in, out, s)
}
func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *corev1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error {
out.Type = core.ComponentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.Message = in.Message
out.Error = in.Error
return nil
}
// Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function.
func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *corev1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error {
return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s)
}
func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *corev1.ComponentCondition, s conversion.Scope) error {
out.Type = corev1.ComponentConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.Message = in.Message
out.Error = in.Error
return nil
}
// Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function.
func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *corev1.ComponentCondition, s conversion.Scope) error {
return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s)
}
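// Note: string-kinded enums such as ComponentConditionType are converted with
// ordinary Go type conversions; the value is copied by assignment, so no
// unsafe reinterpretation is needed. unsafe.Pointer is reserved for pointer,
// slice, and map fields where an element-wise copy would otherwise be required.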
func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *corev1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function.
func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *corev1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error {
return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s)
}
func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *corev1.ComponentStatus, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Conditions = *(*[]corev1.ComponentCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function.
func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *corev1.ComponentStatus, s conversion.Scope) error {
return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s)
}
func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *corev1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function.
func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *corev1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error {
return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s)
}
func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *corev1.ComponentStatusList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.ComponentStatus)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function.
func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *corev1.ComponentStatusList, s conversion.Scope) error {
return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s)
}
func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *corev1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Immutable = (*bool)(unsafe.Pointer(in.Immutable))
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
// Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function.
func Convert_v1_ConfigMap_To_core_ConfigMap(in *corev1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s)
}
func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *corev1.ConfigMap, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Immutable = (*bool)(unsafe.Pointer(in.Immutable))
out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
return nil
}
// Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function.
func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *corev1.ConfigMap, s conversion.Scope) error {
return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s)
}
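// convertConfigMapExample is a hypothetical usage sketch, not part of the
// generated API: it shows how a caller might drive the generated wrapper
// directly. Passing a nil conversion.Scope is assumed to be acceptable here
// because this particular conversion never consults the scope; scope-aware
// conversions would need a real scope from a runtime.Scheme.
func convertConfigMapExample(in *corev1.ConfigMap) (*core.ConfigMap, error) {
	out := &core.ConfigMap{}
	if err := Convert_v1_ConfigMap_To_core_ConfigMap(in, out, nil); err != nil {
		return nil, err
	}
	// out.Data and out.BinaryData alias the input maps (see the unsafe casts
	// above), so mutating one side is visible through the other.
	return out, nil
}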
func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *corev1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function.
func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *corev1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error {
return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s)
}
func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *corev1.ConfigMapEnvSource, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function.
func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *corev1.ConfigMapEnvSource, s conversion.Scope) error {
return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s)
}
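// Note: embedded structs such as LocalObjectReference are converted through
// their generated Convert_* helpers rather than an unsafe cast. The generator
// falls back to a function call when it does not treat the two layouts as
// interchangeable, which also propagates any error from the nested conversion.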
func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *corev1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function.
func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *corev1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error {
return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s)
}
func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *corev1.ConfigMapKeySelector, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function.
func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *corev1.ConfigMapKeySelector, s conversion.Scope) error {
return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s)
}
func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *corev1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function.
func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *corev1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error {
return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s)
}
func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *corev1.ConfigMapList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.ConfigMap)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function.
func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *corev1.ConfigMapList, s conversion.Scope) error {
return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s)
}
func autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *corev1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
out.ResourceVersion = in.ResourceVersion
out.KubeletConfigKey = in.KubeletConfigKey
return nil
}
// Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource is an autogenerated conversion function.
func Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *corev1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error {
return autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in, out, s)
}
func autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *corev1.ConfigMapNodeConfigSource, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
out.ResourceVersion = in.ResourceVersion
out.KubeletConfigKey = in.KubeletConfigKey
return nil
}
// Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource is an autogenerated conversion function.
func Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *corev1.ConfigMapNodeConfigSource, s conversion.Scope) error {
return autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in, out, s)
}
func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *corev1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function.
func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *corev1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error {
return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s)
}
func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *corev1.ConfigMapProjection, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function.
func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *corev1.ConfigMapProjection, s conversion.Scope) error {
return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s)
}
func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *corev1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function.
func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *corev1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s)
}
func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *corev1.ConfigMapVolumeSource, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function.
func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *corev1.ConfigMapVolumeSource, s conversion.Scope) error {
return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s)
}
func autoConvert_v1_Container_To_core_Container(in *corev1.Container, out *core.Container, s conversion.Scope) error {
out.Name = in.Name
out.Image = in.Image
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.WorkingDir = in.WorkingDir
out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports))
out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env))
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*core.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.RestartPolicyRules = *(*[]core.ContainerRestartRule)(unsafe.Pointer(&in.RestartPolicyRules))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe))
out.StartupProbe = (*core.Probe)(unsafe.Pointer(in.StartupProbe))
out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle))
out.TerminationMessagePath = in.TerminationMessagePath
out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy)
out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy)
out.SecurityContext = (*core.SecurityContext)(unsafe.Pointer(in.SecurityContext))
out.Stdin = in.Stdin
out.StdinOnce = in.StdinOnce
out.TTY = in.TTY
return nil
}
// Convert_v1_Container_To_core_Container is an autogenerated conversion function.
func Convert_v1_Container_To_core_Container(in *corev1.Container, out *core.Container, s conversion.Scope) error {
return autoConvert_v1_Container_To_core_Container(in, out, s)
}
func autoConvert_core_Container_To_v1_Container(in *core.Container, out *corev1.Container, s conversion.Scope) error {
out.Name = in.Name
out.Image = in.Image
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.WorkingDir = in.WorkingDir
out.Ports = *(*[]corev1.ContainerPort)(unsafe.Pointer(&in.Ports))
out.EnvFrom = *(*[]corev1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env))
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]corev1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*corev1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.RestartPolicyRules = *(*[]corev1.ContainerRestartRule)(unsafe.Pointer(&in.RestartPolicyRules))
out.VolumeMounts = *(*[]corev1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]corev1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*corev1.Probe)(unsafe.Pointer(in.LivenessProbe))
out.ReadinessProbe = (*corev1.Probe)(unsafe.Pointer(in.ReadinessProbe))
out.StartupProbe = (*corev1.Probe)(unsafe.Pointer(in.StartupProbe))
out.Lifecycle = (*corev1.Lifecycle)(unsafe.Pointer(in.Lifecycle))
out.TerminationMessagePath = in.TerminationMessagePath
out.TerminationMessagePolicy = corev1.TerminationMessagePolicy(in.TerminationMessagePolicy)
out.ImagePullPolicy = corev1.PullPolicy(in.ImagePullPolicy)
out.SecurityContext = (*corev1.SecurityContext)(unsafe.Pointer(in.SecurityContext))
out.Stdin = in.Stdin
out.StdinOnce = in.StdinOnce
out.TTY = in.TTY
return nil
}
// Convert_core_Container_To_v1_Container is an autogenerated conversion function.
func Convert_core_Container_To_v1_Container(in *core.Container, out *corev1.Container, s conversion.Scope) error {
return autoConvert_core_Container_To_v1_Container(in, out, s)
}
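// The Container conversion combines all three strategies used in this file:
// plain assignment for scalar fields, unsafe.Pointer reinterpretation for
// layout-identical pointers and slices, and a delegated call for Resources,
// whose ResourceRequirements types evidently require (or have registered) a
// dedicated conversion rather than a direct cast.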
func autoConvert_v1_ContainerExtendedResourceRequest_To_core_ContainerExtendedResourceRequest(in *corev1.ContainerExtendedResourceRequest, out *core.ContainerExtendedResourceRequest, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.ResourceName = in.ResourceName
out.RequestName = in.RequestName
return nil
}
// Convert_v1_ContainerExtendedResourceRequest_To_core_ContainerExtendedResourceRequest is an autogenerated conversion function.
func Convert_v1_ContainerExtendedResourceRequest_To_core_ContainerExtendedResourceRequest(in *corev1.ContainerExtendedResourceRequest, out *core.ContainerExtendedResourceRequest, s conversion.Scope) error {
return autoConvert_v1_ContainerExtendedResourceRequest_To_core_ContainerExtendedResourceRequest(in, out, s)
}
func autoConvert_core_ContainerExtendedResourceRequest_To_v1_ContainerExtendedResourceRequest(in *core.ContainerExtendedResourceRequest, out *corev1.ContainerExtendedResourceRequest, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.ResourceName = in.ResourceName
out.RequestName = in.RequestName
return nil
}
// Convert_core_ContainerExtendedResourceRequest_To_v1_ContainerExtendedResourceRequest is an autogenerated conversion function.
func Convert_core_ContainerExtendedResourceRequest_To_v1_ContainerExtendedResourceRequest(in *core.ContainerExtendedResourceRequest, out *corev1.ContainerExtendedResourceRequest, s conversion.Scope) error {
return autoConvert_core_ContainerExtendedResourceRequest_To_v1_ContainerExtendedResourceRequest(in, out, s)
}
func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *corev1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error {
out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
out.SizeBytes = in.SizeBytes
return nil
}
// Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function.
func Convert_v1_ContainerImage_To_core_ContainerImage(in *corev1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error {
return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s)
}
func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *corev1.ContainerImage, s conversion.Scope) error {
out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
out.SizeBytes = in.SizeBytes
return nil
}
// Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function.
func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *corev1.ContainerImage, s conversion.Scope) error {
return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s)
}
func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *corev1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error {
out.Name = in.Name
out.HostPort = in.HostPort
out.ContainerPort = in.ContainerPort
out.Protocol = core.Protocol(in.Protocol)
out.HostIP = in.HostIP
return nil
}
// Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function.
func Convert_v1_ContainerPort_To_core_ContainerPort(in *corev1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error {
return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s)
}
func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *corev1.ContainerPort, s conversion.Scope) error {
out.Name = in.Name
out.HostPort = in.HostPort
out.ContainerPort = in.ContainerPort
out.Protocol = corev1.Protocol(in.Protocol)
out.HostIP = in.HostIP
return nil
}
// Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function.
func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *corev1.ContainerPort, s conversion.Scope) error {
return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s)
}
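// roundTripContainerPortExample is a hypothetical sketch, not generated code:
// it illustrates that these conversions are lossless when every field maps
// one-to-one, so converting v1 -> core -> v1 reproduces the original value.
// A nil scope is assumed to be safe because neither direction uses it.
func roundTripContainerPortExample(in corev1.ContainerPort) (corev1.ContainerPort, error) {
	var mid core.ContainerPort
	if err := Convert_v1_ContainerPort_To_core_ContainerPort(&in, &mid, nil); err != nil {
		return corev1.ContainerPort{}, err
	}
	var out corev1.ContainerPort
	if err := Convert_core_ContainerPort_To_v1_ContainerPort(&mid, &out, nil); err != nil {
		return corev1.ContainerPort{}, err
	}
	return out, nil // expected to equal in, field for field
}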
func autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *corev1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = core.ResourceName(in.ResourceName)
out.RestartPolicy = core.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy is an autogenerated conversion function.
func Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *corev1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in, out, s)
}
func autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *corev1.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = corev1.ResourceName(in.ResourceName)
out.RestartPolicy = corev1.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy is an autogenerated conversion function.
func Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *corev1.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in, out, s)
}
func autoConvert_v1_ContainerRestartRule_To_core_ContainerRestartRule(in *corev1.ContainerRestartRule, out *core.ContainerRestartRule, s conversion.Scope) error {
out.Action = core.ContainerRestartRuleAction(in.Action)
out.ExitCodes = (*core.ContainerRestartRuleOnExitCodes)(unsafe.Pointer(in.ExitCodes))
return nil
}
// Convert_v1_ContainerRestartRule_To_core_ContainerRestartRule is an autogenerated conversion function.
func Convert_v1_ContainerRestartRule_To_core_ContainerRestartRule(in *corev1.ContainerRestartRule, out *core.ContainerRestartRule, s conversion.Scope) error {
return autoConvert_v1_ContainerRestartRule_To_core_ContainerRestartRule(in, out, s)
}
func autoConvert_core_ContainerRestartRule_To_v1_ContainerRestartRule(in *core.ContainerRestartRule, out *corev1.ContainerRestartRule, s conversion.Scope) error {
out.Action = corev1.ContainerRestartRuleAction(in.Action)
out.ExitCodes = (*corev1.ContainerRestartRuleOnExitCodes)(unsafe.Pointer(in.ExitCodes))
return nil
}
// Convert_core_ContainerRestartRule_To_v1_ContainerRestartRule is an autogenerated conversion function.
func Convert_core_ContainerRestartRule_To_v1_ContainerRestartRule(in *core.ContainerRestartRule, out *corev1.ContainerRestartRule, s conversion.Scope) error {
return autoConvert_core_ContainerRestartRule_To_v1_ContainerRestartRule(in, out, s)
}
func autoConvert_v1_ContainerRestartRuleOnExitCodes_To_core_ContainerRestartRuleOnExitCodes(in *corev1.ContainerRestartRuleOnExitCodes, out *core.ContainerRestartRuleOnExitCodes, s conversion.Scope) error {
out.Operator = core.ContainerRestartRuleOnExitCodesOperator(in.Operator)
out.Values = *(*[]int32)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_v1_ContainerRestartRuleOnExitCodes_To_core_ContainerRestartRuleOnExitCodes is an autogenerated conversion function.
func Convert_v1_ContainerRestartRuleOnExitCodes_To_core_ContainerRestartRuleOnExitCodes(in *corev1.ContainerRestartRuleOnExitCodes, out *core.ContainerRestartRuleOnExitCodes, s conversion.Scope) error {
return autoConvert_v1_ContainerRestartRuleOnExitCodes_To_core_ContainerRestartRuleOnExitCodes(in, out, s)
}
func autoConvert_core_ContainerRestartRuleOnExitCodes_To_v1_ContainerRestartRuleOnExitCodes(in *core.ContainerRestartRuleOnExitCodes, out *corev1.ContainerRestartRuleOnExitCodes, s conversion.Scope) error {
out.Operator = corev1.ContainerRestartRuleOnExitCodesOperator(in.Operator)
out.Values = *(*[]int32)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_core_ContainerRestartRuleOnExitCodes_To_v1_ContainerRestartRuleOnExitCodes is an autogenerated conversion function.
func Convert_core_ContainerRestartRuleOnExitCodes_To_v1_ContainerRestartRuleOnExitCodes(in *core.ContainerRestartRuleOnExitCodes, out *corev1.ContainerRestartRuleOnExitCodes, s conversion.Scope) error {
return autoConvert_core_ContainerRestartRuleOnExitCodes_To_v1_ContainerRestartRuleOnExitCodes(in, out, s)
}
func autoConvert_v1_ContainerState_To_core_ContainerState(in *corev1.ContainerState, out *core.ContainerState, s conversion.Scope) error {
out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting))
out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running))
out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated))
return nil
}
// Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function.
func Convert_v1_ContainerState_To_core_ContainerState(in *corev1.ContainerState, out *core.ContainerState, s conversion.Scope) error {
return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s)
}
func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *corev1.ContainerState, s conversion.Scope) error {
out.Waiting = (*corev1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting))
out.Running = (*corev1.ContainerStateRunning)(unsafe.Pointer(in.Running))
out.Terminated = (*corev1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated))
return nil
}
// Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function.
func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *corev1.ContainerState, s conversion.Scope) error {
return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s)
}
func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *corev1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error {
out.StartedAt = in.StartedAt
return nil
}
// Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function.
func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *corev1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error {
return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s)
}
func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *corev1.ContainerStateRunning, s conversion.Scope) error {
out.StartedAt = in.StartedAt
return nil
}
// Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function.
func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *corev1.ContainerStateRunning, s conversion.Scope) error {
return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s)
}
func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *corev1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error {
out.ExitCode = in.ExitCode
out.Signal = in.Signal
out.Reason = in.Reason
out.Message = in.Message
out.StartedAt = in.StartedAt
out.FinishedAt = in.FinishedAt
out.ContainerID = in.ContainerID
return nil
}
// Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function.
func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *corev1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error {
return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s)
}
func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *corev1.ContainerStateTerminated, s conversion.Scope) error {
out.ExitCode = in.ExitCode
out.Signal = in.Signal
out.Reason = in.Reason
out.Message = in.Message
out.StartedAt = in.StartedAt
out.FinishedAt = in.FinishedAt
out.ContainerID = in.ContainerID
return nil
}
// Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function.
func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *corev1.ContainerStateTerminated, s conversion.Scope) error {
return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s)
}
func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *corev1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error {
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function.
func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *corev1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error {
return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s)
}
func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *corev1.ContainerStateWaiting, s conversion.Scope) error {
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function.
func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *corev1.ContainerStateWaiting, s conversion.Scope) error {
return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s)
}
func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *corev1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil {
return err
}
if err := Convert_v1_ContainerState_To_core_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
return err
}
out.Ready = in.Ready
out.RestartCount = in.RestartCount
out.Image = in.Image
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.VolumeMounts = *(*[]core.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
out.User = (*core.ContainerUser)(unsafe.Pointer(in.User))
out.AllocatedResourcesStatus = *(*[]core.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
out.StopSignal = (*core.Signal)(unsafe.Pointer(in.StopSignal))
return nil
}
// Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function.
func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *corev1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error {
return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s)
}
func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *corev1.ContainerStatus, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil {
return err
}
if err := Convert_core_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
return err
}
out.Ready = in.Ready
out.RestartCount = in.RestartCount
out.Image = in.Image
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*corev1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*corev1.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.VolumeMounts = *(*[]corev1.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
out.User = (*corev1.ContainerUser)(unsafe.Pointer(in.User))
out.AllocatedResourcesStatus = *(*[]corev1.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
out.StopSignal = (*corev1.Signal)(unsafe.Pointer(in.StopSignal))
return nil
}
// Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function.
func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *corev1.ContainerStatus, s conversion.Scope) error {
return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s)
}
func autoConvert_v1_ContainerUser_To_core_ContainerUser(in *corev1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error {
out.Linux = (*core.LinuxContainerUser)(unsafe.Pointer(in.Linux))
return nil
}
// Convert_v1_ContainerUser_To_core_ContainerUser is an autogenerated conversion function.
func Convert_v1_ContainerUser_To_core_ContainerUser(in *corev1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error {
return autoConvert_v1_ContainerUser_To_core_ContainerUser(in, out, s)
}
func autoConvert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *corev1.ContainerUser, s conversion.Scope) error {
out.Linux = (*corev1.LinuxContainerUser)(unsafe.Pointer(in.Linux))
return nil
}
// Convert_core_ContainerUser_To_v1_ContainerUser is an autogenerated conversion function.
func Convert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *corev1.ContainerUser, s conversion.Scope) error {
return autoConvert_core_ContainerUser_To_v1_ContainerUser(in, out, s)
}
func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *corev1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error {
out.Port = in.Port
return nil
}
// Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function.
func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *corev1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error {
return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s)
}
func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *corev1.DaemonEndpoint, s conversion.Scope) error {
out.Port = in.Port
return nil
}
// Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function.
func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *corev1.DaemonEndpoint, s conversion.Scope) error {
return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s)
}
func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *corev1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error {
out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function.
func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *corev1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error {
return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s)
}
func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *corev1.DownwardAPIProjection, s conversion.Scope) error {
out.Items = *(*[]corev1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function.
func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *corev1.DownwardAPIProjection, s conversion.Scope) error {
return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s)
}
func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *corev1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error {
out.Path = in.Path
out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef))
out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef))
out.Mode = (*int32)(unsafe.Pointer(in.Mode))
return nil
}
// Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function.
func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *corev1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error {
return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s)
}
func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *corev1.DownwardAPIVolumeFile, s conversion.Scope) error {
out.Path = in.Path
out.FieldRef = (*corev1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef))
out.ResourceFieldRef = (*corev1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef))
out.Mode = (*int32)(unsafe.Pointer(in.Mode))
return nil
}
// Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function.
func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *corev1.DownwardAPIVolumeFile, s conversion.Scope) error {
return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s)
}
func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *corev1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error {
out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
return nil
}
// Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function.
func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *corev1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error {
return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s)
}
func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *corev1.DownwardAPIVolumeSource, s conversion.Scope) error {
out.Items = *(*[]corev1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
return nil
}
// Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function.
func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *corev1.DownwardAPIVolumeSource, s conversion.Scope) error {
return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s)
}
func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *corev1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error {
out.Medium = core.StorageMedium(in.Medium)
out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit))
return nil
}
// Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function.
func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *corev1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error {
return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s)
}
func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *corev1.EmptyDirVolumeSource, s conversion.Scope) error {
out.Medium = corev1.StorageMedium(in.Medium)
out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit))
return nil
}
// Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function.
func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *corev1.EmptyDirVolumeSource, s conversion.Scope) error {
return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s)
}
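// Note: SizeLimit is *resource.Quantity in both the internal and v1 structs,
// so the cast above is effectively an identity reinterpretation; the generator
// appears to emit the same uniform unsafe pattern for every pointer field
// regardless of whether the types actually differ.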
func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *corev1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef))
return nil
}
// Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function.
func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *corev1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error {
return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s)
}
func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *corev1.EndpointAddress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.TargetRef = (*corev1.ObjectReference)(unsafe.Pointer(in.TargetRef))
return nil
}
// Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function.
func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *corev1.EndpointAddress, s conversion.Scope) error {
return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s)
}
func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *corev1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error {
out.Name = in.Name
out.Port = in.Port
out.Protocol = core.Protocol(in.Protocol)
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function.
func Convert_v1_EndpointPort_To_core_EndpointPort(in *corev1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error {
return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s)
}
func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *corev1.EndpointPort, s conversion.Scope) error {
out.Name = in.Name
out.Port = in.Port
out.Protocol = corev1.Protocol(in.Protocol)
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function.
func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *corev1.EndpointPort, s conversion.Scope) error {
return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s)
}
func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *corev1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error {
out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses))
out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses))
out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function.
func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *corev1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error {
return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s)
}
func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *corev1.EndpointSubset, s conversion.Scope) error {
out.Addresses = *(*[]corev1.EndpointAddress)(unsafe.Pointer(&in.Addresses))
out.NotReadyAddresses = *(*[]corev1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses))
out.Ports = *(*[]corev1.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function.
func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *corev1.EndpointSubset, s conversion.Scope) error {
return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s)
}
func autoConvert_v1_Endpoints_To_core_Endpoints(in *corev1.Endpoints, out *core.Endpoints, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets))
return nil
}
// Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function.
func Convert_v1_Endpoints_To_core_Endpoints(in *corev1.Endpoints, out *core.Endpoints, s conversion.Scope) error {
return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s)
}
func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *corev1.Endpoints, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subsets = *(*[]corev1.EndpointSubset)(unsafe.Pointer(&in.Subsets))
return nil
}
// Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function.
func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *corev1.Endpoints, s conversion.Scope) error {
return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s)
}
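// exampleConvertEndpoints is a hand-written, illustrative sketch (not part of
// the generated output) showing how one of the public wrappers can be invoked
// directly. Passing a nil conversion.Scope is assumed to be safe here only
// because the generated body for Endpoints uses unsafe casts and never
// consults the scope; in real code these functions are registered with a
// runtime scheme and called through it instead.
func exampleConvertEndpoints(in *corev1.Endpoints) (*core.Endpoints, error) {
	out := &core.Endpoints{}
	if err := Convert_v1_Endpoints_To_core_Endpoints(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}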
func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *corev1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function.
func Convert_v1_EndpointsList_To_core_EndpointsList(in *corev1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error {
return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s)
}
func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *corev1.EndpointsList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.Endpoints)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function.
func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *corev1.EndpointsList, s conversion.Scope) error {
return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s)
}
func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *corev1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error {
out.Prefix = in.Prefix
out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef))
out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function.
func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *corev1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error {
return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s)
}
func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *corev1.EnvFromSource, s conversion.Scope) error {
out.Prefix = in.Prefix
out.ConfigMapRef = (*corev1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef))
out.SecretRef = (*corev1.SecretEnvSource)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function.
func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *corev1.EnvFromSource, s conversion.Scope) error {
return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s)
}
func autoConvert_v1_EnvVar_To_core_EnvVar(in *corev1.EnvVar, out *core.EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom))
return nil
}
// Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function.
func Convert_v1_EnvVar_To_core_EnvVar(in *corev1.EnvVar, out *core.EnvVar, s conversion.Scope) error {
return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s)
}
func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *corev1.EnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
out.ValueFrom = (*corev1.EnvVarSource)(unsafe.Pointer(in.ValueFrom))
return nil
}
// Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function.
func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *corev1.EnvVar, s conversion.Scope) error {
return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s)
}
func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *corev1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error {
out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef))
out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef))
out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef))
out.SecretKeyRef = (*core.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef))
out.FileKeyRef = (*core.FileKeySelector)(unsafe.Pointer(in.FileKeyRef))
return nil
}
// Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function.
func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *corev1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error {
return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s)
}
func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *corev1.EnvVarSource, s conversion.Scope) error {
out.FieldRef = (*corev1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef))
out.ResourceFieldRef = (*corev1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef))
out.ConfigMapKeyRef = (*corev1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef))
out.SecretKeyRef = (*corev1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef))
out.FileKeyRef = (*corev1.FileKeySelector)(unsafe.Pointer(in.FileKeyRef))
return nil
}
// Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function.
func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *corev1.EnvVarSource, s conversion.Scope) error {
return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s)
}
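// The EphemeralContainer conversions below illustrate the other generation
// strategy: EphemeralContainerCommon embeds ResourceRequirements, whose
// conversion goes through a dedicated Convert_* helper (presumably because a
// direct memory cast is not valid for it), so the generator emits a call to
// that helper and propagates its error instead of using unsafe.Pointer.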
func autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in *corev1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error {
if err := Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(&in.EphemeralContainerCommon, &out.EphemeralContainerCommon, s); err != nil {
return err
}
out.TargetContainerName = in.TargetContainerName
return nil
}
// Convert_v1_EphemeralContainer_To_core_EphemeralContainer is an autogenerated conversion function.
func Convert_v1_EphemeralContainer_To_core_EphemeralContainer(in *corev1.EphemeralContainer, out *core.EphemeralContainer, s conversion.Scope) error {
return autoConvert_v1_EphemeralContainer_To_core_EphemeralContainer(in, out, s)
}
func autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *corev1.EphemeralContainer, s conversion.Scope) error {
if err := Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(&in.EphemeralContainerCommon, &out.EphemeralContainerCommon, s); err != nil {
return err
}
out.TargetContainerName = in.TargetContainerName
return nil
}
// Convert_core_EphemeralContainer_To_v1_EphemeralContainer is an autogenerated conversion function.
func Convert_core_EphemeralContainer_To_v1_EphemeralContainer(in *core.EphemeralContainer, out *corev1.EphemeralContainer, s conversion.Scope) error {
return autoConvert_core_EphemeralContainer_To_v1_EphemeralContainer(in, out, s)
}
func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *corev1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error {
out.Name = in.Name
out.Image = in.Image
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.WorkingDir = in.WorkingDir
out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports))
out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env))
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*core.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.RestartPolicyRules = *(*[]core.ContainerRestartRule)(unsafe.Pointer(&in.RestartPolicyRules))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe))
out.StartupProbe = (*core.Probe)(unsafe.Pointer(in.StartupProbe))
out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle))
out.TerminationMessagePath = in.TerminationMessagePath
out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy)
out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy)
out.SecurityContext = (*core.SecurityContext)(unsafe.Pointer(in.SecurityContext))
out.Stdin = in.Stdin
out.StdinOnce = in.StdinOnce
out.TTY = in.TTY
return nil
}
// Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon is an autogenerated conversion function.
func Convert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in *corev1.EphemeralContainerCommon, out *core.EphemeralContainerCommon, s conversion.Scope) error {
return autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in, out, s)
}
func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *corev1.EphemeralContainerCommon, s conversion.Scope) error {
out.Name = in.Name
out.Image = in.Image
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.WorkingDir = in.WorkingDir
out.Ports = *(*[]corev1.ContainerPort)(unsafe.Pointer(&in.Ports))
out.EnvFrom = *(*[]corev1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env))
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]corev1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*corev1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.RestartPolicyRules = *(*[]corev1.ContainerRestartRule)(unsafe.Pointer(&in.RestartPolicyRules))
out.VolumeMounts = *(*[]corev1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]corev1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*corev1.Probe)(unsafe.Pointer(in.LivenessProbe))
out.ReadinessProbe = (*corev1.Probe)(unsafe.Pointer(in.ReadinessProbe))
out.StartupProbe = (*corev1.Probe)(unsafe.Pointer(in.StartupProbe))
out.Lifecycle = (*corev1.Lifecycle)(unsafe.Pointer(in.Lifecycle))
out.TerminationMessagePath = in.TerminationMessagePath
out.TerminationMessagePolicy = corev1.TerminationMessagePolicy(in.TerminationMessagePolicy)
out.ImagePullPolicy = corev1.PullPolicy(in.ImagePullPolicy)
out.SecurityContext = (*corev1.SecurityContext)(unsafe.Pointer(in.SecurityContext))
out.Stdin = in.Stdin
out.StdinOnce = in.StdinOnce
out.TTY = in.TTY
return nil
}
// Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon is an autogenerated conversion function.
func Convert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in *core.EphemeralContainerCommon, out *corev1.EphemeralContainerCommon, s conversion.Scope) error {
return autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in, out, s)
}
func autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *corev1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
out.VolumeClaimTemplate = (*core.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
return nil
}
// Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *corev1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
return autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in, out, s)
}
func autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *corev1.EphemeralVolumeSource, s conversion.Scope) error {
out.VolumeClaimTemplate = (*corev1.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
return nil
}
// Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *corev1.EphemeralVolumeSource, s conversion.Scope) error {
return autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in, out, s)
}
func autoConvert_v1_Event_To_core_Event(in *corev1.Event, out *core.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
if err := Convert_v1_EventSource_To_core_EventSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.FirstTimestamp = in.FirstTimestamp
out.LastTimestamp = in.LastTimestamp
out.Count = in.Count
out.Type = in.Type
out.EventTime = in.EventTime
out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series))
out.Action = in.Action
out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
return nil
}
// Convert_v1_Event_To_core_Event is an autogenerated conversion function.
func Convert_v1_Event_To_core_Event(in *corev1.Event, out *core.Event, s conversion.Scope) error {
return autoConvert_v1_Event_To_core_Event(in, out, s)
}
func autoConvert_core_Event_To_v1_Event(in *core.Event, out *corev1.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
if err := Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.FirstTimestamp = in.FirstTimestamp
out.LastTimestamp = in.LastTimestamp
out.Count = in.Count
out.Type = in.Type
out.EventTime = in.EventTime
out.Series = (*corev1.EventSeries)(unsafe.Pointer(in.Series))
out.Action = in.Action
out.Related = (*corev1.ObjectReference)(unsafe.Pointer(in.Related))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
return nil
}
// Convert_core_Event_To_v1_Event is an autogenerated conversion function.
func Convert_core_Event_To_v1_Event(in *core.Event, out *corev1.Event, s conversion.Scope) error {
return autoConvert_core_Event_To_v1_Event(in, out, s)
}
func autoConvert_v1_EventList_To_core_EventList(in *corev1.EventList, out *core.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function.
func Convert_v1_EventList_To_core_EventList(in *corev1.EventList, out *core.EventList, s conversion.Scope) error {
return autoConvert_v1_EventList_To_core_EventList(in, out, s)
}
func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *corev1.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.Event)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function.
func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *corev1.EventList, s conversion.Scope) error {
return autoConvert_core_EventList_To_v1_EventList(in, out, s)
}
func autoConvert_v1_EventSeries_To_core_EventSeries(in *corev1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function.
func Convert_v1_EventSeries_To_core_EventSeries(in *corev1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s)
}
func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *corev1.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function.
func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *corev1.EventSeries, s conversion.Scope) error {
return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s)
}
func autoConvert_v1_EventSource_To_core_EventSource(in *corev1.EventSource, out *core.EventSource, s conversion.Scope) error {
out.Component = in.Component
out.Host = in.Host
return nil
}
// Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function.
func Convert_v1_EventSource_To_core_EventSource(in *corev1.EventSource, out *core.EventSource, s conversion.Scope) error {
return autoConvert_v1_EventSource_To_core_EventSource(in, out, s)
}
func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *corev1.EventSource, s conversion.Scope) error {
out.Component = in.Component
out.Host = in.Host
return nil
}
// Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function.
func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *corev1.EventSource, s conversion.Scope) error {
return autoConvert_core_EventSource_To_v1_EventSource(in, out, s)
}
func autoConvert_v1_ExecAction_To_core_ExecAction(in *corev1.ExecAction, out *core.ExecAction, s conversion.Scope) error {
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
return nil
}
// Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function.
func Convert_v1_ExecAction_To_core_ExecAction(in *corev1.ExecAction, out *core.ExecAction, s conversion.Scope) error {
return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s)
}
func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *corev1.ExecAction, s conversion.Scope) error {
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
return nil
}
// Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function.
func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *corev1.ExecAction, s conversion.Scope) error {
return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s)
}
func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *corev1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error {
out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs))
out.Lun = (*int32)(unsafe.Pointer(in.Lun))
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs))
return nil
}
// Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function.
func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *corev1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s)
}
func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *corev1.FCVolumeSource, s conversion.Scope) error {
out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs))
out.Lun = (*int32)(unsafe.Pointer(in.Lun))
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs))
return nil
}
// Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function.
func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *corev1.FCVolumeSource, s conversion.Scope) error {
return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s)
}
func autoConvert_v1_FileKeySelector_To_core_FileKeySelector(in *corev1.FileKeySelector, out *core.FileKeySelector, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.Path = in.Path
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_FileKeySelector_To_core_FileKeySelector is an autogenerated conversion function.
func Convert_v1_FileKeySelector_To_core_FileKeySelector(in *corev1.FileKeySelector, out *core.FileKeySelector, s conversion.Scope) error {
return autoConvert_v1_FileKeySelector_To_core_FileKeySelector(in, out, s)
}
func autoConvert_core_FileKeySelector_To_v1_FileKeySelector(in *core.FileKeySelector, out *corev1.FileKeySelector, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.Path = in.Path
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_FileKeySelector_To_v1_FileKeySelector is an autogenerated conversion function.
func Convert_core_FileKeySelector_To_v1_FileKeySelector(in *core.FileKeySelector, out *corev1.FileKeySelector, s conversion.Scope) error {
return autoConvert_core_FileKeySelector_To_v1_FileKeySelector(in, out, s)
}
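// Map-valued fields such as FlexVolumeSource.Options below are likewise
// converted with an unsafe cast. Because Go maps (like slices) are reference
// types, the cast makes out.Options alias the same underlying map as
// in.Options; mutations through either struct are visible through the other,
// which is safe only as long as callers treat the input as read-only after
// conversion.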
func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *corev1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *corev1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s)
}
func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *corev1.FlexPersistentVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *corev1.FlexPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *corev1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function.
func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *corev1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s)
}
func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *corev1.FlexVolumeSource, s conversion.Scope) error {
out.Driver = in.Driver
out.FSType = in.FSType
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function.
func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *corev1.FlexVolumeSource, s conversion.Scope) error {
return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s)
}
func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *corev1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error {
out.DatasetName = in.DatasetName
out.DatasetUUID = in.DatasetUUID
return nil
}
// Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function.
func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *corev1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error {
return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s)
}
func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *corev1.FlockerVolumeSource, s conversion.Scope) error {
out.DatasetName = in.DatasetName
out.DatasetUUID = in.DatasetUUID
return nil
}
// Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function.
func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *corev1.FlockerVolumeSource, s conversion.Scope) error {
return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s)
}
func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *corev1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
out.PDName = in.PDName
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function.
func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *corev1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *corev1.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
out.PDName = in.PDName
out.FSType = in.FSType
out.Partition = in.Partition
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function.
func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *corev1.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_v1_GRPCAction_To_core_GRPCAction(in *corev1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error {
out.Port = in.Port
out.Service = (*string)(unsafe.Pointer(in.Service))
return nil
}
// Convert_v1_GRPCAction_To_core_GRPCAction is an autogenerated conversion function.
func Convert_v1_GRPCAction_To_core_GRPCAction(in *corev1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error {
return autoConvert_v1_GRPCAction_To_core_GRPCAction(in, out, s)
}
func autoConvert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *corev1.GRPCAction, s conversion.Scope) error {
out.Port = in.Port
out.Service = (*string)(unsafe.Pointer(in.Service))
return nil
}
// Convert_core_GRPCAction_To_v1_GRPCAction is an autogenerated conversion function.
func Convert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *corev1.GRPCAction, s conversion.Scope) error {
return autoConvert_core_GRPCAction_To_v1_GRPCAction(in, out, s)
}
func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *corev1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error {
out.Repository = in.Repository
out.Revision = in.Revision
out.Directory = in.Directory
return nil
}
// Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function.
func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *corev1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error {
return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s)
}
func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *corev1.GitRepoVolumeSource, s conversion.Scope) error {
out.Repository = in.Repository
out.Revision = in.Revision
out.Directory = in.Directory
return nil
}
// Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function.
func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *corev1.GitRepoVolumeSource, s conversion.Scope) error {
return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s)
}
func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *corev1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error {
out.EndpointsName = in.EndpointsName
out.Path = in.Path
out.ReadOnly = in.ReadOnly
out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace))
return nil
}
// Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *corev1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in, out, s)
}
func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *corev1.GlusterfsPersistentVolumeSource, s conversion.Scope) error {
out.EndpointsName = in.EndpointsName
out.Path = in.Path
out.ReadOnly = in.ReadOnly
out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace))
return nil
}
// Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *corev1.GlusterfsPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *corev1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error {
out.EndpointsName = in.EndpointsName
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function.
func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *corev1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error {
return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s)
}
func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *corev1.GlusterfsVolumeSource, s conversion.Scope) error {
out.EndpointsName = in.EndpointsName
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function.
func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *corev1.GlusterfsVolumeSource, s conversion.Scope) error {
return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s)
}
func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *corev1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error {
out.Path = in.Path
out.Port = in.Port
out.Host = in.Host
out.Scheme = core.URIScheme(in.Scheme)
out.HTTPHeaders = *(*[]core.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders))
return nil
}
// Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function.
func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *corev1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error {
return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s)
}
func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *corev1.HTTPGetAction, s conversion.Scope) error {
out.Path = in.Path
out.Port = in.Port
out.Host = in.Host
out.Scheme = corev1.URIScheme(in.Scheme)
out.HTTPHeaders = *(*[]corev1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders))
return nil
}
// Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function.
func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *corev1.HTTPGetAction, s conversion.Scope) error {
return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s)
}
func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *corev1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function.
func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *corev1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error {
return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s)
}
func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *corev1.HTTPHeader, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function.
func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *corev1.HTTPHeader, s conversion.Scope) error {
return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s)
}
func autoConvert_v1_HostAlias_To_core_HostAlias(in *corev1.HostAlias, out *core.HostAlias, s conversion.Scope) error {
out.IP = in.IP
out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames))
return nil
}
// Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function.
func Convert_v1_HostAlias_To_core_HostAlias(in *corev1.HostAlias, out *core.HostAlias, s conversion.Scope) error {
return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s)
}
func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *corev1.HostAlias, s conversion.Scope) error {
out.IP = in.IP
out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames))
return nil
}
// Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function.
func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *corev1.HostAlias, s conversion.Scope) error {
return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s)
}
func autoConvert_v1_HostIP_To_core_HostIP(in *corev1.HostIP, out *core.HostIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}
// Convert_v1_HostIP_To_core_HostIP is an autogenerated conversion function.
func Convert_v1_HostIP_To_core_HostIP(in *corev1.HostIP, out *core.HostIP, s conversion.Scope) error {
return autoConvert_v1_HostIP_To_core_HostIP(in, out, s)
}
func autoConvert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *corev1.HostIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}
// Convert_core_HostIP_To_v1_HostIP is an autogenerated conversion function.
func Convert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *corev1.HostIP, s conversion.Scope) error {
return autoConvert_core_HostIP_To_v1_HostIP(in, out, s)
}
func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *corev1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error {
out.Path = in.Path
out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type))
return nil
}
// Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function.
func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *corev1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error {
return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s)
}
func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *corev1.HostPathVolumeSource, s conversion.Scope) error {
out.Path = in.Path
out.Type = (*corev1.HostPathType)(unsafe.Pointer(in.Type))
return nil
}
// Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function.
func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *corev1.HostPathVolumeSource, s conversion.Scope) error {
return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s)
}
func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *corev1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error {
out.TargetPortal = in.TargetPortal
out.IQN = in.IQN
out.Lun = in.Lun
out.ISCSIInterface = in.ISCSIInterface
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
out.SessionCHAPAuth = in.SessionCHAPAuth
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName))
return nil
}
// Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *corev1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s)
}
func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *corev1.ISCSIPersistentVolumeSource, s conversion.Scope) error {
out.TargetPortal = in.TargetPortal
out.IQN = in.IQN
out.Lun = in.Lun
out.ISCSIInterface = in.ISCSIInterface
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
out.SessionCHAPAuth = in.SessionCHAPAuth
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName))
return nil
}
// Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *corev1.ISCSIPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *corev1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error {
out.TargetPortal = in.TargetPortal
out.IQN = in.IQN
out.Lun = in.Lun
out.ISCSIInterface = in.ISCSIInterface
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
out.SessionCHAPAuth = in.SessionCHAPAuth
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName))
return nil
}
// Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function.
func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *corev1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s)
}
func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *corev1.ISCSIVolumeSource, s conversion.Scope) error {
out.TargetPortal = in.TargetPortal
out.IQN = in.IQN
out.Lun = in.Lun
out.ISCSIInterface = in.ISCSIInterface
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals))
out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth
out.SessionCHAPAuth = in.SessionCHAPAuth
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName))
return nil
}
// Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function.
func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *corev1.ISCSIVolumeSource, s conversion.Scope) error {
return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s)
}
func autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *corev1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error {
out.Reference = in.Reference
out.PullPolicy = core.PullPolicy(in.PullPolicy)
return nil
}
// Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource is an autogenerated conversion function.
func Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *corev1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in, out, s)
}
func autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *corev1.ImageVolumeSource, s conversion.Scope) error {
out.Reference = in.Reference
out.PullPolicy = corev1.PullPolicy(in.PullPolicy)
return nil
}
// Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource is an autogenerated conversion function.
func Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *corev1.ImageVolumeSource, s conversion.Scope) error {
return autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in, out, s)
}
func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *corev1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error {
out.Key = in.Key
out.Path = in.Path
out.Mode = (*int32)(unsafe.Pointer(in.Mode))
return nil
}
// Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function.
func Convert_v1_KeyToPath_To_core_KeyToPath(in *corev1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error {
return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s)
}
func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *corev1.KeyToPath, s conversion.Scope) error {
out.Key = in.Key
out.Path = in.Path
out.Mode = (*int32)(unsafe.Pointer(in.Mode))
return nil
}
// Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function.
func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *corev1.KeyToPath, s conversion.Scope) error {
return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s)
}
func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error {
out.PostStart = (*core.LifecycleHandler)(unsafe.Pointer(in.PostStart))
out.PreStop = (*core.LifecycleHandler)(unsafe.Pointer(in.PreStop))
out.StopSignal = (*core.Signal)(unsafe.Pointer(in.StopSignal))
return nil
}
// Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function.
func Convert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error {
return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s)
}
func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *corev1.Lifecycle, s conversion.Scope) error {
out.PostStart = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PostStart))
out.PreStop = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PreStop))
out.StopSignal = (*corev1.Signal)(unsafe.Pointer(in.StopSignal))
return nil
}
// Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function.
func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *corev1.Lifecycle, s conversion.Scope) error {
return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s)
}
func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *corev1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error {
out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.Sleep = (*core.SleepAction)(unsafe.Pointer(in.Sleep))
return nil
}
// Convert_v1_LifecycleHandler_To_core_LifecycleHandler is an autogenerated conversion function.
func Convert_v1_LifecycleHandler_To_core_LifecycleHandler(in *corev1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error {
return autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in, out, s)
}
func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *corev1.LifecycleHandler, s conversion.Scope) error {
out.Exec = (*corev1.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*corev1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*corev1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.Sleep = (*corev1.SleepAction)(unsafe.Pointer(in.Sleep))
return nil
}
// Convert_core_LifecycleHandler_To_v1_LifecycleHandler is an autogenerated conversion function.
func Convert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *corev1.LifecycleHandler, s conversion.Scope) error {
return autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in, out, s)
}
func autoConvert_v1_LimitRange_To_core_LimitRange(in *corev1.LimitRange, out *core.LimitRange, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function.
func Convert_v1_LimitRange_To_core_LimitRange(in *corev1.LimitRange, out *core.LimitRange, s conversion.Scope) error {
return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s)
}
func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *corev1.LimitRange, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function.
func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *corev1.LimitRange, s conversion.Scope) error {
return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s)
}
func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *corev1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error {
out.Type = core.LimitType(in.Type)
out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max))
out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min))
out.Default = *(*core.ResourceList)(unsafe.Pointer(&in.Default))
out.DefaultRequest = *(*core.ResourceList)(unsafe.Pointer(&in.DefaultRequest))
out.MaxLimitRequestRatio = *(*core.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio))
return nil
}
// Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function.
func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *corev1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error {
return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s)
}
func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *corev1.LimitRangeItem, s conversion.Scope) error {
out.Type = corev1.LimitType(in.Type)
out.Max = *(*corev1.ResourceList)(unsafe.Pointer(&in.Max))
out.Min = *(*corev1.ResourceList)(unsafe.Pointer(&in.Min))
out.Default = *(*corev1.ResourceList)(unsafe.Pointer(&in.Default))
out.DefaultRequest = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequest))
out.MaxLimitRequestRatio = *(*corev1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio))
return nil
}
// Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function.
func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *corev1.LimitRangeItem, s conversion.Scope) error {
return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s)
}
func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *corev1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function.
func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *corev1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error {
return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s)
}
func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *corev1.LimitRangeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.LimitRange)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function.
func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *corev1.LimitRangeList, s conversion.Scope) error {
return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s)
}
func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *corev1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error {
out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits))
return nil
}
// Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function.
func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *corev1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error {
return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s)
}
func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *corev1.LimitRangeSpec, s conversion.Scope) error {
out.Limits = *(*[]corev1.LimitRangeItem)(unsafe.Pointer(&in.Limits))
return nil
}
// Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function.
func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *corev1.LimitRangeSpec, s conversion.Scope) error {
return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s)
}
func autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *corev1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error {
out.UID = in.UID
out.GID = in.GID
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
return nil
}
// Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser is an autogenerated conversion function.
func Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *corev1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error {
return autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in, out, s)
}
func autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *corev1.LinuxContainerUser, s conversion.Scope) error {
out.UID = in.UID
out.GID = in.GID
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
return nil
}
// Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser is an autogenerated conversion function.
func Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *corev1.LinuxContainerUser, s conversion.Scope) error {
return autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in, out, s)
}
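// List is one of the few types here that cannot use an unsafe cast at all:
// corev1.List holds []runtime.RawExtension (raw serialized bytes) while
// core.List holds []runtime.Object, so the generator emits an element-by-
// element loop that allocates the destination slice and converts each item,
// returning the first error encountered.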
func autoConvert_v1_List_To_core_List(in *corev1.List, out *core.List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_List_To_core_List is an autogenerated conversion function.
func Convert_v1_List_To_core_List(in *corev1.List, out *core.List, s conversion.Scope) error {
return autoConvert_v1_List_To_core_List(in, out, s)
}
func autoConvert_core_List_To_v1_List(in *core.List, out *corev1.List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.RawExtension, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_List_To_v1_List is an autogenerated conversion function.
func Convert_core_List_To_v1_List(in *core.List, out *corev1.List, s conversion.Scope) error {
return autoConvert_core_List_To_v1_List(in, out, s)
}
func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *corev1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.IPMode = (*core.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode))
out.Ports = *(*[]core.PortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function.
func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *corev1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error {
return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s)
}
func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *corev1.LoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.IPMode = (*corev1.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode))
out.Ports = *(*[]corev1.PortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function.
func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *corev1.LoadBalancerIngress, s conversion.Scope) error {
return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s)
}
func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *corev1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function.
func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *corev1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s)
}
func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *corev1.LoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]corev1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function.
func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *corev1.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s)
}
func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *corev1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function.
func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *corev1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error {
return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s)
}
func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *corev1.LocalObjectReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function.
func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *corev1.LocalObjectReference, s conversion.Scope) error {
return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s)
}
func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *corev1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error {
out.Path = in.Path
out.FSType = (*string)(unsafe.Pointer(in.FSType))
return nil
}
// Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function.
func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *corev1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error {
return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s)
}
func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *corev1.LocalVolumeSource, s conversion.Scope) error {
out.Path = in.Path
out.FSType = (*string)(unsafe.Pointer(in.FSType))
return nil
}
// Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function.
func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *corev1.LocalVolumeSource, s conversion.Scope) error {
return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s)
}
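// ModifyVolumeStatus.Status is a defined string type on both sides, so the
// conversions below use a plain Go type conversion rather than unsafe.Pointer;
// for example, core.PersistentVolumeClaimModifyVolumeStatus("InProgress") is a
// value of the target type (the literal is illustrative only).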
func autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *corev1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error {
out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName
out.Status = core.PersistentVolumeClaimModifyVolumeStatus(in.Status)
return nil
}
// Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus is an autogenerated conversion function.
func Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *corev1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error {
return autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in, out, s)
}
func autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *corev1.ModifyVolumeStatus, s conversion.Scope) error {
out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName
out.Status = corev1.PersistentVolumeClaimModifyVolumeStatus(in.Status)
return nil
}
// Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus is an autogenerated conversion function.
func Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *corev1.ModifyVolumeStatus, s conversion.Scope) error {
return autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in, out, s)
}
func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *corev1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error {
out.Server = in.Server
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function.
func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *corev1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error {
return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s)
}
func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *corev1.NFSVolumeSource, s conversion.Scope) error {
out.Server = in.Server
out.Path = in.Path
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function.
func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *corev1.NFSVolumeSource, s conversion.Scope) error {
return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s)
}
func autoConvert_v1_Namespace_To_core_Namespace(in *corev1.Namespace, out *core.Namespace, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_NamespaceStatus_To_core_NamespaceStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function.
func Convert_v1_Namespace_To_core_Namespace(in *corev1.Namespace, out *core.Namespace, s conversion.Scope) error {
return autoConvert_v1_Namespace_To_core_Namespace(in, out, s)
}
func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *corev1.Namespace, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function.
func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *corev1.Namespace, s conversion.Scope) error {
return autoConvert_core_Namespace_To_v1_Namespace(in, out, s)
}
func autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in *corev1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error {
out.Type = core.NamespaceConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_NamespaceCondition_To_core_NamespaceCondition is an autogenerated conversion function.
func Convert_v1_NamespaceCondition_To_core_NamespaceCondition(in *corev1.NamespaceCondition, out *core.NamespaceCondition, s conversion.Scope) error {
return autoConvert_v1_NamespaceCondition_To_core_NamespaceCondition(in, out, s)
}
func autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *corev1.NamespaceCondition, s conversion.Scope) error {
out.Type = corev1.NamespaceConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_NamespaceCondition_To_v1_NamespaceCondition is an autogenerated conversion function.
func Convert_core_NamespaceCondition_To_v1_NamespaceCondition(in *core.NamespaceCondition, out *corev1.NamespaceCondition, s conversion.Scope) error {
return autoConvert_core_NamespaceCondition_To_v1_NamespaceCondition(in, out, s)
}
func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *corev1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function.
func Convert_v1_NamespaceList_To_core_NamespaceList(in *corev1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error {
return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s)
}
func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *corev1.NamespaceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.Namespace)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function.
func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *corev1.NamespaceList, s conversion.Scope) error {
return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s)
}
func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *corev1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error {
out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers))
return nil
}
// Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function.
func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *corev1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error {
return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s)
}
func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *corev1.NamespaceSpec, s conversion.Scope) error {
out.Finalizers = *(*[]corev1.FinalizerName)(unsafe.Pointer(&in.Finalizers))
return nil
}
// Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function.
func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *corev1.NamespaceSpec, s conversion.Scope) error {
return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s)
}
func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *corev1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error {
out.Phase = core.NamespacePhase(in.Phase)
out.Conditions = *(*[]core.NamespaceCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function.
func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *corev1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error {
return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s)
}
func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *corev1.NamespaceStatus, s conversion.Scope) error {
out.Phase = corev1.NamespacePhase(in.Phase)
out.Conditions = *(*[]corev1.NamespaceCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function.
func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *corev1.NamespaceStatus, s conversion.Scope) error {
return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s)
}
func autoConvert_v1_Node_To_core_Node(in *corev1.Node, out *core.Node, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_NodeStatus_To_core_NodeStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Node_To_core_Node is an autogenerated conversion function.
func Convert_v1_Node_To_core_Node(in *corev1.Node, out *core.Node, s conversion.Scope) error {
return autoConvert_v1_Node_To_core_Node(in, out, s)
}
func autoConvert_core_Node_To_v1_Node(in *core.Node, out *corev1.Node, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_Node_To_v1_Node is an autogenerated conversion function.
func Convert_core_Node_To_v1_Node(in *core.Node, out *corev1.Node, s conversion.Scope) error {
return autoConvert_core_Node_To_v1_Node(in, out, s)
}
func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *corev1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error {
out.Type = core.NodeAddressType(in.Type)
out.Address = in.Address
return nil
}
// Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function.
func Convert_v1_NodeAddress_To_core_NodeAddress(in *corev1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error {
return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s)
}
func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *corev1.NodeAddress, s conversion.Scope) error {
out.Type = corev1.NodeAddressType(in.Type)
out.Address = in.Address
return nil
}
// Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function.
func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *corev1.NodeAddress, s conversion.Scope) error {
return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s)
}
func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *corev1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function.
func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *corev1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error {
return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s)
}
func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *corev1.NodeAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = (*corev1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function.
func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *corev1.NodeAffinity, s conversion.Scope) error {
return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s)
}
func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *corev1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error {
out.Type = core.NodeConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastHeartbeatTime = in.LastHeartbeatTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function.
func Convert_v1_NodeCondition_To_core_NodeCondition(in *corev1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error {
return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s)
}
func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *corev1.NodeCondition, s conversion.Scope) error {
out.Type = corev1.NodeConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastHeartbeatTime = in.LastHeartbeatTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function.
func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *corev1.NodeCondition, s conversion.Scope) error {
return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s)
}
func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *corev1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error {
out.ConfigMap = (*core.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap))
return nil
}
// Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function.
func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *corev1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error {
return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s)
}
func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *corev1.NodeConfigSource, s conversion.Scope) error {
out.ConfigMap = (*corev1.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap))
return nil
}
// Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function.
func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *corev1.NodeConfigSource, s conversion.Scope) error {
return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s)
}
func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *corev1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
out.Assigned = (*core.NodeConfigSource)(unsafe.Pointer(in.Assigned))
out.Active = (*core.NodeConfigSource)(unsafe.Pointer(in.Active))
out.LastKnownGood = (*core.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
out.Error = in.Error
return nil
}
// Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus is an autogenerated conversion function.
func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *corev1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error {
return autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in, out, s)
}
func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *corev1.NodeConfigStatus, s conversion.Scope) error {
out.Assigned = (*corev1.NodeConfigSource)(unsafe.Pointer(in.Assigned))
out.Active = (*corev1.NodeConfigSource)(unsafe.Pointer(in.Active))
out.LastKnownGood = (*corev1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood))
out.Error = in.Error
return nil
}
// Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus is an autogenerated conversion function.
func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *corev1.NodeConfigStatus, s conversion.Scope) error {
return autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in, out, s)
}
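// DaemonEndpoint is held by value inside NodeDaemonEndpoints; for value-typed
// struct fields the generator emits a call to the field type's Convert
// function instead of an unsafe cast, as in the two helpers below.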
func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *corev1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error {
if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function.
func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *corev1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error {
return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s)
}
func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *corev1.NodeDaemonEndpoints, s conversion.Scope) error {
if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function.
func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *corev1.NodeDaemonEndpoints, s conversion.Scope) error {
return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s)
}
func autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in *corev1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error {
out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy))
return nil
}
// Convert_v1_NodeFeatures_To_core_NodeFeatures is an autogenerated conversion function.
func Convert_v1_NodeFeatures_To_core_NodeFeatures(in *corev1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error {
return autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in, out, s)
}
func autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *corev1.NodeFeatures, s conversion.Scope) error {
out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy))
return nil
}
// Convert_core_NodeFeatures_To_v1_NodeFeatures is an autogenerated conversion function.
func Convert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *corev1.NodeFeatures, s conversion.Scope) error {
return autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in, out, s)
}
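// Unlike NamespaceList above, NodeList cannot be converted with a single
// unsafe slice cast: Node's spec differs between the two type trees (the v1
// PodCIDR field has no internal counterpart), so Node conversion is
// function-based and the generator falls back to a per-element loop.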
func autoConvert_v1_NodeList_To_core_NodeList(in *corev1.NodeList, out *core.NodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Node, len(*in))
for i := range *in {
if err := Convert_v1_Node_To_core_Node(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function.
func Convert_v1_NodeList_To_core_NodeList(in *corev1.NodeList, out *core.NodeList, s conversion.Scope) error {
return autoConvert_v1_NodeList_To_core_NodeList(in, out, s)
}
func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *corev1.NodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.Node, len(*in))
for i := range *in {
if err := Convert_core_Node_To_v1_Node(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function.
func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *corev1.NodeList, s conversion.Scope) error {
return autoConvert_core_NodeList_To_v1_NodeList(in, out, s)
}
func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *corev1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function.
func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *corev1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error {
return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s)
}
func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *corev1.NodeProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function.
func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *corev1.NodeProxyOptions, s conversion.Scope) error {
return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s)
}
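// The url.Values converter below decodes query parameters into the options
// struct; runtime.Convert_Slice_string_To_string keeps only one value when the
// parameter repeats. An illustrative call (the nil scope is an assumption that
// holds only because the string helper ignores its scope argument):
//
//	values := url.Values{"path": []string{"/healthz"}}
//	opts := corev1.NodeProxyOptions{}
//	_ = Convert_url_Values_To_v1_NodeProxyOptions(&values, &opts, nil)
//	// opts.Path == "/healthz"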
func autoConvert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *corev1.NodeProxyOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil {
return err
}
} else {
out.Path = ""
}
return nil
}
// Convert_url_Values_To_v1_NodeProxyOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_NodeProxyOptions(in *url.Values, out *corev1.NodeProxyOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_NodeProxyOptions(in, out, s)
}
func autoConvert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *corev1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error {
out.Name = in.Name
out.Features = (*core.NodeRuntimeHandlerFeatures)(unsafe.Pointer(in.Features))
return nil
}
// Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler is an autogenerated conversion function.
func Convert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in *corev1.NodeRuntimeHandler, out *core.NodeRuntimeHandler, s conversion.Scope) error {
return autoConvert_v1_NodeRuntimeHandler_To_core_NodeRuntimeHandler(in, out, s)
}
func autoConvert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *corev1.NodeRuntimeHandler, s conversion.Scope) error {
out.Name = in.Name
out.Features = (*corev1.NodeRuntimeHandlerFeatures)(unsafe.Pointer(in.Features))
return nil
}
// Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler is an autogenerated conversion function.
func Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRuntimeHandler, out *corev1.NodeRuntimeHandler, s conversion.Scope) error {
return autoConvert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in, out, s)
}
func autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *corev1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts))
out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces))
return nil
}
// Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures is an autogenerated conversion function.
func Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *corev1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
return autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in, out, s)
}
func autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *corev1.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts))
out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces))
return nil
}
// Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures is an autogenerated conversion function.
func Convert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *corev1.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
return autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in, out, s)
}
func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *corev1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error {
out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms))
return nil
}
// Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function.
func Convert_v1_NodeSelector_To_core_NodeSelector(in *corev1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error {
return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s)
}
func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *corev1.NodeSelector, s conversion.Scope) error {
out.NodeSelectorTerms = *(*[]corev1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms))
return nil
}
// Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function.
func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *corev1.NodeSelector, s conversion.Scope) error {
return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s)
}
func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *corev1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error {
out.Key = in.Key
out.Operator = core.NodeSelectorOperator(in.Operator)
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function.
func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *corev1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error {
return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s)
}
func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *corev1.NodeSelectorRequirement, s conversion.Scope) error {
out.Key = in.Key
out.Operator = corev1.NodeSelectorOperator(in.Operator)
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function.
func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *corev1.NodeSelectorRequirement, s conversion.Scope) error {
return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s)
}
func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *corev1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error {
out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
out.MatchFields = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields))
return nil
}
// Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function.
func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *corev1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error {
return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s)
}
func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *corev1.NodeSelectorTerm, s conversion.Scope) error {
out.MatchExpressions = *(*[]corev1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
out.MatchFields = *(*[]corev1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields))
return nil
}
// Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function.
func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *corev1.NodeSelectorTerm, s conversion.Scope) error {
return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s)
}
func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *corev1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error {
// WARNING: in.PodCIDR requires manual conversion: does not exist in peer-type
out.PodCIDRs = *(*[]string)(unsafe.Pointer(&in.PodCIDRs))
out.ProviderID = in.ProviderID
out.Unschedulable = in.Unschedulable
out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints))
out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource))
out.DoNotUseExternalID = in.DoNotUseExternalID
return nil
}
func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *corev1.NodeSpec, s conversion.Scope) error {
out.PodCIDRs = *(*[]string)(unsafe.Pointer(&in.PodCIDRs))
out.ProviderID = in.ProviderID
out.Unschedulable = in.Unschedulable
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.ConfigSource = (*corev1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource))
out.DoNotUseExternalID = in.DoNotUseExternalID
return nil
}
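// NOTE: no public Convert_*_NodeSpec wrappers are generated for the two
// helpers above. The v1 PodCIDR field has no counterpart in the internal
// NodeSpec, so hand-written conversions (conventionally kept in this package's
// conversion.go) are expected to call these autoConvert helpers and reconcile
// PodCIDR with PodCIDRs themselves.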
func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *corev1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error {
out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable))
out.Phase = core.NodePhase(in.Phase)
out.Conditions = *(*[]core.NodeCondition)(unsafe.Pointer(&in.Conditions))
out.Addresses = *(*[]core.NodeAddress)(unsafe.Pointer(&in.Addresses))
if err := Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil {
return err
}
if err := Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil {
return err
}
out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images))
out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config))
out.RuntimeHandlers = *(*[]core.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers))
out.Features = (*core.NodeFeatures)(unsafe.Pointer(in.Features))
return nil
}
// Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function.
func Convert_v1_NodeStatus_To_core_NodeStatus(in *corev1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error {
return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s)
}
func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *corev1.NodeStatus, s conversion.Scope) error {
out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Allocatable = *(*corev1.ResourceList)(unsafe.Pointer(&in.Allocatable))
out.Phase = corev1.NodePhase(in.Phase)
out.Conditions = *(*[]corev1.NodeCondition)(unsafe.Pointer(&in.Conditions))
out.Addresses = *(*[]corev1.NodeAddress)(unsafe.Pointer(&in.Addresses))
if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil {
return err
}
if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil {
return err
}
out.Images = *(*[]corev1.ContainerImage)(unsafe.Pointer(&in.Images))
out.VolumesInUse = *(*[]corev1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse))
out.VolumesAttached = *(*[]corev1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*corev1.NodeConfigStatus)(unsafe.Pointer(in.Config))
out.RuntimeHandlers = *(*[]corev1.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers))
out.Features = (*corev1.NodeFeatures)(unsafe.Pointer(in.Features))
return nil
}
// Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function.
func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *corev1.NodeStatus, s conversion.Scope) error {
return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s)
}
func autoConvert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in *corev1.NodeSwapStatus, out *core.NodeSwapStatus, s conversion.Scope) error {
out.Capacity = (*int64)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus is an autogenerated conversion function.
func Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in *corev1.NodeSwapStatus, out *core.NodeSwapStatus, s conversion.Scope) error {
return autoConvert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in, out, s)
}
func autoConvert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in *core.NodeSwapStatus, out *corev1.NodeSwapStatus, s conversion.Scope) error {
out.Capacity = (*int64)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus is an autogenerated conversion function.
func Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in *core.NodeSwapStatus, out *corev1.NodeSwapStatus, s conversion.Scope) error {
return autoConvert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in, out, s)
}
func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error {
out.MachineID = in.MachineID
out.SystemUUID = in.SystemUUID
out.BootID = in.BootID
out.KernelVersion = in.KernelVersion
out.OSImage = in.OSImage
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
out.Swap = (*core.NodeSwapStatus)(unsafe.Pointer(in.Swap))
return nil
}
// Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function.
func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error {
return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s)
}
func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *corev1.NodeSystemInfo, s conversion.Scope) error {
out.MachineID = in.MachineID
out.SystemUUID = in.SystemUUID
out.BootID = in.BootID
out.KernelVersion = in.KernelVersion
out.OSImage = in.OSImage
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
out.Swap = (*corev1.NodeSwapStatus)(unsafe.Pointer(in.Swap))
return nil
}
// Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function.
func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *corev1.NodeSystemInfo, s conversion.Scope) error {
return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s)
}
func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *corev1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.FieldPath = in.FieldPath
return nil
}
// Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function.
func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *corev1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error {
return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s)
}
func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *corev1.ObjectFieldSelector, s conversion.Scope) error {
out.APIVersion = in.APIVersion
out.FieldPath = in.FieldPath
return nil
}
// Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function.
func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *corev1.ObjectFieldSelector, s conversion.Scope) error {
return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s)
}
func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *corev1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
out.APIVersion = in.APIVersion
out.ResourceVersion = in.ResourceVersion
out.FieldPath = in.FieldPath
return nil
}
// Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function.
func Convert_v1_ObjectReference_To_core_ObjectReference(in *corev1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error {
return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s)
}
func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *corev1.ObjectReference, s conversion.Scope) error {
out.Kind = in.Kind
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
out.APIVersion = in.APIVersion
out.ResourceVersion = in.ResourceVersion
out.FieldPath = in.FieldPath
return nil
}
// Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function.
func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *corev1.ObjectReference, s conversion.Scope) error {
return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s)
}
func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *corev1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function.
func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *corev1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error {
return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s)
}
func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *corev1.PersistentVolume, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function.
func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *corev1.PersistentVolume, s conversion.Scope) error {
return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s)
}
func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *corev1.PersistentVolumeClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *corev1.PersistentVolumeClaim, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *corev1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error {
out.Type = core.PersistentVolumeClaimConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *corev1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *corev1.PersistentVolumeClaimCondition, s conversion.Scope) error {
out.Type = corev1.PersistentVolumeClaimConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *corev1.PersistentVolumeClaimCondition, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *corev1.PersistentVolumeClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *corev1.PersistentVolumeClaimList, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *corev1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error {
out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.VolumeName = in.VolumeName
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*core.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
// Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *corev1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *corev1.PersistentVolumeClaimSpec, s conversion.Scope) error {
out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.VolumeName = in.VolumeName
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
out.VolumeMode = (*corev1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*corev1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*corev1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
// Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *corev1.PersistentVolumeClaimSpec, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s)
}
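// AllocatedResourceStatuses below shows that the unsafe cast also applies to
// maps: both the key (ResourceName) and value (ClaimResourceStatus) are
// string-backed defined types on each side, so the map headers are
// layout-compatible and can be reinterpreted without copying or rehashing.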
func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *corev1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error {
out.Phase = core.PersistentVolumeClaimPhase(in.Phase)
out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.AllocatedResourceStatuses = *(*map[core.ResourceName]core.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName))
out.ModifyVolumeStatus = (*core.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus))
return nil
}
// Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *corev1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *corev1.PersistentVolumeClaimStatus, s conversion.Scope) error {
out.Phase = corev1.PersistentVolumeClaimPhase(in.Phase)
out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Conditions = *(*[]corev1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*corev1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.AllocatedResourceStatuses = *(*map[corev1.ResourceName]corev1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName))
out.ModifyVolumeStatus = (*corev1.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus))
return nil
}
// Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *corev1.PersistentVolumeClaimStatus, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *corev1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *corev1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *corev1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *corev1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in, out, s)
}
func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *corev1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
out.ClaimName = in.ClaimName
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *corev1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s)
}
func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *corev1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
out.ClaimName = in.ClaimName
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *corev1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s)
}
func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *corev1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.PersistentVolume, len(*in))
for i := range *in {
if err := Convert_v1_PersistentVolume_To_core_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function.
func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *corev1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s)
}
func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *corev1.PersistentVolumeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.PersistentVolume, len(*in))
for i := range *in {
if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function.
func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *corev1.PersistentVolumeList, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s)
}
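// PersistentVolumeSource is a union-style struct: at most one backend pointer
// is expected to be non-nil, and each backend converts with an independent
// pointer reinterpretation below. A sketch of populating one arm of the union
// (the server and path values are hypothetical):
//
//	src := corev1.PersistentVolumeSource{
//		NFS: &corev1.NFSVolumeSource{Server: "fileserver", Path: "/exports"},
//	}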
func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *corev1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error {
out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.Glusterfs = (*core.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
out.Cinder = (*core.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC))
out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.FlexVolume = (*core.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*core.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.Local = (*core.LocalVolumeSource)(unsafe.Pointer(in.Local))
out.StorageOS = (*core.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*core.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI))
return nil
}
// Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *corev1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s)
}
func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *corev1.PersistentVolumeSource, s conversion.Scope) error {
out.GCEPersistentDisk = (*corev1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
out.AWSElasticBlockStore = (*corev1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
out.HostPath = (*corev1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.Glusterfs = (*corev1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.NFS = (*corev1.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.RBD = (*corev1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD))
out.Quobyte = (*corev1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.ISCSI = (*corev1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI))
out.FlexVolume = (*corev1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.Cinder = (*corev1.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*corev1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS))
out.FC = (*corev1.FCVolumeSource)(unsafe.Pointer(in.FC))
out.Flocker = (*corev1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.AzureFile = (*corev1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile))
out.VsphereVolume = (*corev1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.AzureDisk = (*corev1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*corev1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
out.PortworxVolume = (*corev1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*corev1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.Local = (*corev1.LocalVolumeSource)(unsafe.Pointer(in.Local))
out.StorageOS = (*corev1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*corev1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI))
return nil
}
// Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function.
func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *corev1.PersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s)
}
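// The field-by-field unsafe.Pointer casts above are the fast path emitted by
// conversion-gen: when the internal and v1 structs are memory-layout
// identical, a pointer to one can be reinterpreted as a pointer to the other
// with no copy. exampleUnsafeSourceCast is a hypothetical illustration (not
// part of the generated API) of that pattern on a single field; it relies on
// the same layout guarantee the generator verified for the casts above, and
// the result aliases the input rather than copying it.
func exampleUnsafeSourceCast(in *corev1.PersistentVolumeSource) *core.NFSVolumeSource {
	// Reinterpret the v1 pointer as the internal type; nil stays nil.
	return (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS))
}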
func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *corev1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
return err
}
out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.ClaimRef = (*core.ObjectReference)(unsafe.Pointer(in.ClaimRef))
out.PersistentVolumeReclaimPolicy = core.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *corev1.PersistentVolumeSpec, s conversion.Scope) error {
out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity))
if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil {
return err
}
out.AccessModes = *(*[]corev1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.ClaimRef = (*corev1.ObjectReference)(unsafe.Pointer(in.ClaimRef))
out.PersistentVolumeReclaimPolicy = corev1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy)
out.StorageClassName = in.StorageClassName
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*corev1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*corev1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *corev1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
out.Phase = core.PersistentVolumePhase(in.Phase)
out.Message = in.Message
out.Reason = in.Reason
out.LastPhaseTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastPhaseTransitionTime))
return nil
}
// Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function.
func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *corev1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s)
}
func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *corev1.PersistentVolumeStatus, s conversion.Scope) error {
out.Phase = corev1.PersistentVolumePhase(in.Phase)
out.Message = in.Message
out.Reason = in.Reason
out.LastPhaseTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastPhaseTransitionTime))
return nil
}
// Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function.
func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *corev1.PersistentVolumeStatus, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s)
}
func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *corev1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
// Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function.
func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *corev1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *corev1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
out.PdID = in.PdID
out.FSType = in.FSType
return nil
}
// Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function.
func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *corev1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error {
return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s)
}
func autoConvert_v1_Pod_To_core_Pod(in *corev1.Pod, out *core.Pod, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *corev1.Pod, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
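// examplePodRoundTrip is a hypothetical illustration (not part of the
// generated API) of how the conversions compose: a v1 Pod is converted to the
// internal type and back through the package's Convert_ wrappers (the same
// ones the PodList conversions below call). Passing a nil conversion.Scope is
// an assumption here; the generated bodies above never dereference it.
func examplePodRoundTrip(in *corev1.Pod) (*corev1.Pod, error) {
	var internal core.Pod
	if err := Convert_v1_Pod_To_core_Pod(in, &internal, nil); err != nil {
		return nil, err
	}
	out := &corev1.Pod{}
	if err := Convert_core_Pod_To_v1_Pod(&internal, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}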
func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *corev1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function.
func Convert_v1_PodAffinity_To_core_PodAffinity(in *corev1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error {
return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s)
}
func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *corev1.PodAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function.
func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *corev1.PodAffinity, s conversion.Scope) error {
return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s)
}
func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *corev1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error {
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
out.TopologyKey = in.TopologyKey
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys))
return nil
}
// Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function.
func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *corev1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error {
return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s)
}
func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *corev1.PodAffinityTerm, s conversion.Scope) error {
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
out.TopologyKey = in.TopologyKey
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys))
return nil
}
// Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function.
func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *corev1.PodAffinityTerm, s conversion.Scope) error {
return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s)
}
func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *corev1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function.
func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *corev1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error {
return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s)
}
func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *corev1.PodAntiAffinity, s conversion.Scope) error {
out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution))
out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]corev1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution))
return nil
}
// Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function.
func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *corev1.PodAntiAffinity, s conversion.Scope) error {
return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s)
}
func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *corev1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error {
out.Stdin = in.Stdin
out.Stdout = in.Stdout
out.Stderr = in.Stderr
out.TTY = in.TTY
out.Container = in.Container
return nil
}
// Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function.
func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *corev1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error {
return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s)
}
func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *corev1.PodAttachOptions, s conversion.Scope) error {
out.Stdin = in.Stdin
out.Stdout = in.Stdout
out.Stderr = in.Stderr
out.TTY = in.TTY
out.Container = in.Container
return nil
}
// Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function.
func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *corev1.PodAttachOptions, s conversion.Scope) error {
return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *corev1.PodAttachOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdin, s); err != nil {
return err
}
} else {
out.Stdin = false
}
if values, ok := map[string][]string(*in)["stdout"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdout, s); err != nil {
return err
}
} else {
out.Stdout = false
}
if values, ok := map[string][]string(*in)["stderr"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stderr, s); err != nil {
return err
}
} else {
out.Stderr = false
}
if values, ok := map[string][]string(*in)["tty"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.TTY, s); err != nil {
return err
}
} else {
out.TTY = false
}
if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil {
return err
}
} else {
out.Container = ""
}
return nil
}
// Convert_url_Values_To_v1_PodAttachOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *corev1.PodAttachOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_PodAttachOptions(in, out, s)
}
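// exampleDecodeAttachQuery is a hypothetical illustration (not part of the
// generated API) of what the url.Values conversion above is for: decoding the
// query string of an attach request into a typed options struct. Keys that
// are absent fall back to the zero value, per the else branches above; the
// nil Scope is an assumption, as the generated body never dereferences it.
func exampleDecodeAttachQuery() (*corev1.PodAttachOptions, error) {
	values := url.Values{
		"container": {"app"},
		"stdin":     {"true"},
		"tty":       {"true"},
	}
	opts := &corev1.PodAttachOptions{}
	if err := Convert_url_Values_To_v1_PodAttachOptions(&values, opts, nil); err != nil {
		return nil, err
	}
	// opts.Stdout and opts.Stderr remain false: "stdout"/"stderr" were not set.
	return opts, nil
}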
func autoConvert_v1_PodCertificateProjection_To_core_PodCertificateProjection(in *corev1.PodCertificateProjection, out *core.PodCertificateProjection, s conversion.Scope) error {
out.SignerName = in.SignerName
out.KeyType = in.KeyType
out.MaxExpirationSeconds = (*int32)(unsafe.Pointer(in.MaxExpirationSeconds))
out.CredentialBundlePath = in.CredentialBundlePath
out.KeyPath = in.KeyPath
out.CertificateChainPath = in.CertificateChainPath
return nil
}
// Convert_v1_PodCertificateProjection_To_core_PodCertificateProjection is an autogenerated conversion function.
func Convert_v1_PodCertificateProjection_To_core_PodCertificateProjection(in *corev1.PodCertificateProjection, out *core.PodCertificateProjection, s conversion.Scope) error {
return autoConvert_v1_PodCertificateProjection_To_core_PodCertificateProjection(in, out, s)
}
func autoConvert_core_PodCertificateProjection_To_v1_PodCertificateProjection(in *core.PodCertificateProjection, out *corev1.PodCertificateProjection, s conversion.Scope) error {
out.SignerName = in.SignerName
out.KeyType = in.KeyType
out.MaxExpirationSeconds = (*int32)(unsafe.Pointer(in.MaxExpirationSeconds))
out.CredentialBundlePath = in.CredentialBundlePath
out.KeyPath = in.KeyPath
out.CertificateChainPath = in.CertificateChainPath
return nil
}
// Convert_core_PodCertificateProjection_To_v1_PodCertificateProjection is an autogenerated conversion function.
func Convert_core_PodCertificateProjection_To_v1_PodCertificateProjection(in *core.PodCertificateProjection, out *corev1.PodCertificateProjection, s conversion.Scope) error {
return autoConvert_core_PodCertificateProjection_To_v1_PodCertificateProjection(in, out, s)
}
func autoConvert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *core.PodCondition, s conversion.Scope) error {
out.Type = core.PodConditionType(in.Type)
out.ObservedGeneration = in.ObservedGeneration
out.Status = core.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function.
func Convert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *core.PodCondition, s conversion.Scope) error {
return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s)
}
func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *corev1.PodCondition, s conversion.Scope) error {
out.Type = corev1.PodConditionType(in.Type)
out.ObservedGeneration = in.ObservedGeneration
out.Status = corev1.ConditionStatus(in.Status)
out.LastProbeTime = in.LastProbeTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function.
func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *corev1.PodCondition, s conversion.Scope) error {
return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s)
}
func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *corev1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error {
out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers))
out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches))
out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function.
func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *corev1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error {
return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s)
}
func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *corev1.PodDNSConfig, s conversion.Scope) error {
out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers))
out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches))
out.Options = *(*[]corev1.PodDNSConfigOption)(unsafe.Pointer(&in.Options))
return nil
}
// Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function.
func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *corev1.PodDNSConfig, s conversion.Scope) error {
return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s)
}
func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *corev1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error {
out.Name = in.Name
out.Value = (*string)(unsafe.Pointer(in.Value))
return nil
}
// Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function.
func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *corev1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error {
return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s)
}
func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *corev1.PodDNSConfigOption, s conversion.Scope) error {
out.Name = in.Name
out.Value = (*string)(unsafe.Pointer(in.Value))
return nil
}
// Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function.
func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *corev1.PodDNSConfigOption, s conversion.Scope) error {
return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s)
}
func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *corev1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error {
out.Stdin = in.Stdin
out.Stdout = in.Stdout
out.Stderr = in.Stderr
out.TTY = in.TTY
out.Container = in.Container
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
return nil
}
// Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function.
func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *corev1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error {
return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s)
}
func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *corev1.PodExecOptions, s conversion.Scope) error {
out.Stdin = in.Stdin
out.Stdout = in.Stdout
out.Stderr = in.Stderr
out.TTY = in.TTY
out.Container = in.Container
out.Command = *(*[]string)(unsafe.Pointer(&in.Command))
return nil
}
// Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function.
func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *corev1.PodExecOptions, s conversion.Scope) error {
return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_PodExecOptions(in *url.Values, out *corev1.PodExecOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["stdin"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdin, s); err != nil {
return err
}
} else {
out.Stdin = false
}
if values, ok := map[string][]string(*in)["stdout"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stdout, s); err != nil {
return err
}
} else {
out.Stdout = false
}
if values, ok := map[string][]string(*in)["stderr"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Stderr, s); err != nil {
return err
}
} else {
out.Stderr = false
}
if values, ok := map[string][]string(*in)["tty"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.TTY, s); err != nil {
return err
}
} else {
out.TTY = false
}
if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil {
return err
}
} else {
out.Container = ""
}
if values, ok := map[string][]string(*in)["command"]; ok && len(values) > 0 {
out.Command = *(*[]string)(unsafe.Pointer(&values))
} else {
out.Command = nil
}
return nil
}
// Convert_url_Values_To_v1_PodExecOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_PodExecOptions(in *url.Values, out *corev1.PodExecOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_PodExecOptions(in, out, s)
}
func autoConvert_v1_PodExtendedResourceClaimStatus_To_core_PodExtendedResourceClaimStatus(in *corev1.PodExtendedResourceClaimStatus, out *core.PodExtendedResourceClaimStatus, s conversion.Scope) error {
out.RequestMappings = *(*[]core.ContainerExtendedResourceRequest)(unsafe.Pointer(&in.RequestMappings))
out.ResourceClaimName = in.ResourceClaimName
return nil
}
// Convert_v1_PodExtendedResourceClaimStatus_To_core_PodExtendedResourceClaimStatus is an autogenerated conversion function.
func Convert_v1_PodExtendedResourceClaimStatus_To_core_PodExtendedResourceClaimStatus(in *corev1.PodExtendedResourceClaimStatus, out *core.PodExtendedResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1_PodExtendedResourceClaimStatus_To_core_PodExtendedResourceClaimStatus(in, out, s)
}
func autoConvert_core_PodExtendedResourceClaimStatus_To_v1_PodExtendedResourceClaimStatus(in *core.PodExtendedResourceClaimStatus, out *corev1.PodExtendedResourceClaimStatus, s conversion.Scope) error {
out.RequestMappings = *(*[]corev1.ContainerExtendedResourceRequest)(unsafe.Pointer(&in.RequestMappings))
out.ResourceClaimName = in.ResourceClaimName
return nil
}
// Convert_core_PodExtendedResourceClaimStatus_To_v1_PodExtendedResourceClaimStatus is an autogenerated conversion function.
func Convert_core_PodExtendedResourceClaimStatus_To_v1_PodExtendedResourceClaimStatus(in *core.PodExtendedResourceClaimStatus, out *corev1.PodExtendedResourceClaimStatus, s conversion.Scope) error {
return autoConvert_core_PodExtendedResourceClaimStatus_To_v1_PodExtendedResourceClaimStatus(in, out, s)
}
func autoConvert_v1_PodIP_To_core_PodIP(in *corev1.PodIP, out *core.PodIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}
// Convert_v1_PodIP_To_core_PodIP is an autogenerated conversion function.
func Convert_v1_PodIP_To_core_PodIP(in *corev1.PodIP, out *core.PodIP, s conversion.Scope) error {
return autoConvert_v1_PodIP_To_core_PodIP(in, out, s)
}
func autoConvert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *corev1.PodIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}
// Convert_core_PodIP_To_v1_PodIP is an autogenerated conversion function.
func Convert_core_PodIP_To_v1_PodIP(in *core.PodIP, out *corev1.PodIP, s conversion.Scope) error {
return autoConvert_core_PodIP_To_v1_PodIP(in, out, s)
}
func autoConvert_v1_PodList_To_core_PodList(in *corev1.PodList, out *core.PodList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Pod, len(*in))
for i := range *in {
if err := Convert_v1_Pod_To_core_Pod(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_PodList_To_core_PodList is an autogenerated conversion function.
func Convert_v1_PodList_To_core_PodList(in *corev1.PodList, out *core.PodList, s conversion.Scope) error {
return autoConvert_v1_PodList_To_core_PodList(in, out, s)
}
func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *corev1.PodList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.Pod, len(*in))
for i := range *in {
if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_PodList_To_v1_PodList is an autogenerated conversion function.
func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *corev1.PodList, s conversion.Scope) error {
return autoConvert_core_PodList_To_v1_PodList(in, out, s)
}
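// The PodList conversions above loop over Items with per-element Convert_
// calls instead of a single unsafe cast because Pod itself needs a real
// conversion (its Spec and Status differ between the two APIs).
// exampleConvertPodItems is a hypothetical illustration (not part of the
// generated API) of the same loop written against a plain slice.
func exampleConvertPodItems(items []corev1.Pod, s conversion.Scope) ([]core.Pod, error) {
	out := make([]core.Pod, len(items))
	for i := range items {
		if err := Convert_v1_Pod_To_core_Pod(&items[i], &out[i], s); err != nil {
			return nil, err
		}
	}
	return out, nil
}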
func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *corev1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error {
out.Container = in.Container
out.Follow = in.Follow
out.Previous = in.Previous
out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds))
out.SinceTime = (*metav1.Time)(unsafe.Pointer(in.SinceTime))
out.Timestamps = in.Timestamps
out.TailLines = (*int64)(unsafe.Pointer(in.TailLines))
out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes))
out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend
out.Stream = (*string)(unsafe.Pointer(in.Stream))
return nil
}
// Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function.
func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *corev1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error {
return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s)
}
func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *corev1.PodLogOptions, s conversion.Scope) error {
out.Container = in.Container
out.Follow = in.Follow
out.Previous = in.Previous
out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds))
out.SinceTime = (*metav1.Time)(unsafe.Pointer(in.SinceTime))
out.Timestamps = in.Timestamps
out.TailLines = (*int64)(unsafe.Pointer(in.TailLines))
out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes))
out.InsecureSkipTLSVerifyBackend = in.InsecureSkipTLSVerifyBackend
out.Stream = (*string)(unsafe.Pointer(in.Stream))
return nil
}
// Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function.
func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *corev1.PodLogOptions, s conversion.Scope) error {
return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_PodLogOptions(in *url.Values, out *corev1.PodLogOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["container"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Container, s); err != nil {
return err
}
} else {
out.Container = ""
}
if values, ok := map[string][]string(*in)["follow"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Follow, s); err != nil {
return err
}
} else {
out.Follow = false
}
if values, ok := map[string][]string(*in)["previous"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Previous, s); err != nil {
return err
}
} else {
out.Previous = false
}
if values, ok := map[string][]string(*in)["sinceSeconds"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.SinceSeconds, s); err != nil {
return err
}
} else {
out.SinceSeconds = nil
}
if values, ok := map[string][]string(*in)["sinceTime"]; ok && len(values) > 0 {
if err := metav1.Convert_Slice_string_To_Pointer_v1_Time(&values, &out.SinceTime, s); err != nil {
return err
}
} else {
out.SinceTime = nil
}
if values, ok := map[string][]string(*in)["timestamps"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.Timestamps, s); err != nil {
return err
}
} else {
out.Timestamps = false
}
if values, ok := map[string][]string(*in)["tailLines"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TailLines, s); err != nil {
return err
}
} else {
out.TailLines = nil
}
if values, ok := map[string][]string(*in)["limitBytes"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.LimitBytes, s); err != nil {
return err
}
} else {
out.LimitBytes = nil
}
if values, ok := map[string][]string(*in)["insecureSkipTLSVerifyBackend"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_bool(&values, &out.InsecureSkipTLSVerifyBackend, s); err != nil {
return err
}
} else {
out.InsecureSkipTLSVerifyBackend = false
}
if values, ok := map[string][]string(*in)["stream"]; ok && len(values) > 0 {
if err := Convert_Slice_string_To_Pointer_string(&values, &out.Stream, s); err != nil {
return err
}
} else {
out.Stream = nil
}
return nil
}
// Convert_url_Values_To_v1_PodLogOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_PodLogOptions(in *url.Values, out *corev1.PodLogOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_PodLogOptions(in, out, s)
}
func autoConvert_v1_PodOS_To_core_PodOS(in *corev1.PodOS, out *core.PodOS, s conversion.Scope) error {
out.Name = core.OSName(in.Name)
return nil
}
// Convert_v1_PodOS_To_core_PodOS is an autogenerated conversion function.
func Convert_v1_PodOS_To_core_PodOS(in *corev1.PodOS, out *core.PodOS, s conversion.Scope) error {
return autoConvert_v1_PodOS_To_core_PodOS(in, out, s)
}
func autoConvert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *corev1.PodOS, s conversion.Scope) error {
out.Name = corev1.OSName(in.Name)
return nil
}
// Convert_core_PodOS_To_v1_PodOS is an autogenerated conversion function.
func Convert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *corev1.PodOS, s conversion.Scope) error {
return autoConvert_core_PodOS_To_v1_PodOS(in, out, s)
}
func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *corev1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error {
out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function.
func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *corev1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error {
return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s)
}
func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *corev1.PodPortForwardOptions, s conversion.Scope) error {
out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function.
func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *corev1.PodPortForwardOptions, s conversion.Scope) error {
return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *corev1.PodPortForwardOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["ports"]; ok && len(values) > 0 {
if err := metav1.Convert_Slice_string_To_Slice_int32(&values, &out.Ports, s); err != nil {
return err
}
} else {
out.Ports = nil
}
return nil
}
// Convert_url_Values_To_v1_PodPortForwardOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_PodPortForwardOptions(in *url.Values, out *corev1.PodPortForwardOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_PodPortForwardOptions(in, out, s)
}
func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *corev1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function.
func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *corev1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error {
return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s)
}
func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *corev1.PodProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function.
func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *corev1.PodProxyOptions, s conversion.Scope) error {
return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *corev1.PodProxyOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil {
return err
}
} else {
out.Path = ""
}
return nil
}
// Convert_url_Values_To_v1_PodProxyOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_PodProxyOptions(in *url.Values, out *corev1.PodProxyOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_PodProxyOptions(in, out, s)
}
func autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in *corev1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error {
out.ConditionType = core.PodConditionType(in.ConditionType)
return nil
}
// Convert_v1_PodReadinessGate_To_core_PodReadinessGate is an autogenerated conversion function.
func Convert_v1_PodReadinessGate_To_core_PodReadinessGate(in *corev1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error {
return autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in, out, s)
}
func autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *corev1.PodReadinessGate, s conversion.Scope) error {
out.ConditionType = corev1.PodConditionType(in.ConditionType)
return nil
}
// Convert_core_PodReadinessGate_To_v1_PodReadinessGate is an autogenerated conversion function.
func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *corev1.PodReadinessGate, s conversion.Scope) error {
return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s)
}
func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *corev1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_v1_PodResourceClaim_To_core_PodResourceClaim is an autogenerated conversion function.
func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *corev1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
return autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in, out, s)
}
func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *corev1.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_core_PodResourceClaim_To_v1_PodResourceClaim is an autogenerated conversion function.
func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *corev1.PodResourceClaim, s conversion.Scope) error {
return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s)
}
func autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *corev1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
return nil
}
// Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus is an autogenerated conversion function.
func Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *corev1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in, out, s)
}
func autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *corev1.PodResourceClaimStatus, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
return nil
}
// Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus is an autogenerated conversion function.
func Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *corev1.PodResourceClaimStatus, s conversion.Scope) error {
return autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in, out, s)
}
func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *corev1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate is an autogenerated conversion function.
func Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *corev1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
return autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in, out, s)
}
func autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *corev1.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate is an autogenerated conversion function.
func Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *corev1.PodSchedulingGate, s conversion.Scope) error {
return autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in, out, s)
}
func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *corev1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error {
out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.WindowsOptions = (*core.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
out.SupplementalGroupsPolicy = (*core.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy))
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
out.Sysctls = *(*[]core.Sysctl)(unsafe.Pointer(&in.Sysctls))
out.FSGroupChangePolicy = (*core.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
out.SeccompProfile = (*core.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
out.AppArmorProfile = (*core.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile))
out.SELinuxChangePolicy = (*core.PodSELinuxChangePolicy)(unsafe.Pointer(in.SELinuxChangePolicy))
return nil
}
// Convert_v1_PodSecurityContext_To_core_PodSecurityContext is an autogenerated conversion function.
func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *corev1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error {
return autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in, out, s)
}
func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *corev1.PodSecurityContext, s conversion.Scope) error {
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
// INFO: in.HostUsers opted out of conversion generation
out.SELinuxOptions = (*corev1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.WindowsOptions = (*corev1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
out.SupplementalGroupsPolicy = (*corev1.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy))
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
out.FSGroupChangePolicy = (*corev1.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
out.Sysctls = *(*[]corev1.Sysctl)(unsafe.Pointer(&in.Sysctls))
out.SeccompProfile = (*corev1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
out.AppArmorProfile = (*corev1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile))
out.SELinuxChangePolicy = (*corev1.PodSELinuxChangePolicy)(unsafe.Pointer(in.SELinuxChangePolicy))
return nil
}
// Convert_core_PodSecurityContext_To_v1_PodSecurityContext is an autogenerated conversion function.
func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *corev1.PodSecurityContext, s conversion.Scope) error {
return autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in, out, s)
}
func autoConvert_v1_PodSignature_To_core_PodSignature(in *corev1.PodSignature, out *core.PodSignature, s conversion.Scope) error {
out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController))
return nil
}
// Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function.
func Convert_v1_PodSignature_To_core_PodSignature(in *corev1.PodSignature, out *core.PodSignature, s conversion.Scope) error {
return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s)
}
func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *corev1.PodSignature, s conversion.Scope) error {
out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController))
return nil
}
// Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function.
func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *corev1.PodSignature, s conversion.Scope) error {
return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s)
}
func autoConvert_v1_PodSpec_To_core_PodSpec(in *corev1.PodSpec, out *core.PodSpec, s conversion.Scope) error {
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]core.Volume, len(*in))
for i := range *in {
if err := Convert_v1_Volume_To_core_Volume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Volumes = nil
}
out.InitContainers = *(*[]core.Container)(unsafe.Pointer(&in.InitContainers))
out.Containers = *(*[]core.Container)(unsafe.Pointer(&in.Containers))
out.EphemeralContainers = *(*[]core.EphemeralContainer)(unsafe.Pointer(&in.EphemeralContainers))
out.RestartPolicy = core.RestartPolicy(in.RestartPolicy)
out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.DNSPolicy = core.DNSPolicy(in.DNSPolicy)
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.ServiceAccountName = in.ServiceAccountName
// INFO: in.DeprecatedServiceAccount opted out of conversion generation
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
out.NodeName = in.NodeName
// INFO: in.HostNetwork opted out of conversion generation
// INFO: in.HostPID opted out of conversion generation
// INFO: in.HostIPC opted out of conversion generation
// INFO: in.ShareProcessNamespace opted out of conversion generation
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(core.PodSecurityContext)
if err := Convert_v1_PodSecurityContext_To_core_PodSecurityContext(*in, *out, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
out.Hostname = in.Hostname
out.Subdomain = in.Subdomain
out.Affinity = (*core.Affinity)(unsafe.Pointer(in.Affinity))
out.SchedulerName = in.SchedulerName
out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
out.HostAliases = *(*[]core.HostAlias)(unsafe.Pointer(&in.HostAliases))
out.PriorityClassName = in.PriorityClassName
out.Priority = (*int32)(unsafe.Pointer(in.Priority))
out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig))
out.ReadinessGates = *(*[]core.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates))
out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName))
out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
out.Overhead = *(*core.ResourceList)(unsafe.Pointer(&in.Overhead))
out.TopologySpreadConstraints = *(*[]core.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
out.OS = (*core.PodOS)(unsafe.Pointer(in.OS))
// INFO: in.HostUsers opted out of conversion generation
out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.HostnameOverride = (*string)(unsafe.Pointer(in.HostnameOverride))
return nil
}
func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *corev1.PodSpec, s conversion.Scope) error {
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]corev1.Volume, len(*in))
for i := range *in {
if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Volumes = nil
}
out.InitContainers = *(*[]corev1.Container)(unsafe.Pointer(&in.InitContainers))
out.Containers = *(*[]corev1.Container)(unsafe.Pointer(&in.Containers))
out.EphemeralContainers = *(*[]corev1.EphemeralContainer)(unsafe.Pointer(&in.EphemeralContainers))
out.RestartPolicy = corev1.RestartPolicy(in.RestartPolicy)
out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.DNSPolicy = corev1.DNSPolicy(in.DNSPolicy)
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.ServiceAccountName = in.ServiceAccountName
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
out.NodeName = in.NodeName
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(corev1.PodSecurityContext)
if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil {
return err
}
} else {
out.SecurityContext = nil
}
out.ImagePullSecrets = *(*[]corev1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
out.Hostname = in.Hostname
out.Subdomain = in.Subdomain
out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
out.Affinity = (*corev1.Affinity)(unsafe.Pointer(in.Affinity))
out.SchedulerName = in.SchedulerName
out.Tolerations = *(*[]corev1.Toleration)(unsafe.Pointer(&in.Tolerations))
out.HostAliases = *(*[]corev1.HostAlias)(unsafe.Pointer(&in.HostAliases))
out.PriorityClassName = in.PriorityClassName
out.Priority = (*int32)(unsafe.Pointer(in.Priority))
out.PreemptionPolicy = (*corev1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
out.DNSConfig = (*corev1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig))
out.ReadinessGates = *(*[]corev1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates))
out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName))
out.Overhead = *(*corev1.ResourceList)(unsafe.Pointer(&in.Overhead))
out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
out.TopologySpreadConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
out.OS = (*corev1.PodOS)(unsafe.Pointer(in.OS))
out.SchedulingGates = *(*[]corev1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]corev1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
out.Resources = (*corev1.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.HostnameOverride = (*string)(unsafe.Pointer(in.HostnameOverride))
return nil
}
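// Neither PodSpec direction above gets a generated Convert_ wrapper: the
// host-namespace fields (HostNetwork, HostPID, HostIPC, ShareProcessNamespace,
// HostUsers) sit on PodSpec in v1 but on PodSecurityContext in the internal
// API, so conversion-gen opts them out (see the INFO comments) and a
// hand-written wrapper copies them across. examplePodSpecHostFields is a
// hypothetical sketch (not the package's actual wrapper) of that fix-up for
// the internal-to-v1 direction.
func examplePodSpecHostFields(in *core.PodSpec, out *corev1.PodSpec, s conversion.Scope) error {
	if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil {
		return err
	}
	if in.SecurityContext != nil {
		// Copy the fields the generator skipped from their internal home.
		out.HostNetwork = in.SecurityContext.HostNetwork
		out.HostPID = in.SecurityContext.HostPID
		out.HostIPC = in.SecurityContext.HostIPC
		out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace
		out.HostUsers = in.SecurityContext.HostUsers
	}
	return nil
}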
func autoConvert_v1_PodStatus_To_core_PodStatus(in *corev1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Phase = core.PodPhase(in.Phase)
out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.HostIPs = *(*[]core.HostIP)(unsafe.Pointer(&in.HostIPs))
// WARNING: in.PodIP requires manual conversion: does not exist in peer-type
out.PodIPs = *(*[]core.PodIP)(unsafe.Pointer(&in.PodIPs))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.QOSClass = core.PodQOSClass(in.QOSClass)
out.EphemeralContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = core.PodResizeStatus(in.Resize)
out.ResourceClaimStatuses = *(*[]core.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses))
out.ExtendedResourceClaimStatus = (*core.PodExtendedResourceClaimStatus)(unsafe.Pointer(in.ExtendedResourceClaimStatus))
return nil
}
func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *corev1.PodStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Phase = corev1.PodPhase(in.Phase)
out.Conditions = *(*[]corev1.PodCondition)(unsafe.Pointer(&in.Conditions))
out.Message = in.Message
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.HostIPs = *(*[]corev1.HostIP)(unsafe.Pointer(&in.HostIPs))
out.PodIPs = *(*[]corev1.PodIP)(unsafe.Pointer(&in.PodIPs))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
out.QOSClass = corev1.PodQOSClass(in.QOSClass)
out.InitContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
out.ContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.EphemeralContainerStatuses = *(*[]corev1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = corev1.PodResizeStatus(in.Resize)
out.ResourceClaimStatuses = *(*[]corev1.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses))
out.ExtendedResourceClaimStatus = (*corev1.PodExtendedResourceClaimStatus)(unsafe.Pointer(in.ExtendedResourceClaimStatus))
return nil
}
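// The WARNING in autoConvert_v1_PodStatus_To_core_PodStatus marks the one
// field the generator cannot handle: v1 carries both the legacy singular
// PodIP and the PodIPs list, while the internal type carries only the list.
// examplePodIPFixUp is a hypothetical sketch (not the package's actual
// wrapper) of how a hand-written Convert_ function can reconcile the two; the
// real reconciliation in this package may differ in detail.
func examplePodIPFixUp(in *corev1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
	if err := autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s); err != nil {
		return err
	}
	// Surface the legacy singular field when only it was populated.
	if len(out.PodIPs) == 0 && in.PodIP != "" {
		out.PodIPs = []core.PodIP{{IP: in.PodIP}}
	}
	return nil
}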
func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *corev1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function.
func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *corev1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error {
return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s)
}
func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *corev1.PodStatusResult, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function.
func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *corev1.PodStatusResult, s conversion.Scope) error {
return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s)
}
func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *corev1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function.
func Convert_v1_PodTemplate_To_core_PodTemplate(in *corev1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error {
return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s)
}
func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *corev1.PodTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function.
func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *corev1.PodTemplate, s conversion.Scope) error {
return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s)
}
func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *corev1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.PodTemplate, len(*in))
for i := range *in {
if err := Convert_v1_PodTemplate_To_core_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function.
func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *corev1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error {
return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s)
}
func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *corev1.PodTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.PodTemplate, len(*in))
for i := range *in {
if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function.
func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *corev1.PodTemplateList, s conversion.Scope) error {
return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s)
}
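// Illustrative sketch (not generated): converting a whole PodTemplateList.
// The Items loop above is required because PodTemplate conversion bottoms
// out in the hand-written PodSpec converters, which cannot be expressed as
// an unsafe cast. Passing a nil conversion.Scope is an assumption made for
// illustration only; real callers receive a Scope from the conversion
// machinery.
func exampleConvertPodTemplateList(in *corev1.PodTemplateList) (*core.PodTemplateList, error) {
	out := &core.PodTemplateList{}
	if err := Convert_v1_PodTemplateList_To_core_PodTemplateList(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}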
func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *corev1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *corev1.PodTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
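// The PodTemplateSpec pair above has no generated public wrapper: by this
// file's conventions that means Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec
// and its inverse are written by hand elsewhere in the package. A minimal
// sketch of the usual shape of such a wrapper (an assumption; the real one
// may perform additional fix-ups before or after delegating):
//
//	func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *corev1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error {
//		if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil {
//			return err
//		}
//		// hand-written adjustments would go here
//		return nil
//	}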
func autoConvert_v1_PortStatus_To_core_PortStatus(in *corev1.PortStatus, out *core.PortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = core.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_v1_PortStatus_To_core_PortStatus is an autogenerated conversion function.
func Convert_v1_PortStatus_To_core_PortStatus(in *corev1.PortStatus, out *core.PortStatus, s conversion.Scope) error {
return autoConvert_v1_PortStatus_To_core_PortStatus(in, out, s)
}
func autoConvert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *corev1.PortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = corev1.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_core_PortStatus_To_v1_PortStatus is an autogenerated conversion function.
func Convert_core_PortStatus_To_v1_PortStatus(in *core.PortStatus, out *corev1.PortStatus, s conversion.Scope) error {
return autoConvert_core_PortStatus_To_v1_PortStatus(in, out, s)
}
func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *corev1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function.
func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *corev1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error {
return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s)
}
func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *corev1.PortworxVolumeSource, s conversion.Scope) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function.
func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *corev1.PortworxVolumeSource, s conversion.Scope) error {
return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s)
}
func autoConvert_v1_Preconditions_To_core_Preconditions(in *corev1.Preconditions, out *core.Preconditions, s conversion.Scope) error {
out.UID = (*types.UID)(unsafe.Pointer(in.UID))
return nil
}
// Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function.
func Convert_v1_Preconditions_To_core_Preconditions(in *corev1.Preconditions, out *core.Preconditions, s conversion.Scope) error {
return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s)
}
func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *corev1.Preconditions, s conversion.Scope) error {
out.UID = (*types.UID)(unsafe.Pointer(in.UID))
return nil
}
// Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function.
func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *corev1.Preconditions, s conversion.Scope) error {
return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s)
}
func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *corev1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error {
if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil {
return err
}
out.EvictionTime = in.EvictionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function.
func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *corev1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error {
return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s)
}
func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *corev1.PreferAvoidPodsEntry, s conversion.Scope) error {
if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil {
return err
}
out.EvictionTime = in.EvictionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function.
func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *corev1.PreferAvoidPodsEntry, s conversion.Scope) error {
return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s)
}
func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *corev1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error {
out.Weight = in.Weight
if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
return err
}
return nil
}
// Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function.
func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *corev1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error {
return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s)
}
func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *corev1.PreferredSchedulingTerm, s conversion.Scope) error {
out.Weight = in.Weight
if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
return err
}
return nil
}
// Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function.
func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *corev1.PreferredSchedulingTerm, s conversion.Scope) error {
return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s)
}
func autoConvert_v1_Probe_To_core_Probe(in *corev1.Probe, out *core.Probe, s conversion.Scope) error {
if err := Convert_v1_ProbeHandler_To_core_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil {
return err
}
out.InitialDelaySeconds = in.InitialDelaySeconds
out.TimeoutSeconds = in.TimeoutSeconds
out.PeriodSeconds = in.PeriodSeconds
out.SuccessThreshold = in.SuccessThreshold
out.FailureThreshold = in.FailureThreshold
out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
return nil
}
// Convert_v1_Probe_To_core_Probe is an autogenerated conversion function.
func Convert_v1_Probe_To_core_Probe(in *corev1.Probe, out *core.Probe, s conversion.Scope) error {
return autoConvert_v1_Probe_To_core_Probe(in, out, s)
}
func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *corev1.Probe, s conversion.Scope) error {
if err := Convert_core_ProbeHandler_To_v1_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil {
return err
}
out.InitialDelaySeconds = in.InitialDelaySeconds
out.TimeoutSeconds = in.TimeoutSeconds
out.PeriodSeconds = in.PeriodSeconds
out.SuccessThreshold = in.SuccessThreshold
out.FailureThreshold = in.FailureThreshold
out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
return nil
}
// Convert_core_Probe_To_v1_Probe is an autogenerated conversion function.
func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *corev1.Probe, s conversion.Scope) error {
return autoConvert_core_Probe_To_v1_Probe(in, out, s)
}
func autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in *corev1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error {
out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.GRPC = (*core.GRPCAction)(unsafe.Pointer(in.GRPC))
return nil
}
// Convert_v1_ProbeHandler_To_core_ProbeHandler is an autogenerated conversion function.
func Convert_v1_ProbeHandler_To_core_ProbeHandler(in *corev1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error {
return autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in, out, s)
}
func autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *corev1.ProbeHandler, s conversion.Scope) error {
out.Exec = (*corev1.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*corev1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*corev1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.GRPC = (*corev1.GRPCAction)(unsafe.Pointer(in.GRPC))
return nil
}
// Convert_core_ProbeHandler_To_v1_ProbeHandler is an autogenerated conversion function.
func Convert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *corev1.ProbeHandler, s conversion.Scope) error {
return autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in, out, s)
}
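// Illustrative sketch (not generated): a round trip through the Probe
// converters. Because the four ProbeHandler action fields are *struct
// pointers cast via unsafe.Pointer, the converted Probe aliases the
// original action structs rather than copying them, so mutations are
// visible on both sides. A nil Scope is assumed acceptable here only
// because these particular converters never consult it.
func exampleProbeRoundTrip(in *corev1.Probe) (*corev1.Probe, error) {
	internal := &core.Probe{}
	if err := Convert_v1_Probe_To_core_Probe(in, internal, nil); err != nil {
		return nil, err
	}
	out := &corev1.Probe{}
	if err := Convert_core_Probe_To_v1_Probe(internal, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}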
func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *corev1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error {
if in.Sources != nil {
in, out := &in.Sources, &out.Sources
*out = make([]core.VolumeProjection, len(*in))
for i := range *in {
if err := Convert_v1_VolumeProjection_To_core_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Sources = nil
}
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
return nil
}
// Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function.
func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *corev1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s)
}
func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *corev1.ProjectedVolumeSource, s conversion.Scope) error {
if in.Sources != nil {
in, out := &in.Sources, &out.Sources
*out = make([]corev1.VolumeProjection, len(*in))
for i := range *in {
if err := Convert_core_VolumeProjection_To_v1_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Sources = nil
}
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
return nil
}
// Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function.
func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *corev1.ProjectedVolumeSource, s conversion.Scope) error {
return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s)
}
func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *corev1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error {
out.Registry = in.Registry
out.Volume = in.Volume
out.ReadOnly = in.ReadOnly
out.User = in.User
out.Group = in.Group
out.Tenant = in.Tenant
return nil
}
// Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function.
func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *corev1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error {
return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s)
}
func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *corev1.QuobyteVolumeSource, s conversion.Scope) error {
out.Registry = in.Registry
out.Volume = in.Volume
out.ReadOnly = in.ReadOnly
out.User = in.User
out.Group = in.Group
out.Tenant = in.Tenant
return nil
}
// Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function.
func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *corev1.QuobyteVolumeSource, s conversion.Scope) error {
return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s)
}
func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *corev1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *corev1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s)
}
func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *corev1.RBDPersistentVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *corev1.RBDPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *corev1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function.
func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *corev1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error {
return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s)
}
func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *corev1.RBDVolumeSource, s conversion.Scope) error {
out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function.
func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *corev1.RBDVolumeSource, s conversion.Scope) error {
return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
}
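// Note the asymmetry between the two RBD flavors above: the
// persistent-volume variant carries a namespaced *SecretReference, while
// the pod-level variant carries a *LocalObjectReference (name only). Each
// is unsafe-cast because it matches its own v1 counterpart, not because the
// two reference types are interchangeable. Illustrative values
// (hypothetical names):
//
//	pvRef := &core.SecretReference{Name: "ceph-secret", Namespace: "rbd"}
//	podRef := &core.LocalObjectReference{Name: "ceph-secret"}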
func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *corev1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Range = in.Range
out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
return nil
}
// Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function.
func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *corev1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error {
return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s)
}
func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *corev1.RangeAllocation, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Range = in.Range
out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
return nil
}
// Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function.
func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *corev1.RangeAllocation, s conversion.Scope) error {
return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s)
}
func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *corev1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function.
func Convert_v1_ReplicationController_To_core_ReplicationController(in *corev1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error {
return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s)
}
func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *corev1.ReplicationController, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function.
func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *corev1.ReplicationController, s conversion.Scope) error {
return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s)
}
func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *corev1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error {
out.Type = core.ReplicationControllerConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function.
func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *corev1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error {
return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s)
}
func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *corev1.ReplicationControllerCondition, s conversion.Scope) error {
out.Type = corev1.ReplicationControllerConditionType(in.Type)
out.Status = corev1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function.
func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *corev1.ReplicationControllerCondition, s conversion.Scope) error {
return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s)
}
func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *corev1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.ReplicationController, len(*in))
for i := range *in {
if err := Convert_v1_ReplicationController_To_core_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function.
func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *corev1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error {
return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s)
}
func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *corev1.ReplicationControllerList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.ReplicationController, len(*in))
for i := range *in {
if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function.
func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *corev1.ReplicationControllerList, s conversion.Scope) error {
return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s)
}
func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *corev1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
out.MinReadySeconds = in.MinReadySeconds
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(core.PodTemplateSpec)
if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *corev1.ReplicationControllerSpec, s conversion.Scope) error {
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
out.MinReadySeconds = in.MinReadySeconds
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(corev1.PodTemplateSpec)
if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
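// Illustrative sketch (not generated): the Template handling above is the
// generator's pattern for pointer-to-struct fields that need a genuine
// conversion; allocate the destination, recurse, and map nil to nil. The
// hypothetical helper below exercises that path directly. Calling the
// autoConvert function and passing a nil Scope are simplifications made
// for illustration only.
func exampleConvertRCSpecTemplate(in *corev1.ReplicationControllerSpec) (*core.PodTemplateSpec, error) {
	out := &core.ReplicationControllerSpec{}
	if err := autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in, out, nil); err != nil {
		return nil, err
	}
	return out.Template, nil // nil when in.Template was nil
}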
func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *corev1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]core.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function.
func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *corev1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error {
return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s)
}
func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *corev1.ReplicationControllerStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]corev1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function.
func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *corev1.ReplicationControllerStatus, s conversion.Scope) error {
return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
}
func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *corev1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.Request = in.Request
return nil
}
// Convert_v1_ResourceClaim_To_core_ResourceClaim is an autogenerated conversion function.
func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *corev1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in, out, s)
}
func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *corev1.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.Request = in.Request
return nil
}
// Convert_core_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function.
func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *corev1.ResourceClaim, s conversion.Scope) error {
return autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in, out, s)
}
func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *corev1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.Resource = in.Resource
out.Divisor = in.Divisor
return nil
}
// Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function.
func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *corev1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error {
return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s)
}
func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *corev1.ResourceFieldSelector, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.Resource = in.Resource
out.Divisor = in.Divisor
return nil
}
// Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function.
func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *corev1.ResourceFieldSelector, s conversion.Scope) error {
return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s)
}
func autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in *corev1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error {
out.ResourceID = core.ResourceID(in.ResourceID)
out.Health = core.ResourceHealthStatus(in.Health)
return nil
}
// Convert_v1_ResourceHealth_To_core_ResourceHealth is an autogenerated conversion function.
func Convert_v1_ResourceHealth_To_core_ResourceHealth(in *corev1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error {
return autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in, out, s)
}
func autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *corev1.ResourceHealth, s conversion.Scope) error {
out.ResourceID = corev1.ResourceID(in.ResourceID)
out.Health = corev1.ResourceHealthStatus(in.Health)
return nil
}
// Convert_core_ResourceHealth_To_v1_ResourceHealth is an autogenerated conversion function.
func Convert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *corev1.ResourceHealth, s conversion.Scope) error {
return autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in, out, s)
}
func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *corev1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function.
func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *corev1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error {
return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s)
}
func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *corev1.ResourceQuota, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function.
func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *corev1.ResourceQuota, s conversion.Scope) error {
return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s)
}
func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *corev1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function.
func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *corev1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error {
return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s)
}
func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *corev1.ResourceQuotaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.ResourceQuota)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function.
func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *corev1.ResourceQuotaList, s conversion.Scope) error {
return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s)
}
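// Contrast with PodTemplateList earlier in this file: ResourceQuotaList.Items
// is reinterpreted in a single unsafe cast because the generator has judged
// core.ResourceQuota and corev1.ResourceQuota layout-identical all the way
// down, so no per-element conversion call is needed:
//
//	out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) // zero-copy; aliases in.Items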
func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *corev1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error {
out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard))
out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes))
out.ScopeSelector = (*core.ScopeSelector)(unsafe.Pointer(in.ScopeSelector))
return nil
}
// Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function.
func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *corev1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error {
return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s)
}
func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *corev1.ResourceQuotaSpec, s conversion.Scope) error {
out.Hard = *(*corev1.ResourceList)(unsafe.Pointer(&in.Hard))
out.Scopes = *(*[]corev1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes))
out.ScopeSelector = (*corev1.ScopeSelector)(unsafe.Pointer(in.ScopeSelector))
return nil
}
// Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function.
func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *corev1.ResourceQuotaSpec, s conversion.Scope) error {
return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s)
}
func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *corev1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error {
out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard))
out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used))
return nil
}
// Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function.
func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *corev1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error {
return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s)
}
func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *corev1.ResourceQuotaStatus, s conversion.Scope) error {
out.Hard = *(*corev1.ResourceList)(unsafe.Pointer(&in.Hard))
out.Used = *(*corev1.ResourceList)(unsafe.Pointer(&in.Used))
return nil
}
// Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function.
func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *corev1.ResourceQuotaStatus, s conversion.Scope) error {
return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s)
}
func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *corev1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]core.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}
// Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function.
func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *corev1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error {
return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s)
}
func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *corev1.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*corev1.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]corev1.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}
// Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function.
func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *corev1.ResourceRequirements, s conversion.Scope) error {
return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
}
func autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in *corev1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
out.Resources = *(*[]core.ResourceHealth)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_v1_ResourceStatus_To_core_ResourceStatus is an autogenerated conversion function.
func Convert_v1_ResourceStatus_To_core_ResourceStatus(in *corev1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error {
return autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in, out, s)
}
func autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *corev1.ResourceStatus, s conversion.Scope) error {
out.Name = corev1.ResourceName(in.Name)
out.Resources = *(*[]corev1.ResourceHealth)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_core_ResourceStatus_To_v1_ResourceStatus is an autogenerated conversion function.
func Convert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *corev1.ResourceStatus, s conversion.Scope) error {
return autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in, out, s)
}
func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *corev1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error {
out.User = in.User
out.Role = in.Role
out.Type = in.Type
out.Level = in.Level
return nil
}
// Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function.
func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *corev1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error {
return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s)
}
func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *corev1.SELinuxOptions, s conversion.Scope) error {
out.User = in.User
out.Role = in.Role
out.Type = in.Type
out.Level = in.Level
return nil
}
// Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function.
func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *corev1.SELinuxOptions, s conversion.Scope) error {
return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
}
func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *corev1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *corev1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s)
}
func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *corev1.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*corev1.SecretReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *corev1.ScaleIOPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *corev1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function.
func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *corev1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s)
}
func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *corev1.ScaleIOVolumeSource, s conversion.Scope) error {
out.Gateway = in.Gateway
out.System = in.System
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
out.SSLEnabled = in.SSLEnabled
out.ProtectionDomain = in.ProtectionDomain
out.StoragePool = in.StoragePool
out.StorageMode = in.StorageMode
out.VolumeName = in.VolumeName
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
// Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function.
func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *corev1.ScaleIOVolumeSource, s conversion.Scope) error {
return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s)
}
func autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in *corev1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error {
out.MatchExpressions = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
return nil
}
// Convert_v1_ScopeSelector_To_core_ScopeSelector is an autogenerated conversion function.
func Convert_v1_ScopeSelector_To_core_ScopeSelector(in *corev1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error {
return autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in, out, s)
}
func autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *corev1.ScopeSelector, s conversion.Scope) error {
out.MatchExpressions = *(*[]corev1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions))
return nil
}
// Convert_core_ScopeSelector_To_v1_ScopeSelector is an autogenerated conversion function.
func Convert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *corev1.ScopeSelector, s conversion.Scope) error {
return autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in, out, s)
}
func autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *corev1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error {
out.ScopeName = core.ResourceQuotaScope(in.ScopeName)
out.Operator = core.ScopeSelectorOperator(in.Operator)
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement is an autogenerated conversion function.
func Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *corev1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error {
return autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in, out, s)
}
func autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *corev1.ScopedResourceSelectorRequirement, s conversion.Scope) error {
out.ScopeName = corev1.ResourceQuotaScope(in.ScopeName)
out.Operator = corev1.ScopeSelectorOperator(in.Operator)
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement is an autogenerated conversion function.
func Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *corev1.ScopedResourceSelectorRequirement, s conversion.Scope) error {
return autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in, out, s)
}
func autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in *corev1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error {
out.Type = core.SeccompProfileType(in.Type)
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
return nil
}
// Convert_v1_SeccompProfile_To_core_SeccompProfile is an autogenerated conversion function.
func Convert_v1_SeccompProfile_To_core_SeccompProfile(in *corev1.SeccompProfile, out *core.SeccompProfile, s conversion.Scope) error {
return autoConvert_v1_SeccompProfile_To_core_SeccompProfile(in, out, s)
}
func autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *corev1.SeccompProfile, s conversion.Scope) error {
out.Type = corev1.SeccompProfileType(in.Type)
out.LocalhostProfile = (*string)(unsafe.Pointer(in.LocalhostProfile))
return nil
}
// Convert_core_SeccompProfile_To_v1_SeccompProfile is an autogenerated conversion function.
func Convert_core_SeccompProfile_To_v1_SeccompProfile(in *core.SeccompProfile, out *corev1.SeccompProfile, s conversion.Scope) error {
return autoConvert_core_SeccompProfile_To_v1_SeccompProfile(in, out, s)
}
func autoConvert_v1_Secret_To_core_Secret(in *corev1.Secret, out *core.Secret, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Immutable = (*bool)(unsafe.Pointer(in.Immutable))
out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data))
// INFO: in.StringData opted out of conversion generation
out.Type = core.SecretType(in.Type)
return nil
}
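// in.StringData is excluded from generation (see the INFO comment above),
// so a hand-written Convert_v1_Secret_To_core_Secret elsewhere in the
// package must handle it. A minimal sketch of the conventional behavior,
// folding StringData into Data with StringData winning on key collisions,
// offered as an assumption about the real wrapper rather than a copy of it:
//
//	func Convert_v1_Secret_To_core_Secret(in *corev1.Secret, out *core.Secret, s conversion.Scope) error {
//		if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil {
//			return err
//		}
//		if len(in.StringData) > 0 {
//			if out.Data == nil {
//				out.Data = make(map[string][]byte, len(in.StringData))
//			}
//			for k, v := range in.StringData {
//				out.Data[k] = []byte(v)
//			}
//		}
//		return nil
//	}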
func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *corev1.Secret, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Immutable = (*bool)(unsafe.Pointer(in.Immutable))
out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data))
out.Type = corev1.SecretType(in.Type)
return nil
}
// Convert_core_Secret_To_v1_Secret is an autogenerated conversion function.
func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *corev1.Secret, s conversion.Scope) error {
return autoConvert_core_Secret_To_v1_Secret(in, out, s)
}
func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *corev1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function.
func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *corev1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error {
return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s)
}
func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *corev1.SecretEnvSource, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function.
func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *corev1.SecretEnvSource, s conversion.Scope) error {
return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s)
}
func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *corev1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function.
func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *corev1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error {
return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s)
}
func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *corev1.SecretKeySelector, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Key = in.Key
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function.
func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *corev1.SecretKeySelector, s conversion.Scope) error {
return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s)
}
func autoConvert_v1_SecretList_To_core_SecretList(in *corev1.SecretList, out *core.SecretList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Secret, len(*in))
for i := range *in {
if err := Convert_v1_Secret_To_core_Secret(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function.
func Convert_v1_SecretList_To_core_SecretList(in *corev1.SecretList, out *core.SecretList, s conversion.Scope) error {
return autoConvert_v1_SecretList_To_core_SecretList(in, out, s)
}
func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *corev1.SecretList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.Secret, len(*in))
for i := range *in {
if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function.
func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *corev1.SecretList, s conversion.Scope) error {
return autoConvert_core_SecretList_To_v1_SecretList(in, out, s)
}
func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *corev1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error {
if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function.
func Convert_v1_SecretProjection_To_core_SecretProjection(in *corev1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error {
return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s)
}
func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *corev1.SecretProjection, s conversion.Scope) error {
if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
return err
}
out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function.
func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *corev1.SecretProjection, s conversion.Scope) error {
return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s)
}
func autoConvert_v1_SecretReference_To_core_SecretReference(in *corev1.SecretReference, out *core.SecretReference, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function.
func Convert_v1_SecretReference_To_core_SecretReference(in *corev1.SecretReference, out *core.SecretReference, s conversion.Scope) error {
return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s)
}
func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *corev1.SecretReference, s conversion.Scope) error {
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function.
func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *corev1.SecretReference, s conversion.Scope) error {
return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s)
}
func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *corev1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function.
func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *corev1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error {
return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s)
}
func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *corev1.SecretVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
out.Items = *(*[]corev1.KeyToPath)(unsafe.Pointer(&in.Items))
out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
return nil
}
// Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function.
func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *corev1.SecretVolumeSource, s conversion.Scope) error {
return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s)
}
func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *corev1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error {
out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities))
out.Privileged = (*bool)(unsafe.Pointer(in.Privileged))
out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.WindowsOptions = (*core.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem))
out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation))
out.ProcMount = (*core.ProcMountType)(unsafe.Pointer(in.ProcMount))
out.SeccompProfile = (*core.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
out.AppArmorProfile = (*core.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile))
return nil
}
// Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function.
func Convert_v1_SecurityContext_To_core_SecurityContext(in *corev1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error {
return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s)
}
func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *corev1.SecurityContext, s conversion.Scope) error {
out.Capabilities = (*corev1.Capabilities)(unsafe.Pointer(in.Capabilities))
out.Privileged = (*bool)(unsafe.Pointer(in.Privileged))
out.SELinuxOptions = (*corev1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.WindowsOptions = (*corev1.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser))
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem))
out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation))
out.ProcMount = (*corev1.ProcMountType)(unsafe.Pointer(in.ProcMount))
out.SeccompProfile = (*corev1.SeccompProfile)(unsafe.Pointer(in.SeccompProfile))
out.AppArmorProfile = (*corev1.AppArmorProfile)(unsafe.Pointer(in.AppArmorProfile))
return nil
}
// Convert_core_SecurityContext_To_v1_SecurityContext is an autogenerated conversion function.
func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *corev1.SecurityContext, s conversion.Scope) error {
return autoConvert_core_SecurityContext_To_v1_SecurityContext(in, out, s)
}
func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *corev1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error {
if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil {
return err
}
return nil
}
// Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function.
func Convert_v1_SerializedReference_To_core_SerializedReference(in *corev1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error {
return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s)
}
func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *corev1.SerializedReference, s conversion.Scope) error {
if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil {
return err
}
return nil
}
// Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function.
func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *corev1.SerializedReference, s conversion.Scope) error {
return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s)
}
func autoConvert_v1_Service_To_core_Service(in *corev1.Service, out *core.Service, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ServiceStatus_To_core_ServiceStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Service_To_core_Service is an autogenerated conversion function.
func Convert_v1_Service_To_core_Service(in *corev1.Service, out *core.Service, s conversion.Scope) error {
return autoConvert_v1_Service_To_core_Service(in, out, s)
}
func autoConvert_core_Service_To_v1_Service(in *core.Service, out *corev1.Service, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_core_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_core_Service_To_v1_Service is an autogenerated conversion function.
func Convert_core_Service_To_v1_Service(in *core.Service, out *corev1.Service, s conversion.Scope) error {
return autoConvert_core_Service_To_v1_Service(in, out, s)
}
func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *corev1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets))
out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
return nil
}
// Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function.
func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *corev1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error {
return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s)
}
func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *corev1.ServiceAccount, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Secrets = *(*[]corev1.ObjectReference)(unsafe.Pointer(&in.Secrets))
out.ImagePullSecrets = *(*[]corev1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
return nil
}
// Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function.
func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *corev1.ServiceAccount, s conversion.Scope) error {
return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s)
}
func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *corev1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function.
func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *corev1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error {
return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s)
}
func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *corev1.ServiceAccountList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]corev1.ServiceAccount)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function.
func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *corev1.ServiceAccountList, s conversion.Scope) error {
return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s)
}
func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *corev1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error {
out.Audience = in.Audience
if err := metav1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil {
return err
}
out.Path = in.Path
return nil
}
// Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection is an autogenerated conversion function.
func Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *corev1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error {
return autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in, out, s)
}
func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *corev1.ServiceAccountTokenProjection, s conversion.Scope) error {
out.Audience = in.Audience
if err := metav1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil {
return err
}
out.Path = in.Path
return nil
}
// Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection is an autogenerated conversion function.
func Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *corev1.ServiceAccountTokenProjection, s conversion.Scope) error {
return autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in, out, s)
}
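// Editorial note: ExpirationSeconds is *int64 in the versioned type but plain
// int64 internally, so the two ServiceAccountTokenProjection layouts differ
// and no unsafe cast is possible; the generator falls back to the explicit
// metav1 helpers used above. Their behavior is roughly the following sketch
// (a hypothetical helper, not the apimachinery source, which also threads a
// conversion.Scope and returns an error):
func convertPointerInt64ToInt64(in **int64, out *int64) {
	if *in != nil {
		*out = **in // optional field set: dereference it
	} else {
		*out = 0 // optional field unset: fall back to the zero value
	}
}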
func autoConvert_v1_ServiceList_To_core_ServiceList(in *corev1.ServiceList, out *core.ServiceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Service, len(*in))
for i := range *in {
if err := Convert_v1_Service_To_core_Service(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function.
func Convert_v1_ServiceList_To_core_ServiceList(in *corev1.ServiceList, out *core.ServiceList, s conversion.Scope) error {
return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s)
}
func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *corev1.ServiceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]corev1.Service, len(*in))
for i := range *in {
if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function.
func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *corev1.ServiceList, s conversion.Scope) error {
return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s)
}
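// Editorial note: ServiceList.Items is converted with an element-wise loop
// rather than a slice cast because the internal and versioned ServiceSpec
// declare their fields in different orders (compare the two ServiceSpec
// conversions below), so Service values are not memory-identical and every
// element must go through the registered Convert_* function.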
func autoConvert_v1_ServicePort_To_core_ServicePort(in *corev1.ServicePort, out *core.ServicePort, s conversion.Scope) error {
out.Name = in.Name
out.Protocol = core.Protocol(in.Protocol)
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
out.Port = in.Port
out.TargetPort = in.TargetPort
out.NodePort = in.NodePort
return nil
}
// Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function.
func Convert_v1_ServicePort_To_core_ServicePort(in *corev1.ServicePort, out *core.ServicePort, s conversion.Scope) error {
return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s)
}
func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *corev1.ServicePort, s conversion.Scope) error {
out.Name = in.Name
out.Protocol = corev1.Protocol(in.Protocol)
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
out.Port = in.Port
out.TargetPort = in.TargetPort
out.NodePort = in.NodePort
return nil
}
// Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function.
func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *corev1.ServicePort, s conversion.Scope) error {
return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s)
}
func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *corev1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function.
func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *corev1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error {
return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s)
}
func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *corev1.ServiceProxyOptions, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function.
func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *corev1.ServiceProxyOptions, s conversion.Scope) error {
return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s)
}
func autoConvert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *corev1.ServiceProxyOptions, s conversion.Scope) error {
// WARNING: Field TypeMeta does not have json tag, skipping.
if values, ok := map[string][]string(*in)["path"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_string(&values, &out.Path, s); err != nil {
return err
}
} else {
out.Path = ""
}
return nil
}
// Convert_url_Values_To_v1_ServiceProxyOptions is an autogenerated conversion function.
func Convert_url_Values_To_v1_ServiceProxyOptions(in *url.Values, out *corev1.ServiceProxyOptions, s conversion.Scope) error {
return autoConvert_url_Values_To_v1_ServiceProxyOptions(in, out, s)
}
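// Editorial note: the url.Values variant decodes query strings, e.g. the
// ?path=... parameter of a service proxy request. A hypothetical usage sketch
// (exampleDecodeProxyPath is not part of the generated API); a nil Scope
// should suffice here because the slice-to-string helper does not use it:
func exampleDecodeProxyPath() (string, error) {
	values := url.Values{"path": {"/healthz"}}
	var opts corev1.ServiceProxyOptions
	if err := Convert_url_Values_To_v1_ServiceProxyOptions(&values, &opts, nil); err != nil {
		return "", err
	}
	return opts.Path, nil // "/healthz"
}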
func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *corev1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error {
out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports))
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
out.ClusterIP = in.ClusterIP
out.ClusterIPs = *(*[]string)(unsafe.Pointer(&in.ClusterIPs))
out.Type = core.ServiceType(in.Type)
out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs))
out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity)
out.LoadBalancerIP = in.LoadBalancerIP
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalName = in.ExternalName
out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
out.IPFamilies = *(*[]core.IPFamily)(unsafe.Pointer(&in.IPFamilies))
out.IPFamilyPolicy = (*core.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy))
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
out.TrafficDistribution = (*string)(unsafe.Pointer(in.TrafficDistribution))
return nil
}
// Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function.
func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *corev1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error {
return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s)
}
func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *corev1.ServiceSpec, s conversion.Scope) error {
out.Type = corev1.ServiceType(in.Type)
out.Ports = *(*[]corev1.ServicePort)(unsafe.Pointer(&in.Ports))
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
out.ClusterIP = in.ClusterIP
out.ClusterIPs = *(*[]string)(unsafe.Pointer(&in.ClusterIPs))
out.IPFamilies = *(*[]corev1.IPFamily)(unsafe.Pointer(&in.IPFamilies))
out.IPFamilyPolicy = (*corev1.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy))
out.ExternalName = in.ExternalName
out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs))
out.LoadBalancerIP = in.LoadBalancerIP
out.SessionAffinity = corev1.ServiceAffinity(in.SessionAffinity)
out.SessionAffinityConfig = (*corev1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*corev1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
out.TrafficDistribution = (*string)(unsafe.Pointer(in.TrafficDistribution))
return nil
}
// Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function.
func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *corev1.ServiceSpec, s conversion.Scope) error {
return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s)
}
func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *corev1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error {
if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function.
func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *corev1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error {
return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s)
}
func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *corev1.ServiceStatus, s conversion.Scope) error {
if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function.
func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *corev1.ServiceStatus, s conversion.Scope) error {
return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s)
}
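// Editorial note: ServiceStatus needs a conversion call only for its
// LoadBalancer field; Conditions is []metav1.Condition on both sides, so the
// slice is simply reinterpreted in place.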
func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *corev1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error {
out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP))
return nil
}
// Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function.
func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *corev1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error {
return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s)
}
func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *corev1.SessionAffinityConfig, s conversion.Scope) error {
out.ClientIP = (*corev1.ClientIPConfig)(unsafe.Pointer(in.ClientIP))
return nil
}
// Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function.
func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *corev1.SessionAffinityConfig, s conversion.Scope) error {
return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s)
}
func autoConvert_v1_SleepAction_To_core_SleepAction(in *corev1.SleepAction, out *core.SleepAction, s conversion.Scope) error {
out.Seconds = in.Seconds
return nil
}
// Convert_v1_SleepAction_To_core_SleepAction is an autogenerated conversion function.
func Convert_v1_SleepAction_To_core_SleepAction(in *corev1.SleepAction, out *core.SleepAction, s conversion.Scope) error {
return autoConvert_v1_SleepAction_To_core_SleepAction(in, out, s)
}
func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *corev1.SleepAction, s conversion.Scope) error {
out.Seconds = in.Seconds
return nil
}
// Convert_core_SleepAction_To_v1_SleepAction is an autogenerated conversion function.
func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *corev1.SleepAction, s conversion.Scope) error {
return autoConvert_core_SleepAction_To_v1_SleepAction(in, out, s)
}
func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *corev1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.VolumeNamespace = in.VolumeNamespace
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*core.ObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function.
func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *corev1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s)
}
func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *corev1.StorageOSPersistentVolumeSource, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.VolumeNamespace = in.VolumeNamespace
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*corev1.ObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function.
func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *corev1.StorageOSPersistentVolumeSource, s conversion.Scope) error {
return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s)
}
func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *corev1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.VolumeNamespace = in.VolumeNamespace
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function.
func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *corev1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error {
return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s)
}
func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *corev1.StorageOSVolumeSource, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.VolumeNamespace = in.VolumeNamespace
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
out.SecretRef = (*corev1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
return nil
}
// Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function.
func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *corev1.StorageOSVolumeSource, s conversion.Scope) error {
return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s)
}
func autoConvert_v1_Sysctl_To_core_Sysctl(in *corev1.Sysctl, out *core.Sysctl, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function.
func Convert_v1_Sysctl_To_core_Sysctl(in *corev1.Sysctl, out *core.Sysctl, s conversion.Scope) error {
return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s)
}
func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *corev1.Sysctl, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function.
func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *corev1.Sysctl, s conversion.Scope) error {
return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s)
}
func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *corev1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error {
out.Port = in.Port
out.Host = in.Host
return nil
}
// Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function.
func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *corev1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error {
return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s)
}
func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *corev1.TCPSocketAction, s conversion.Scope) error {
out.Port = in.Port
out.Host = in.Host
return nil
}
// Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function.
func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *corev1.TCPSocketAction, s conversion.Scope) error {
return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
}
func autoConvert_v1_Taint_To_core_Taint(in *corev1.Taint, out *core.Taint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = core.TaintEffect(in.Effect)
out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_v1_Taint_To_core_Taint is an autogenerated conversion function.
func Convert_v1_Taint_To_core_Taint(in *corev1.Taint, out *core.Taint, s conversion.Scope) error {
return autoConvert_v1_Taint_To_core_Taint(in, out, s)
}
func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *corev1.Taint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = corev1.TaintEffect(in.Effect)
out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_core_Taint_To_v1_Taint is an autogenerated conversion function.
func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *corev1.Taint, s conversion.Scope) error {
return autoConvert_core_Taint_To_v1_Taint(in, out, s)
}
func autoConvert_v1_Toleration_To_core_Toleration(in *corev1.Toleration, out *core.Toleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = core.TolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = core.TaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function.
func Convert_v1_Toleration_To_core_Toleration(in *corev1.Toleration, out *core.Toleration, s conversion.Scope) error {
return autoConvert_v1_Toleration_To_core_Toleration(in, out, s)
}
func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *corev1.Toleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = corev1.TolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = corev1.TaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function.
func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *corev1.Toleration, s conversion.Scope) error {
return autoConvert_core_Toleration_To_v1_Toleration(in, out, s)
}
func autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *corev1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error {
out.Key = in.Key
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement is an autogenerated conversion function.
func Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *corev1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error {
return autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in, out, s)
}
func autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *corev1.TopologySelectorLabelRequirement, s conversion.Scope) error {
out.Key = in.Key
out.Values = *(*[]string)(unsafe.Pointer(&in.Values))
return nil
}
// Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement is an autogenerated conversion function.
func Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *corev1.TopologySelectorLabelRequirement, s conversion.Scope) error {
return autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in, out, s)
}
func autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *corev1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error {
out.MatchLabelExpressions = *(*[]core.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions))
return nil
}
// Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm is an autogenerated conversion function.
func Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *corev1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error {
return autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in, out, s)
}
func autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *corev1.TopologySelectorTerm, s conversion.Scope) error {
out.MatchLabelExpressions = *(*[]corev1.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions))
return nil
}
// Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm is an autogenerated conversion function.
func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *corev1.TopologySelectorTerm, s conversion.Scope) error {
return autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in, out, s)
}
func autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *corev1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error {
out.MaxSkew = in.MaxSkew
out.TopologyKey = in.TopologyKey
out.WhenUnsatisfiable = core.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
out.NodeAffinityPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
out.NodeTaintsPolicy = (*core.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
return nil
}
// Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint is an autogenerated conversion function.
func Convert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in *corev1.TopologySpreadConstraint, out *core.TopologySpreadConstraint, s conversion.Scope) error {
return autoConvert_v1_TopologySpreadConstraint_To_core_TopologySpreadConstraint(in, out, s)
}
func autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *corev1.TopologySpreadConstraint, s conversion.Scope) error {
out.MaxSkew = in.MaxSkew
out.TopologyKey = in.TopologyKey
out.WhenUnsatisfiable = corev1.UnsatisfiableConstraintAction(in.WhenUnsatisfiable)
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.MinDomains = (*int32)(unsafe.Pointer(in.MinDomains))
out.NodeAffinityPolicy = (*corev1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeAffinityPolicy))
out.NodeTaintsPolicy = (*corev1.NodeInclusionPolicy)(unsafe.Pointer(in.NodeTaintsPolicy))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
return nil
}
// Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint is an autogenerated conversion function.
func Convert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in *core.TopologySpreadConstraint, out *corev1.TopologySpreadConstraint, s conversion.Scope) error {
return autoConvert_core_TopologySpreadConstraint_To_v1_TopologySpreadConstraint(in, out, s)
}
func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *corev1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference is an autogenerated conversion function.
func Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *corev1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error {
return autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in, out, s)
}
func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *corev1.TypedLocalObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference is an autogenerated conversion function.
func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *corev1.TypedLocalObjectReference, s conversion.Scope) error {
return autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in, out, s)
}
func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *corev1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_v1_TypedObjectReference_To_core_TypedObjectReference is an autogenerated conversion function.
func Convert_v1_TypedObjectReference_To_core_TypedObjectReference(in *corev1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
return autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in, out, s)
}
func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *corev1.TypedObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_core_TypedObjectReference_To_v1_TypedObjectReference is an autogenerated conversion function.
func Convert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *corev1.TypedObjectReference, s conversion.Scope) error {
return autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in, out, s)
}
func autoConvert_v1_Volume_To_core_Volume(in *corev1.Volume, out *core.Volume, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
return err
}
return nil
}
// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function.
func Convert_v1_Volume_To_core_Volume(in *corev1.Volume, out *core.Volume, s conversion.Scope) error {
return autoConvert_v1_Volume_To_core_Volume(in, out, s)
}
func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *corev1.Volume, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
return err
}
return nil
}
// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function.
func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *corev1.Volume, s conversion.Scope) error {
return autoConvert_core_Volume_To_v1_Volume(in, out, s)
}
func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *corev1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error {
out.Name = in.Name
out.DevicePath = in.DevicePath
return nil
}
// Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function.
func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *corev1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error {
return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s)
}
func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *corev1.VolumeDevice, s conversion.Scope) error {
out.Name = in.Name
out.DevicePath = in.DevicePath
return nil
}
// Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function.
func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *corev1.VolumeDevice, s conversion.Scope) error {
return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s)
}
func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *corev1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error {
out.Name = in.Name
out.ReadOnly = in.ReadOnly
out.RecursiveReadOnly = (*core.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly))
out.MountPath = in.MountPath
out.SubPath = in.SubPath
out.MountPropagation = (*core.MountPropagationMode)(unsafe.Pointer(in.MountPropagation))
out.SubPathExpr = in.SubPathExpr
return nil
}
// Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function.
func Convert_v1_VolumeMount_To_core_VolumeMount(in *corev1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error {
return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s)
}
func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *corev1.VolumeMount, s conversion.Scope) error {
out.Name = in.Name
out.ReadOnly = in.ReadOnly
out.RecursiveReadOnly = (*corev1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly))
out.MountPath = in.MountPath
out.SubPath = in.SubPath
out.MountPropagation = (*corev1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation))
out.SubPathExpr = in.SubPathExpr
return nil
}
// Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function.
func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *corev1.VolumeMount, s conversion.Scope) error {
return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s)
}
func autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *corev1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error {
out.Name = in.Name
out.MountPath = in.MountPath
out.ReadOnly = in.ReadOnly
out.RecursiveReadOnly = (*core.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly))
return nil
}
// Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus is an autogenerated conversion function.
func Convert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in *corev1.VolumeMountStatus, out *core.VolumeMountStatus, s conversion.Scope) error {
return autoConvert_v1_VolumeMountStatus_To_core_VolumeMountStatus(in, out, s)
}
func autoConvert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *corev1.VolumeMountStatus, s conversion.Scope) error {
out.Name = in.Name
out.MountPath = in.MountPath
out.ReadOnly = in.ReadOnly
out.RecursiveReadOnly = (*corev1.RecursiveReadOnlyMode)(unsafe.Pointer(in.RecursiveReadOnly))
return nil
}
// Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus is an autogenerated conversion function.
func Convert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in *core.VolumeMountStatus, out *corev1.VolumeMountStatus, s conversion.Scope) error {
return autoConvert_core_VolumeMountStatus_To_v1_VolumeMountStatus(in, out, s)
}
func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *corev1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*core.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *corev1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in, out, s)
}
func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *corev1.VolumeNodeAffinity, s conversion.Scope) error {
out.Required = (*corev1.NodeSelector)(unsafe.Pointer(in.Required))
return nil
}
// Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity is an autogenerated conversion function.
func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *corev1.VolumeNodeAffinity, s conversion.Scope) error {
return autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in, out, s)
}
func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *corev1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error {
out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret))
out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))
out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap))
if in.ServiceAccountToken != nil {
in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
*out = new(core.ServiceAccountTokenProjection)
if err := Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(*in, *out, s); err != nil {
return err
}
} else {
out.ServiceAccountToken = nil
}
out.ClusterTrustBundle = (*core.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle))
out.PodCertificate = (*core.PodCertificateProjection)(unsafe.Pointer(in.PodCertificate))
return nil
}
// Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function.
func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *corev1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error {
return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s)
}
func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *corev1.VolumeProjection, s conversion.Scope) error {
out.Secret = (*corev1.SecretProjection)(unsafe.Pointer(in.Secret))
out.DownwardAPI = (*corev1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI))
out.ConfigMap = (*corev1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap))
if in.ServiceAccountToken != nil {
in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
*out = new(corev1.ServiceAccountTokenProjection)
if err := Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(*in, *out, s); err != nil {
return err
}
} else {
out.ServiceAccountToken = nil
}
out.ClusterTrustBundle = (*corev1.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle))
out.PodCertificate = (*corev1.PodCertificateProjection)(unsafe.Pointer(in.PodCertificate))
return nil
}
// Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function.
func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *corev1.VolumeProjection, s conversion.Scope) error {
return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s)
}
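// Editorial note: VolumeProjection mixes the two strategies used in this file:
// layout-identical members (Secret, DownwardAPI, ConfigMap, ClusterTrustBundle,
// PodCertificate) are pointer-cast, while ServiceAccountToken is allocated and
// converted through a function call because its ExpirationSeconds field
// changes representation (*int64 versus int64) between the two packages.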
func autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *corev1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error {
out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements is an autogenerated conversion function.
func Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *corev1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error {
return autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in, out, s)
}
func autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *corev1.VolumeResourceRequirements, s conversion.Scope) error {
out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*corev1.ResourceList)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements is an autogenerated conversion function.
func Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *corev1.VolumeResourceRequirements, s conversion.Scope) error {
return autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in, out, s)
}
func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *corev1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error {
out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir))
out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
out.GitRepo = (*core.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo))
out.Secret = (*core.SecretVolumeSource)(unsafe.Pointer(in.Secret))
out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.ISCSI = (*core.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI))
out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.PersistentVolumeClaim = (*core.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim))
out.RBD = (*core.RBDVolumeSource)(unsafe.Pointer(in.RBD))
out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*core.CephFSVolumeSource)(unsafe.Pointer(in.CephFS))
out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.DownwardAPI = (*core.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI))
out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC))
out.AzureFile = (*core.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile))
out.ConfigMap = (*core.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap))
out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
if in.Projected != nil {
in, out := &in.Projected, &out.Projected
*out = new(core.ProjectedVolumeSource)
if err := Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.Projected = nil
}
out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*core.CSIVolumeSource)(unsafe.Pointer(in.CSI))
out.Ephemeral = (*core.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
out.Image = (*core.ImageVolumeSource)(unsafe.Pointer(in.Image))
return nil
}
// Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function.
func Convert_v1_VolumeSource_To_core_VolumeSource(in *corev1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error {
return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s)
}
func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *corev1.VolumeSource, s conversion.Scope) error {
out.HostPath = (*corev1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.EmptyDir = (*corev1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir))
out.GCEPersistentDisk = (*corev1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk))
out.AWSElasticBlockStore = (*corev1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore))
out.GitRepo = (*corev1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo))
out.Secret = (*corev1.SecretVolumeSource)(unsafe.Pointer(in.Secret))
out.NFS = (*corev1.NFSVolumeSource)(unsafe.Pointer(in.NFS))
out.ISCSI = (*corev1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI))
out.Glusterfs = (*corev1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs))
out.PersistentVolumeClaim = (*corev1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim))
out.RBD = (*corev1.RBDVolumeSource)(unsafe.Pointer(in.RBD))
out.Quobyte = (*corev1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte))
out.FlexVolume = (*corev1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume))
out.Cinder = (*corev1.CinderVolumeSource)(unsafe.Pointer(in.Cinder))
out.CephFS = (*corev1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS))
out.Flocker = (*corev1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker))
out.DownwardAPI = (*corev1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI))
out.FC = (*corev1.FCVolumeSource)(unsafe.Pointer(in.FC))
out.AzureFile = (*corev1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile))
out.ConfigMap = (*corev1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap))
out.VsphereVolume = (*corev1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume))
out.AzureDisk = (*corev1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk))
out.PhotonPersistentDisk = (*corev1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk))
if in.Projected != nil {
in, out := &in.Projected, &out.Projected
*out = new(corev1.ProjectedVolumeSource)
if err := Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.Projected = nil
}
out.PortworxVolume = (*corev1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume))
out.ScaleIO = (*corev1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
out.StorageOS = (*corev1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*corev1.CSIVolumeSource)(unsafe.Pointer(in.CSI))
out.Ephemeral = (*corev1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
out.Image = (*corev1.ImageVolumeSource)(unsafe.Pointer(in.Image))
return nil
}
// Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function.
func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *corev1.VolumeSource, s conversion.Scope) error {
return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s)
}
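// Editorial note: in the large VolumeSource structs only Projected needs a
// conversion call, and only transitively: ProjectedVolumeSource contains
// VolumeProjection, whose ServiceAccountToken member is not layout-identical
// across packages (see above).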
func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *corev1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
out.StoragePolicyName = in.StoragePolicyName
out.StoragePolicyID = in.StoragePolicyID
return nil
}
// Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function.
func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *corev1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s)
}
func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *corev1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
out.StoragePolicyName = in.StoragePolicyName
out.StoragePolicyID = in.StoragePolicyID
return nil
}
// Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function.
func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *corev1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s)
}
func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *corev1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error {
out.Weight = in.Weight
if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
return err
}
return nil
}
// Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function.
func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *corev1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error {
return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s)
}
func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *corev1.WeightedPodAffinityTerm, s conversion.Scope) error {
out.Weight = in.Weight
if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {
return err
}
return nil
}
// Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function.
func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *corev1.WeightedPodAffinityTerm, s conversion.Scope) error {
return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s)
}
func autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *corev1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error {
out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName))
out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec))
out.RunAsUserName = (*string)(unsafe.Pointer(in.RunAsUserName))
out.HostProcess = (*bool)(unsafe.Pointer(in.HostProcess))
return nil
}
// Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions is an autogenerated conversion function.
func Convert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in *corev1.WindowsSecurityContextOptions, out *core.WindowsSecurityContextOptions, s conversion.Scope) error {
return autoConvert_v1_WindowsSecurityContextOptions_To_core_WindowsSecurityContextOptions(in, out, s)
}
func autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *corev1.WindowsSecurityContextOptions, s conversion.Scope) error {
out.GMSACredentialSpecName = (*string)(unsafe.Pointer(in.GMSACredentialSpecName))
out.GMSACredentialSpec = (*string)(unsafe.Pointer(in.GMSACredentialSpec))
out.RunAsUserName = (*string)(unsafe.Pointer(in.RunAsUserName))
out.HostProcess = (*bool)(unsafe.Pointer(in.HostProcess))
return nil
}
// Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions is an autogenerated conversion function.
func Convert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in *core.WindowsSecurityContextOptions, out *corev1.WindowsSecurityContextOptions, s conversion.Scope) error {
return autoConvert_core_WindowsSecurityContextOptions_To_v1_WindowsSecurityContextOptions(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&corev1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*corev1.ConfigMap)) })
scheme.AddTypeDefaultingFunc(&corev1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*corev1.ConfigMapList)) })
scheme.AddTypeDefaultingFunc(&corev1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*corev1.Endpoints)) })
scheme.AddTypeDefaultingFunc(&corev1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*corev1.EndpointsList)) })
scheme.AddTypeDefaultingFunc(&corev1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*corev1.LimitRange)) })
scheme.AddTypeDefaultingFunc(&corev1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*corev1.LimitRangeList)) })
scheme.AddTypeDefaultingFunc(&corev1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*corev1.Namespace)) })
scheme.AddTypeDefaultingFunc(&corev1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*corev1.NamespaceList)) })
scheme.AddTypeDefaultingFunc(&corev1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*corev1.Node)) })
scheme.AddTypeDefaultingFunc(&corev1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*corev1.NodeList)) })
scheme.AddTypeDefaultingFunc(&corev1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*corev1.PersistentVolume)) })
scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*corev1.PersistentVolumeClaim)) })
scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeClaimList{}, func(obj interface{}) {
SetObjectDefaults_PersistentVolumeClaimList(obj.(*corev1.PersistentVolumeClaimList))
})
scheme.AddTypeDefaultingFunc(&corev1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*corev1.PersistentVolumeList)) })
scheme.AddTypeDefaultingFunc(&corev1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*corev1.Pod)) })
scheme.AddTypeDefaultingFunc(&corev1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*corev1.PodList)) })
scheme.AddTypeDefaultingFunc(&corev1.PodLogOptions{}, func(obj interface{}) { SetObjectDefaults_PodLogOptions(obj.(*corev1.PodLogOptions)) })
scheme.AddTypeDefaultingFunc(&corev1.PodStatusResult{}, func(obj interface{}) { SetObjectDefaults_PodStatusResult(obj.(*corev1.PodStatusResult)) })
scheme.AddTypeDefaultingFunc(&corev1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*corev1.PodTemplate)) })
scheme.AddTypeDefaultingFunc(&corev1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*corev1.PodTemplateList)) })
scheme.AddTypeDefaultingFunc(&corev1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*corev1.ReplicationController)) })
scheme.AddTypeDefaultingFunc(&corev1.ReplicationControllerList{}, func(obj interface{}) {
SetObjectDefaults_ReplicationControllerList(obj.(*corev1.ReplicationControllerList))
})
scheme.AddTypeDefaultingFunc(&corev1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*corev1.ResourceQuota)) })
scheme.AddTypeDefaultingFunc(&corev1.ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*corev1.ResourceQuotaList)) })
scheme.AddTypeDefaultingFunc(&corev1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*corev1.Secret)) })
scheme.AddTypeDefaultingFunc(&corev1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*corev1.SecretList)) })
scheme.AddTypeDefaultingFunc(&corev1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*corev1.Service)) })
scheme.AddTypeDefaultingFunc(&corev1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*corev1.ServiceList)) })
return nil
}
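// exampleApplyDefaults is an illustrative sketch, not generated output: it
// shows how the defaulters registered above are normally invoked, indirectly,
// through runtime.Scheme.Default.
func exampleApplyDefaults() {
	scheme := runtime.NewScheme()
	if err := RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	svc := &corev1.Service{Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 80}}}}
	// Default dispatches to SetObjectDefaults_Service (registered above),
	// which among other things fills in Ports[0].Protocol = "TCP".
	scheme.Default(svc)
}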
func SetObjectDefaults_ConfigMap(in *corev1.ConfigMap) {
SetDefaults_ConfigMap(in)
}
func SetObjectDefaults_ConfigMapList(in *corev1.ConfigMapList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ConfigMap(a)
}
}
func SetObjectDefaults_Endpoints(in *corev1.Endpoints) {
SetDefaults_Endpoints(in)
}
func SetObjectDefaults_EndpointsList(in *corev1.EndpointsList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Endpoints(a)
}
}
func SetObjectDefaults_LimitRange(in *corev1.LimitRange) {
for i := range in.Spec.Limits {
a := &in.Spec.Limits[i]
SetDefaults_LimitRangeItem(a)
SetDefaults_ResourceList(&a.Max)
SetDefaults_ResourceList(&a.Min)
SetDefaults_ResourceList(&a.Default)
SetDefaults_ResourceList(&a.DefaultRequest)
SetDefaults_ResourceList(&a.MaxLimitRequestRatio)
}
}
func SetObjectDefaults_LimitRangeList(in *corev1.LimitRangeList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_LimitRange(a)
}
}
func SetObjectDefaults_Namespace(in *corev1.Namespace) {
SetDefaults_Namespace(in)
SetDefaults_NamespaceStatus(&in.Status)
}
func SetObjectDefaults_NamespaceList(in *corev1.NamespaceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Namespace(a)
}
}
func SetObjectDefaults_Node(in *corev1.Node) {
SetDefaults_NodeStatus(&in.Status)
SetDefaults_ResourceList(&in.Status.Capacity)
SetDefaults_ResourceList(&in.Status.Allocatable)
}
func SetObjectDefaults_NodeList(in *corev1.NodeList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Node(a)
}
}
func SetObjectDefaults_PersistentVolume(in *corev1.PersistentVolume) {
SetDefaults_PersistentVolume(in)
SetDefaults_ResourceList(&in.Spec.Capacity)
if in.Spec.PersistentVolumeSource.HostPath != nil {
SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath)
}
if in.Spec.PersistentVolumeSource.RBD != nil {
if in.Spec.PersistentVolumeSource.RBD.RBDPool == "" {
in.Spec.PersistentVolumeSource.RBD.RBDPool = "rbd"
}
if in.Spec.PersistentVolumeSource.RBD.RadosUser == "" {
in.Spec.PersistentVolumeSource.RBD.RadosUser = "admin"
}
if in.Spec.PersistentVolumeSource.RBD.Keyring == "" {
in.Spec.PersistentVolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if in.Spec.PersistentVolumeSource.ISCSI != nil {
if in.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface == "" {
in.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if in.Spec.PersistentVolumeSource.AzureDisk != nil {
if in.Spec.PersistentVolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
in.Spec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
in.Spec.PersistentVolumeSource.AzureDisk.FSType = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
in.Spec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
in.Spec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if in.Spec.PersistentVolumeSource.ScaleIO != nil {
if in.Spec.PersistentVolumeSource.ScaleIO.StorageMode == "" {
in.Spec.PersistentVolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if in.Spec.PersistentVolumeSource.ScaleIO.FSType == "" {
in.Spec.PersistentVolumeSource.ScaleIO.FSType = "xfs"
}
}
}
func SetObjectDefaults_PersistentVolumeClaim(in *corev1.PersistentVolumeClaim) {
SetDefaults_PersistentVolumeClaim(in)
SetDefaults_PersistentVolumeClaimSpec(&in.Spec)
SetDefaults_ResourceList(&in.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Spec.Resources.Requests)
SetDefaults_ResourceList(&in.Status.Capacity)
SetDefaults_ResourceList(&in.Status.AllocatedResources)
}
func SetObjectDefaults_PersistentVolumeClaimList(in *corev1.PersistentVolumeClaimList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PersistentVolumeClaim(a)
}
}
func SetObjectDefaults_PersistentVolumeList(in *corev1.PersistentVolumeList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PersistentVolume(a)
}
}
func SetObjectDefaults_Pod(in *corev1.Pod) {
SetDefaults_Pod(in)
SetDefaults_PodSpec(&in.Spec)
for i := range in.Spec.Volumes {
a := &in.Spec.Volumes[i]
SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.InitContainers {
a := &in.Spec.InitContainers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Containers {
a := &in.Spec.Containers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.EphemeralContainers {
a := &in.Spec.EphemeralContainers[i]
SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
SetDefaults_ResourceList(&in.Spec.Overhead)
if in.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Spec.Resources.Requests)
}
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
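// Illustrative sketch (not generated output): SetObjectDefaults_Pod is purely
// additive, so a Pod that omits optional fields gains the literal defaults
// shown above. For example:
//
//	pod := &corev1.Pod{}
//	pod.Spec.Containers = []corev1.Container{
//		{Name: "app", Ports: []corev1.ContainerPort{{ContainerPort: 8080}}},
//	}
//	SetObjectDefaults_Pod(pod)
//	// pod.Spec.Containers[0].Ports[0].Protocol is now "TCP".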
func SetObjectDefaults_PodList(in *corev1.PodList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Pod(a)
}
}
func SetObjectDefaults_PodLogOptions(in *corev1.PodLogOptions) {
SetDefaults_PodLogOptions(in)
}
func SetObjectDefaults_PodStatusResult(in *corev1.PodStatusResult) {
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
func SetObjectDefaults_PodTemplate(in *corev1.PodTemplate) {
SetDefaults_PodSpec(&in.Template.Spec)
for i := range in.Template.Spec.Volumes {
a := &in.Template.Spec.Volumes[i]
SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Template.Spec.InitContainers {
a := &in.Template.Spec.InitContainers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Template.Spec.Containers {
a := &in.Template.Spec.Containers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Template.Spec.EphemeralContainers {
a := &in.Template.Spec.EphemeralContainers[i]
SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
SetDefaults_ResourceList(&in.Template.Spec.Overhead)
if in.Template.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Template.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_PodTemplateList(in *corev1.PodTemplateList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PodTemplate(a)
}
}
func SetObjectDefaults_ReplicationController(in *corev1.ReplicationController) {
SetDefaults_ReplicationController(in)
if in.Spec.Replicas == nil {
var ptrVar1 int32 = 1
in.Spec.Replicas = &ptrVar1
}
if in.Spec.Template != nil {
SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
}
func SetObjectDefaults_ReplicationControllerList(in *corev1.ReplicationControllerList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ReplicationController(a)
}
}
func SetObjectDefaults_ResourceQuota(in *corev1.ResourceQuota) {
SetDefaults_ResourceList(&in.Spec.Hard)
SetDefaults_ResourceList(&in.Status.Hard)
SetDefaults_ResourceList(&in.Status.Used)
}
func SetObjectDefaults_ResourceQuotaList(in *corev1.ResourceQuotaList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceQuota(a)
}
}
func SetObjectDefaults_Secret(in *corev1.Secret) {
SetDefaults_Secret(in)
}
func SetObjectDefaults_SecretList(in *corev1.SecretList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Secret(a)
}
}
func SetObjectDefaults_Service(in *corev1.Service) {
SetDefaults_Service(in)
for i := range in.Spec.Ports {
a := &in.Spec.Ports[i]
if a.Protocol == "" {
a.Protocol = "TCP"
}
}
}
func SetObjectDefaults_ServiceList(in *corev1.ServiceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Service(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1
import (
context "context"
fmt "fmt"
corev1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
scheme.AddValidationFunc((*corev1.ReplicationController)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/", "/scale":
return Validate_ReplicationController(ctx, op, nil /* fldPath */, obj.(*corev1.ReplicationController), safe.Cast[*corev1.ReplicationController](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
scheme.AddValidationFunc((*corev1.ReplicationControllerList)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/":
return Validate_ReplicationControllerList(ctx, op, nil /* fldPath */, obj.(*corev1.ReplicationControllerList), safe.Cast[*corev1.ReplicationControllerList](oldObj))
}
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
})
return nil
}
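// Illustrative note (not generated output): callers normally reach the
// Validate_* functions below through the scheme registration above, which
// dispatches on the request's subresource path ("/" for the main resource,
// "/scale" for the scale subresource). A direct call is also possible, for
// example in tests; assuming an operation.Operation value op describing the
// request:
//
//	errs := Validate_ReplicationController(ctx, op, nil /* fldPath */, rc, oldRC)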
func Validate_ReplicationController(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationController) (errs field.ErrorList) {
// field corev1.ReplicationController.TypeMeta has no validation
// field corev1.ReplicationController.ObjectMeta has no validation
// field corev1.ReplicationController.Spec
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerSpec) (errs field.ErrorList) {
errs = append(errs, Validate_ReplicationControllerSpec(ctx, op, fldPath, obj, oldObj)...)
return
}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *corev1.ReplicationController) *corev1.ReplicationControllerSpec { return &oldObj.Spec }))...)
// field corev1.ReplicationController.Status has no validation
return errs
}
func Validate_ReplicationControllerList(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerList) (errs field.ErrorList) {
// field corev1.ReplicationControllerList.TypeMeta has no validation
// field corev1.ReplicationControllerList.ListMeta has no validation
// field corev1.ReplicationControllerList.Items
errs = append(errs,
func(fldPath *field.Path, obj, oldObj []corev1.ReplicationController) (errs field.ErrorList) {
if op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
return nil // no changes
}
errs = append(errs, validate.EachSliceVal(ctx, op, fldPath, obj, oldObj, nil, nil, Validate_ReplicationController)...)
return
}(fldPath.Child("items"), obj.Items, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerList) []corev1.ReplicationController { return oldObj.Items }))...)
return errs
}
func Validate_ReplicationControllerSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerSpec) (errs field.ErrorList) {
// field corev1.ReplicationControllerSpec.Replicas
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
return nil // no changes
}
// optional fields with default values are effectively required
if e := validate.RequiredPointer(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
errs = append(errs, e...)
return // do not proceed
}
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
}(fldPath.Child("replicas"), obj.Replicas, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerSpec) *int32 { return oldObj.Replicas }))...)
// field corev1.ReplicationControllerSpec.MinReadySeconds
errs = append(errs,
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
// optional value-type fields with zero-value defaults are purely documentation
if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
return nil // no changes
}
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
}(fldPath.Child("minReadySeconds"), &obj.MinReadySeconds, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerSpec) *int32 { return &oldObj.MinReadySeconds }))...)
// field corev1.ReplicationControllerSpec.Selector has no validation
// field corev1.ReplicationControllerSpec.Template has no validation
return errs
}
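// Note (illustrative, not generated output): the two fields above are checked
// differently because of how defaulting interacts with their Go types.
// Replicas is a *int32 defaulted to 1, so after defaulting it should never be
// nil; the RequiredPointer check rejects an explicit null. MinReadySeconds is
// a plain int32 whose default equals Go's zero value, so only the Minimum
// bound needs enforcing.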
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"reflect"
"time"
v1 "k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core"
)
const (
ReportingInstanceLengthLimit = 128
ActionLengthLimit = 128
ReasonLengthLimit = 128
NoteLengthLimit = 1024
)
func ValidateEventCreate(event *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
// Make sure events always pass legacy validation.
allErrs := legacyValidateEvent(event, requestVersion)
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
// No further validation for backwards compatibility.
return allErrs
}
// Strict validation applies to creation via the events.k8s.io/v1 API and newer.
allErrs = append(allErrs, ValidateObjectMeta(&event.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)
allErrs = append(allErrs, validateV1EventSeries(event)...)
zeroTime := time.Time{}
if event.EventTime.Time == zeroTime {
allErrs = append(allErrs, field.Required(field.NewPath("eventTime"), ""))
}
if event.Type != v1.EventTypeNormal && event.Type != v1.EventTypeWarning {
allErrs = append(allErrs, field.Invalid(field.NewPath("type"), "", fmt.Sprintf("has invalid value: %v", event.Type)))
}
if event.FirstTimestamp.Time != zeroTime {
allErrs = append(allErrs, field.Invalid(field.NewPath("firstTimestamp"), "", "needs to be unset"))
}
if event.LastTimestamp.Time != zeroTime {
allErrs = append(allErrs, field.Invalid(field.NewPath("lastTimestamp"), "", "needs to be unset"))
}
if event.Count != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("count"), "", "needs to be unset"))
}
if event.Source.Component != "" || event.Source.Host != "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("source"), "", "needs to be unset"))
}
return allErrs
}
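// exampleValidateEventCreate is an illustrative sketch (the function name and
// values are hypothetical, not part of the original file): under the strict
// events.k8s.io/v1 rules above, a minimal valid event carries EventTime and a
// Normal/Warning type while leaving the legacy fields unset.
func exampleValidateEventCreate() {
	ev := &core.Event{}
	ev.Namespace = metav1.NamespaceDefault
	ev.Name = "demo"
	ev.EventTime = metav1.MicroTime{Time: time.Now()}
	ev.Type = v1.EventTypeNormal
	ev.ReportingController = "example.com/controller"
	ev.ReportingInstance = "controller-0"
	ev.Action = "Created"
	ev.Reason = "Demo"
	errs := ValidateEventCreate(ev, schema.GroupVersion{Group: "events.k8s.io", Version: "v1"})
	fmt.Println(len(errs)) // 0: FirstTimestamp, LastTimestamp, Count and Source stay unset.
}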
func ValidateEventUpdate(newEvent, oldEvent *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
// Make sure the new event always passes legacy validation.
allErrs := legacyValidateEvent(newEvent, requestVersion)
if requestVersion == v1.SchemeGroupVersion || requestVersion == eventsv1beta1.SchemeGroupVersion {
// No further validation for backwards compatibility.
return allErrs
}
// Strict validation applies to update via the events.k8s.io/v1 API and newer.
allErrs = append(allErrs, ValidateObjectMetaUpdate(&newEvent.ObjectMeta, &oldEvent.ObjectMeta, field.NewPath("metadata"))...)
// if the series was modified, validate the new data
if !reflect.DeepEqual(newEvent.Series, oldEvent.Series) {
allErrs = append(allErrs, validateV1EventSeries(newEvent)...)
}
allErrs = append(allErrs, ValidateImmutableField(newEvent.InvolvedObject, oldEvent.InvolvedObject, field.NewPath("involvedObject"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Message, oldEvent.Message, field.NewPath("message"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Source, oldEvent.Source, field.NewPath("source"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.FirstTimestamp, oldEvent.FirstTimestamp, field.NewPath("firstTimestamp"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.LastTimestamp, oldEvent.LastTimestamp, field.NewPath("lastTimestamp"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Count, oldEvent.Count, field.NewPath("count"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Type, oldEvent.Type, field.NewPath("type"))...)
// Disallow changes to eventTime greater than microsecond-level precision.
// Tolerating sub-microsecond differences is necessary to accept updates from
// clients that correctly truncate to microsecond precision when serializing,
// as well as from clients built with incorrect nanosecond-precision protobuf
// serialization.
// See https://github.com/kubernetes/kubernetes/issues/111928
newTruncated := newEvent.EventTime.Truncate(time.Microsecond).UTC()
oldTruncated := oldEvent.EventTime.Truncate(time.Microsecond).UTC()
if newTruncated != oldTruncated {
allErrs = append(allErrs, ValidateImmutableField(newEvent.EventTime, oldEvent.EventTime, field.NewPath("eventTime"))...)
}
allErrs = append(allErrs, ValidateImmutableField(newEvent.Action, oldEvent.Action, field.NewPath("action"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.Related, oldEvent.Related, field.NewPath("related"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingController, oldEvent.ReportingController, field.NewPath("reportingController"))...)
allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingInstance, oldEvent.ReportingInstance, field.NewPath("reportingInstance"))...)
return allErrs
}
func validateV1EventSeries(event *core.Event) field.ErrorList {
allErrs := field.ErrorList{}
zeroTime := time.Time{}
if event.Series != nil {
if event.Series.Count < 2 {
allErrs = append(allErrs, field.Invalid(field.NewPath("series.count"), "", "should be at least 2"))
}
if event.Series.LastObservedTime.Time == zeroTime {
allErrs = append(allErrs, field.Required(field.NewPath("series.lastObservedTime"), ""))
}
}
return allErrs
}
// legacyValidateEvent applies the legacy validation rules that every Event must pass, regardless of the API version used to create it.
func legacyValidateEvent(event *core.Event, requestVersion schema.GroupVersion) field.ErrorList {
allErrs := field.ErrorList{}
// Go's zero value for time.Time is used to detect unset timestamp fields.
zeroTime := time.Time{}
reportingControllerFieldName := "reportingController"
if requestVersion == v1.SchemeGroupVersion {
reportingControllerFieldName = "reportingComponent"
}
// "New" Events need to have EventTime set, so it's validating old object.
if event.EventTime.Time == zeroTime {
// Make sure event.Namespace and event.InvolvedObject.Namespace agree
if len(event.InvolvedObject.Namespace) == 0 {
// event.Namespace must also be empty (or "default", for compatibility with old clients)
if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
} else {
// event namespace must match
if event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
}
} else {
if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceDefault && event.Namespace != metav1.NamespaceSystem {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
}
if len(event.ReportingController) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath(reportingControllerFieldName), ""))
}
allErrs = append(allErrs, ValidateQualifiedName(event.ReportingController, field.NewPath(reportingControllerFieldName))...)
if len(event.ReportingInstance) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), ""))
}
if len(event.ReportingInstance) > ReportingInstanceLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reportingInstance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit)))
}
if len(event.Action) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("action"), ""))
}
if len(event.Action) > ActionLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit)))
}
if len(event.Reason) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("reason"), ""))
}
if len(event.Reason) > ReasonLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit)))
}
if len(event.Message) > NoteLengthLimit {
allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit)))
}
}
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
}
return allErrs
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// IsKubernetesSignerName checks if signerName is one reserved by the Kubernetes project.
func IsKubernetesSignerName(signerName string) bool {
hostName, _, _ := strings.Cut(signerName, "/")
return hostName == "kubernetes.io" || strings.HasSuffix(hostName, ".kubernetes.io")
}
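// For example (illustrative values, not part of the original file):
//
//	IsKubernetesSignerName("kubernetes.io/kube-apiserver-client") // true: exact host match
//	IsKubernetesSignerName("beta.kubernetes.io/my-signer")        // true: ".kubernetes.io" suffix
//	IsKubernetesSignerName("example.com/my-signer")               // false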
// ValidateSignerName checks that signerName is syntactically valid.
//
// It ensures signerName is of the form domain.com/something and is at most 571 characters long.
// This length and format are specified to accommodate signerNames like:
// <fqdn>/<resource-namespace>.<resource-name>.
// The max length of an FQDN is 253 characters (DNS1123Subdomain max length).
// The max length of a namespace name is 63 characters (DNS1123Label max length).
// The max length of a resource name is 253 characters (DNS1123Subdomain max length).
// We then add 2 characters to account for the one '.' and one '/'.
func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList {
var el field.ErrorList
if len(signerName) == 0 {
el = append(el, field.Required(fldPath, ""))
return el
}
segments := strings.Split(signerName, "/")
// validate that there is exactly one '/' in the signerName.
// we check this first, before validating the individual segments, so the user gets a single clear error.
if len(segments) != 2 {
el = append(el, field.Invalid(fldPath, signerName, "must be a fully qualified domain and path of the form 'example.com/signer-name'"))
// return early here as we should not continue attempting to validate a missing or malformed path segment
// (i.e. one containing multiple or zero `/`)
return el
}
// validate that segments[0] is at most 253 characters altogether
maxDomainSegmentLength := validation.DNS1123SubdomainMaxLength
if len(segments[0]) > maxDomainSegmentLength {
el = append(el, field.TooLong(fldPath, "" /*unused*/, maxDomainSegmentLength))
}
// validate that segments[0] consists of valid DNS1123 labels separated by '.'
domainLabels := strings.Split(segments[0], ".")
for _, lbl := range domainLabels {
// use IsDNS1123Label as we want to ensure the max length of any single label in the domain
// is 63 characters
if errs := validation.IsDNS1123Label(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[0], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the domain segment, break from
// validation as any further error messages will be duplicates, and non-distinguishable
// from each other, confusing users.
break
}
}
// validate that there is at least one '.' in segments[0]
if len(domainLabels) < 2 {
el = append(el, field.Invalid(fldPath, segments[0], "should be a domain with at least two segments separated by dots"))
}
// validate that segments[1] consists of valid DNS1123 subdomains separated by '.'.
pathLabels := strings.Split(segments[1], ".")
for _, lbl := range pathLabels {
// use IsDNS1123Subdomain because it enforces a length restriction of 253 characters
// which is required in order to fit a full resource name into a single 'label'
if errs := validation.IsDNS1123Subdomain(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[1], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the path segment, break from
// validation as any further error messages will be duplicates, and non-distinguishable
// from each other, confusing users.
break
}
}
// ensure that segments[1] can accommodate a dns label + dns subdomain + '.'
maxPathSegmentLength := validation.DNS1123SubdomainMaxLength + validation.DNS1123LabelMaxLength + 1
maxSignerNameLength := maxDomainSegmentLength + maxPathSegmentLength + 1
if len(signerName) > maxSignerNameLength {
el = append(el, field.TooLong(fldPath, "" /*unused*/, maxSignerNameLength))
}
return el
}
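// As a minimal illustrative sketch (the field path and signer names are
// hypothetical), a caller might use ValidateSignerName like this:
//
//	errs := ValidateSignerName(field.NewPath("spec", "signerName"), "example.com/my-signer")
//	// len(errs) == 0: one '/', and a domain with two dot-separated labels.
//	errs = ValidateSignerName(field.NewPath("spec", "signerName"), "example.com")
//	// len(errs) == 1: there is no '/' separating the domain and path segments.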
// ValidateClusterTrustBundleName checks that a ClusterTrustBundle name conforms
// to the rules documented on the type.
func ValidateClusterTrustBundleName(signerName string) func(name string, isPrefix bool) []string {
return func(name string, isPrefix bool) []string {
if signerName == "" {
if strings.Contains(name, ":") {
return []string{"ClusterTrustBundle without signer name must not have \":\" in its name"}
}
return apimachineryvalidation.NameIsDNSSubdomain(name, isPrefix)
}
requiredPrefix := strings.ReplaceAll(signerName, "/", ":") + ":"
if !strings.HasPrefix(name, requiredPrefix) {
return []string{fmt.Sprintf("ClusterTrustBundle for signerName %s must be named with prefix %s", signerName, requiredPrefix)}
}
return apimachineryvalidation.NameIsDNSSubdomain(strings.TrimPrefix(name, requiredPrefix), isPrefix)
}
}
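// For example, given the hypothetical signer name "example.com/my-signer",
// the returned validator requires names of the form
// "example.com:my-signer:<suffix>" where <suffix> is a DNS subdomain:
//
//	check := ValidateClusterTrustBundleName("example.com/my-signer")
//	check("example.com:my-signer:ca-bundle", false) // no errors
//	check("ca-bundle", false)                       // rejected: missing the required prefix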
func extractSignerNameFromClusterTrustBundleName(name string) (string, bool) {
if splitPoint := strings.LastIndex(name, ":"); splitPoint != -1 {
// This looks like it refers to a signerName trustbundle.
return strings.ReplaceAll(name[:splitPoint], ":", "/"), true
}
return "", false
}
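// For illustration (hypothetical names):
//
//	extractSignerNameFromClusterTrustBundleName("example.com:my-signer:ca-bundle")
//	// returns ("example.com/my-signer", true)
//	extractSignerNameFromClusterTrustBundleName("ca-bundle")
//	// returns ("", false): no ':' means no embedded signer name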
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"encoding/json"
"fmt"
"math"
"net"
"path"
"path/filepath"
"reflect"
"regexp"
"slices"
"strings"
"sync"
"unicode"
"unicode/utf8"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
resourcehelper "k8s.io/component-helpers/resource"
schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
kubeletapis "k8s.io/kubelet/pkg/apis"
"k8s.io/kubernetes/pkg/apis/certificates"
apiservice "k8s.io/kubernetes/pkg/api/service"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
)
const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg
const isInvalidQuotaResource string = `must be a standard resource for quota`
const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg
const isNotIntegerErrorMsg string = `must be an integer`
const isNotPositiveErrorMsg string = `must be greater than zero`
var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255)
var fileModeErrorMsg = "must be a number between 0 and 0777 (octal), both inclusive"
// BannedOwners is a list of objects that are not allowed to be owners.
var BannedOwners = apimachineryvalidation.BannedOwners
var iscsiInitiatorIqnRegex = regexp.MustCompile(`^iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`)
var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui\.[[:alnum:]]{16}$`)
var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa\.[[:alnum:]]{32}$`)
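// Illustrative initiator identifiers accepted by the expressions above
// (the values are hypothetical):
//
//	iqn.2001-04.com.example:storage.disk1    // IQN: date, reversed domain, target suffix
//	eui.02004567A425678D                     // EUI: exactly 16 alphanumerics
//	naa.52004567BA64678D52004567BA64678D     // NAA: exactly 32 alphanumerics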
var allowedEphemeralContainerFields = map[string]bool{
"Name": true,
"Image": true,
"Command": true,
"Args": true,
"WorkingDir": true,
"Ports": false,
"EnvFrom": true,
"Env": true,
"Resources": false,
"VolumeMounts": true,
"VolumeDevices": true,
"LivenessProbe": false,
"ReadinessProbe": false,
"StartupProbe": false,
"Lifecycle": false,
"TerminationMessagePath": true,
"TerminationMessagePolicy": true,
"ImagePullPolicy": true,
"SecurityContext": true,
"Stdin": true,
"StdinOnce": true,
"TTY": true,
}
// validOS stores the set of valid OSes within pod spec.
// The valid values currently are linux, windows.
// In the future, they can be expanded to values from
// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
var validOS = sets.New(core.Linux, core.Windows)
// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
allErrs := field.ErrorList{}
actualValue, found := meta.Labels[key]
if !found {
allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key),
fmt.Sprintf("must be '%s'", expectedValue)))
return allErrs
}
if actualValue != expectedValue {
allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels,
fmt.Sprintf("must be '%s'", expectedValue)))
}
return allErrs
}
// ValidateAnnotations validates that a set of annotations are correctly defined.
func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateAnnotations(annotations, fldPath)
}
func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsDNS1123Label(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg).WithOrigin("format=dns-label"))
}
return allErrs
}
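// For illustration (hypothetical values):
//
//	ValidateDNS1123Label("frontend-v2", field.NewPath("name")) // no errors
//	ValidateDNS1123Label("Frontend_V2", field.NewPath("name")) // errors: uppercase letters and '_' are not allowed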
// ValidateQualifiedName validates if name is what Kubernetes calls a "qualified name".
func ValidateQualifiedName(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg).WithOrigin("format=qualified-name"))
}
return allErrs
}
// ValidateDNS1123SubdomainWithUnderScore validates that a name is a proper DNS subdomain but allows for an underscore in the string
func ValidateDNS1123SubdomainWithUnderScore(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsDNS1123SubdomainWithUnderscore(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg).WithOrigin("format=k8s-dns-subdomain-with-underscore"))
}
return allErrs
}
// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain.
func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsDNS1123Subdomain(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg).WithOrigin("format=k8s-long-name"))
}
return allErrs
}
func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror {
if len(spec.NodeName) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set"))
}
}
if annotations[core.TolerationsAnnotationKey] != "" {
allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
}
if !opts.AllowInvalidPodDeletionCost {
if _, err := helper.GetDeletionCostFromPodAnnotations(annotations); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Key(core.PodDeletionCost), annotations[core.PodDeletionCost], "must be a 32-bit integer"))
}
}
allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...)
allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...)
return allErrs
}
// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations have valid data
func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error()))
return allErrs
}
if len(tolerations) > 0 {
allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...)
}
return allErrs
}
func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
newAnnotations := newPod.Annotations
oldAnnotations := oldPod.Annotations
for k, oldVal := range oldAnnotations {
if newVal, exists := newAnnotations[k]; exists && newVal == oldVal {
continue // No change.
}
if strings.HasPrefix(k, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix) {
allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations"))
}
if k == core.MirrorPodAnnotationKey {
allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation"))
}
}
// Check for additions
for k := range newAnnotations {
if _, ok := oldAnnotations[k]; ok {
continue // No change.
}
if strings.HasPrefix(k, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix) {
allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations"))
}
if k == core.MirrorPodAnnotationKey {
allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation"))
}
}
allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath, opts)...)
return allErrs
}
func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
return allErrs
}
// ValidateNameFunc validates that the provided name is valid for a given resource type.
// Not all resources have the same validation rules for names. Prefix is true
// if the name will have a value appended to it. If the name is not valid,
// this returns a list of descriptions of individual characteristics of the
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc apimachineryvalidation.ValidateNameFunc
// ValidatePodName can be used to check whether the given pod name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidatePodName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateReplicationControllerName can be used to check whether the given replication
// controller name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateReplicationControllerName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateServiceName can be used to check whether the given service name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateServiceName = apimachineryvalidation.NameIsDNS1035Label
// ValidateNodeName can be used to check whether the given node name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNodeName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateNamespaceName can be used to check whether the given namespace name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName
// ValidateLimitRangeName can be used to check whether the given limit range name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateLimitRangeName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceQuotaName can be used to check whether the given
// resource quota name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateResourceQuotaName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateSecretName can be used to check whether the given secret name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateSecretName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateServiceAccountName can be used to check whether the given service account name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName
// ValidateEndpointsName can be used to check whether the given endpoints name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateEndpointsName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateClassName can be used to check whether the given class name is valid.
// It is defined here to avoid import cycle between pkg/apis/storage/validation
// (where it should be) and this file.
var ValidateClassName = apimachineryvalidation.NameIsDNSSubdomain
// ValidatePriorityClassName can be used to check whether the given priority
// class name is valid.
var ValidatePriorityClassName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimName can be used to check whether the given
// name for a ResourceClaim is valid.
var ValidateResourceClaimName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimTemplateName can be used to check whether the given
// name for a ResourceClaimTemplate is valid.
var ValidateResourceClaimTemplateName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateRuntimeClassName can be used to check whether the given RuntimeClass name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
for _, msg := range apimachineryvalidation.NameIsDNSSubdomain(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath, name, msg))
}
return allErrs
}
// validateOverhead can be used to check whether the given Overhead is valid.
func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// reuse the ResourceRequirements validation logic
return ValidateContainerResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
}
// ValidateNonnegativeField validates that the given value is not negative.
func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateNonnegativeField(value, fldPath)
}
// ValidateNonnegativeQuantity validates that the given Quantity is not negative.
func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if value.Cmp(resource.Quantity{}) < 0 {
allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
}
return allErrs
}
// ValidatePositiveField validates that the given value is positive.
func ValidatePositiveField(value int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if value <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath, value, isNotPositiveErrorMsg).WithOrigin("minimum"))
}
return allErrs
}
// ValidatePositiveQuantityValue validates that the given Quantity is positive.
func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if value.Cmp(resource.Quantity{}) <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg))
}
return allErrs
}
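// As a brief illustration of the quantity validators above (fldPath is a
// hypothetical field path):
//
//	ValidateNonnegativeQuantity(resource.MustParse("0"), fldPath)      // no errors
//	ValidatePositiveQuantityValue(resource.MustParse("0"), fldPath)    // error: must be greater than zero
//	ValidatePositiveQuantityValue(resource.MustParse("-1Gi"), fldPath) // error: must be greater than zero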
func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath)
}
func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if oldVal != newVal {
allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg))
}
return allErrs
}
// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
// been performed.
// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate.
func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath)
// run additional checks for the finalizer name
for i := range meta.Finalizers {
allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
}
return allErrs
}
// ValidateObjectMetaUpdate validates an object's metadata when updated
func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath)
// run additional checks for the finalizer name
for i := range newMeta.Finalizers {
allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
}
return allErrs
}
func ValidateVolumes(volumes []core.Volume, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) (map[string]core.VolumeSource, field.ErrorList) {
allErrs := field.ErrorList{}
allNames := sets.Set[string]{}
allCreatedPVCs := sets.Set[string]{}
// Determine which PVCs will be created for this pod. We need
// the exact name of the pod for this. Without it, this sanity
// check has to be skipped.
if podMeta != nil && podMeta.Name != "" {
for _, vol := range volumes {
if vol.VolumeSource.Ephemeral != nil {
allCreatedPVCs.Insert(podMeta.Name + "-" + vol.Name)
}
}
}
vols := make(map[string]core.VolumeSource)
for i, vol := range volumes {
idxPath := fldPath.Index(i)
namePath := idxPath.Child("name")
el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name, podMeta, opts)
if len(vol.Name) == 0 {
el = append(el, field.Required(namePath, ""))
} else {
el = append(el, ValidateDNS1123Label(vol.Name, namePath)...)
}
if allNames.Has(vol.Name) {
el = append(el, field.Duplicate(namePath, vol.Name))
}
if len(el) == 0 {
allNames.Insert(vol.Name)
vols[vol.Name] = vol.VolumeSource
} else {
allErrs = append(allErrs, el...)
}
// A persistentVolumeClaim source must not reference a PVC that gets created
// for one of this pod's ephemeral volumes; that PVC's lifecycle is already
// tied to the ephemeral volume.
if vol.PersistentVolumeClaim != nil && allCreatedPVCs.Has(vol.PersistentVolumeClaim.ClaimName) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("persistentVolumeClaim").Child("claimName"), vol.PersistentVolumeClaim.ClaimName,
"must not reference a PVC that gets created for an ephemeral volume"))
}
}
return vols, allErrs
}
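// To illustrate the ephemeral-volume naming rule enforced above (all names
// here are hypothetical): a pod named "web-0" with an ephemeral volume "cache"
// owns the generated PVC "web-0-cache", so a second volume in the same pod
// must not reference that PVC directly:
//
//	core.Volume{Name: "cache", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{...}}}
//	// creates PVC "web-0-cache"
//	core.Volume{Name: "data", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "web-0-cache"}}}
//	// rejected: must not reference a PVC that gets created for an ephemeral volume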
func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool {
if _, ok := volumes[name]; ok {
return true
}
return false
}
// isMatchedDevice checks whether the volume with the given name is used by a
// container and, if so, whether it involves a PVC.
func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (isMatched bool, isPVC bool) {
if source, ok := volumes[name]; ok {
if source.PersistentVolumeClaim != nil ||
source.Ephemeral != nil {
return true, true
}
return true, false
}
return false, false
}
func mountNameAlreadyExists(name string, devices map[string]string) bool {
if _, ok := devices[name]; ok {
return true
}
return false
}
func mountPathAlreadyExists(mountPath string, devices map[string]string) bool {
for _, devPath := range devices {
if mountPath == devPath {
return true
}
}
return false
}
func deviceNameAlreadyExists(name string, mounts map[string]string) bool {
if _, ok := mounts[name]; ok {
return true
}
return false
}
func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool {
for _, mountPath := range mounts {
if mountPath == devicePath {
return true
}
}
return false
}
func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string, podMeta *metav1.ObjectMeta, opts PodValidationOptions) field.ErrorList {
numVolumes := 0
allErrs := field.ErrorList{}
if source.EmptyDir != nil {
numVolumes++
if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity"))
}
}
if source.HostPath != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...)
}
}
if source.GitRepo != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...)
}
}
if source.GCEPersistentDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("gcePersistentDisk"))...)
}
}
if source.AWSElasticBlockStore != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
}
}
if source.Secret != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...)
}
}
if source.NFS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...)
}
}
if source.ISCSI != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...)
}
if source.ISCSI.InitiatorName != nil && len(volName+":"+source.ISCSI.TargetPortal) > 64 {
tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, tooLongErr))
}
}
if source.Glusterfs != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateGlusterfsVolumeSource(source.Glusterfs, fldPath.Child("glusterfs"))...)
}
}
if source.Flocker != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...)
}
}
if source.PersistentVolumeClaim != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...)
}
}
if source.RBD != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...)
}
}
if source.Cinder != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...)
}
}
if source.CephFS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...)
}
}
if source.Quobyte != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...)
}
}
if source.DownwardAPI != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"), opts)...)
}
}
if source.FC != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
}
}
if source.FlexVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
}
}
if source.ConfigMap != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
}
}
if source.AzureFile != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
}
}
if source.VsphereVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
}
}
if source.PhotonPersistentDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
}
}
if source.PortworxVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...)
}
}
if source.AzureDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
}
}
if source.StorageOS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...)
}
}
if source.Projected != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"), opts)...)
}
}
if source.ScaleIO != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
}
}
if source.CSI != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCSIVolumeSource(source.CSI, fldPath.Child("csi"))...)
}
}
if source.Ephemeral != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeral"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateEphemeralVolumeSource(source.Ephemeral, fldPath.Child("ephemeral"))...)
// Check the expected name for the PVC. This gets skipped if information is missing,
// because that already gets flagged as a problem elsewhere. For example,
// ValidateObjectMeta as called by validatePodMetadataAndSpec checks that the name is set.
if podMeta != nil && podMeta.Name != "" && volName != "" {
pvcName := podMeta.Name + "-" + volName
for _, msg := range ValidatePersistentVolumeName(pvcName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, fmt.Sprintf("PVC name %q: %v", pvcName, msg)))
}
}
}
}
if source.Image != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("image"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateImageVolumeSource(source.Image, fldPath.Child("image"), opts)...)
}
}
if numVolumes == 0 {
allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
}
return allErrs
}
func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(hostPath.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
return allErrs
}
allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...)
allErrs = append(allErrs, validateHostPathType(hostPath.Type, fldPath.Child("type"))...)
return allErrs
}
func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(gitRepo.Repository) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("repository"), ""))
}
pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory"))
allErrs = append(allErrs, pathErrs...)
return allErrs
}
func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(iscsi.TargetPortal) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
}
if len(iscsi.IQN) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
} else {
if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format starting with iqn, eui, or naa"))
} else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
} else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
} else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
}
}
if iscsi.Lun < 0 || iscsi.Lun > 255 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
}
if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), ""))
}
if iscsi.InitiatorName != nil {
initiator := *iscsi.InitiatorName
if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format starting with iqn, eui, or naa"))
}
if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
} else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
} else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
}
}
return allErrs
}
func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, pvName string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(iscsi.TargetPortal) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), ""))
}
if iscsi.InitiatorName != nil && len(pvName+":"+iscsi.TargetPortal) > 64 {
tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
allErrs = append(allErrs, field.Invalid(fldPath.Child("targetportal"), iscsi.TargetPortal, tooLongErr))
}
if len(iscsi.IQN) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), ""))
} else {
if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format starting with iqn, eui, or naa"))
} else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
} else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
} else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be a valid format"))
}
}
if iscsi.Lun < 0 || iscsi.Lun > 255 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255)))
}
if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), ""))
}
if iscsi.SecretRef != nil {
if len(iscsi.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
}
if iscsi.InitiatorName != nil {
initiator := *iscsi.InitiatorName
if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format starting with iqn, eui, or naa"))
}
if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
} else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
} else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be a valid format"))
}
}
return allErrs
}
func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 {
allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both"))
}
if len(fc.TargetWWNs) != 0 && len(fc.WWIDs) != 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("targetWWNs"), fc.TargetWWNs, "targetWWNs and wwids can not be specified simultaneously"))
}
if len(fc.TargetWWNs) != 0 {
if fc.Lun == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "lun is required if targetWWNs is specified"))
} else {
if *fc.Lun < 0 || *fc.Lun > 255 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255)))
}
}
}
return allErrs
}
func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(pd.PDName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), ""))
}
if pd.Partition < 0 || pd.Partition > 255 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg))
}
return allErrs
}
func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(PD.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
if PD.Partition < 0 || PD.Partition > 255 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg))
}
return allErrs
}
func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(secretSource.SecretName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
}
secretMode := secretSource.DefaultMode
if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg))
}
itemsPath := fldPath.Child("items")
for i, kp := range secretSource.Items {
itemPath := itemsPath.Index(i)
allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
}
return allErrs
}
func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(configMapSource.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
configMapMode := configMapSource.DefaultMode
if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg))
}
itemsPath := fldPath.Child("items")
for i, kp := range configMapSource.Items {
itemPath := itemsPath.Index(i)
allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
}
return allErrs
}
func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(kp.Key) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
}
if len(kp.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
}
allErrs = append(allErrs, ValidateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...)
if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg))
}
return allErrs
}
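// For illustration (hypothetical entries):
//
//	core.KeyToPath{Key: "ca.crt", Path: "certs/ca.crt"} // valid: relative, descending path
//	core.KeyToPath{Key: "ca.crt", Path: "../ca.crt"}    // rejected: '..' elements are not allowed
//	core.KeyToPath{Key: "ca.crt", Path: "/ca.crt"}      // rejected: must be a relative path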
func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(claim.ClaimName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), ""))
}
return allErrs
}
func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(nfs.Server) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("server"), ""))
}
if len(nfs.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
}
if !path.IsAbs(nfs.Path) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path"))
}
return allErrs
}
func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(quobyte.Registry) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas"))
} else {
for _, hostPortPair := range strings.Split(quobyte.Registry, ",") {
if _, _, err := net.SplitHostPort(hostPortPair); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas"))
}
}
}
if len(quobyte.Tenant) >= 65 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("tenant"), quobyte.Tenant, "must be a UUID and may not exceed a length of 64 characters"))
}
if len(quobyte.Volume) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volume"), ""))
}
return allErrs
}
func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(glusterfs.EndpointsName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
}
if len(glusterfs.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
}
return allErrs
}
func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(glusterfs.EndpointsName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), ""))
}
if len(glusterfs.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
}
if glusterfs.EndpointsNamespace != nil {
endpointNs := glusterfs.EndpointsNamespace
if *endpointNs == "" {
allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, "if the endpointnamespace is set, it must be a valid namespace name"))
} else {
for _, msg := range ValidateNamespaceName(*endpointNs, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, msg))
}
}
}
return allErrs
}
func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 {
// TODO: consider adding a RequiredOneOf() error for this and similar cases
allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required"))
}
if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 {
allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously"))
}
if strings.Contains(flocker.DatasetName, "/") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'"))
}
return allErrs
}
var validVolumeDownwardAPIFieldPathExpressions = sets.New(
"metadata.name",
"metadata.namespace",
"metadata.labels",
"metadata.annotations",
"metadata.uid")
func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(file.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
}
allErrs = append(allErrs, ValidateLocalNonReservedPath(file.Path, fldPath.Child("path"))...)
if file.FieldRef != nil {
allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
if file.ResourceFieldRef != nil {
allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
}
} else if file.ResourceFieldRef != nil {
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...)
} else {
allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
}
if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg))
}
return allErrs
}
func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
downwardAPIMode := downwardAPIVolume.DefaultMode
if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg))
}
for _, file := range downwardAPIVolume.Items {
allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath, opts)...)
}
return allErrs
}
func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allPaths := sets.Set[string]{}
for i, source := range projection.Sources {
numSources := 0
srcPath := fldPath.Child("sources").Index(i)
if projPath := srcPath.Child("secret"); source.Secret != nil {
numSources++
if len(source.Secret.Name) == 0 {
allErrs = append(allErrs, field.Required(projPath.Child("name"), ""))
}
itemsPath := projPath.Child("items")
for i, kp := range source.Secret.Items {
itemPath := itemsPath.Index(i)
allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
if len(kp.Path) > 0 {
curPath := kp.Path
if !allPaths.Has(curPath) {
allPaths.Insert(curPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths"))
}
}
}
}
if projPath := srcPath.Child("configMap"); source.ConfigMap != nil {
numSources++
if len(source.ConfigMap.Name) == 0 {
allErrs = append(allErrs, field.Required(projPath.Child("name"), ""))
}
itemsPath := projPath.Child("items")
for i, kp := range source.ConfigMap.Items {
itemPath := itemsPath.Index(i)
allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...)
if len(kp.Path) > 0 {
curPath := kp.Path
if !allPaths.Has(curPath) {
allPaths.Insert(curPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths"))
}
}
}
}
if projPath := srcPath.Child("downwardAPI"); source.DownwardAPI != nil {
numSources++
for _, file := range source.DownwardAPI.Items {
allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, projPath, opts)...)
if len(file.Path) > 0 {
curPath := file.Path
if !allPaths.Has(curPath) {
allPaths.Insert(curPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths"))
}
}
}
}
if projPath := srcPath.Child("serviceAccountToken"); source.ServiceAccountToken != nil {
numSources++
if source.ServiceAccountToken.ExpirationSeconds < 10*60 {
allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration less than 10 minutes"))
}
if source.ServiceAccountToken.ExpirationSeconds > 1<<32 {
allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration larger than 2^32 seconds"))
}
if source.ServiceAccountToken.Path == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
} else if !opts.AllowNonLocalProjectedTokenPath {
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.ServiceAccountToken.Path, fldPath.Child("path"))...)
}
}
if projPath := srcPath.Child("clusterTrustBundle"); source.ClusterTrustBundle != nil {
numSources++
usingName := source.ClusterTrustBundle.Name != nil
usingSignerName := source.ClusterTrustBundle.SignerName != nil
switch {
case usingName && usingSignerName:
allErrs = append(allErrs, field.Invalid(projPath, source.ClusterTrustBundle, "only one of name and signerName may be used"))
case usingName:
if *source.ClusterTrustBundle.Name == "" {
allErrs = append(allErrs, field.Required(projPath.Child("name"), "must be a valid object name"))
}
name := *source.ClusterTrustBundle.Name
if signerName, ok := extractSignerNameFromClusterTrustBundleName(name); ok {
validationFunc := ValidateClusterTrustBundleName(signerName)
errMsgs := validationFunc(name, false)
for _, msg := range errMsgs {
allErrs = append(allErrs, field.Invalid(projPath.Child("name"), name, fmt.Sprintf("not a valid clustertrustbundlename: %v", msg)))
}
} else {
validationFunc := ValidateClusterTrustBundleName("")
errMsgs := validationFunc(name, false)
for _, msg := range errMsgs {
allErrs = append(allErrs, field.Invalid(projPath.Child("name"), name, fmt.Sprintf("not a valid clustertrustbundlename: %v", msg)))
}
}
if source.ClusterTrustBundle.LabelSelector != nil {
allErrs = append(allErrs, field.Invalid(projPath.Child("labelSelector"), source.ClusterTrustBundle.LabelSelector, "labelSelector must be unset if name is specified"))
}
case usingSignerName:
if *source.ClusterTrustBundle.SignerName == "" {
allErrs = append(allErrs, field.Required(projPath.Child("signerName"), "must be a valid signer name"))
}
allErrs = append(allErrs, ValidateSignerName(projPath.Child("signerName"), *source.ClusterTrustBundle.SignerName)...)
labelSelectorErrs := unversionedvalidation.ValidateLabelSelector(
source.ClusterTrustBundle.LabelSelector,
unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false},
projPath.Child("labelSelector"),
)
allErrs = append(allErrs, labelSelectorErrs...)
default:
allErrs = append(allErrs, field.Required(projPath, "either name or signerName must be specified"))
}
if source.ClusterTrustBundle.Path == "" {
allErrs = append(allErrs, field.Required(projPath.Child("path"), ""))
}
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.ClusterTrustBundle.Path, projPath.Child("path"))...)
curPath := source.ClusterTrustBundle.Path
if !allPaths.Has(curPath) {
allPaths.Insert(curPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths"))
}
}
if projPath := srcPath.Child("podCertificate"); source.PodCertificate != nil {
numSources++
allErrs = append(allErrs, ValidateSignerName(projPath.Child("signerName"), source.PodCertificate.SignerName)...)
switch source.PodCertificate.KeyType {
case "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", "ECDSAP521", "ED25519":
// ok
default:
allErrs = append(allErrs, field.NotSupported(projPath.Child("keyType"), source.PodCertificate.KeyType, []string{"RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", "ECDSAP521", "ED25519"}))
}
if source.PodCertificate.MaxExpirationSeconds != nil {
if *source.PodCertificate.MaxExpirationSeconds < 3600 {
allErrs = append(allErrs, field.Invalid(projPath.Child("maxExpirationSeconds"), *source.PodCertificate.MaxExpirationSeconds, "if provided, maxExpirationSeconds must be >= 3600"))
}
maxMaxExpirationSeconds := certificates.MaxMaxExpirationSeconds
if IsKubernetesSignerName(source.PodCertificate.SignerName) {
maxMaxExpirationSeconds = certificates.KubernetesMaxMaxExpirationSeconds
}
if *source.PodCertificate.MaxExpirationSeconds > int32(maxMaxExpirationSeconds) {
allErrs = append(allErrs, field.Invalid(projPath.Child("maxExpirationSeconds"), *source.PodCertificate.MaxExpirationSeconds, fmt.Sprintf("if provided, maxExpirationSeconds must be <= %d", maxMaxExpirationSeconds)))
}
}
numPaths := 0
if len(source.PodCertificate.CredentialBundlePath) != 0 {
numPaths++
// Credential bundle path must be a valid local, non-reserved path.
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.CredentialBundlePath, projPath.Child("credentialBundlePath"))...)
// Credential bundle path must not collide with a path from another source.
if !allPaths.Has(source.PodCertificate.CredentialBundlePath) {
allPaths.Insert(source.PodCertificate.CredentialBundlePath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.CredentialBundlePath, "conflicting duplicate paths"))
}
}
if len(source.PodCertificate.KeyPath) != 0 {
numPaths++
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.KeyPath, projPath.Child("keyPath"))...)
if !allPaths.Has(source.PodCertificate.KeyPath) {
allPaths.Insert(source.PodCertificate.KeyPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.KeyPath, "conflicting duplicate paths"))
}
}
if len(source.PodCertificate.CertificateChainPath) != 0 {
numPaths++
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.CertificateChainPath, projPath.Child("certificateChainPath"))...)
if !allPaths.Has(source.PodCertificate.CertificateChainPath) {
allPaths.Insert(source.PodCertificate.CertificateChainPath)
} else {
allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.CertificateChainPath, "conflicting duplicate paths"))
}
}
if numPaths == 0 {
allErrs = append(allErrs, field.Required(projPath, "specify at least one of credentialBundlePath, keyPath, and certificateChainPath"))
}
}
if numSources > 1 {
allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type per source"))
}
}
return allErrs
}
func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
projectionMode := projection.DefaultMode
if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg))
}
allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath, opts)...)
return allErrs
}
var supportedHostPathTypes = sets.New(
core.HostPathUnset,
core.HostPathDirectoryOrCreate,
core.HostPathDirectory,
core.HostPathFileOrCreate,
core.HostPathFile,
core.HostPathSocket,
core.HostPathCharDev,
core.HostPathBlockDev)
func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if hostPathType != nil && !supportedHostPathTypes.Has(*hostPathType) {
allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, sets.List(supportedHostPathTypes)))
}
return allErrs
}
// validateLocalDescendingPath makes sure targetPath:
// 1. is not an absolute path
// 2. does not contain any ".." path elements
func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if path.IsAbs(targetPath) {
allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path"))
}
allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...)
return allErrs
}
// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split
//
// This assumes the OS of the apiserver and the nodes are the same. The same check should be done
// on the node to ensure there are no backsteps.
func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
parts := strings.Split(filepath.ToSlash(targetPath), "/")
for _, item := range parts {
if item == ".." {
allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'"))
break // even for `../../..`, one error is sufficient to make the point
}
}
return allErrs
}
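// Illustrative sketch of the backstep check (hypothetical caller, not used in
// this file); inputs are assumed values:
//
//    errs := validatePathNoBacksteps("a/b/c", field.NewPath("path")) // empty: no ".." elements
//    errs = validatePathNoBacksteps("a/../b", field.NewPath("path")) // one Invalid error
//    errs = validatePathNoBacksteps("..a/b", field.NewPath("path"))  // empty: "..a" is not a backstep
//    _ = errs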
// validateMountPropagation verifies that MountPropagation field is valid and
// allowed for given container.
func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if mountPropagation == nil {
return allErrs
}
supportedMountPropagations := sets.New(
core.MountPropagationBidirectional,
core.MountPropagationHostToContainer,
core.MountPropagationNone)
if !supportedMountPropagations.Has(*mountPropagation) {
allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, sets.List(supportedMountPropagations)))
}
if container == nil {
// The container is not available yet.
// Stop validation now; Pod validation will refuse final
// Pods with Bidirectional propagation in non-privileged containers.
return allErrs
}
privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged
if *mountPropagation == core.MountPropagationBidirectional && !privileged {
allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers"))
}
return allErrs
}
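// Illustrative sketch (assumed values; hypothetical caller): Bidirectional
// propagation is rejected for a non-privileged container.
//
//    mode := core.MountPropagationBidirectional
//    c := &core.Container{Name: "app"} // no SecurityContext, so not privileged
//    errs := validateMountPropagation(&mode, c, field.NewPath("mountPropagation"))
//    // errs contains one Forbidden error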
// validateMountRecursiveReadOnly validates RecursiveReadOnly mounts.
func validateMountRecursiveReadOnly(mount core.VolumeMount, fldPath *field.Path) field.ErrorList {
if mount.RecursiveReadOnly == nil {
return nil
}
allErrs := field.ErrorList{}
switch *mount.RecursiveReadOnly {
case core.RecursiveReadOnlyDisabled:
// NOP
case core.RecursiveReadOnlyEnabled, core.RecursiveReadOnlyIfPossible:
if !mount.ReadOnly {
allErrs = append(allErrs, field.Forbidden(fldPath, "may only be specified when readOnly is true"))
}
if mount.MountPropagation != nil && *mount.MountPropagation != core.MountPropagationNone {
allErrs = append(allErrs, field.Forbidden(fldPath, "may only be specified when mountPropagation is None or not specified"))
}
default:
supportedRRO := sets.New(
core.RecursiveReadOnlyDisabled,
core.RecursiveReadOnlyIfPossible,
core.RecursiveReadOnlyEnabled)
allErrs = append(allErrs, field.NotSupported(fldPath, *mount.RecursiveReadOnly, sets.List(supportedRRO)))
}
return allErrs
}
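// Illustrative sketch (assumed values): Enabled/IfPossible require readOnly
// and a mount propagation of None (or unset).
//
//    rro := core.RecursiveReadOnlyEnabled
//    m := core.VolumeMount{Name: "data", MountPath: "/data", ReadOnly: true, RecursiveReadOnly: &rro}
//    errs := validateMountRecursiveReadOnly(m, field.NewPath("volumeMounts").Index(0))
//    // errs is empty; with ReadOnly=false it would contain a Forbidden error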
// ValidateLocalNonReservedPath makes sure targetPath:
// 1. is not an absolute path
// 2. does not contain any '..' elements
// 3. does not start with '..'
func ValidateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...)
// Don't report this error if the check for .. elements already caught it.
if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") {
allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'"))
}
return allErrs
}
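// Illustrative sketch of the three rules (assumed inputs):
//
//    fld := field.NewPath("path")
//    _ = ValidateLocalNonReservedPath("etc/ssl/certs", fld) // empty
//    _ = ValidateLocalNonReservedPath("/etc/ssl", fld)      // Invalid: must be a relative path
//    _ = ValidateLocalNonReservedPath("..hidden", fld)      // Invalid: must not start with '..'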
func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(rbd.CephMonitors) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
}
if len(rbd.RBDImage) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
}
return allErrs
}
func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(rbd.CephMonitors) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
}
if len(rbd.RBDImage) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("image"), ""))
}
return allErrs
}
func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
if cd.SecretRef != nil {
if len(cd.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
}
return allErrs
}
func validateCinderPersistentVolumeSource(cd *core.CinderPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
if cd.SecretRef != nil {
if len(cd.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
if len(cd.SecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
}
}
return allErrs
}
func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cephfs.Monitors) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
}
return allErrs
}
func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cephfs.Monitors) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), ""))
}
return allErrs
}
func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fv.Driver) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
// Make sure user-specified options don't use the reserved kubernetes.io or k8s.io namespaces
for k := range fv.Options {
namespace := k
if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
namespace = parts[0]
}
normalized := "." + strings.ToLower(namespace)
if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
}
}
return allErrs
}
func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fv.Driver) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("driver"), ""))
}
// Make sure user-specified options don't use the reserved kubernetes.io or k8s.io namespaces
for k := range fv.Options {
namespace := k
if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
namespace = parts[0]
}
normalized := "." + strings.ToLower(namespace)
if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved"))
}
}
return allErrs
}
func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if azure.SecretName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
}
if azure.ShareName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
}
return allErrs
}
func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if azure.SecretName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), ""))
}
if azure.ShareName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), ""))
}
if azure.SecretNamespace != nil {
if len(*azure.SecretNamespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretNamespace"), ""))
}
}
return allErrs
}
func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList {
var supportedCachingModes = sets.New(
core.AzureDataDiskCachingNone,
core.AzureDataDiskCachingReadOnly,
core.AzureDataDiskCachingReadWrite)
var supportedDiskKinds = sets.New(
core.AzureSharedBlobDisk,
core.AzureDedicatedBlobDisk,
core.AzureManagedDisk)
diskURISupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"}
diskURISupportedBlob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"}
allErrs := field.ErrorList{}
if azure.DiskName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), ""))
}
if azure.DataDiskURI == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), ""))
}
if azure.CachingMode != nil && !supportedCachingModes.Has(*azure.CachingMode) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, sets.List(supportedCachingModes)))
}
if azure.Kind != nil && !supportedDiskKinds.Has(*azure.Kind) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, sets.List(supportedDiskKinds)))
}
// validate that DiskURI has the correct format
if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskURISupportedManaged))
}
if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskURISupportedblob))
}
return allErrs
}
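// Illustrative sketch (assumed values): a managed disk must use a
// /subscriptions/... URI, while blob-backed kinds must use an https:// URI.
//
//    kind := core.AzureManagedDisk
//    azure := &core.AzureDiskVolumeSource{DiskName: "d", DataDiskURI: "https://acct.blob.core.windows.net/c/d.vhd", Kind: &kind}
//    errs := validateAzureDisk(azure, field.NewPath("azureDisk"))
//    // errs contains one NotSupported error for diskURI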
func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.VolumePath) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), ""))
}
return allErrs
}
func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.PdID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), ""))
}
return allErrs
}
func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(pwx.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
return allErrs
}
func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if sio.Gateway == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
}
if sio.System == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
}
if sio.VolumeName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
}
return allErrs
}
func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if sio.Gateway == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), ""))
}
if sio.System == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("system"), ""))
}
if sio.VolumeName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
}
return allErrs
}
func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if ls.Path == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
return allErrs
}
allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...)
return allErrs
}
func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(storageos.VolumeName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
}
if len(storageos.VolumeNamespace) > 0 {
allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
}
if storageos.SecretRef != nil {
if len(storageos.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
}
return allErrs
}
func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(storageos.VolumeName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
}
if len(storageos.VolumeNamespace) > 0 {
allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
}
if storageos.SecretRef != nil {
if len(storageos.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
if len(storageos.SecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
}
}
return allErrs
}
// validatePVSecretReference checks whether the provided SecretReference object is valid in terms of secret name and namespace.
func validatePVSecretReference(secretRef *core.SecretReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(secretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...)
}
if len(secretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(secretRef.Namespace, fldPath.Child("namespace"))...)
}
return allErrs
}
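// Illustrative sketch (assumed values): both name and namespace are required,
// with name validated as a DNS-1123 subdomain and namespace as a DNS-1123 label.
//
//    ref := &core.SecretReference{Name: "ceph-secret", Namespace: "storage"}
//    errs := validatePVSecretReference(ref, field.NewPath("secretRef"))
//    // errs is empty; an empty Name or Namespace would yield a Required error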
func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(driverName) == 0 {
allErrs = append(allErrs, field.Required(fldPath, ""))
}
if len(driverName) > 63 {
allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, 63))
}
for _, msg := range validation.IsDNS1123Subdomain(strings.ToLower(driverName)) {
allErrs = append(allErrs, field.Invalid(fldPath, driverName, msg)).WithOrigin("format=k8s-long-name")
}
return allErrs
}
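// Illustrative sketch (assumed inputs): driver names are DNS-1123 subdomains,
// at most 63 characters, compared case-insensitively.
//
//    _ = ValidateCSIDriverName("ebs.csi.aws.com", field.NewPath("driver")) // empty
//    _ = ValidateCSIDriverName("bad_driver!", field.NewPath("driver"))     // Invalid: not a DNS-1123 subdomain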
func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
if len(csi.VolumeHandle) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
if csi.ControllerPublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, fldPath.Child("controllerPublishSecretRef"))...)
}
if csi.ControllerExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, fldPath.Child("controllerExpandSecretRef"))...)
}
if csi.NodePublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, fldPath.Child("nodePublishSecretRef"))...)
}
if csi.NodeExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, fldPath.Child("nodeExpandSecretRef"))...)
}
return allErrs
}
func validateCSIVolumeSource(csi *core.CSIVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
if csi.NodePublishSecretRef != nil {
if len(csi.NodePublishSecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef", "name"), ""))
} else {
for _, msg := range ValidateSecretName(csi.NodePublishSecretRef.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), csi.NodePublishSecretRef.Name, msg))
}
}
}
return allErrs
}
func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if ephemeral.VolumeClaimTemplate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeClaimTemplate"), ""))
} else {
opts := ValidationOptionsForPersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, nil)
allErrs = append(allErrs, ValidatePersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, fldPath.Child("volumeClaimTemplate"), opts)...)
}
return allErrs
}
// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
// Checking of the object data is very minimal because only labels and annotations are used.
func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := ValidateTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"), opts)...)
return allErrs
}
func ValidateTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
// All other fields are not supported and thus must not be set
// to avoid confusion. We could reject individual fields,
// but then adding a new one to ObjectMeta wouldn't be checked
// unless this code gets updated. Instead, we ensure that
// only allowed fields are set via reflection.
allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedTemplateObjectMetaFields, "cannot be set", fldPath)...)
return allErrs
}
var allowedTemplateObjectMetaFields = map[string]bool{
"Annotations": true,
"Labels": true,
}
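// Illustrative sketch (assumed values): only labels and annotations survive
// the allow-list check above.
//
//    meta := metav1.ObjectMeta{Labels: map[string]string{"app": "db"}, Name: "ignored"}
//    errs := ValidateTemplateObjectMeta(&meta, field.NewPath("metadata"))
//    // errs flags the Name field with a "cannot be set" error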
// PersistentVolumeSpecValidationOptions contains the different settings for PersistentVolume validation
type PersistentVolumeSpecValidationOptions struct {
// Allow users to set a volume attributes class
EnableVolumeAttributesClass bool
// Allow invalid label values in the required node affinity
AllowInvalidLabelValueInRequiredNodeAffinity bool
}
// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolume object.
var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain
var supportedAccessModes = sets.New(
core.ReadWriteOnce,
core.ReadOnlyMany,
core.ReadWriteMany,
core.ReadWriteOncePod)
var supportedReclaimPolicy = sets.New(
core.PersistentVolumeReclaimDelete,
core.PersistentVolumeReclaimRecycle,
core.PersistentVolumeReclaimRetain)
var supportedVolumeModes = sets.New(core.PersistentVolumeBlock, core.PersistentVolumeFilesystem)
func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions {
opts := PersistentVolumeSpecValidationOptions{
EnableVolumeAttributesClass: utilfeature.DefaultMutableFeatureGate.Enabled(features.VolumeAttributesClass),
AllowInvalidLabelValueInRequiredNodeAffinity: false,
}
if oldPv != nil && oldPv.Spec.VolumeAttributesClassName != nil {
opts.EnableVolumeAttributesClass = true
}
if oldPv != nil && oldPv.Spec.NodeAffinity != nil &&
oldPv.Spec.NodeAffinity.Required != nil {
terms := oldPv.Spec.NodeAffinity.Required.NodeSelectorTerms
opts.AllowInvalidLabelValueInRequiredNodeAffinity = helper.HasInvalidLabelValueInNodeSelectorTerms(terms)
}
return opts
}
func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if validateInlinePersistentVolumeSpec {
if pvSpec.ClaimRef != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("claimRef"), "may not be specified in the context of inline volumes"))
}
if len(pvSpec.Capacity) != 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("capacity"), "may not be specified in the context of inline volumes"))
}
if pvSpec.CSI == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("csi"), "has to be specified in the context of inline volumes"))
}
}
if len(pvSpec.AccessModes) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), ""))
}
foundReadWriteOncePod, foundNonReadWriteOncePod := false, false
for _, mode := range pvSpec.AccessModes {
if !supportedAccessModes.Has(mode) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, sets.List(supportedAccessModes)))
}
if mode == core.ReadWriteOncePod {
foundReadWriteOncePod = true
} else if supportedAccessModes.Has(mode) {
foundNonReadWriteOncePod = true
}
}
if foundReadWriteOncePod && foundNonReadWriteOncePod {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("accessModes"), "may not use ReadWriteOncePod with other access modes"))
}
if !validateInlinePersistentVolumeSpec {
if len(pvSpec.Capacity) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("capacity"), ""))
}
if _, ok := pvSpec.Capacity[core.ResourceStorage]; !ok || len(pvSpec.Capacity) > 1 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("capacity"), pvSpec.Capacity, []core.ResourceName{core.ResourceStorage}))
}
capPath := fldPath.Child("capacity")
for r, qty := range pvSpec.Capacity {
allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...)
}
}
if len(pvSpec.PersistentVolumeReclaimPolicy) > 0 {
if validateInlinePersistentVolumeSpec {
if pvSpec.PersistentVolumeReclaimPolicy != core.PersistentVolumeReclaimRetain {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeReclaimPolicy"), "may only be "+string(core.PersistentVolumeReclaimRetain)+" in the context of inline volumes"))
}
} else {
if !supportedReclaimPolicy.Has(pvSpec.PersistentVolumeReclaimPolicy) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("persistentVolumeReclaimPolicy"), pvSpec.PersistentVolumeReclaimPolicy, sets.List(supportedReclaimPolicy)))
}
}
}
var nodeAffinitySpecified bool
var errs field.ErrorList
if pvSpec.NodeAffinity != nil {
if validateInlinePersistentVolumeSpec {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeAffinity"), "may not be specified in the context of inline volumes"))
} else {
nodeAffinitySpecified, errs = validateVolumeNodeAffinity(pvSpec.NodeAffinity, opts, fldPath.Child("nodeAffinity"))
allErrs = append(allErrs, errs...)
}
}
numVolumes := 0
if pvSpec.HostPath != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateHostPathVolumeSource(pvSpec.HostPath, fldPath.Child("hostPath"))...)
}
}
if pvSpec.GCEPersistentDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pvSpec.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
}
}
if pvSpec.AWSElasticBlockStore != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pvSpec.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
}
}
if pvSpec.Glusterfs != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateGlusterfsPersistentVolumeSource(pvSpec.Glusterfs, fldPath.Child("glusterfs"))...)
}
}
if pvSpec.Flocker != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateFlockerVolumeSource(pvSpec.Flocker, fldPath.Child("flocker"))...)
}
}
if pvSpec.NFS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateNFSVolumeSource(pvSpec.NFS, fldPath.Child("nfs"))...)
}
}
if pvSpec.RBD != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateRBDPersistentVolumeSource(pvSpec.RBD, fldPath.Child("rbd"))...)
}
}
if pvSpec.Quobyte != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateQuobyteVolumeSource(pvSpec.Quobyte, fldPath.Child("quobyte"))...)
}
}
if pvSpec.CephFS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCephFSPersistentVolumeSource(pvSpec.CephFS, fldPath.Child("cephfs"))...)
}
}
if pvSpec.ISCSI != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pvSpec.ISCSI, pvName, fldPath.Child("iscsi"))...)
}
}
if pvSpec.Cinder != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCinderPersistentVolumeSource(pvSpec.Cinder, fldPath.Child("cinder"))...)
}
}
if pvSpec.FC != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateFCVolumeSource(pvSpec.FC, fldPath.Child("fc"))...)
}
}
if pvSpec.FlexVolume != nil {
numVolumes++
allErrs = append(allErrs, validateFlexPersistentVolumeSource(pvSpec.FlexVolume, fldPath.Child("flexVolume"))...)
}
if pvSpec.AzureFile != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureFilePV(pvSpec.AzureFile, fldPath.Child("azureFile"))...)
}
}
if pvSpec.VsphereVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateVsphereVolumeSource(pvSpec.VsphereVolume, fldPath.Child("vsphereVolume"))...)
}
}
if pvSpec.PhotonPersistentDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pvSpec.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
}
}
if pvSpec.PortworxVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validatePortworxVolumeSource(pvSpec.PortworxVolume, fldPath.Child("portworxVolume"))...)
}
}
if pvSpec.AzureDisk != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(pvSpec.AzureDisk, fldPath.Child("azureDisk"))...)
}
}
if pvSpec.ScaleIO != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateScaleIOPersistentVolumeSource(pvSpec.ScaleIO, fldPath.Child("scaleIO"))...)
}
}
if pvSpec.Local != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("local"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateLocalVolumeSource(pvSpec.Local, fldPath.Child("local"))...)
// NodeAffinity is required
if !nodeAffinitySpecified {
allErrs = append(allErrs, field.Required(fldPath.Child("nodeAffinity"), "Local volume requires node affinity"))
}
}
}
if pvSpec.StorageOS != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pvSpec.StorageOS, fldPath.Child("storageos"))...)
}
}
if pvSpec.CSI != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, fldPath.Child("csi"))...)
}
}
if numVolumes == 0 {
allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
}
// do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy
if pvSpec.HostPath != nil && path.Clean(pvSpec.HostPath.Path) == "/" && pvSpec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'"))
}
if len(pvSpec.StorageClassName) > 0 {
if validateInlinePersistentVolumeSpec {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageClassName"), "may not be specified in the context of inline volumes"))
} else {
for _, msg := range ValidateClassName(pvSpec.StorageClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), pvSpec.StorageClassName, msg))
}
}
}
if pvSpec.VolumeMode != nil {
if validateInlinePersistentVolumeSpec {
if *pvSpec.VolumeMode != core.PersistentVolumeFilesystem {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "may not specify volumeMode other than "+string(core.PersistentVolumeFilesystem)+" in the context of inline volumes"))
}
} else {
if !supportedVolumeModes.Has(*pvSpec.VolumeMode) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *pvSpec.VolumeMode, sets.List(supportedVolumeModes)))
}
}
}
if pvSpec.VolumeAttributesClassName != nil && opts.EnableVolumeAttributesClass {
if len(*pvSpec.VolumeAttributesClassName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeAttributesClassName"), "an empty string is disallowed"))
} else {
for _, msg := range ValidateClassName(*pvSpec.VolumeAttributesClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("volumeAttributesClassName"), *pvSpec.VolumeAttributesClassName, msg))
}
}
if pvSpec.CSI == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("csi"), "has to be specified when using volumeAttributesClassName"))
}
}
return allErrs
}
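// Illustrative sketch (assumed values): a minimal non-inline PV spec that
// passes validation; setting a second volume source would yield a Forbidden error.
//
//    spec := &core.PersistentVolumeSpec{
//        AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
//        Capacity:    core.ResourceList{core.ResourceStorage: resource.MustParse("10Gi")},
//        PersistentVolumeSource: core.PersistentVolumeSource{
//            HostPath: &core.HostPathVolumeSource{Path: "/data"},
//        },
//    }
//    errs := ValidatePersistentVolumeSpec(spec, "pv-1", false, field.NewPath("spec"), PersistentVolumeSpecValidationOptions{})
//    // errs is empty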
func ValidatePersistentVolume(pv *core.PersistentVolume, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
metaPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath)
allErrs = append(allErrs, ValidatePersistentVolumeSpec(&pv.Spec, pv.ObjectMeta.Name, false, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make.
// newPv is updated with fields that cannot be changed.
func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
allErrs := ValidatePersistentVolume(newPv, opts)
// if oldPV does not have ControllerExpandSecretRef then allow it to be set
if (oldPv.Spec.CSI != nil && oldPv.Spec.CSI.ControllerExpandSecretRef == nil) &&
(newPv.Spec.CSI != nil && newPv.Spec.CSI.ControllerExpandSecretRef != nil) {
newPv = newPv.DeepCopy()
newPv.Spec.CSI.ControllerExpandSecretRef = nil
}
// PersistentVolumeSource should be immutable after creation.
if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) {
pvcSourceDiff := diff.Diff(oldPv.Spec.PersistentVolumeSource, newPv.Spec.PersistentVolumeSource)
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), fmt.Sprintf("spec.persistentvolumesource is immutable after creation\n%v", pvcSourceDiff)))
}
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...)
// Allow setting NodeAffinity if oldPv NodeAffinity was not set
if oldPv.Spec.NodeAffinity != nil {
allErrs = append(allErrs, validatePvNodeAffinity(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
}
if !apiequality.Semantic.DeepEqual(oldPv.Spec.VolumeAttributesClassName, newPv.Spec.VolumeAttributesClassName) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update is forbidden when the VolumeAttributesClass feature gate is disabled"))
}
if opts.EnableVolumeAttributesClass {
if oldPv.Spec.VolumeAttributesClassName != nil && newPv.Spec.VolumeAttributesClassName == nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update from non-nil value to nil is forbidden"))
}
}
}
return allErrs
}
// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make.
func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata"))
if len(newPv.ResourceVersion) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
}
return allErrs
}
type PersistentVolumeClaimSpecValidationOptions struct {
// Allow users to recover from a previously failed expansion operation
EnableRecoverFromExpansionFailure bool
// Allow invalid label values in the label selector
AllowInvalidLabelValueInSelector bool
// Allow an invalid API group in the data source and data source reference
AllowInvalidAPIGroupInDataSourceOrRef bool
// Allow users to set a volume attributes class
EnableVolumeAttributesClass bool
}
func ValidationOptionsForPersistentVolumeClaimCreate() PersistentVolumeClaimSpecValidationOptions {
return PersistentVolumeClaimSpecValidationOptions{
EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
AllowInvalidLabelValueInSelector: false,
EnableVolumeAttributesClass: utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass),
}
}
func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
opts := ValidationOptionsForPersistentVolumeClaimCreate()
if oldPvc == nil {
// If there's no old PVC, use the options based solely on feature enablement
return opts
}
// If the old object had an invalid API group in the data source or data source reference, continue to allow it in the new object
opts.AllowInvalidAPIGroupInDataSourceOrRef = allowInvalidAPIGroupInDataSourceOrRef(&oldPvc.Spec)
if oldPvc.Spec.VolumeAttributesClassName != nil {
// If the old object had a volume attributes class, continue to validate it in the new object.
opts.EnableVolumeAttributesClass = true
}
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
if len(unversionedvalidation.ValidateLabelSelector(oldPvc.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
// If the old object had an invalid label selector, continue to allow it in the new object
opts.AllowInvalidLabelValueInSelector = true
}
if helper.ClaimContainsAllocatedResources(oldPvc) ||
helper.ClaimContainsAllocatedResourceStatus(oldPvc) {
opts.EnableRecoverFromExpansionFailure = true
}
return opts
}
func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions {
opts := PersistentVolumeClaimSpecValidationOptions{
AllowInvalidLabelValueInSelector: false,
EnableVolumeAttributesClass: utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass),
}
if oldClaimTemplate == nil {
// If there's no old PVC template, use the options based solely on feature enablement
return opts
}
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
if len(unversionedvalidation.ValidateLabelSelector(oldClaimTemplate.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
// If the old object had an invalid label selector, continue to allow it in the new object
opts.AllowInvalidLabelValueInSelector = true
}
return opts
}
// allowInvalidAPIGroupInDataSourceOrRef returns true if the spec contains a data source or data source reference with an API group
func allowInvalidAPIGroupInDataSourceOrRef(spec *core.PersistentVolumeClaimSpec) bool {
if spec.DataSource != nil && spec.DataSource.APIGroup != nil {
return true
}
if spec.DataSourceRef != nil && spec.DataSourceRef.APIGroup != nil {
return true
}
return false
}
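// Illustrative sketch (assumed values): the helper only reports whether an
// API group is present, not whether it is valid.
//
//    grp := "snapshot.storage.k8s.io"
//    spec := &core.PersistentVolumeClaimSpec{DataSource: &core.TypedLocalObjectReference{APIGroup: &grp, Kind: "VolumeSnapshot", Name: "snap"}}
//    // allowInvalidAPIGroupInDataSourceOrRef(spec) == true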
// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim
func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// validateDataSource validates a DataSource/DataSourceRef in a PersistentVolumeClaimSpec
func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList {
allErrs := field.ErrorList{}
if len(dataSource.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if len(dataSource.Kind) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
}
apiGroup := ""
if dataSource.APIGroup != nil {
apiGroup = *dataSource.APIGroup
}
if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" {
allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
}
if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef {
for _, errString := range validation.IsDNS1123Subdomain(apiGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString))
}
}
return allErrs
}
// validateDataSourceRef validates a DataSourceRef in a PersistentVolumeClaimSpec
func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList {
allErrs := field.ErrorList{}
if len(dataSourceRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if len(dataSourceRef.Kind) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
}
apiGroup := ""
if dataSourceRef.APIGroup != nil {
apiGroup = *dataSourceRef.APIGroup
}
if len(apiGroup) == 0 && dataSourceRef.Kind != "PersistentVolumeClaim" {
allErrs = append(allErrs, field.Invalid(fldPath, dataSourceRef.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
}
if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef {
for _, errString := range validation.IsDNS1123Subdomain(apiGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString))
}
}
if dataSourceRef.Namespace != nil && len(*dataSourceRef.Namespace) > 0 {
for _, msg := range ValidateNameFunc(ValidateNamespaceName)(*dataSourceRef.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), *dataSourceRef.Namespace, msg))
}
}
return allErrs
}
// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec
func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(spec.AccessModes) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
}
if spec.Selector != nil {
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...)
}
foundReadWriteOncePod, foundNonReadWriteOncePod := false, false
for _, mode := range spec.AccessModes {
if !supportedAccessModes.Has(mode) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, sets.List(supportedAccessModes)))
}
if mode == core.ReadWriteOncePod {
foundReadWriteOncePod = true
} else if supportedAccessModes.Has(mode) {
foundNonReadWriteOncePod = true
}
}
if foundReadWriteOncePod && foundNonReadWriteOncePod {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("accessModes"), "may not use ReadWriteOncePod with other access modes"))
}
storageValue, ok := spec.Resources.Requests[core.ResourceStorage]
if !ok {
allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), ""))
} else if errs := ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage))); len(errs) > 0 {
allErrs = append(allErrs, errs...)
} else {
allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceStorage, storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...)
}
if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 {
for _, msg := range ValidateClassName(*spec.StorageClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg))
}
}
if spec.VolumeMode != nil && !supportedVolumeModes.Has(*spec.VolumeMode) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, sets.List(supportedVolumeModes)))
}
if spec.DataSource != nil {
allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...)
}
if spec.DataSourceRef != nil {
allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...)
}
if spec.DataSourceRef != nil && spec.DataSourceRef.Namespace != nil && len(*spec.DataSourceRef.Namespace) > 0 {
if spec.DataSource != nil {
allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
"may not be specified when dataSourceRef.namespace is specified"))
}
} else if spec.DataSource != nil && spec.DataSourceRef != nil {
if !isDataSourceEqualDataSourceRef(spec.DataSource, spec.DataSourceRef) {
allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
"must match dataSourceRef"))
}
}
if spec.VolumeAttributesClassName != nil && len(*spec.VolumeAttributesClassName) > 0 && opts.EnableVolumeAttributesClass {
for _, msg := range ValidateClassName(*spec.VolumeAttributesClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("volumeAttributesClassName"), *spec.VolumeAttributesClassName, msg))
}
}
return allErrs
}
func isDataSourceEqualDataSourceRef(dataSource *core.TypedLocalObjectReference, dataSourceRef *core.TypedObjectReference) bool {
return reflect.DeepEqual(dataSource.APIGroup, dataSourceRef.APIGroup) && dataSource.Kind == dataSourceRef.Kind && dataSource.Name == dataSourceRef.Name
}
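// Illustrative sketch (assumed values): equality compares apiGroup, kind, and
// name only; dataSourceRef.Namespace has no counterpart in dataSource.
//
//    ds := &core.TypedLocalObjectReference{Kind: "PersistentVolumeClaim", Name: "src"}
//    ref := &core.TypedObjectReference{Kind: "PersistentVolumeClaim", Name: "src"}
//    // isDataSourceEqualDataSourceRef(ds, ref) == true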
// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc, opts)...)
newPvcClone := newPvc.DeepCopy()
oldPvcClone := oldPvc.DeepCopy()
// PVController needs to update PVC.Spec w/ VolumeName.
// Claims are immutable in order to enforce quota, range limits, etc. without gaming the system.
if len(oldPvc.Spec.VolumeName) == 0 {
// volumeName changes are allowed once.
oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName // +k8s:verify-mutation:reason=clone
}
if validateStorageClassUpgradeFromAnnotation(oldPvcClone.Annotations, newPvcClone.Annotations,
oldPvcClone.Spec.StorageClassName, newPvcClone.Spec.StorageClassName) {
newPvcClone.Spec.StorageClassName = nil
metav1.SetMetaDataAnnotation(&newPvcClone.ObjectMeta, core.BetaStorageClassAnnotation, oldPvcClone.Annotations[core.BetaStorageClassAnnotation])
} else {
// storageclass annotation should be immutable after creation
// TODO: remove Beta when no longer needed
allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
// If the update from annotation to attribute failed, we can attempt to validate the update from a nil value.
if validateStorageClassUpgradeFromNil(oldPvc.Annotations, oldPvc.Spec.StorageClassName, newPvc.Spec.StorageClassName, opts) {
newPvcClone.Spec.StorageClassName = oldPvcClone.Spec.StorageClassName // +k8s:verify-mutation:reason=clone
}
// TODO: add a specific error with a hint that storage class name can not be changed
// (instead of letting spec comparison below return generic field forbidden error)
}
// let's make sure the storage values are the same.
if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone
}
// let's make sure the volume attributes class name is the same.
if newPvc.Status.Phase == core.ClaimBound {
newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone
}
oldSize := oldPvc.Spec.Resources.Requests["storage"]
newSize := newPvc.Spec.Resources.Requests["storage"]
statusSize := oldPvc.Status.Capacity["storage"]
if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
specDiff := diff.Diff(oldPvcClone.Spec, newPvcClone.Spec)
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("spec is immutable after creation except resources.requests and volumeAttributesClassName for bound claims\n%v", specDiff)))
}
if newSize.Cmp(oldSize) < 0 {
if !opts.EnableRecoverFromExpansionFailure {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value"))
} else {
// This validation permits reducing pvc requested size up to capacity recorded in pvc.status
// so that users can recover from volume expansion failure, but Kubernetes does not actually
// support volume shrinking
if newSize.Cmp(statusSize) <= 0 {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than status.capacity"))
}
}
}
allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...)
if !apiequality.Semantic.DeepEqual(oldPvc.Spec.VolumeAttributesClassName, newPvc.Spec.VolumeAttributesClassName) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update is forbidden when the VolumeAttributesClass feature gate is disabled"))
}
if opts.EnableVolumeAttributesClass {
// Forbid removing VAC once one is successfully applied.
if oldPvc.Status.CurrentVolumeAttributesClassName != nil {
if newPvc.Spec.VolumeAttributesClassName == nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update to nil is forbidden when status.currentVolumeAttributesClassName is not nil"))
} else if len(*newPvc.Spec.VolumeAttributesClassName) == 0 {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "volumeAttributesClassName"), "update to empty string is forbidden when status.currentVolumeAttributesClassName is not nil"))
}
}
}
}
return allErrs
}
// Provide an upgrade path from PVC with storage class specified in beta
// annotation to storage class specified in attribute. We allow update of
// StorageClassName only if the following four conditions are met at the same time:
// 1. The old pvc's StorageClassAnnotation is set
// 2. The old pvc's StorageClassName is not set
// 3. The new pvc's StorageClassName is set and equal to the old value in annotation
// 4. If the new pvc's StorageClassAnnotation is set, it must be equal to the old pv/pvc's StorageClassAnnotation
func validateStorageClassUpgradeFromAnnotation(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
oldSc, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
newScInAnnotation, newAnnotationExist := newAnnotations[core.BetaStorageClassAnnotation]
return oldAnnotationExist /* condition 1 */ &&
oldScName == nil /* condition 2*/ &&
(newScName != nil && *newScName == oldSc) /* condition 3 */ &&
(!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
}
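// Illustrative sketch (assumed values) of the annotation-to-attribute upgrade:
//
//    oldAnn := map[string]string{core.BetaStorageClassAnnotation: "fast"}
//    sc := "fast"
//    // validateStorageClassUpgradeFromAnnotation(oldAnn, nil, nil, &sc) == true
//    // (old annotation set, old name nil, new name matches the annotation)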
// Provide an upgrade path from PVC with nil storage class. We allow update of
// StorageClassName only if the following three conditions are met at the same time:
// 1. The new pvc's StorageClassName is not nil
// 2. The old pvc's StorageClassName is nil
// 3. The old pvc either does not have beta annotation set, or the beta annotation matches new pvc's StorageClassName
func validateStorageClassUpgradeFromNil(oldAnnotations map[string]string, oldScName, newScName *string, opts PersistentVolumeClaimSpecValidationOptions) bool {
oldAnnotation, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
return newScName != nil /* condition 1 */ &&
oldScName == nil /* condition 2 */ &&
(!oldAnnotationExist || *newScName == oldAnnotation) /* condition 3 */
}
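// Illustrative sketch (assumed values) of the nil-to-set upgrade:
//
//    sc := "standard"
//    // validateStorageClassUpgradeFromNil(nil, nil, &sc, PersistentVolumeClaimSpecValidationOptions{}) == true
//    // (new name set, old name nil, no old beta annotation)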
func validatePersistentVolumeClaimResourceKey(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
// For native resource names (either unprefixed names or names with the kubernetes.io prefix),
// the only allowed value is storage
if helper.IsNativeResource(core.ResourceName(value)) {
if core.ResourceName(value) != core.ResourceStorage {
return append(allErrs, field.NotSupported(fldPath, value, []core.ResourceName{core.ResourceStorage}))
}
}
return allErrs
}
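// Illustrative sketch (assumed inputs): native resource keys other than
// "storage" are rejected; non-native prefixed keys only need to be qualified names.
//
//    fld := field.NewPath("status", "allocatedResources")
//    _ = validatePersistentVolumeClaimResourceKey("storage", fld)             // empty
//    _ = validatePersistentVolumeClaimResourceKey("cpu", fld)                 // NotSupported
//    _ = validatePersistentVolumeClaimResourceKey("example.com/storage", fld) // empty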
var resizeStatusSet = sets.New(core.PersistentVolumeClaimControllerResizeInProgress,
core.PersistentVolumeClaimControllerResizeInfeasible,
core.PersistentVolumeClaimNodeResizePending,
core.PersistentVolumeClaimNodeResizeInProgress,
core.PersistentVolumeClaimNodeResizeInfeasible)
// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, validationOpts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
if len(newPvc.ResourceVersion) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
}
if len(newPvc.Spec.AccessModes) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), ""))
}
capPath := field.NewPath("status", "capacity")
for r, qty := range newPvc.Status.Capacity {
allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
}
if validationOpts.EnableRecoverFromExpansionFailure {
resizeStatusPath := field.NewPath("status", "allocatedResourceStatuses")
if newPvc.Status.AllocatedResourceStatuses != nil {
resizeStatus := newPvc.Status.AllocatedResourceStatuses
for k, v := range resizeStatus {
if errs := validatePersistentVolumeClaimResourceKey(k.String(), resizeStatusPath); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
if !resizeStatusSet.Has(v) {
allErrs = append(allErrs, field.NotSupported(resizeStatusPath, v, sets.List(resizeStatusSet)))
continue
}
}
}
allocPath := field.NewPath("status", "allocatedResources")
for r, qty := range newPvc.Status.AllocatedResources {
if errs := validatePersistentVolumeClaimResourceKey(r.String(), allocPath); len(errs) > 0 {
allErrs = append(allErrs, errs...)
continue
}
if errs := validateBasicResource(qty, allocPath.Key(string(r))); len(errs) > 0 {
allErrs = append(allErrs, errs...)
} else {
allErrs = append(allErrs, ValidateResourceQuantityValue(core.ResourceStorage, qty, allocPath.Key(string(r)))...)
}
}
}
return allErrs
}
var supportedPortProtocols = sets.New(
core.ProtocolTCP,
core.ProtocolUDP,
core.ProtocolSCTP)
func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allNames := sets.Set[string]{}
for i, port := range ports {
idxPath := fldPath.Index(i)
if len(port.Name) > 0 {
if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
for _, msg := range msgs {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msg))
}
} else if allNames.Has(port.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
} else {
allNames.Insert(port.Name)
}
}
if port.ContainerPort == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
} else {
for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
}
}
if port.HostPort != 0 {
for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
}
}
if len(port.Protocol) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
} else if !supportedPortProtocols.Has(port.Protocol) {
allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, sets.List(supportedPortProtocols)))
}
}
return allErrs
}
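// Illustrative sketch (assumed values): port names must be unique valid IANA
// service names, containerPort is required, and the protocol must be supported.
//
//    ports := []core.ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: core.ProtocolTCP}}
//    errs := validateContainerPorts(ports, field.NewPath("ports"))
//    // errs is empty; a second port named "http" would yield a Duplicate error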
// ValidateEnv validates env vars
func ValidateEnv(vars []core.EnvVar, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
for i, ev := range vars {
idxPath := fldPath.Index(i)
if len(ev.Name) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
} else {
if opts.AllowRelaxedEnvironmentVariableValidation {
for _, msg := range validation.IsRelaxedEnvVarName(ev.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
}
} else {
for _, msg := range validation.IsEnvVarName(ev.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
}
}
}
allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"), opts)...)
}
return allErrs
}
var validEnvDownwardAPIFieldPathExpressions = sets.New(
"metadata.name",
"metadata.namespace",
"metadata.uid",
"spec.nodeName",
"spec.serviceAccountName",
"status.hostIP",
"status.hostIPs",
"status.podIP",
"status.podIPs",
)
var validContainerResourceFieldPathExpressions = sets.New(
"limits.cpu",
"limits.memory",
"limits.ephemeral-storage",
"requests.cpu",
"requests.memory",
"requests.ephemeral-storage",
)
var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.New(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI)
const hugepagesRequestsPrefixDownwardAPI string = `requests.hugepages-`
const hugepagesLimitsPrefixDownwardAPI string = `limits.hugepages-`
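// validateEnvVarValueFrom validates the valueFrom of an EnvVar: exactly one
// source must be set, and a source may not be combined with a non-empty
// `value`.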
func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if ev.ValueFrom == nil {
return allErrs
}
numSources := 0
if ev.ValueFrom.FieldRef != nil {
numSources++
allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
}
if ev.ValueFrom.ResourceFieldRef != nil {
numSources++
allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages, fldPath.Child("resourceFieldRef"), false)...)
}
if ev.ValueFrom.ConfigMapKeyRef != nil {
numSources++
allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...)
}
if ev.ValueFrom.SecretKeyRef != nil {
numSources++
allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...)
}
if ev.ValueFrom.FileKeyRef != nil {
numSources++
allErrs = append(allErrs, validateFileKeySelector(ev.ValueFrom.FileKeyRef, fldPath.Child("fileKeyRef"))...)
}
if numSources == 0 {
if opts.AllowEnvFilesValidation {
allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef`, `secretKeyRef` or `fileKeyRef`"))
} else {
allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`"))
}
} else if len(ev.Value) != 0 {
allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty"))
} else if numSources > 1 {
allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
}
return allErrs
}
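// validateObjectFieldSelector validates a downward API fieldRef: apiVersion
// and fieldPath are required, the fieldPath must convert to a supported
// internal field label, and only metadata.annotations and metadata.labels
// accept subscripted paths.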
func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.Set[string], fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(fs.APIVersion) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), ""))
return allErrs
}
if len(fs.FieldPath) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), ""))
return allErrs
}
internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err)))
return allErrs
}
if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok {
switch path {
case "metadata.annotations":
allErrs = append(allErrs, ValidateQualifiedName(strings.ToLower(subscript), fldPath)...)
case "metadata.labels":
allErrs = append(allErrs, ValidateQualifiedName(subscript, fldPath)...)
default:
allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript"))
}
} else if !expressions.Has(path) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, sets.List(*expressions)))
}
return allErrs
}
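// validateContainerResourceFieldSelector validates a resourceFieldRef:
// containerName is required when used from a volume source, and the resource
// must be one of the supported expressions or carry a supported prefix
// (currently the hugepages request/limit prefixes).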
func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.Set[string], prefixes *sets.Set[string], fldPath *field.Path, volume bool) field.ErrorList {
allErrs := field.ErrorList{}
if volume && len(fs.ContainerName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), ""))
} else if len(fs.Resource) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
} else if !expressions.Has(fs.Resource) {
// check if the prefix is present
foundPrefix := false
if prefixes != nil {
for _, prefix := range sets.List(*prefixes) {
if strings.HasPrefix(fs.Resource, prefix) {
foundPrefix = true
break
}
}
}
if !foundPrefix {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, sets.List(*expressions)))
}
}
allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...)
return allErrs
}
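// ValidateEnvFrom validates a list of EnvFromSources: any prefix must be a
// valid environment variable name, and each entry must set exactly one of
// configMapRef or secretRef.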
func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
for i, ev := range vars {
idxPath := fldPath.Index(i)
if len(ev.Prefix) > 0 {
if opts.AllowRelaxedEnvironmentVariableValidation {
for _, msg := range validation.IsRelaxedEnvVarName(ev.Prefix) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg))
}
} else {
for _, msg := range validation.IsEnvVarName(ev.Prefix) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg))
}
}
}
numSources := 0
if ev.ConfigMapRef != nil {
numSources++
allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...)
}
if ev.SecretRef != nil {
numSources++
allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...)
}
if numSources == 0 {
allErrs = append(allErrs, field.Invalid(idxPath, "", "must specify one of: `configMapRef` or `secretRef`"))
} else if numSources > 1 {
allErrs = append(allErrs, field.Invalid(idxPath, "", "may not have more than one field specified at a time"))
}
}
return allErrs
}
func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(configMapSource.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
for _, msg := range ValidateConfigMapName(configMapSource.Name, true) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg))
}
}
return allErrs
}
func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(secretSource.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
for _, msg := range ValidateSecretName(secretSource.Name, true) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg))
}
}
return allErrs
}
var validContainerResourceDivisorForCPU = sets.New("1m", "1")
var validContainerResourceDivisorForMemory = sets.New(
"1",
"1k", "1M", "1G", "1T", "1P", "1E",
"1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
var validContainerResourceDivisorForHugePages = sets.New(
"1",
"1k", "1M", "1G", "1T", "1P", "1E",
"1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
var validContainerResourceDivisorForEphemeralStorage = sets.New(
"1",
"1k", "1M", "1G", "1T", "1P", "1E",
"1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei")
func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
unsetDivisor := resource.Quantity{}
if unsetDivisor.Cmp(divisor) == 0 {
return allErrs
}
switch rName {
case "limits.cpu", "requests.cpu":
if !validContainerResourceDivisorForCPU.Has(divisor.String()) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource"))
}
case "limits.memory", "requests.memory":
if !validContainerResourceDivisorForMemory.Has(divisor.String()) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))
}
case "limits.ephemeral-storage", "requests.ephemeral-storage":
if !validContainerResourceDivisorForEphemeralStorage.Has(divisor.String()) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the local ephemeral storage resource"))
}
}
if strings.HasPrefix(rName, hugepagesRequestsPrefixDownwardAPI) || strings.HasPrefix(rName, hugepagesLimitsPrefixDownwardAPI) {
if !validContainerResourceDivisorForHugePages.Has(divisor.String()) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the hugepages resource"))
}
}
return allErrs
}
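// validateConfigMapKeySelector validates the name and key of a configMapKeyRef.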
func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
nameFn := ValidateNameFunc(ValidateConfigMapName)
for _, msg := range nameFn(s.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
}
if len(s.Key) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
} else {
for _, msg := range validation.IsConfigMapKey(s.Key) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
}
}
return allErrs
}
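// validateSecretKeySelector validates the name and key of a secretKeyRef.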
func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
nameFn := ValidateNameFunc(ValidateSecretName)
for _, msg := range nameFn(s.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
}
if len(s.Key) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
} else {
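// Secret keys share the same syntax rules as ConfigMap keys, so
// IsConfigMapKey applies here as well.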
for _, msg := range validation.IsConfigMapKey(s.Key) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
}
}
return allErrs
}
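// validateFileKeySelector validates a fileKeyRef: key, volumeName, and path
// are all required; the key must be a valid (relaxed) environment variable
// name, the volume name a DNS-1123 label, and the path must not contain
// backsteps ("..").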
func validateFileKeySelector(s *core.FileKeySelector, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// The Key field must be non-empty and must be a valid environment variable name.
if len(s.Key) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
} else {
for _, msg := range validation.IsRelaxedEnvVarName(s.Key) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
}
}
// The VolumeName field must be non-empty and must be a valid DNS1123 label.
if len(s.VolumeName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(s.VolumeName, fldPath.Child("volumeName"))...)
}
// The Path field must be non-empty and must not contain backsteps ("..").
if len(s.Path) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("path"), ""))
} else {
allErrs = append(allErrs, validatePathNoBacksteps(s.Path, fldPath.Child("path"))...)
}
return allErrs
}
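// GetVolumeMountMap returns a map of volume mount names to mount paths.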
func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string {
volmounts := make(map[string]string)
for _, mnt := range mounts {
volmounts[mnt.Name] = mnt.MountPath
}
return volmounts
}
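// GetVolumeDeviceMap returns a map of volume device names to device paths.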
func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string {
volDevices := make(map[string]string)
for _, dev := range devices {
volDevices[dev.Name] = dev.DevicePath
}
return volDevices
}
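// ValidateVolumeMounts validates a container's volumeMounts: each mount must
// reference a declared volume, mount paths must be unique, mounts must not
// collide with volumeDevices, and subPath/subPathExpr must be valid,
// mutually exclusive descending paths.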
func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
mountpoints := sets.New[string]()
for i, mnt := range mounts {
idxPath := fldPath.Index(i)
if len(mnt.Name) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
}
if !IsMatchedVolume(mnt.Name, volumes) {
allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name))
}
if len(mnt.MountPath) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), ""))
}
if mountpoints.Has(mnt.MountPath) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique"))
}
mountpoints.Insert(mnt.MountPath)
// check for overlap with VolumeDevice
if mountNameAlreadyExists(mnt.Name, voldevices) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist in volumeDevices"))
}
if mountPathAlreadyExists(mnt.MountPath, voldevices) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices"))
}
if len(mnt.SubPath) > 0 {
allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, idxPath.Child("subPath"))...)
}
if len(mnt.SubPathExpr) > 0 {
if len(mnt.SubPath) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("subPathExpr"), mnt.SubPathExpr, "subPathExpr and subPath are mutually exclusive"))
}
allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPathExpr, idxPath.Child("subPathExpr"))...)
}
if mnt.MountPropagation != nil {
allErrs = append(allErrs, validateMountPropagation(mnt.MountPropagation, container, idxPath.Child("mountPropagation"))...)
}
allErrs = append(allErrs, validateMountRecursiveReadOnly(mnt, idxPath.Child("recursiveReadOnly"))...)
}
return allErrs
}
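// ValidateVolumeDevices validates a container's volumeDevices: names and
// device paths must be unique, each device must reference a PVC-backed
// volume, and devices must not collide with volumeMounts.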
func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
devicepath := sets.New[string]()
devicename := sets.New[string]()
for i, dev := range devices {
idxPath := fldPath.Index(i)
devName := dev.Name
devPath := dev.DevicePath
didMatch, isPVC := isMatchedDevice(devName, volumes)
if len(devName) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
}
if devicename.Has(devName) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique"))
}
// Must be based on PersistentVolumeClaim (PVC reference or generic ephemeral inline volume)
if didMatch && !isPVC {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim or Ephemeral for block mode"))
}
if !didMatch {
allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName))
}
if len(devPath) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), ""))
}
if devicepath.Has(devPath) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique"))
}
if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')"))
} else {
devicepath.Insert(devPath)
}
// check for overlap with VolumeMount
if deviceNameAlreadyExists(devName, volmounts) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts"))
}
if devicePathAlreadyExists(devPath, volmounts) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts"))
}
if len(devName) > 0 {
devicename.Insert(devName)
}
}
return allErrs
}
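// validatePodResourceClaims validates the list of pod-level resource claims,
// accumulating claim names as it goes to detect duplicates.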
func validatePodResourceClaims(podMeta *metav1.ObjectMeta, claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.New[string]()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(podMeta, claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}
// gatherPodResourceClaimNames returns a set of all non-empty
// PodResourceClaim.Name values. Validation that those names are valid is
// handled by validatePodResourceClaims.
func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.Set[string] {
podClaimNames := sets.Set[string]{}
for _, claim := range claims {
if claim.Name != "" {
podClaimNames.Insert(claim.Name)
}
}
return podClaimNames
}
func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResourceClaim, podClaimNames *sets.Set[string], fldPath *field.Path) field.ErrorList {
// Static pods don't support resource claims.
if podMeta != nil {
if _, ok := podMeta.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(fldPath, "static pods do not support resource claims")}
}
}
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
podClaimNames.Insert(claim.Name)
}
if claim.ResourceClaimName != nil && claim.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claim, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
}
if claim.ResourceClaimName == nil && claim.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claim, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
if claim.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*claim.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), *claim.ResourceClaimName, detail))
}
}
if claim.ResourceClaimTemplateName != nil {
for _, detail := range ValidateResourceClaimTemplateName(*claim.ResourceClaimTemplateName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimTemplateName"), *claim.ResourceClaimTemplateName, detail))
}
}
return allErrs
}
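// validateLivenessProbe applies the common probe validation and additionally
// requires successThreshold to be 1.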
func validateLivenessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...)
if probe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
}
return allErrs
}
func validateReadinessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...)
if probe.TerminationGracePeriodSeconds != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
}
return allErrs
}
func validateStartupProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath, opts)...)
if probe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
}
return allErrs
}
func validateProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), gracePeriod, fldPath, opts)...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...)
if probe.TerminationGracePeriodSeconds != nil && *probe.TerminationGracePeriodSeconds <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), *probe.TerminationGracePeriodSeconds, "must be greater than 0"))
}
return allErrs
}
func validateInitContainerRestartPolicy(restartPolicy *core.ContainerRestartPolicy, restartRules []core.ContainerRestartRule, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrors field.ErrorList
if restartPolicy == nil {
return allErrors
}
if opts.AllowContainerRestartPolicyRules {
switch *restartPolicy {
case core.ContainerRestartPolicyAlways:
// Sidecar containers should not have restart policy rules
if len(restartRules) > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("restartPolicyRules"), "restartPolicyRules are not allowed for init containers with restart policy Always"))
}
default:
allErrors = append(allErrors, validateContainerRestartPolicy(restartPolicy, restartRules, fldPath)...)
}
} else {
switch *restartPolicy {
case core.ContainerRestartPolicyAlways:
break
default:
validValues := []core.ContainerRestartPolicy{core.ContainerRestartPolicyAlways}
allErrors = append(allErrors, field.NotSupported(fldPath.Child("restartPolicy"), *restartPolicy, validValues))
}
}
return allErrors
}
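// commonHandler unifies ProbeHandler and LifecycleHandler so that a single
// validateHandler can validate both.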
type commonHandler struct {
Exec *core.ExecAction
HTTPGet *core.HTTPGetAction
TCPSocket *core.TCPSocketAction
GRPC *core.GRPCAction
Sleep *core.SleepAction
}
func handlerFromProbe(ph *core.ProbeHandler) commonHandler {
return commonHandler{
Exec: ph.Exec,
HTTPGet: ph.HTTPGet,
TCPSocket: ph.TCPSocket,
GRPC: ph.GRPC,
}
}
func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler {
return commonHandler{
Exec: lh.Exec,
HTTPGet: lh.HTTPGet,
TCPSocket: lh.TCPSocket,
Sleep: lh.Sleep,
}
}
func validateSleepAction(sleep *core.SleepAction, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrors := field.ErrorList{}
// We allow gracePeriod to be nil here because the pod in which this SleepAction
// is defined might have an invalid grace period defined, and we don't want to
// flag another error here when the real problem will already be flagged.
if opts.AllowPodLifecycleSleepActionZeroValue {
if gracePeriod != nil && (sleep.Seconds < 0 || sleep.Seconds > *gracePeriod) {
invalidStr := fmt.Sprintf("must be non-negative and less than terminationGracePeriodSeconds (%d)", *gracePeriod)
allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr))
}
} else {
if gracePeriod != nil && (sleep.Seconds <= 0 || sleep.Seconds > *gracePeriod) {
invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d). Enable AllowPodLifecycleSleepActionZeroValue feature gate for zero sleep.", *gracePeriod)
allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr))
}
}
return allErrors
}
func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if config == nil {
allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
return allErrs
}
if config.ClientIP == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
return allErrs
}
if config.ClientIP.TimeoutSeconds == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP)))
return allErrs
}
allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...)
return allErrs
}
func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds {
allErrs = append(allErrs, field.Invalid(fldPath, *timeout, fmt.Sprintf("must be greater than 0 and less than or equal to %d", core.MaxClientIPServiceAffinitySeconds)))
}
return allErrs
}
// AccumulateUniqueHostPorts extracts each HostPort of each Container,
// accumulating the results and returning an error if any ports conflict.
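// Ports are keyed as "protocol/hostIP/hostPort" (e.g. "TCP//8080" when no
// hostIP is set), so the same port number may be reused across different
// protocols or host IPs. A minimal usage sketch (the pod variable is
// hypothetical):
//
//	ports := sets.Set[string]{}
//	errs := AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers"))
//	errs = append(errs, AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers"))...)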
func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.Set[string], fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for ci, ctr := range containers {
idxPath := fldPath.Index(ci)
portsPath := idxPath.Child("ports")
for pi := range ctr.Ports {
idxPath := portsPath.Index(pi)
port := ctr.Ports[pi].HostPort
if port == 0 {
continue
}
str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port)
if accumulator.Has(str) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str))
} else {
accumulator.Insert(str)
}
}
}
return allErrs
}
// checkHostPortConflicts checks for colliding Port.HostPort values across
// a slice of containers.
func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList {
allPorts := sets.Set[string]{}
return AccumulateUniqueHostPorts(containers, &allPorts, fldPath)
}
func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if len(exec.Command) == 0 {
allErrors = append(allErrors, field.Required(fldPath.Child("command"), ""))
}
return allErrors
}
var supportedHTTPSchemes = sets.New(core.URISchemeHTTP, core.URISchemeHTTPS)
func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if len(http.Path) == 0 {
allErrors = append(allErrors, field.Required(fldPath.Child("path"), ""))
}
allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...)
if !supportedHTTPSchemes.Has(http.Scheme) {
allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, sets.List(supportedHTTPSchemes)))
}
for _, header := range http.HTTPHeaders {
for _, msg := range validation.IsHTTPHeaderName(header.Name) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg))
}
}
return allErrors
}
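// ValidatePortNumOrName validates a port reference that is either a numeric
// port or a named port: numbers must be valid port numbers and names must be
// valid IANA_SVC_NAMEs. A minimal sketch of both forms:
//
//	ValidatePortNumOrName(intstr.FromInt32(8080), fldPath)    // checked as a port number
//	ValidatePortNumOrName(intstr.FromString("http"), fldPath) // checked as a port name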
func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if port.Type == intstr.Int {
for _, msg := range validation.IsValidPortNum(port.IntValue()) {
allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg))
}
} else if port.Type == intstr.String {
for _, msg := range validation.IsValidPortName(port.StrVal) {
allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg))
}
} else {
allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type)))
}
return allErrs
}
func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList {
return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
}
func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList {
return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port"))
}
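// validateHandler validates that exactly one handler type (exec, httpGet,
// tcpSocket, grpc, or sleep) is set, and dispatches to the matching
// action-specific validation.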
func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
numHandlers := 0
allErrors := field.ErrorList{}
if handler.Exec != nil {
if numHandlers > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type"))
} else {
numHandlers++
allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...)
}
}
if handler.HTTPGet != nil {
if numHandlers > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type"))
} else {
numHandlers++
allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...)
}
}
if handler.TCPSocket != nil {
if numHandlers > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type"))
} else {
numHandlers++
allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...)
}
}
if handler.GRPC != nil {
if numHandlers > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("grpc"), "may not specify more than 1 handler type"))
} else {
numHandlers++
allErrors = append(allErrors, validateGRPCAction(handler.GRPC, fldPath.Child("grpc"))...)
}
}
if handler.Sleep != nil {
if numHandlers > 0 {
allErrors = append(allErrors, field.Forbidden(fldPath.Child("sleep"), "may not specify more than 1 handler type"))
} else {
numHandlers++
allErrors = append(allErrors, validateSleepAction(handler.Sleep, gracePeriod, fldPath.Child("sleep"), opts)...)
}
}
if numHandlers == 0 {
allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type"))
}
return allErrors
}
var supportedStopSignalsLinux = sets.New(
core.SIGABRT, core.SIGALRM, core.SIGBUS, core.SIGCHLD,
core.SIGCLD, core.SIGCONT, core.SIGFPE, core.SIGHUP,
core.SIGILL, core.SIGINT, core.SIGIO, core.SIGIOT,
core.SIGKILL, core.SIGPIPE, core.SIGPOLL, core.SIGPROF,
core.SIGPWR, core.SIGQUIT, core.SIGSEGV, core.SIGSTKFLT,
core.SIGSTOP, core.SIGSYS, core.SIGTERM, core.SIGTRAP,
core.SIGTSTP, core.SIGTTIN, core.SIGTTOU, core.SIGURG,
core.SIGUSR1, core.SIGUSR2, core.SIGVTALRM, core.SIGWINCH,
core.SIGXCPU, core.SIGXFSZ, core.SIGRTMIN, core.SIGRTMINPLUS1,
core.SIGRTMINPLUS2, core.SIGRTMINPLUS3, core.SIGRTMINPLUS4,
core.SIGRTMINPLUS5, core.SIGRTMINPLUS6, core.SIGRTMINPLUS7,
core.SIGRTMINPLUS8, core.SIGRTMINPLUS9, core.SIGRTMINPLUS10,
core.SIGRTMINPLUS11, core.SIGRTMINPLUS12, core.SIGRTMINPLUS13,
core.SIGRTMINPLUS14, core.SIGRTMINPLUS15, core.SIGRTMAXMINUS14,
core.SIGRTMAXMINUS13, core.SIGRTMAXMINUS12, core.SIGRTMAXMINUS11,
core.SIGRTMAXMINUS10, core.SIGRTMAXMINUS9, core.SIGRTMAXMINUS8,
core.SIGRTMAXMINUS7, core.SIGRTMAXMINUS6, core.SIGRTMAXMINUS5,
core.SIGRTMAXMINUS4, core.SIGRTMAXMINUS3, core.SIGRTMAXMINUS2,
core.SIGRTMAXMINUS1, core.SIGRTMAX)
var supportedStopSignalsWindows = sets.New(core.SIGKILL, core.SIGTERM)
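// validateStopSignal validates a container stop signal against the signals
// supported by the pod's OS; spec.os.name must be set for a stop signal to
// be used at all.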
func validateStopSignal(stopSignal *core.Signal, fldPath *field.Path, os *core.PodOS) field.ErrorList {
allErrors := field.ErrorList{}
if os == nil {
allErrors = append(allErrors, field.Forbidden(fldPath, "may not be set for containers with empty `spec.os.name`"))
} else if os.Name == core.Windows {
if !supportedStopSignalsWindows.Has(*stopSignal) {
allErrors = append(allErrors, field.NotSupported(fldPath, *stopSignal, sets.List(supportedStopSignalsWindows)))
}
} else if os.Name == core.Linux {
if !supportedStopSignalsLinux.Has(*stopSignal) {
allErrors = append(allErrors, field.NotSupported(fldPath, *stopSignal, sets.List(supportedStopSignalsLinux)))
}
}
return allErrors
}
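// validateLifecycle validates the postStart and preStop handlers and the
// stopSignal, when set.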
func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions, os *core.PodOS) field.ErrorList {
allErrs := field.ErrorList{}
if lifecycle.PostStart != nil {
allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"), opts)...)
}
if lifecycle.PreStop != nil {
allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), gracePeriod, fldPath.Child("preStop"), opts)...)
}
if lifecycle.StopSignal != nil {
allErrs = append(allErrs, validateStopSignal(lifecycle.StopSignal, fldPath.Child("stopSignal"), os)...)
}
return allErrs
}
var supportedPullPolicies = sets.New(
core.PullAlways,
core.PullIfNotPresent,
core.PullNever)
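// validatePullPolicy validates that an image pull policy is set and is one
// of the supported policies.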
func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
switch policy {
case core.PullAlways, core.PullIfNotPresent, core.PullNever:
break
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, policy, sets.List(supportedPullPolicies)))
}
return allErrors
}
var supportedResizeResources = sets.New(core.ResourceCPU, core.ResourceMemory)
var supportedResizePolicies = sets.New(core.NotRequired, core.RestartContainer)
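// validateResizePolicy validates a container's resize policies: resource
// names must be unique and supported, restart policies must be supported,
// and a pod with restartPolicy=Never only allows NotRequired.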
func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path, podRestartPolicy *core.RestartPolicy) field.ErrorList {
allErrors := field.ErrorList{}
// Validate that resource names are not repeated, and that only supported resource names and policy values are specified.
resources := make(map[core.ResourceName]bool)
for i, p := range policyList {
if _, found := resources[p.ResourceName]; found {
allErrors = append(allErrors, field.Duplicate(fldPath.Index(i), p.ResourceName))
}
resources[p.ResourceName] = true
switch p.ResourceName {
case core.ResourceCPU, core.ResourceMemory:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.ResourceName, sets.List(supportedResizeResources)))
}
switch p.RestartPolicy {
case core.NotRequired, core.RestartContainer:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, sets.List(supportedResizePolicies)))
}
if *podRestartPolicy == core.RestartPolicyNever && p.RestartPolicy != core.NotRequired {
allErrors = append(allErrors, field.Invalid(fldPath, p.RestartPolicy, "must be 'NotRequired' when `restartPolicy` is 'Never'"))
}
}
return allErrors
}
var supportedContainerRestartPolicies = sets.New(
core.ContainerRestartPolicyAlways,
core.ContainerRestartPolicyNever,
core.ContainerRestartPolicyOnFailure,
)
var supportedContainerRestartPolicyOperators = sets.New(
core.ContainerRestartRuleOnExitCodesOpIn,
core.ContainerRestartRuleOnExitCodesOpNotIn,
)
// validateContainerRestartPolicy checks that the container-level restartPolicy and restartPolicyRules are valid for
// regular containers and non-sidecar init containers.
func validateContainerRestartPolicy(policy *core.ContainerRestartPolicy, rules []core.ContainerRestartRule, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
restartPolicyFld := fldPath.Child("restartPolicy")
if policy == nil {
if len(rules) == 0 {
return allErrs
} else {
allErrs = append(allErrs, field.Required(restartPolicyFld, "must specify restartPolicy when restart rules are used"))
}
} else if !supportedContainerRestartPolicies.Has(*policy) {
allErrs = append(allErrs, field.NotSupported(restartPolicyFld, *policy, sets.List(supportedContainerRestartPolicies)))
}
if len(rules) > 20 {
allErrs = append(allErrs, field.TooLong(fldPath.Child("restartPolicyRules"), rules, 20))
}
for i, rule := range rules {
policyRulesFld := fldPath.Child("restartPolicyRules").Index(i)
if rule.Action != core.ContainerRestartRuleActionRestart {
validActions := []core.ContainerRestartRuleAction{core.ContainerRestartRuleActionRestart}
allErrs = append(allErrs, field.NotSupported(policyRulesFld.Child("action"), rule.Action, validActions))
}
if rule.ExitCodes != nil {
exitCodesFld := policyRulesFld.Child("exitCodes")
if !supportedContainerRestartPolicyOperators.Has(rule.ExitCodes.Operator) {
allErrs = append(allErrs, field.NotSupported(exitCodesFld.Child("operator"), rule.ExitCodes.Operator, sets.List(supportedContainerRestartPolicyOperators)))
}
if len(rule.ExitCodes.Values) > 255 {
allErrs = append(allErrs, field.TooLong(exitCodesFld.Child("values"), rule.ExitCodes.Values, 255))
}
} else {
allErrs = append(allErrs, field.Required(policyRulesFld.Child("exitCodes"), "must be specified"))
}
}
return allErrs
}
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
var allErrs field.ErrorList
if len(ephemeralContainers) == 0 {
return allErrs
}
otherNames, allNames := sets.Set[string]{}, sets.Set[string]{}
for _, c := range containers {
otherNames.Insert(c.Name)
allNames.Insert(c.Name)
}
for _, c := range initContainers {
otherNames.Insert(c.Name)
allNames.Insert(c.Name)
}
for i, ec := range ephemeralContainers {
idxPath := fldPath.Index(i)
c := (*core.Container)(&ec.EphemeralContainerCommon)
allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts, podRestartPolicy, hostUsers)...)
// Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations
// here where we've already converted EphemeralContainerCommon to Container.
allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...)
// Ephemeral containers must have a name unique across all container types.
if allNames.Has(ec.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ec.Name))
} else {
allNames.Insert(ec.Name)
}
// The target container name must exist and be non-ephemeral.
if ec.TargetContainerName != "" && !otherNames.Has(ec.TargetContainerName) {
allErrs = append(allErrs, field.NotFound(idxPath.Child("targetContainerName"), ec.TargetContainerName))
}
// Ephemeral containers should not be relied upon for fundamental pod services, so fields such as
// Lifecycle, probes, resources and ports should be disallowed. This is implemented as a list
// of allowed fields so that new fields will be given consideration prior to inclusion in ephemeral containers.
allErrs = append(allErrs, validateFieldAllowList(ec.EphemeralContainerCommon, allowedEphemeralContainerFields, "cannot be set for an Ephemeral Container", idxPath)...)
// VolumeMount subpaths have the potential to leak resources since they're implemented with bind mounts
// that aren't cleaned up until the pod exits. Since they also imply that the container is being used
// as part of the workload, they're disallowed entirely.
for i, vm := range ec.VolumeMounts {
if vm.SubPath != "" {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("volumeMounts").Index(i).Child("subPath"), "cannot be set for an Ephemeral Container"))
}
if vm.SubPathExpr != "" {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("volumeMounts").Index(i).Child("subPathExpr"), "cannot be set for an Ephemeral Container"))
}
}
}
return allErrs
}
// validateFieldAllowList checks that only allowed fields are set.
// The value must be a struct (not a pointer to a struct!).
func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
reflectType, reflectValue := reflect.TypeOf(value), reflect.ValueOf(value)
for i := 0; i < reflectType.NumField(); i++ {
f := reflectType.Field(i)
if allowedFields[f.Name] {
continue
}
// Compare the value of this field to its zero value to determine if it has been set
if !reflect.DeepEqual(reflectValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
r, n := utf8.DecodeRuneInString(f.Name)
lcName := string(unicode.ToLower(r)) + f.Name[n:]
allErrs = append(allErrs, field.Forbidden(fldPath.Child(lcName), errorText))
}
}
return allErrs
}
// validateInitContainers is called by pod spec and template validation to validate the list of init containers
func validateInitContainers(containers []core.Container, os *core.PodOS, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
var allErrs field.ErrorList
allNames := sets.Set[string]{}
for _, ctr := range regularContainers {
allNames.Insert(ctr.Name)
}
for i, ctr := range containers {
idxPath := fldPath.Index(i)
// Apply the validation common to all container types
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts, podRestartPolicy, hostUsers)...)
restartAlways := false
// Apply the validation specific to init containers
if ctr.RestartPolicy != nil {
allErrs = append(allErrs, validateInitContainerRestartPolicy(ctr.RestartPolicy, ctr.RestartPolicyRules, idxPath, opts)...)
restartAlways = *ctr.RestartPolicy == core.ContainerRestartPolicyAlways
}
// Names must be unique within regular and init containers. Collisions with ephemeral containers
// will be detected by validateEphemeralContainers().
if allNames.Has(ctr.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
} else if len(ctr.Name) > 0 {
allNames.Insert(ctr.Name)
}
// Check for port conflicts in init containers individually since init containers run one-by-one.
allErrs = append(allErrs, checkHostPortConflicts([]core.Container{ctr}, fldPath)...)
switch {
case restartAlways:
if ctr.Lifecycle != nil {
allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, idxPath.Child("lifecycle"), opts, os)...)
}
allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, idxPath.Child("livenessProbe"), opts)...)
allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, idxPath.Child("readinessProbe"), opts)...)
allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, idxPath.Child("startupProbe"), opts)...)
default:
// These fields are disallowed for init containers.
if ctr.Lifecycle != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.LivenessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.ReadinessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.StartupProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers without restartPolicy=Always"))
}
}
if !opts.AllowSidecarResizePolicy && len(ctr.ResizePolicy) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("resizePolicy"), ctr.ResizePolicy, "must not be set for init containers"))
}
}
return allErrs
}
// validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral
// container list validation to require a properly formatted name, image, etc.
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], path *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
var allErrs field.ErrorList
namePath := path.Child("name")
if len(ctr.Name) == 0 {
allErrs = append(allErrs, field.Required(namePath, ""))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...)
}
// TODO: do not validate leading and trailing whitespace to preserve backward compatibility.
// for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template
// others may have done similar
if len(ctr.Image) == 0 {
allErrs = append(allErrs, field.Required(path.Child("image"), ""))
}
switch ctr.TerminationMessagePolicy {
case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError:
case "":
allErrs = append(allErrs, field.Required(path.Child("terminationMessagePolicy"), ""))
default:
supported := []core.TerminationMessagePolicy{
core.TerminationMessageReadFile,
core.TerminationMessageFallbackToLogsOnError,
}
allErrs = append(allErrs, field.NotSupported(path.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, supported))
}
volMounts := GetVolumeMountMap(ctr.VolumeMounts)
volDevices := GetVolumeDeviceMap(ctr.VolumeDevices)
allErrs = append(allErrs, validateContainerPorts(ctr.Ports, path.Child("ports"))...)
allErrs = append(allErrs, ValidateEnv(ctr.Env, path.Child("env"), opts)...)
allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, path.Child("envFrom"), opts)...)
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"), opts)...)
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateContainerResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"), podRestartPolicy)...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"), hostUsers)...)
return allErrs
}
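// validateHostUsers validates the restrictions that apply when a pod runs in
// a user namespace (hostUsers=false): host namespaces are forbidden, and
// volumeDevices are forbidden unless explicitly allowed by opts.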
func validateHostUsers(spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
// Only make the following checks if hostUsers is false (otherwise, the container uses the
// same userns as the host, and so there isn't anything to check).
if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers {
return allErrs
}
// We decided to restrict the usage of userns with other host namespaces:
// https://github.com/kubernetes/kubernetes/pull/111090#discussion_r935994282
// The tl;dr is: you can easily run into permission issues that seem unexpected, we don't
// know of any good use case and we can always enable them later.
// Note we already validated above spec.SecurityContext is not nil.
if spec.SecurityContext.HostNetwork {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostNetwork"), "when `hostUsers` is false"))
}
if spec.SecurityContext.HostPID {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPID"), "when `hostUsers` is false"))
}
if spec.SecurityContext.HostIPC {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostIPC"), "when `hostUsers` is false"))
}
if !opts.AllowUserNamespacesWithVolumeDevices {
// volumeDevices won't work: they don't support idmap mounts, and we don't chown them.
// Return a clear error in this case.
podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, containerPath *field.Path) bool {
if len(c.VolumeDevices) > 0 {
allErrs = append(allErrs, field.Forbidden(containerPath.Child("volumeDevices"), "when `hostUsers` is false"))
}
return true // Always visit all containers.
})
}
return allErrs
}
// validateFileKeyRefVolumes validates that volumes referenced by FileKeyRef environment variables
// are of type emptyDir. FileKeyRef requires emptyDir volumes to ensure proper file access.
func validateFileKeyRefVolumes(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
volumeSources := make(map[string]*core.VolumeSource)
for i := range spec.Volumes {
volume := &spec.Volumes[i]
volumeSources[volume.Name] = &volume.VolumeSource
}
podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
envPath := cFldPath.Child("env")
for j, env := range c.Env {
// Only care about environment variables that use FileKeyRef.
if env.ValueFrom == nil || env.ValueFrom.FileKeyRef == nil {
continue
}
volumeName := env.ValueFrom.FileKeyRef.VolumeName
fileKeyRefPath := envPath.Index(j).Child("valueFrom").Child("fileKeyRef")
volumeNamePath := fileKeyRefPath.Child("volumeName")
source, found := volumeSources[volumeName]
if !found {
// The referenced volume does not exist in the pod spec.
allErrs = append(allErrs, field.NotFound(volumeNamePath, volumeName))
} else if source.EmptyDir == nil {
// The volume exists, but it is not of type emptyDir, which is required.
allErrs = append(allErrs, field.Invalid(volumeNamePath, volumeName, "referenced volume must be of type emptyDir"))
}
}
return true
})
return allErrs
}
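// validatePodHostName validates spec.hostnameOverride: it may not be
// combined with setHostnameAsFQDN or hostNetwork, and must be an RFC 1123
// DNS subdomain of at most 64 characters.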
func validatePodHostName(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if spec.HostnameOverride == nil {
return allErrs
}
// If SetHostnameAsFQDN is true, HostnameOverride must not be set.
if spec.SetHostnameAsFQDN != nil && *spec.SetHostnameAsFQDN {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostnameOverride"), "may not be specified when setHostnameAsFQDN is true"))
}
// If HostNetwork is true, HostnameOverride must not be set.
if spec.SecurityContext != nil && spec.SecurityContext.HostNetwork {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostnameOverride"), "may not be specified when hostNetwork is true"))
}
if len(*spec.HostnameOverride) > 64 {
allErrs = append(allErrs, field.TooLong(fldPath.Child("hostnameOverride"), "" /*unused*/, 64))
}
// The hostname must be a valid RFC 1123 DNS subdomain.
allErrs = append(allErrs, ValidateDNS1123Subdomain(*spec.HostnameOverride, fldPath.Child("hostnameOverride"))...)
return allErrs
}
// validateContainers is called by pod spec and template validation to validate the list of regular containers.
func validateContainers(containers []core.Container, os *core.PodOS, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
allErrs := field.ErrorList{}
if len(containers) == 0 {
return append(allErrs, field.Required(fldPath, ""))
}
allNames := sets.Set[string]{}
for i, ctr := range containers {
path := fldPath.Index(i)
// Apply validation common to all containers
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts, podRestartPolicy, hostUsers)...)
// Container names must be unique within the list of regular containers.
// Collisions with init or ephemeral container names will be detected by the init or ephemeral
// container validation to prevent duplicate error messages.
if allNames.Has(ctr.Name) {
allErrs = append(allErrs, field.Duplicate(path.Child("name"), ctr.Name))
} else {
allNames.Insert(ctr.Name)
}
// These fields are allowed for regular containers and restartable init
// containers.
// Regular init container and ephemeral container validation will return
// field.Forbidden() for these paths.
if ctr.Lifecycle != nil {
allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, path.Child("lifecycle"), opts, os)...)
}
allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, path.Child("livenessProbe"), opts)...)
allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, path.Child("readinessProbe"), opts)...)
allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, path.Child("startupProbe"), opts)...)
if opts.AllowContainerRestartPolicyRules {
allErrs = append(allErrs, validateContainerRestartPolicy(ctr.RestartPolicy, ctr.RestartPolicyRules, path)...)
} else if ctr.RestartPolicy != nil {
allErrs = append(allErrs, field.Forbidden(path.Child("restartPolicy"), "may not be set for non-init containers"))
}
}
// Port conflicts are checked across all containers
allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...)
return allErrs
}
func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
switch *restartPolicy {
case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever:
break
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
validValues := []core.RestartPolicy{core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever}
allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
}
return allErrors
}
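// ValidatePreemptionPolicy validates that a preemption policy is set and is
// one of the supported policies.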
func ValidatePreemptionPolicy(preemptionPolicy *core.PreemptionPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
switch *preemptionPolicy {
case core.PreemptLowerPriority, core.PreemptNever:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
validValues := []core.PreemptionPolicy{core.PreemptLowerPriority, core.PreemptNever}
allErrors = append(allErrors, field.NotSupported(fldPath, *preemptionPolicy, validValues))
}
return allErrors
}
func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
switch *dnsPolicy {
case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault, core.DNSNone:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
validValues := []core.DNSPolicy{core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault, core.DNSNone}
allErrors = append(allErrors, field.NotSupported(fldPath, *dnsPolicy, validValues))
}
return allErrors
}
var validFSGroupChangePolicies = sets.New(core.FSGroupChangeOnRootMismatch, core.FSGroupChangeAlways)
func validateFSGroupChangePolicy(fsGroupPolicy *core.PodFSGroupChangePolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if !validFSGroupChangePolicies.Has(*fsGroupPolicy) {
allErrors = append(allErrors, field.NotSupported(fldPath, *fsGroupPolicy, sets.List(validFSGroupChangePolicies)))
}
return allErrors
}
const (
// Limits on various DNS parameters. These are derived from
// restrictions in Linux libc name resolution handling.
// Max number of DNS name servers.
MaxDNSNameservers = 3
// Max number of domains in the search path list.
MaxDNSSearchPaths = 32
// Max number of characters in the search path.
MaxDNSSearchListChars = 2048
)
func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, value := range readinessGates {
allErrs = append(allErrs, ValidateQualifiedName(string(value.ConditionType), fldPath.Index(i).Child("conditionType"))...)
}
return allErrs
}
func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// There should be no duplicates in the list of scheduling gates.
seen := sets.Set[string]{}
for i, schedulingGate := range schedulingGates {
allErrs = append(allErrs, ValidateQualifiedName(schedulingGate.Name, fldPath.Index(i))...)
if seen.Has(schedulingGate.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), schedulingGate.Name))
}
seen.Insert(schedulingGate.Name)
}
return allErrs
}
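// validatePodDNSConfig validates the pod's dnsConfig against the libc DNS
// limits and, when dnsPolicy is None, requires at least one nameserver.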
func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
// Validate DNSNone case. Must provide at least one DNS name server.
if dnsPolicy != nil && *dnsPolicy == core.DNSNone {
if dnsConfig == nil {
return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone)))
}
if len(dnsConfig.Nameservers) == 0 {
return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone)))
}
}
if dnsConfig != nil {
// Validate nameservers.
if len(dnsConfig.Nameservers) > MaxDNSNameservers {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
}
for i, ns := range dnsConfig.Nameservers {
allErrs = append(allErrs, IsValidIPForLegacyField(fldPath.Child("nameservers").Index(i), ns, nil)...)
}
// Validate searches.
if len(dnsConfig.Searches) > MaxDNSSearchPaths {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths)))
}
// Include the space between search paths.
if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", MaxDNSSearchListChars)))
}
for i, search := range dnsConfig.Searches {
if opts.AllowRelaxedDNSSearchValidation {
if search != "." {
search = strings.TrimSuffix(search, ".")
allErrs = append(allErrs, ValidateDNS1123SubdomainWithUnderScore(search, fldPath.Child("searches").Index(i))...)
}
} else {
search = strings.TrimSuffix(search, ".")
allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...)
}
}
// Validate options.
for i, option := range dnsConfig.Options {
if len(option.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty"))
}
}
}
return allErrs
}
// validatePodHostNetworkDeps checks fields which depend on whether HostNetwork is
// true or not. It should be called on all PodSpecs, but opts can change what
// is enforced. E.g. opts.ResourceIsPod should only be set when called in the
// context of a Pod, and not on PodSpecs which are embedded in other resources
// (e.g. Deployments).
func validatePodHostNetworkDeps(spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// For <reasons> we keep `.HostNetwork` in .SecurityContext on the internal
// version of Pod.
hostNetwork := false
if spec.SecurityContext != nil {
hostNetwork = spec.SecurityContext.HostNetwork
}
allErrors := field.ErrorList{}
if hostNetwork {
fldPath := fldPath.Child("containers")
for i, container := range spec.Containers {
portsPath := fldPath.Index(i).Child("ports")
for i, port := range container.Ports {
idxPath := portsPath.Index(i)
// At this point, we know that HostNetwork is true. If this
// PodSpec is in a Pod (opts.ResourceIsPod), then HostPort must
// be the same value as ContainerPort. If this PodSpec is in
// some other resource (e.g. Deployment) we allow 0 (i.e.
// unspecified) because it will be defaulted when the Pod is
// ultimately created, but we do not allow any other values.
if hp, cp := port.HostPort, port.ContainerPort; (opts.ResourceIsPod || hp != 0) && hp != cp {
allErrors = append(allErrors, field.Invalid(idxPath.Child("hostPort"), port.HostPort, "must match `containerPort` when `hostNetwork` is true"))
}
}
}
}
return allErrors
}
// validateImagePullSecrets checks to make sure the pull secrets are well
// formed. Right now, we only expect name to be set (it's the only field). If
// this ever changes and someone decides to set those fields, we'd like to
// know.
func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, currPullSecret := range imagePullSecrets {
idxPath := fldPath.Index(i)
strippedRef := core.LocalObjectReference{Name: currPullSecret.Name}
if !reflect.DeepEqual(strippedRef, currPullSecret) {
allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set"))
}
}
return allErrors
}
// validateAffinity checks if given affinities are valid
func validateAffinity(affinity *core.Affinity, opts PodValidationOptions, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if affinity != nil {
if affinity.NodeAffinity != nil {
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, opts, fldPath.Child("nodeAffinity"))...)
}
if affinity.PodAffinity != nil {
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAffinity"))...)
}
if affinity.PodAntiAffinity != nil {
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAntiAffinity"))...)
}
}
return allErrs
}
func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
if !allowEmpty && len(*effect) == 0 {
return field.ErrorList{field.Required(fldPath, "")}
}
allErrors := field.ErrorList{}
switch *effect {
// TODO: Replace the next line with the subsequent commented-out line when TaintEffectNoScheduleNoAdmit is implemented.
case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute:
// case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute:
default:
validValues := []core.TaintEffect{
core.TaintEffectNoSchedule,
core.TaintEffectPreferNoSchedule,
core.TaintEffectNoExecute,
// TODO: Uncomment this entry when TaintEffectNoScheduleNoAdmit is implemented.
// core.TaintEffectNoScheduleNoAdmit,
}
allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
}
return allErrors
}
// validateOnlyAddedTolerations validates updated pod tolerations.
func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, old := range oldTolerations {
found := false
oldTolerationClone := old.DeepCopy()
for _, newToleration := range newTolerations {
// assign to our clone before doing a deep equal so we can allow tolerationSeconds to change.
oldTolerationClone.TolerationSeconds = newToleration.TolerationSeconds // +k8s:verify-mutation:reason=clone
if reflect.DeepEqual(*oldTolerationClone, newToleration) {
found = true
break
}
}
if !found {
allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds"))
return allErrs
}
}
allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...)
return allErrs
}
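// validateOnlyDeletedSchedulingGates verifies that an update to a pod's
// scheduling gates only deletes gates: any gate present in newGates but not
// in oldGates yields a Forbidden error. Removing all gates is always allowed.
// For example, updating the gates from [a, b] to [a] is accepted, while
// updating from [a] to [a, b] is rejected for the new gate b.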
func validateOnlyDeletedSchedulingGates(newGates, oldGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(newGates) == 0 {
return allErrs
}
additionalGates := make(map[string]int)
for i, newGate := range newGates {
additionalGates[newGate.Name] = i
}
for _, oldGate := range oldGates {
delete(additionalGates, oldGate.Name)
}
for gate, i := range additionalGates {
allErrs = append(allErrs, field.Forbidden(fldPath.Index(i).Child("name"), fmt.Sprintf("only deletion is allowed, but found new scheduling gate '%s'", gate)))
}
return allErrs
}
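// ValidateHostAliases tests that each host alias has a valid IP address and
// that every associated hostname is a valid DNS-1123 subdomain.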
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, hostAlias := range hostAliases {
allErrs = append(allErrs, IsValidIPForLegacyField(fldPath.Index(i).Child("ip"), hostAlias.IP, nil)...)
for j, hostname := range hostAlias.Hostnames {
allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Index(i).Child("hostnames").Index(j))...)
}
}
return allErrs
}
// ValidateTolerations tests if given tolerations have valid data.
func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, toleration := range tolerations {
idxPath := fldPath.Index(i)
// validate the toleration key
if len(toleration.Key) > 0 {
allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
}
// empty toleration key with Exists operator and empty value means match all taints
if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists {
allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator,
"operator must be Exists when `key` is empty, which means \"match all values and all keys\""))
}
if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute {
allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect,
"effect must be 'NoExecute' when `tolerationSeconds` is set"))
}
// validate toleration operator and value
switch toleration.Operator {
// empty operator means Equal
case core.TolerationOpEqual, "":
if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
}
case core.TolerationOpExists:
if len(toleration.Value) > 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, "value must be empty when `operator` is 'Exists'"))
}
default:
validValues := []core.TolerationOperator{core.TolerationOpEqual, core.TolerationOpExists}
allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
}
// validate toleration effect, empty toleration effect means match all taint effects
if len(toleration.Effect) > 0 {
allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
}
}
return allErrors
}
// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template
// it only does additive validation of fields not covered in validateContainers, and is not called for
// ephemeral containers, which require a conversion to core.Container.
func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, ctr := range containers {
allErrs = append(allErrs, validateContainerOnlyForPod(&ctr, fldPath.Index(i))...)
}
return allErrs
}
// validateContainerOnlyForPod does pod-only (i.e. not pod template) validation for a single container.
// This is called by validateContainersOnlyForPod and validateEphemeralContainers directly.
func validateContainerOnlyForPod(ctr *core.Container, path *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) {
allErrs = append(allErrs, field.Invalid(path.Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
}
return allErrs
}
// PodValidationOptions contains the different settings for pod validation
type PodValidationOptions struct {
// Allow invalid pod-deletion-cost annotation value for backward compatibility.
AllowInvalidPodDeletionCost bool
// Allow invalid label-value in LabelSelector
AllowInvalidLabelValueInSelector bool
// Allow pod spec to use non-integer multiple of huge page unit size
AllowIndivisibleHugePagesValues bool
// Allow invalid topologySpreadConstraint labelSelector for backward compatibility
AllowInvalidTopologySpreadConstraintLabelSelector bool
// Allow projected token volumes with non-local paths
AllowNonLocalProjectedTokenPath bool
// Allow namespaced sysctls in hostNet and hostIPC pods
AllowNamespacedSysctlsForHostNetAndHostIPC bool
// The top-level resource being validated is a Pod, not just a PodSpec
// embedded in some other resource.
ResourceIsPod bool
// Allow relaxed validation of environment variable names
AllowRelaxedEnvironmentVariableValidation bool
// Allow the use of a relaxed DNS search
AllowRelaxedDNSSearchValidation bool
// Allows zero value for Pod Lifecycle Sleep Action
AllowPodLifecycleSleepActionZeroValue bool
// Allow only Recursive value of SELinuxChangePolicy.
AllowOnlyRecursiveSELinuxChangePolicy bool
// Indicates whether PodLevelResources feature is enabled or disabled.
PodLevelResourcesEnabled bool
// Allow sidecar containers resize policy for backward compatibility
AllowSidecarResizePolicy bool
// Allow invalid label-value in RequiredNodeSelector
AllowInvalidLabelValueInRequiredNodeAffinity bool
// Allow feature of MatchLabelKeys in TopologySpreadConstraints
AllowMatchLabelKeysInPodTopologySpread bool
// Allow merging selectors built from MatchLabelKeys into LabelSelector of TopologySpreadConstraints
AllowMatchLabelKeysInPodTopologySpreadSelectorMerge bool
// OldPod has invalid MatchLabelKeys in TopologySpreadConstraints against the current (>= v1.34) validation
OldPodViolatesMatchLabelKeysValidation bool
// OldPod has invalid MatchLabelKeys in TopologySpreadConstraints against the legacy (< v1.34) validation
OldPodViolatesLegacyMatchLabelKeysValidation bool
// Allows containers to consume environment variables via environment variable files.
AllowEnvFilesValidation bool
// Allows containers to have a restart policy and restart policy rules.
AllowContainerRestartPolicyRules bool
// Allow user namespaces with volume devices, even though they will not function properly (should only be tolerated in updates of objects which already have this invalid configuration).
AllowUserNamespacesWithVolumeDevices bool
}
// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
// and is called by ValidatePodCreate and ValidatePodUpdate.
func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.ErrorList {
metaPath := field.NewPath("metadata")
specPath := field.NewPath("spec")
allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, metaPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, metaPath.Child("annotations"), opts)...)
allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, specPath, opts)...)
// we do additional validation that is only pertinent for pods and not pod templates;
// this was done to preserve backwards compatibility
if pod.Spec.ServiceAccountName == "" {
for vi, volume := range pod.Spec.Volumes {
path := specPath.Child("volumes").Index(vi).Child("projected")
if volume.Projected != nil {
for si, source := range volume.Projected.Sources {
saPath := path.Child("sources").Index(si).Child("serviceAccountToken")
if source.ServiceAccountToken != nil {
allErrs = append(allErrs, field.Forbidden(saPath, "must not be specified when serviceAccountName is not set"))
}
}
}
}
}
allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...)
allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...)
// The checks in validateContainersOnlyForPod() are performed for ephemeral containers by validateEphemeralContainers().
return allErrs
}
// validatePodIPs validates IPs in pod status
func validatePodIPs(pod, oldPod *core.Pod) field.ErrorList {
allErrs := field.ErrorList{}
podIPsField := field.NewPath("status", "podIPs")
// all new PodIPs must be valid IPs, but existing invalid ones can be kept.
var existingPodIPs []string
if oldPod != nil {
existingPodIPs = make([]string, len(oldPod.Status.PodIPs))
for i, podIP := range oldPod.Status.PodIPs {
existingPodIPs[i] = podIP.IP
}
}
for i, podIP := range pod.Status.PodIPs {
allErrs = append(allErrs, IsValidIPForLegacyField(podIPsField.Index(i), podIP.IP, existingPodIPs)...)
}
// if we have more than one Pod.PodIP then we must have a dual-stack pair
if len(pod.Status.PodIPs) > 1 {
podIPs := make([]string, 0, len(pod.Status.PodIPs))
for _, podIP := range pod.Status.PodIPs {
podIPs = append(podIPs, podIP.IP)
}
dualStack, err := netutils.IsDualStackIPStrings(podIPs)
if err != nil {
allErrs = append(allErrs, field.InternalError(podIPsField, fmt.Errorf("failed to check for dual stack with error: %v", err)))
}
// We only support one from each IP family (i.e. max two IPs in this list).
if !dualStack || len(podIPs) > 2 {
allErrs = append(allErrs, field.Invalid(podIPsField, pod.Status.PodIPs, "may specify no more than one IP for each IP family"))
}
}
return allErrs
}
// validateHostIPs validates IPs in pod status
func validateHostIPs(pod, oldPod *core.Pod) field.ErrorList {
allErrs := field.ErrorList{}
if len(pod.Status.HostIPs) == 0 {
return allErrs
}
hostIPsField := field.NewPath("status", "hostIPs")
// hostIP must be equal to hostIPs[0].IP
if pod.Status.HostIP != pod.Status.HostIPs[0].IP {
allErrs = append(allErrs, field.Invalid(hostIPsField.Index(0).Child("ip"), pod.Status.HostIPs[0].IP, "must be equal to `hostIP`"))
}
// all new HostIPs must be valid IPs, but existing invalid ones can be kept.
var existingHostIPs []string
if oldPod != nil {
existingHostIPs = make([]string, len(oldPod.Status.HostIPs))
for i, hostIP := range oldPod.Status.HostIPs {
existingHostIPs[i] = hostIP.IP
}
}
for i, hostIP := range pod.Status.HostIPs {
allErrs = append(allErrs, IsValidIPForLegacyField(hostIPsField.Index(i), hostIP.IP, existingHostIPs)...)
}
// if we have more than one Pod.HostIP then we must have a dual-stack pair
if len(pod.Status.HostIPs) > 1 {
hostIPs := make([]string, 0, len(pod.Status.HostIPs))
for _, hostIP := range pod.Status.HostIPs {
hostIPs = append(hostIPs, hostIP.IP)
}
dualStack, err := netutils.IsDualStackIPStrings(hostIPs)
if err != nil {
allErrs = append(allErrs, field.InternalError(hostIPsField, fmt.Errorf("failed to check for dual stack with error: %v", err)))
}
// We only support one from each IP family (i.e. max two IPs in this list).
if !dualStack || len(hostIPs) > 2 {
allErrs = append(allErrs, field.Invalid(hostIPsField, pod.Status.HostIPs, "may specify no more than one IP for each IP family"))
}
}
return allErrs
}
// ValidatePodSpec tests that the specified PodSpec has valid data.
// This includes checking formatting and uniqueness. It also canonicalizes the
// structure by setting default values and implementing any backwards-compatibility
// tricks.
// The pod metadata is needed to validate generic ephemeral volumes. It is optional
// and should be left empty unless the spec is from a real pod object.
func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if spec.TerminationGracePeriodSeconds == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("terminationGracePeriodSeconds"), ""))
}
gracePeriod := spec.TerminationGracePeriodSeconds
// The default for hostUsers is true, so a spec with no SecurityContext or no HostUsers field will be true.
// If the default ever changes, this condition will need to be changed.
hostUsers := spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
allErrs = append(allErrs, vErrs...)
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
allErrs = append(allErrs, validatePodResourceClaims(podMeta, spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validateContainers(spec.Containers, spec.OS, vols, podClaimNames, gracePeriod, fldPath.Child("containers"), opts, &spec.RestartPolicy, hostUsers)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.OS, spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("initContainers"), opts, &spec.RestartPolicy, hostUsers)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts, &spec.RestartPolicy, hostUsers)...)
if opts.PodLevelResourcesEnabled {
allErrs = append(allErrs, validatePodResources(spec, podClaimNames, fldPath, opts)...)
}
allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
allErrs = append(allErrs, validatePodSpecSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
allErrs = append(allErrs, validateAffinity(spec.Affinity, opts, fldPath.Child("affinity"))...)
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"), opts)...)
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...)
allErrs = append(allErrs, validateHostUsers(spec, fldPath, opts)...)
allErrs = append(allErrs, validatePodHostName(spec, fldPath)...)
if len(spec.ServiceAccountName) > 0 {
for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
}
}
if len(spec.NodeName) > 0 {
for _, msg := range ValidateNodeName(spec.NodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
}
}
if spec.ActiveDeadlineSeconds != nil {
value := *spec.ActiveDeadlineSeconds
if value < 1 || value > math.MaxInt32 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32)))
}
}
if len(spec.Hostname) > 0 {
allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...)
}
if len(spec.Subdomain) > 0 {
allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...)
}
if len(spec.Tolerations) > 0 {
allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...)
}
if len(spec.HostAliases) > 0 {
allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...)
}
if len(spec.PriorityClassName) > 0 {
for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg))
}
}
if spec.RuntimeClassName != nil {
allErrs = append(allErrs, ValidateRuntimeClassName(*spec.RuntimeClassName, fldPath.Child("runtimeClassName"))...)
}
if spec.PreemptionPolicy != nil {
allErrs = append(allErrs, ValidatePreemptionPolicy(spec.PreemptionPolicy, fldPath.Child("preemptionPolicy"))...)
}
if spec.Overhead != nil {
allErrs = append(allErrs, validateOverhead(spec.Overhead, fldPath.Child("overhead"), opts)...)
}
if spec.OS != nil {
osErrs := validateOS(spec, fldPath.Child("os"), opts)
switch {
case len(osErrs) > 0:
allErrs = append(allErrs, osErrs...)
case spec.OS.Name == core.Linux:
allErrs = append(allErrs, validateLinux(spec, fldPath)...)
case spec.OS.Name == core.Windows:
allErrs = append(allErrs, validateWindows(spec, fldPath)...)
}
}
allErrs = append(allErrs, validateFileKeyRefVolumes(spec, fldPath)...)
return allErrs
}
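// validatePodResources validates pod-level resources: they may not be set on
// a Windows pod, pod-level resource claims are forbidden, and the resource
// requirements must be internally consistent (see
// validatePodResourceConsistency below).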
func validatePodResources(spec *core.PodSpec, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
if spec.Resources == nil {
return nil
}
resourcesFldPath := fldPath.Child("resources")
if spec.OS != nil && spec.OS.Name == core.Windows {
// Do not include more detailed errors on the resources field value
// if the resources field may not be set on the target OS.
return field.ErrorList{
field.Forbidden(resourcesFldPath, "may not be set for a windows pod"),
}
}
allErrs := field.ErrorList{}
if spec.Resources.Claims != nil {
allErrs = append(allErrs, field.Forbidden(resourcesFldPath.Child("claims"), "claims may not be set for Resources at pod-level"))
}
// validatePodResourceRequirements checks if resource names and quantities are
// valid, and that requests do not exceed limits.
allErrs = append(allErrs, validatePodResourceRequirements(spec.Resources, podClaimNames, resourcesFldPath, opts)...)
allErrs = append(allErrs, validatePodResourceConsistency(spec, resourcesFldPath)...)
return allErrs
}
// validatePodResourceConsistency checks if aggregate container-level requests are
// less than or equal to pod-level requests, and individual container-level limits
// are less than or equal to pod-level limits.
func validatePodResourceConsistency(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// Convert the *core.PodSpec to *v1.PodSpec to satisfy the call to
// resourcehelper.PodRequests method, in the subsequent lines,
// which requires a *v1.Pod object (containing a *v1.PodSpec).
v1PodSpec := &v1.PodSpec{}
// TODO(ndixita): Convert_core_PodSpec_To_v1_PodSpec is risky. Add a copy of
// AggregateContainerRequests against internal core.Pod type for beta release of
// PodLevelResources feature.
if err := corev1.Convert_core_PodSpec_To_v1_PodSpec(spec, v1PodSpec, nil); err != nil {
allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("invalid %q: %v", fldPath, err.Error())))
}
reqPath := fldPath.Child("requests")
// The resourcehelper.AggregateContainerRequests method requires a Pod object to
// calculate the total resource requests of a pod. Hence a Pod object using
// v1PodSpec, i.e. &v1.Pod{Spec: *v1PodSpec}, is created on the fly and passed
// to AggregateContainerRequests to facilitate proper resource calculation
// without modifying that method.
aggrContainerReqs := resourcehelper.AggregateContainerRequests(&v1.Pod{Spec: *v1PodSpec}, resourcehelper.PodResourcesOptions{})
// Pod-level requests must be >= aggregate requests of all containers in a pod.
for resourceName, ctrReqs := range aggrContainerReqs {
// Skip if the pod-level request of the resource is not set.
podSpecRequests, exists := spec.Resources.Requests[core.ResourceName(resourceName.String())]
if !exists {
continue
}
fldPath := reqPath.Key(resourceName.String())
if ctrReqs.Cmp(podSpecRequests) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath, podSpecRequests.String(), fmt.Sprintf("must be greater than or equal to aggregate container requests of %s", ctrReqs.String())))
}
}
// Pod-level hugepage limits must always be greater than or equal to the aggregated
// container-level hugepage limits, because hugepage resources are treated as a
// non-overcommittable resource (request and limit must be equal) under the
// current container-level hugepage behavior.
// This is also why hugepage overcommitment is not allowed in pod-level resources:
// the pod cgroup values must reflect the request/limit set at the pod level, and the
// container-level cgroup values must be within that limit.
aggrContainerLims := resourcehelper.AggregateContainerLimits(&v1.Pod{Spec: *v1PodSpec}, resourcehelper.PodResourcesOptions{})
for resourceName, ctrLims := range aggrContainerLims {
if !helper.IsHugePageResourceName(core.ResourceName(resourceName)) {
continue
}
podSpecLimits, hasLimit := spec.Resources.Limits[core.ResourceName(resourceName)]
if !hasLimit {
continue
}
if ctrLims.Cmp(podSpecLimits) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("limits").Key(string(resourceName)), podSpecLimits.String(), fmt.Sprintf("must be greater than or equal to aggregate container limits of %s", ctrLims.String())))
}
}
// Individual Container limits must be <= Pod-level limits.
for i, ctr := range spec.Containers {
for resourceName, ctrLimit := range ctr.Resources.Limits {
// Skip if the pod-level limit of the resource is not set.
podSpecLimits, exists := spec.Resources.Limits[core.ResourceName(resourceName.String())]
if !exists {
continue
}
if ctrLimit.Cmp(podSpecLimits) > 0 {
fldPath := fldPath.Child("containers").Index(i).Key(resourceName.String()).Child("limits")
allErrs = append(allErrs, field.Invalid(fldPath, ctrLimit.String(), fmt.Sprintf("must be less than or equal to pod limits of %s", podSpecLimits.String())))
}
}
}
return allErrs
}
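// validateLinux checks Linux-specific restrictions; currently it verifies
// that windowsOptions is not set in the pod security context or in any
// container's security context.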
func validateLinux(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
securityContext := spec.SecurityContext
if securityContext != nil && securityContext.WindowsOptions != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("windowsOptions"), "windows options cannot be set for a linux pod"))
}
podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
sc := c.SecurityContext
if sc != nil && sc.WindowsOptions != nil {
fldPath := cFldPath.Child("securityContext")
allErrs = append(allErrs, field.Forbidden(fldPath.Child("windowsOptions"), "windows options cannot be set for a linux pod"))
}
return true
})
return allErrs
}
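// validateWindows checks Windows-specific restrictions, rejecting Linux-only
// security context fields (SELinux, seccomp, AppArmor, sysctls, host
// namespaces, POSIX user/group IDs, and similar) at both the pod and the
// container level.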
func validateWindows(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
securityContext := spec.SecurityContext
// validate Pod SecurityContext
if securityContext != nil {
if securityContext.AppArmorProfile != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("appArmorProfile"), "cannot be set for a windows pod"))
}
if securityContext.SELinuxOptions != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seLinuxOptions"), "cannot be set for a windows pod"))
}
if securityContext.HostUsers != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostUsers"), "cannot be set for a windows pod"))
}
if securityContext.HostPID {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPID"), "cannot be set for a windows pod"))
}
if securityContext.HostIPC {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostIPC"), "cannot be set for a windows pod"))
}
if securityContext.SeccompProfile != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seccompProfile"), "cannot be set for a windows pod"))
}
if securityContext.FSGroup != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("fsGroup"), "cannot be set for a windows pod"))
}
if securityContext.FSGroupChangePolicy != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("fsGroupChangePolicy"), "cannot be set for a windows pod"))
}
if len(securityContext.Sysctls) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("sysctls"), "cannot be set for a windows pod"))
}
if securityContext.ShareProcessNamespace != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("shareProcessNamespace"), "cannot be set for a windows pod"))
}
if securityContext.RunAsUser != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("runAsUser"), "cannot be set for a windows pod"))
}
if securityContext.RunAsGroup != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("runAsGroup"), "cannot be set for a windows pod"))
}
if securityContext.SupplementalGroups != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroups"), "cannot be set for a windows pod"))
}
if securityContext.SupplementalGroupsPolicy != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroupsPolicy"), "cannot be set for a windows pod"))
}
if securityContext.SELinuxChangePolicy != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seLinuxChangePolicy"), "cannot be set for a windows pod"))
}
}
podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
// validate container security context
sc := c.SecurityContext
// OS-based podSecurityContext validation.
// There is some naming overlap between Windows and Linux security contexts, but all the Windows-specific options
// are set via securityContext.WindowsOptions, which we validate below.
// TODO: Consider whether we need to relax this restriction or some of these restrictions.
if sc != nil {
fldPath := cFldPath.Child("securityContext")
if sc.AppArmorProfile != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("appArmorProfile"), "cannot be set for a windows pod"))
}
if sc.SELinuxOptions != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("seLinuxOptions"), "cannot be set for a windows pod"))
}
if sc.SeccompProfile != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("seccompProfile"), "cannot be set for a windows pod"))
}
if sc.Capabilities != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("capabilities"), "cannot be set for a windows pod"))
}
if sc.ReadOnlyRootFilesystem != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("readOnlyRootFilesystem"), "cannot be set for a windows pod"))
}
if sc.Privileged != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "cannot be set for a windows pod"))
}
if sc.AllowPrivilegeEscalation != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("allowPrivilegeEscalation"), "cannot be set for a windows pod"))
}
if sc.ProcMount != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("procMount"), "cannot be set for a windows pod"))
}
if sc.RunAsUser != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("runAsUser"), "cannot be set for a windows pod"))
}
if sc.RunAsGroup != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("runAsGroup"), "cannot be set for a windows pod"))
}
}
return true
})
return allErrs
}
// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch rq.Operator {
case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
if len(rq.Values) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
}
case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist:
if len(rq.Values) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
}
case core.NodeSelectorOpGt, core.NodeSelectorOpLt:
if len(rq.Values) != 1 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'"))
}
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
if !allowInvalidLabelValueInRequiredNodeAffinity {
path := fldPath.Child("values")
for valueIndex, value := range rq.Values {
for _, msg := range validation.IsValidLabelValue(value) {
allErrs = append(allErrs, field.Invalid(path.Index(valueIndex), value, msg).WithOrigin("format=k8s-label-value"))
}
}
}
return allErrs
}
var nodeFieldSelectorValidators = map[string]func(string, bool) []string{
metav1.ObjectNameField: ValidateNodeName,
}
// ValidateNodeFieldSelectorRequirement tests that the specified NodeSelectorRequirement fields have valid data
func ValidateNodeFieldSelectorRequirement(req core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch req.Operator {
case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
if len(req.Values) != 1 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"),
"must be only one value when `operator` is 'In' or 'NotIn' for node field selector"))
}
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
}
if vf, found := nodeFieldSelectorValidators[req.Key]; !found {
allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), req.Key, "not a valid field selector key"))
} else {
for i, v := range req.Values {
for _, msg := range vf(v, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(i), v, msg))
}
}
}
return allErrs
}
// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for j, req := range term.MatchExpressions {
allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, allowInvalidLabelValueInRequiredNodeAffinity, fldPath.Child("matchExpressions").Index(j))...)
}
for j, req := range term.MatchFields {
allErrs = append(allErrs, ValidateNodeFieldSelectorRequirement(req, fldPath.Child("matchFields").Index(j))...)
}
return allErrs
}
// ValidateNodeSelector tests that the specified nodeSelector fields have valid data
func ValidateNodeSelector(nodeSelector *core.NodeSelector, allowInvalidLabelValueInRequiredNodeAffinity bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
termFldPath := fldPath.Child("nodeSelectorTerms")
if len(nodeSelector.NodeSelectorTerms) == 0 {
return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
}
for i, term := range nodeSelector.NodeSelectorTerms {
allErrs = append(allErrs, ValidateNodeSelectorTerm(term, allowInvalidLabelValueInRequiredNodeAffinity, termFldPath.Index(i))...)
}
return allErrs
}
// validateTopologySelectorLabelRequirement tests that the specified TopologySelectorLabelRequirement fields have valid data,
// and constructs a set containing all of its Values.
func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequirement, fldPath *field.Path) (sets.Set[string], field.ErrorList) {
allErrs := field.ErrorList{}
valueSet := make(sets.Set[string])
valuesPath := fldPath.Child("values")
if len(rq.Values) == 0 {
allErrs = append(allErrs, field.Required(valuesPath, ""))
}
// Validate set property of Values field
for i, value := range rq.Values {
if valueSet.Has(value) {
allErrs = append(allErrs, field.Duplicate(valuesPath.Index(i), value))
}
valueSet.Insert(value)
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
return valueSet, allErrs
}
// ValidateTopologySelectorTerm tests that the specified topology selector term has valid data,
// and constructs a map representing the term in raw form.
func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field.Path) (map[string]sets.Set[string], field.ErrorList) {
allErrs := field.ErrorList{}
exprMap := make(map[string]sets.Set[string])
exprPath := fldPath.Child("matchLabelExpressions")
// Allow empty MatchLabelExpressions, in case this field becomes optional in the future.
for i, req := range term.MatchLabelExpressions {
idxPath := exprPath.Index(i)
valueSet, exprErrs := validateTopologySelectorLabelRequirement(req, idxPath)
allErrs = append(allErrs, exprErrs...)
// Validate no duplicate keys exist.
if _, exists := exprMap[req.Key]; exists {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("key"), req.Key))
}
exprMap[req.Key] = valueSet
}
return exprMap, allErrs
}
// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data
func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
v1Avoids, err := schedulinghelper.GetAvoidPodsFromNodeAnnotations(annotations)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error()))
return allErrs
}
var avoids core.AvoidPods
if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error()))
return allErrs
}
if len(avoids.PreferAvoidPods) != 0 {
for i, pa := range avoids.PreferAvoidPods {
idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i)
allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...)
}
}
return allErrs
}
// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data.
func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if avoidPodEntry.PodSignature.PodController == nil {
allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), ""))
} else {
if !*(avoidPodEntry.PodSignature.PodController.Controller) {
allErrors = append(allErrors,
field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"),
*(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller"))
}
}
return allErrors
}
// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields have valid data
func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, term := range terms {
if term.Weight <= 0 || term.Weight > 100 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
}
// we always allow invalid label values for preferred affinity,
// as scheduling can still succeed when the cluster has only one node
allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, true, fldPath.Index(i).Child("preference"))...)
}
return allErrs
}
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidatePodAffinityTermSelector(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath)...)
for _, name := range podAffinityTerm.Namespaces {
for _, msg := range ValidateNamespaceName(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
}
}
allErrs = append(allErrs, ValidateMatchLabelKeysAndMismatchLabelKeys(fldPath, podAffinityTerm.MatchLabelKeys, podAffinityTerm.MismatchLabelKeys, podAffinityTerm.LabelSelector)...)
if len(podAffinityTerm.TopologyKey) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty"))
}
return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
}
// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, podAffinityTerm := range podAffinityTerms {
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(i))...)
}
return allErrs
}
// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for j, weightedTerm := range weightedPodAffinityTerms {
if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
}
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(j).Child("podAffinityTerm"))...)
}
return allErrs
}
// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// validateNodeAffinity tests that the specified nodeAffinity fields have valid data
func validateNodeAffinity(na *core.NodeAffinity, opts PodValidationOptions, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, opts.AllowInvalidLabelValueInRequiredNodeAffinity, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// validatePodAffinity tests that the specified podAffinity fields have valid data
func validatePodAffinity(podAffinity *core.PodAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
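// validateSeccompProfileField tests that the given SeccompProfile has a
// supported type and that localhostProfile is set if and only if the type is
// Localhost.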
func validateSeccompProfileField(sp *core.SeccompProfile, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if sp == nil {
return allErrs
}
if err := validateSeccompProfileType(fldPath.Child("type"), sp.Type); err != nil {
allErrs = append(allErrs, err)
}
if sp.Type == core.SeccompProfileTypeLocalhost {
if sp.LocalhostProfile == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("localhostProfile"), "must be set when seccomp type is Localhost"))
} else {
allErrs = append(allErrs, validateLocalDescendingPath(*sp.LocalhostProfile, fldPath.Child("localhostProfile"))...)
}
} else {
if sp.LocalhostProfile != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("localhostProfile"), sp, "can only be set when seccomp type is Localhost"))
}
}
return allErrs
}
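// ValidateSeccompProfile tests an annotation-based seccomp profile string:
// the runtime default, the deprecated docker default, and the unconfined
// profile are accepted, and localhost-prefixed profiles must reference a
// local descending path.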
func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList {
if p == core.SeccompProfileRuntimeDefault || p == core.DeprecatedSeccompProfileDockerDefault {
return nil
}
if p == v1.SeccompProfileNameUnconfined {
return nil
}
if strings.HasPrefix(p, v1.SeccompLocalhostProfileNamePrefix) {
return validateLocalDescendingPath(strings.TrimPrefix(p, v1.SeccompLocalhostProfileNamePrefix), fldPath)
}
return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")}
}
func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if p, exists := annotations[core.SeccompPodAnnotationKey]; exists {
allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...)
}
for k, p := range annotations {
if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) {
allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...)
}
}
return allErrs
}
// validateSeccompProfileType tests that the argument is a valid SeccompProfileType.
func validateSeccompProfileType(fldPath *field.Path, seccompProfileType core.SeccompProfileType) *field.Error {
switch seccompProfileType {
case core.SeccompProfileTypeLocalhost, core.SeccompProfileTypeRuntimeDefault, core.SeccompProfileTypeUnconfined:
return nil
case "":
return field.Required(fldPath, "type is required when seccompProfile is set")
default:
return field.NotSupported(fldPath, seccompProfileType, []core.SeccompProfileType{core.SeccompProfileTypeLocalhost, core.SeccompProfileTypeRuntimeDefault, core.SeccompProfileTypeUnconfined})
}
}
func ValidateAppArmorProfileField(profile *core.AppArmorProfile, fldPath *field.Path) field.ErrorList {
if profile == nil {
return nil
}
allErrs := field.ErrorList{}
switch profile.Type {
case core.AppArmorProfileTypeLocalhost:
if profile.LocalhostProfile == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("localhostProfile"), "must be set when AppArmor type is Localhost"))
} else {
localhostProfile := strings.TrimSpace(*profile.LocalhostProfile)
if localhostProfile != *profile.LocalhostProfile {
allErrs = append(allErrs, field.Invalid(fldPath.Child("localhostProfile"), *profile.LocalhostProfile, "must not be padded with whitespace"))
} else if localhostProfile == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("localhostProfile"), "must be set when AppArmor type is Localhost"))
}
const maxLocalhostProfileLength = 4095 // PATH_MAX - 1
if len(*profile.LocalhostProfile) > maxLocalhostProfileLength {
allErrs = append(allErrs, field.TooLong(fldPath.Child("localhostProfile"), "" /*unused*/, maxLocalhostProfileLength))
}
}
case core.AppArmorProfileTypeRuntimeDefault, core.AppArmorProfileTypeUnconfined:
if profile.LocalhostProfile != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("localhostProfile"), profile.LocalhostProfile, "can only be set when AppArmor type is Localhost"))
}
case "":
allErrs = append(allErrs, field.Required(fldPath.Child("type"), "type is required when appArmorProfile is set"))
default:
allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), profile.Type,
[]core.AppArmorProfileType{core.AppArmorProfileTypeLocalhost, core.AppArmorProfileTypeRuntimeDefault, core.AppArmorProfileTypeUnconfined}))
}
return allErrs
}
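// ValidateAppArmorPodAnnotations tests the deprecated AppArmor container
// annotations: each annotation must reference a container that exists in the
// spec and must carry a well-formed profile string.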
func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for k, p := range annotations {
if !strings.HasPrefix(k, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix) {
continue
}
containerName := strings.TrimPrefix(k, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix)
if !podSpecHasContainer(spec, containerName) {
allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found"))
}
if err := ValidateAppArmorProfileFormat(p); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error()))
}
}
return allErrs
}
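// ValidateAppArmorProfileFormat checks that an annotation-based AppArmor
// profile string is empty, one of the known defaults, or prefixed as a
// localhost profile.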
func ValidateAppArmorProfileFormat(profile string) error {
if profile == "" || profile == v1.DeprecatedAppArmorBetaProfileRuntimeDefault || profile == v1.DeprecatedAppArmorBetaProfileNameUnconfined {
return nil
}
if !strings.HasPrefix(profile, v1.DeprecatedAppArmorBetaProfileNamePrefix) {
return fmt.Errorf("invalid AppArmor profile name: %q", profile)
}
return nil
}
// validateAppArmorAnnotationsAndFieldsMatchOnCreate validates that AppArmor fields and annotations are consistent.
func validateAppArmorAnnotationsAndFieldsMatchOnCreate(objectMeta metav1.ObjectMeta, podSpec *core.PodSpec, specPath *field.Path) field.ErrorList {
if podSpec.OS != nil && podSpec.OS.Name == core.Windows {
// Skip consistency check for windows pods.
return nil
}
allErrs := field.ErrorList{}
var podProfile *core.AppArmorProfile
if podSpec.SecurityContext != nil {
podProfile = podSpec.SecurityContext.AppArmorProfile
}
podshelper.VisitContainersWithPath(podSpec, specPath, func(c *core.Container, cFldPath *field.Path) bool {
containerProfile := podProfile
if c.SecurityContext != nil && c.SecurityContext.AppArmorProfile != nil {
containerProfile = c.SecurityContext.AppArmorProfile
}
if containerProfile == nil {
return true
}
key := core.DeprecatedAppArmorAnnotationKeyPrefix + c.Name
if annotation, found := objectMeta.Annotations[key]; found {
apparmorPath := cFldPath.Child("securityContext").Child("appArmorProfile")
switch containerProfile.Type {
case core.AppArmorProfileTypeUnconfined:
if annotation != core.DeprecatedAppArmorAnnotationValueUnconfined {
allErrs = append(allErrs, field.Forbidden(apparmorPath.Child("type"), "apparmor type in annotation and field must match"))
}
case core.AppArmorProfileTypeRuntimeDefault:
if annotation != core.DeprecatedAppArmorAnnotationValueRuntimeDefault {
allErrs = append(allErrs, field.Forbidden(apparmorPath.Child("type"), "apparmor type in annotation and field must match"))
}
case core.AppArmorProfileTypeLocalhost:
if !strings.HasPrefix(annotation, core.DeprecatedAppArmorAnnotationValueLocalhostPrefix) {
allErrs = append(allErrs, field.Forbidden(apparmorPath.Child("type"), "apparmor type in annotation and field must match"))
} else if containerProfile.LocalhostProfile == nil || strings.TrimPrefix(annotation, core.DeprecatedAppArmorAnnotationValueLocalhostPrefix) != *containerProfile.LocalhostProfile {
allErrs = append(allErrs, field.Forbidden(apparmorPath.Child("localhostProfile"), "apparmor profile in annotation and field must match"))
}
}
}
return true
})
return allErrs
}
func podSpecHasContainer(spec *core.PodSpec, containerName string) bool {
var hasContainer bool
podshelper.VisitContainersWithPath(spec, field.NewPath("spec"), func(c *core.Container, _ *field.Path) bool {
if c.Name == containerName {
hasContainer = true
return false
}
return true
})
return hasContainer
}
const (
// a sysctl segment regex, concatenated with dots to form a sysctl name
SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
// a sysctl name regex with slash allowed
SysctlContainSlashFmt string = "(" + SysctlSegmentFmt + "[\\./])*" + SysctlSegmentFmt
// the maximal length of a sysctl name
SysctlMaxLength int = 253
)
var sysctlContainSlashRegexp = regexp.MustCompile("^" + SysctlContainSlashFmt + "$")
// IsValidSysctlName checks that the given string is a valid sysctl name,
// i.e. matches SysctlContainSlashFmt.
// More info:
//
// https://man7.org/linux/man-pages/man8/sysctl.8.html
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html
func IsValidSysctlName(name string) bool {
if len(name) > SysctlMaxLength {
return false
}
return sysctlContainSlashRegexp.MatchString(name)
}
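// validateSysctls tests that each sysctl has a valid, non-duplicate name and,
// unless opts.AllowNamespacedSysctlsForHostNetAndHostIPC is set, that
// namespaced sysctls are not used together with the corresponding host
// namespace (see ValidateHostSysctl).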
func validateSysctls(securityContext *core.PodSecurityContext, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
names := make(map[string]struct{})
for i, s := range securityContext.Sysctls {
if len(s.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), ""))
} else if !IsValidSysctlName(s.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, sysctlContainSlashRegexp)))
} else if _, ok := names[s.Name]; ok {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("name"), s.Name))
}
if !opts.AllowNamespacedSysctlsForHostNetAndHostIPC {
err := ValidateHostSysctl(s.Name, securityContext, fldPath.Index(i).Child("name"))
if err != nil {
allErrs = append(allErrs, err)
}
}
names[s.Name] = struct{}{}
}
return allErrs
}
// ValidateHostSysctl returns an error if a namespaced sysctl is applied to a pod that shares the respective namespace with the host.
func ValidateHostSysctl(sysctl string, securityContext *core.PodSecurityContext, fldPath *field.Path) *field.Error {
ns, _, _ := utilsysctl.GetNamespace(sysctl)
switch {
case securityContext.HostNetwork && ns == utilsysctl.NetNamespace:
return field.Invalid(fldPath, sysctl, "may not be specified when 'hostNetwork' is true")
case securityContext.HostIPC && ns == utilsysctl.IPCNamespace:
return field.Invalid(fldPath, sysctl, "may not be specified when 'hostIPC' is true")
}
return nil
}
var validSELinuxChangePolicies = sets.New(core.SELinuxChangePolicyRecursive, core.SELinuxChangePolicyMountOption)
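// validateSELinuxChangePolicy tests that the given PodSELinuxChangePolicy is
// one of the supported values; when opts.AllowOnlyRecursiveSELinuxChangePolicy
// is set, only Recursive is accepted.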
func validateSELinuxChangePolicy(seLinuxChangePolicy *core.PodSELinuxChangePolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
if seLinuxChangePolicy == nil {
return nil
}
allErrs := field.ErrorList{}
if opts.AllowOnlyRecursiveSELinuxChangePolicy {
if *seLinuxChangePolicy != core.SELinuxChangePolicyRecursive {
allErrs = append(allErrs, field.NotSupported(fldPath, *seLinuxChangePolicy, []core.PodSELinuxChangePolicy{core.SELinuxChangePolicyRecursive}))
}
} else {
// Allow any valid SELinuxChangePolicy value.
if !validSELinuxChangePolicies.Has(*seLinuxChangePolicy) {
allErrs = append(allErrs, field.NotSupported(fldPath, *seLinuxChangePolicy, sets.List(validSELinuxChangePolicies)))
}
}
return allErrs
}
// validatePodSpecSecurityContext verifies the SecurityContext of a PodSpec,
// whether that is defined in a Pod or in an embedded PodSpec (e.g. a
// Deployment's pod template).
func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if securityContext != nil {
if securityContext.FSGroup != nil {
for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
}
}
if securityContext.RunAsUser != nil {
for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg))
}
}
if securityContext.RunAsGroup != nil {
for _, msg := range validation.IsValidGroupID(*securityContext.RunAsGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *(securityContext.RunAsGroup), msg))
}
}
for g, gid := range securityContext.SupplementalGroups {
for _, msg := range validation.IsValidGroupID(gid) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
}
}
if securityContext.ShareProcessNamespace != nil && securityContext.HostPID && *securityContext.ShareProcessNamespace {
allErrs = append(allErrs, field.Invalid(fldPath.Child("shareProcessNamespace"), *securityContext.ShareProcessNamespace, "ShareProcessNamespace and HostPID cannot both be enabled"))
}
if len(securityContext.Sysctls) != 0 {
allErrs = append(allErrs, validateSysctls(securityContext, fldPath.Child("sysctls"), opts)...)
}
if securityContext.FSGroupChangePolicy != nil {
allErrs = append(allErrs, validateFSGroupChangePolicy(securityContext.FSGroupChangePolicy, fldPath.Child("fsGroupChangePolicy"))...)
}
allErrs = append(allErrs, validateSeccompProfileField(securityContext.SeccompProfile, fldPath.Child("seccompProfile"))...)
allErrs = append(allErrs, validateWindowsSecurityContextOptions(securityContext.WindowsOptions, fldPath.Child("windowsOptions"))...)
allErrs = append(allErrs, ValidateAppArmorProfileField(securityContext.AppArmorProfile, fldPath.Child("appArmorProfile"))...)
if securityContext.SupplementalGroupsPolicy != nil {
allErrs = append(allErrs, validateSupplementalGroupsPolicy(securityContext.SupplementalGroupsPolicy, fldPath.Child("supplementalGroupsPolicy"))...)
}
if securityContext.SELinuxChangePolicy != nil {
allErrs = append(allErrs, validateSELinuxChangePolicy(securityContext.SELinuxChangePolicy, fldPath.Child("seLinuxChangePolicy"), opts)...)
}
}
return allErrs
}
func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
allErrs = field.ErrorList{}
if len(newContainers) != len(oldContainers) {
// TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
return allErrs, true
}
// validate updated container images
for i, ctr := range newContainers {
if len(ctr.Image) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), ""))
}
// this is only called from ValidatePodUpdate so it's safe to check leading/trailing whitespace.
if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace"))
}
}
return allErrs, false
}
// ValidatePodCreate validates a pod in the context of its initial create
func ValidatePodCreate(pod *core.Pod, opts PodValidationOptions) field.ErrorList {
allErrs := validatePodMetadataAndSpec(pod, opts)
fldPath := field.NewPath("spec")
// EphemeralContainers can only be set on update using the ephemeralcontainers subresource
if len(pod.Spec.EphemeralContainers) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeralContainers"), "cannot be set on create"))
}
// A Pod cannot be assigned a Node if there are remaining scheduling gates.
if pod.Spec.NodeName != "" && len(pod.Spec.SchedulingGates) != 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "cannot be set until all schedulingGates have been cleared"))
}
allErrs = append(allErrs, validateSeccompAnnotationsAndFields(pod.ObjectMeta, &pod.Spec, fldPath)...)
allErrs = append(allErrs, validateAppArmorAnnotationsAndFieldsMatchOnCreate(pod.ObjectMeta, &pod.Spec, fldPath)...)
return allErrs
}
// validateSeccompAnnotationsAndFields iterates through all containers and ensures that, when both the seccompProfile field and seccomp annotations exist, they match.
func validateSeccompAnnotationsAndFields(objectMeta metav1.ObjectMeta, podSpec *core.PodSpec, specPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if podSpec.SecurityContext != nil && podSpec.SecurityContext.SeccompProfile != nil {
// If both seccomp annotations and fields are specified, the values must match.
if annotation, found := objectMeta.Annotations[v1.SeccompPodAnnotationKey]; found {
seccompPath := specPath.Child("securityContext").Child("seccompProfile")
err := validateSeccompAnnotationsAndFieldsMatch(annotation, podSpec.SecurityContext.SeccompProfile, seccompPath)
if err != nil {
allErrs = append(allErrs, err)
}
}
}
podshelper.VisitContainersWithPath(podSpec, specPath, func(c *core.Container, cFldPath *field.Path) bool {
var field *core.SeccompProfile
if c.SecurityContext != nil {
field = c.SecurityContext.SeccompProfile
}
if field == nil {
return true
}
key := v1.SeccompContainerAnnotationKeyPrefix + c.Name
if annotation, found := objectMeta.Annotations[key]; found {
seccompPath := cFldPath.Child("securityContext").Child("seccompProfile")
err := validateSeccompAnnotationsAndFieldsMatch(annotation, field, seccompPath)
if err != nil {
allErrs = append(allErrs, err)
}
}
return true
})
return allErrs
}
func validateSeccompAnnotationsAndFieldsMatch(annotationValue string, seccompField *core.SeccompProfile, fldPath *field.Path) *field.Error {
if seccompField == nil {
return nil
}
switch seccompField.Type {
case core.SeccompProfileTypeUnconfined:
if annotationValue != v1.SeccompProfileNameUnconfined {
return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
}
case core.SeccompProfileTypeRuntimeDefault:
if annotationValue != v1.SeccompProfileRuntimeDefault && annotationValue != v1.DeprecatedSeccompProfileDockerDefault {
return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
}
case core.SeccompProfileTypeLocalhost:
if !strings.HasPrefix(annotationValue, v1.SeccompLocalhostProfileNamePrefix) {
return field.Forbidden(fldPath.Child("type"), "seccomp type in annotation and field must match")
} else if seccompField.LocalhostProfile == nil || strings.TrimPrefix(annotationValue, v1.SeccompLocalhostProfileNamePrefix) != *seccompField.LocalhostProfile {
return field.Forbidden(fldPath.Child("localhostProfile"), "seccomp profile in annotation and field must match")
}
}
return nil
}
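// Example (illustrative): for a Localhost profile, the deprecated annotation
// must be the "localhost/" prefix followed by the exact field value, e.g. the
// annotation value "localhost/audit.json" matches a SeccompProfile with
// Type: Localhost and LocalhostProfile "audit.json"; any other pairing yields
// a Forbidden error.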
var updatablePodSpecFields = []string{
"`spec.containers[*].image`",
"`spec.initContainers[*].image`",
"`spec.activeDeadlineSeconds`",
"`spec.tolerations` (only additions to existing tolerations)",
"`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)",
}
// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
specPath := field.NewPath("spec")
// validate updatable fields:
// 1. spec.containers[*].image
// 2. spec.initContainers[*].image
// 3. spec.activeDeadlineSeconds
// 4. spec.terminationGracePeriodSeconds
// 5. spec.schedulingGates
containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
allErrs = append(allErrs, containerErrs...)
if stop {
return allErrs
}
containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers"))
allErrs = append(allErrs, containerErrs...)
if stop {
return allErrs
}
// validate updated spec.activeDeadlineSeconds. two types of updates are allowed:
// 1. from nil to a positive value
// 2. from a positive value to a lesser, non-negative value
if newPod.Spec.ActiveDeadlineSeconds != nil {
newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds
if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 {
allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32)))
return allErrs
}
if oldPod.Spec.ActiveDeadlineSeconds != nil {
oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
if oldActiveDeadlineSeconds < newActiveDeadlineSeconds {
allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value"))
return allErrs
}
}
} else if oldPod.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value"))
}
// Allow only additions to tolerations updates.
allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...)
// Allow only deletions to schedulingGates updates.
allErrs = append(allErrs, validateOnlyDeletedSchedulingGates(newPod.Spec.SchedulingGates, oldPod.Spec.SchedulingGates, specPath.Child("schedulingGates"))...)
// the last thing to check is pod spec equality. If the pod specs are equal, then we can simply return the errors we have
// so far and save the cost of a deep copy.
if apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
return allErrs
}
// handle updatable fields by munging those fields prior to the deep-equal comparison.
mungedPodSpec := *newPod.Spec.DeepCopy()
// munge spec.containers[*].image
var newContainers []core.Container
for ix, container := range mungedPodSpec.Containers {
container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
newContainers = append(newContainers, container)
}
mungedPodSpec.Containers = newContainers
// munge spec.initContainers[*].image
var newInitContainers []core.Container
for ix, container := range mungedPodSpec.InitContainers {
container.Image = oldPod.Spec.InitContainers[ix].Image // +k8s:verify-mutation:reason=clone
newInitContainers = append(newInitContainers, container)
}
mungedPodSpec.InitContainers = newInitContainers
// munge spec.activeDeadlineSeconds
mungedPodSpec.ActiveDeadlineSeconds = nil
if oldPod.Spec.ActiveDeadlineSeconds != nil {
activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
mungedPodSpec.ActiveDeadlineSeconds = &activeDeadlineSeconds
}
// munge spec.schedulingGates
mungedPodSpec.SchedulingGates = oldPod.Spec.SchedulingGates // +k8s:verify-mutation:reason=clone
// tolerations are checked before the deep copy, so munge those too
mungedPodSpec.Tolerations = oldPod.Spec.Tolerations // +k8s:verify-mutation:reason=clone
// Relax validation of immutable fields to allow it to be set to 1 if it was previously negative.
if oldPod.Spec.TerminationGracePeriodSeconds != nil && *oldPod.Spec.TerminationGracePeriodSeconds < 0 &&
mungedPodSpec.TerminationGracePeriodSeconds != nil && *mungedPodSpec.TerminationGracePeriodSeconds == 1 {
mungedPodSpec.TerminationGracePeriodSeconds = oldPod.Spec.TerminationGracePeriodSeconds // +k8s:verify-mutation:reason=clone
}
// Handle validations specific to gated pods.
podIsGated := len(oldPod.Spec.SchedulingGates) > 0
if podIsGated {
// Additions to spec.nodeSelector are allowed (no deletions or mutations) for gated pods.
if !apiequality.Semantic.DeepEqual(mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector) {
allErrs = append(allErrs, validateNodeSelectorMutation(specPath.Child("nodeSelector"), mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector)...)
mungedPodSpec.NodeSelector = oldPod.Spec.NodeSelector // +k8s:verify-mutation:reason=clone
}
// Validate node affinity mutations.
var oldNodeAffinity *core.NodeAffinity
if oldPod.Spec.Affinity != nil {
oldNodeAffinity = oldPod.Spec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
var mungedNodeAffinity *core.NodeAffinity
if mungedPodSpec.Affinity != nil {
mungedNodeAffinity = mungedPodSpec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
if !apiequality.Semantic.DeepEqual(oldNodeAffinity, mungedNodeAffinity) {
allErrs = append(allErrs, validateNodeAffinityMutation(specPath.Child("affinity").Child("nodeAffinity"), mungedNodeAffinity, oldNodeAffinity)...)
switch {
case mungedPodSpec.Affinity == nil && oldNodeAffinity == nil:
// already effectively nil, no change needed
case mungedPodSpec.Affinity == nil && oldNodeAffinity != nil:
mungedPodSpec.Affinity = &core.Affinity{NodeAffinity: oldNodeAffinity} // +k8s:verify-mutation:reason=clone
case mungedPodSpec.Affinity != nil && oldPod.Spec.Affinity == nil &&
mungedPodSpec.Affinity.PodAntiAffinity == nil && mungedPodSpec.Affinity.PodAffinity == nil:
// We ensure no other fields are being changed, but the NodeAffinity. If that's the case, and the
// old pod's affinity is nil, we set the mungedPodSpec's affinity to nil.
mungedPodSpec.Affinity = nil // +k8s:verify-mutation:reason=clone
default:
// The node affinity is being updated and the old pod Affinity is not nil.
// We set the mungedPodSpec's node affinity to the old pod's node affinity.
mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone
}
}
// Note: Unlike NodeAffinity and NodeSelector, we cannot make PodAffinity/PodAntiAffinity mutable due to the presence of the matchLabelKeys/mismatchLabelKeys features.
// Those features automatically generate the matchExpressions in the labelSelector for PodAffinity/PodAntiAffinity when the Pod is created.
// Before making them mutable, we would need to work out how to handle/validate matchLabelKeys,
// and what happens if fieldManager A sets matchExpressions and fieldManager B later sets matchLabelKeys (could that lead to a confusing conflict, etc.).
}
if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
// This diff isn't perfect, but it's a helluva lot better than "I'm not going to tell you what the difference is".
// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
specDiff := diff.Diff(oldPod.Spec, mungedPodSpec)
errs := field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFields, ","), specDiff))
allErrs = append(allErrs, errs)
}
return allErrs
}
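// Example (illustrative sketch, assuming an existing oldPod and opts; image
// tag is hypothetical): the only in-place spec mutations a user may make are
// within the updatable set above, so bumping a container image passes while
// changing a command does not:
//
//	newPod := oldPod.DeepCopy()
//	newPod.Spec.Containers[0].Image = "nginx:1.27"
//	errs := ValidatePodUpdate(newPod, oldPod, opts) // empty
//
//	newPod.Spec.Containers[0].Command = []string{"sleep"}
//	errs = ValidatePodUpdate(newPod, oldPod, opts) // Forbidden: spec may not change other fields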
// ValidateContainerStateTransition tests whether any illegal container state transitions are being attempted
func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldPath *field.Path, podSpec core.PodSpec) field.ErrorList {
allErrs := field.ErrorList{}
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
v1PodSpec := &v1.PodSpec{}
err := corev1.Convert_core_PodSpec_To_v1_PodSpec(&podSpec, v1PodSpec, nil)
if err != nil {
allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("invalid %q: %v", fldPath, err.Error())))
}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
for _, newStatus := range newStatuses {
if newStatus.Name == oldStatus.Name && newStatus.State.Terminated == nil {
allowed := false
for _, c := range v1PodSpec.Containers {
if c.Name == oldStatus.Name {
allowed = podutil.ContainerShouldRestart(c, *v1PodSpec, oldStatus.State.Terminated.ExitCode)
break
}
}
if !allowed {
allErrs = append(allErrs, field.Forbidden(fldPath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
break
}
}
}
return allErrs
}
restartPolicy := podSpec.RestartPolicy
// If we should always restart, containers are allowed to leave the terminated state
if restartPolicy == core.RestartPolicyAlways {
return allErrs
}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure {
continue
}
for _, newStatus := range newStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
}
}
return allErrs
}
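// Example (illustrative): with restartPolicy: Never, a status update that
// moves a container from Terminated back to Running is Forbidden; with
// restartPolicy: Always any terminated container may leave the terminated
// state; and with OnFailure only containers that exited non-zero may.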
// ValidateInitContainerStateTransition tests whether any illegal init container state transitions are being attempted
func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList {
allErrs := field.ErrorList{}
// If we should always restart, containers are allowed to leave the terminated state
if podSpec.RestartPolicy == core.RestartPolicyAlways {
return allErrs
}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure {
continue
}
// Skip any restartable init container that is allowed to restart
isRestartableInitCtr := false
for _, c := range podSpec.InitContainers {
if oldStatus.Name == c.Name {
if isRestartableInitContainer(&c) {
isRestartableInitCtr = true
}
break
}
}
if isRestartableInitCtr {
continue
}
for _, newStatus := range newStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
}
}
return allErrs
}
// ValidateEphemeralContainerStateTransition tests whether any ephemeral containers are transitioned
// from a terminated state to a non-terminated state.
func ValidateEphemeralContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, oldStatus := range oldStatuses {
// Skip any container that is not terminated
if oldStatus.State.Terminated == nil {
continue
}
for _, newStatus := range newStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
}
}
}
return allErrs
}
// ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation.
func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
fldPath = field.NewPath("status")
allErrs = append(allErrs, validatePodConditions(newPod.Status.Conditions, fldPath.Child("conditions"))...)
if newPod.Spec.NodeName != oldPod.Spec.NodeName {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly"))
}
if newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName && len(newPod.Status.NominatedNodeName) > 0 {
for _, msg := range ValidateNodeName(newPod.Status.NominatedNodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nominatedNodeName"), newPod.Status.NominatedNodeName, msg))
}
}
// Prevent setting NominatedNodeName on already bound pods
if utilfeature.DefaultFeatureGate.Enabled(features.ClearingNominatedNodeNameAfterBinding) &&
oldPod.Spec.NodeName != "" &&
newPod.Status.NominatedNodeName != "" &&
newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nominatedNodeName"),
"may not be set on pods that are already bound to a node"))
}
if newPod.Status.ObservedGeneration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), newPod.Status.ObservedGeneration, "must be a non-negative integer"))
}
// Pod QoS is immutable
allErrs = append(allErrs, ValidateImmutableField(newPod.Status.QOSClass, oldPod.Status.QOSClass, fldPath.Child("qosClass"))...)
// Note: there is no check that ContainerStatuses, InitContainerStatuses, and EphemeralContainerStatuses don't have duplicate container names
// or statuses of containers that are not defined in the pod spec. Changing this may lead to breaking changes, so consumers of those fields
// must account for unexpected data. The kubelet will never report statuses like this.
//
// If pod should not restart, make sure the status update does not transition
// any terminated containers to a non-terminated state.
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec)...)
allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...)
allErrs = append(allErrs, ValidateEphemeralContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"))...)
allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...)
allErrs = append(allErrs, validatePodExtendedResourceClaimStatus(newPod.Status.ExtendedResourceClaimStatus, &newPod.Spec, fldPath.Child("extendedResourceClaimStatus"))...)
if newIPErrs := validatePodIPs(newPod, oldPod); len(newIPErrs) > 0 {
allErrs = append(allErrs, newIPErrs...)
}
if newIPErrs := validateHostIPs(newPod, oldPod); len(newIPErrs) > 0 {
allErrs = append(allErrs, newIPErrs...)
}
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusAllocatedResourcesStatus(newPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), newPod.Spec.Containers)...)
allErrs = append(allErrs, validateContainerStatusAllocatedResourcesStatus(newPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), newPod.Spec.InitContainers)...)
// ephemeral containers are not allowed to have resources allocated
allErrs = append(allErrs, validateContainerStatusNoAllocatedResourcesStatus(newPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"))...)
return allErrs
}
// validatePodConditions tests if the custom pod conditions are valid, and that the observedGeneration
// is a non-negative integer.
func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
systemConditions := sets.New(
core.PodScheduled,
core.PodReady,
core.PodInitialized)
for i, condition := range conditions {
if condition.ObservedGeneration < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("observedGeneration"), condition.ObservedGeneration, "must be a non-negative integer"))
}
if systemConditions.Has(condition.Type) {
continue
}
allErrs = append(allErrs, ValidateQualifiedName(string(condition.Type), fldPath.Index(i).Child("type"))...)
}
return allErrs
}
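// Example (illustrative): the system condition types above are accepted
// as-is; any custom condition type must be a qualified name, so
// "example.com/gated" passes while "my condition" is Invalid.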
// validatePodResourceClaimStatuses validates the ResourceClaimStatuses slice in a pod status.
func validatePodResourceClaimStatuses(statuses []core.PodResourceClaimStatus, podClaims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
claimNames := sets.New[string]()
for i, status := range statuses {
idxPath := fldPath.Index(i)
// There's no need to check the content of the name. If it matches an entry,
// then it is valid, otherwise we reject it here.
if !havePodClaim(podClaims, status.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), status.Name, "must match the name of an entry in `spec.resourceClaims`"))
}
if claimNames.Has(status.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), status.Name))
} else {
claimNames.Insert(status.Name)
}
if status.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*status.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), status.ResourceClaimName, detail))
}
}
}
return allErrs
}
// validatePodExtendedResourceClaimStatus validates the ExtendedResourceClaimStatus in a pod status.
func validatePodExtendedResourceClaimStatus(status *core.PodExtendedResourceClaimStatus, spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
if status == nil {
return nil
}
containers := make(map[string]*core.Container)
for _, c := range spec.InitContainers {
containers[c.Name] = &c
}
for _, c := range spec.Containers {
containers[c.Name] = &c
}
var allErrs field.ErrorList
rmFldPath := fldPath.Child("requestMappings")
if len(status.RequestMappings) == 0 {
allErrs = append(allErrs, field.Required(rmFldPath, "at least one request mapping is required"))
}
type key struct {
container string
resource string
}
seen := map[key]struct{}{}
for i, rm := range status.RequestMappings {
idxPath := rmFldPath.Index(i)
c, ok := containers[rm.ContainerName]
if ok {
if _, ok := c.Resources.Requests[core.ResourceName(rm.ResourceName)]; !ok {
allErrs = append(allErrs, field.Invalid(idxPath.Child("resourceName"), rm.ResourceName, "must match the extended resource name of an entry in spec.initContainers.resources.requests or spec.containers.resources.requests"))
}
} else {
allErrs = append(allErrs, field.Invalid(idxPath.Child("containerName"), rm.ContainerName, "must match the name of an entry in spec.initContainers.name or spec.containers.name"))
}
allErrs = append(allErrs, ValidateDNS1123Label(rm.RequestName, fldPath.Child("requestName"))...)
k := key{container: rm.ContainerName, resource: rm.ResourceName}
if _, ok := seen[k]; ok {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("containerName"), rm.ContainerName))
allErrs = append(allErrs, field.Duplicate(idxPath.Child("resourceName"), rm.ResourceName))
}
seen[k] = struct{}{}
}
if len(status.ResourceClaimName) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("resourceClaimName"), ""))
} else {
for _, detail := range ValidateResourceClaimName(status.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), status.ResourceClaimName, detail))
}
}
return allErrs
}
func havePodClaim(podClaims []core.PodResourceClaim, name string) bool {
for _, podClaim := range podClaims {
if podClaim.Name == name {
return true
}
}
return false
}
// ValidatePodEphemeralContainersUpdate tests that a user update to EphemeralContainers is valid.
// newPod and oldPod must only differ in their EphemeralContainers.
func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
// Part 1: Validate newPod's spec and updates to metadata
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
// static pods don't support ephemeral containers #113935
if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods do not support ephemeral containers")}
}
// Part 2: Validate that the changes between oldPod.Spec.EphemeralContainers and
// newPod.Spec.EphemeralContainers are allowed.
//
// Existing EphemeralContainers may not be changed. Order isn't preserved by patch, so check each individually.
newContainerIndex := make(map[string]*core.EphemeralContainer)
specPath := field.NewPath("spec").Child("ephemeralContainers")
for i := range newPod.Spec.EphemeralContainers {
newContainerIndex[newPod.Spec.EphemeralContainers[i].Name] = &newPod.Spec.EphemeralContainers[i]
}
for _, old := range oldPod.Spec.EphemeralContainers {
if new, ok := newContainerIndex[old.Name]; !ok {
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("existing ephemeral containers %q may not be removed", old.Name)))
} else if !apiequality.Semantic.DeepEqual(old, *new) {
specDiff := diff.Diff(old, *new)
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("existing ephemeral containers %q may not be changed\n%v", old.Name, specDiff)))
}
}
return allErrs
}
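// Example (illustrative sketch; names and image are hypothetical, and a real
// container must also carry the usual required fields): ephemeral containers
// are append-only, so adding a new debug container passes the append-only
// check, while mutating or removing an existing one fails:
//
//	newPod := oldPod.DeepCopy()
//	newPod.Spec.EphemeralContainers = append(newPod.Spec.EphemeralContainers,
//		core.EphemeralContainer{
//			EphemeralContainerCommon: core.EphemeralContainerCommon{
//				Name: "debugger", Image: "busybox",
//			},
//		})
//	errs := ValidatePodEphemeralContainersUpdate(newPod, oldPod, opts)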
// ValidatePodResize tests that a user update to pod container resources is valid.
// newPod and oldPod must only differ in their Containers[*].Resources and
// Containers[*].ResizePolicy fields.
func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
// Part 1: Validate newPod's spec and updates to metadata
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
// pods with pod-level resources cannot be resized
isPodLevelResourcesSet := func(pod *core.Pod) bool {
return pod.Spec.Resources != nil &&
(len(pod.Spec.Resources.Requests)+len(pod.Spec.Resources.Limits) > 0)
}
if isPodLevelResourcesSet(oldPod) || isPodLevelResourcesSet(newPod) {
return field.ErrorList{field.Forbidden(field.NewPath(""), "pods with pod-level resources cannot be resized")}
}
// static pods cannot be resized.
if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods cannot be resized")}
}
// windows pods are not supported.
if oldPod.Spec.OS != nil && oldPod.Spec.OS.Name == core.Windows {
return field.ErrorList{field.Forbidden(field.NewPath(""), "windows pods cannot be resized")}
}
// Part 2: Validate that the changes between oldPod.Spec.Containers[].Resources and
// newPod.Spec.Containers[].Resources are allowed. Also validate that the changes between oldPod.Spec.InitContainers[].Resources and
// newPod.Spec.InitContainers[].Resources are allowed.
specPath := field.NewPath("spec")
if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) {
allErrs = append(allErrs, field.Invalid(specPath, newPod.Status.QOSClass, "Pod QOS Class may not change as a result of resizing"))
}
if !isPodResizeRequestSupported(*oldPod) {
allErrs = append(allErrs, field.Forbidden(specPath, "Pod running on node without support for resize"))
}
// The rest of the validation assumes that the containers are in the same order,
// so we proceed only if that assumption is true.
containerOrderErrs := validatePodResizeContainerOrdering(newPod, oldPod, specPath)
allErrs = append(allErrs, containerOrderErrs...)
if containerOrderErrs != nil {
return allErrs
}
// Do not allow removing resource requests/limits on resize.
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for ix, ctr := range oldPod.Spec.InitContainers {
if !isRestartableInitContainer(&ctr) {
continue
}
allErrs = append(allErrs, validateContainerResize(
&newPod.Spec.InitContainers[ix].Resources,
&oldPod.Spec.InitContainers[ix].Resources,
newPod.Spec.InitContainers[ix].ResizePolicy,
specPath.Child("initContainers").Index(ix).Child("resources"))...)
}
}
for ix := range oldPod.Spec.Containers {
allErrs = append(allErrs, validateContainerResize(
&newPod.Spec.Containers[ix].Resources,
&oldPod.Spec.Containers[ix].Resources,
newPod.Spec.Containers[ix].ResizePolicy,
specPath.Child("containers").Index(ix).Child("resources"))...)
}
// Ensure that only CPU and memory resources are mutable for regular containers.
originalCPUMemPodSpec := *newPod.Spec.DeepCopy()
var newContainers []core.Container
for ix, container := range originalCPUMemPodSpec.Containers {
dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.Containers[ix])
if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.Containers[ix]) {
// This likely means that the user has made changes to resources other than CPU and memory for a regular container.
errs := field.Forbidden(specPath, "only cpu and memory resources are mutable")
allErrs = append(allErrs, errs)
}
newContainers = append(newContainers, container)
}
originalCPUMemPodSpec.Containers = newContainers
// Ensure that only CPU and memory resources are mutable for restartable init containers.
// Also ensure that resources are immutable for non-restartable init containers.
var newInitContainers []core.Container
if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
for ix, container := range originalCPUMemPodSpec.InitContainers {
if isRestartableInitContainer(&container) { // restartable init container
dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix])
if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.InitContainers[ix]) {
// This likely means that the user has made changes to resources other than CPU and memory for a sidecar container.
errs := field.Forbidden(specPath, "only cpu and memory resources for sidecar containers are mutable")
allErrs = append(allErrs, errs)
}
} else if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.InitContainers[ix]) { // non-restartable init container
// This likely means that the user has modified the resources of a non-sidecar init container.
errs := field.Forbidden(specPath, "resources for non-sidecar init containers are immutable")
allErrs = append(allErrs, errs)
}
newInitContainers = append(newInitContainers, container)
}
originalCPUMemPodSpec.InitContainers = newInitContainers
}
if len(allErrs) > 0 {
return allErrs
}
if !apiequality.Semantic.DeepEqual(originalCPUMemPodSpec, oldPod.Spec) {
// This likely means that the user has made changes to resources other than CPU and Memory.
errs := field.Forbidden(specPath, "only cpu and memory resources are mutable")
allErrs = append(allErrs, errs)
}
return allErrs
}
// validatePodResizeContainerOrdering validates container ordering for a resize request.
// We do not allow adding, removing, re-ordering, or renaming containers on resize.
func validatePodResizeContainerOrdering(newPod, oldPod *core.Pod, specPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(newPod.Spec.Containers) != len(oldPod.Spec.Containers) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("containers"), "containers may not be added or removed on resize"))
} else {
for i, oldCtr := range oldPod.Spec.Containers {
if newPod.Spec.Containers[i].Name != oldCtr.Name {
allErrs = append(allErrs, field.Forbidden(specPath.Child("containers").Index(i).Child("name"), "containers may not be renamed or reordered on resize"))
}
}
}
if len(newPod.Spec.InitContainers) != len(oldPod.Spec.InitContainers) {
allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers"), "initContainers may not be added or removed on resize"))
} else {
for i, oldCtr := range oldPod.Spec.InitContainers {
if newPod.Spec.InitContainers[i].Name != oldCtr.Name {
allErrs = append(allErrs, field.Forbidden(specPath.Child("initContainers").Index(i).Child("name"), "initContainers may not be renamed or reordered on resize"))
}
}
}
return allErrs
}
// dropCPUMemoryResourcesFromContainer deletes the cpu and memory resources from the container, and copies them from the old pod container resources if present.
func dropCPUMemoryResourcesFromContainer(container *core.Container, oldPodSpecContainer *core.Container) {
dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList {
if oldResourceList == nil {
return nil
}
var mungedResourceList core.ResourceList
if resourceList == nil {
mungedResourceList = make(core.ResourceList)
} else {
mungedResourceList = resourceList.DeepCopy()
}
delete(mungedResourceList, core.ResourceCPU)
delete(mungedResourceList, core.ResourceMemory)
if cpu, found := oldResourceList[core.ResourceCPU]; found {
mungedResourceList[core.ResourceCPU] = cpu
}
if mem, found := oldResourceList[core.ResourceMemory]; found {
mungedResourceList[core.ResourceMemory] = mem
}
return mungedResourceList
}
lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPodSpecContainer.Resources.Limits)
req := dropCPUMemoryUpdates(container.Resources.Requests, oldPodSpecContainer.Resources.Requests)
container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
}
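// Example (illustrative, hypothetical quantities): if the resize request sets
// requests{cpu: 2, memory: 2Gi, ephemeral-storage: 2Gi} and the old container
// had requests{cpu: 1, memory: 1Gi, ephemeral-storage: 1Gi}, the munged
// result is {cpu: 1, memory: 1Gi, ephemeral-storage: 2Gi}: the cpu/memory
// changes are masked, so any remaining diff against the old spec exposes a
// forbidden non-cpu/memory mutation.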
// isPodResizeRequestSupported checks whether the pod is running on a node with InPlacePodVerticalScaling enabled.
func isPodResizeRequestSupported(pod core.Pod) bool {
// TODO: Remove this after GA+3 releases of InPlacePodVerticalScaling
// This code handles the version skew as described in the KEP.
// For handling version skew we only allow updating a Pod's resources
// if the Pod already has Pod.Status.ContainerStatuses[i].Resources set. This means
// that the apiserver would only allow updates to Pods running on Nodes with
// the InPlacePodVerticalScaling feature gate enabled.
for _, c := range pod.Status.ContainerStatuses {
if c.State.Running != nil {
return c.Resources != nil
}
}
// No running containers. We cannot tell whether the node supports resize at this point, so we assume it does.
return true
}
// validateContainerResize validates the changes to the container's resource requirements for a pod resize request.
// newRequirements and oldRequirements must be non-nil.
func validateContainerResize(newRequirements, oldRequirements *core.ResourceRequirements, resizePolicies []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
// Removing resource requirements is not supported.
if resourcesRemoved(newRequirements.Requests, oldRequirements.Requests) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("requests"), "resource requests cannot be removed"))
}
if resourcesRemoved(newRequirements.Limits, oldRequirements.Limits) {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("limits"), "resource limits cannot be removed"))
}
// TODO(tallclair): Move resizable resource checks here.
return allErrs
}
func resourcesRemoved(resourceList, oldResourceList core.ResourceList) bool {
if len(oldResourceList) > len(resourceList) {
return true
}
for name := range oldResourceList {
if _, ok := resourceList[name]; !ok {
return true
}
}
return false
}
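// Example (illustrative): going from requests{cpu, memory} to requests{cpu}
// removes an entry and returns true; going from {cpu} to {cpu, memory} only
// adds entries and returns false.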
// ValidatePodBinding tests if required fields in the pod binding are legal.
func ValidatePodBinding(binding *core.Binding) field.ErrorList {
allErrs := field.ErrorList{}
if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" {
// TODO: When validation becomes versioned, this gets more complicated.
allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", "<empty>"}))
}
if len(binding.Target.Name) == 0 {
// TODO: When validation becomes versioned, this gets more complicated.
allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), ""))
}
return allErrs
}
// ValidatePodTemplate tests if required fields in the pod template are set.
func ValidatePodTemplate(pod *core.PodTemplate, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"), opts)...)
return allErrs
}
// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"), opts)...)
return allErrs
}
var supportedSessionAffinityType = sets.New(core.ServiceAffinityClientIP, core.ServiceAffinityNone)
var supportedServiceType = sets.New(core.ServiceTypeClusterIP, core.ServiceTypeNodePort,
core.ServiceTypeLoadBalancer, core.ServiceTypeExternalName)
var supportedServiceInternalTrafficPolicy = sets.New(core.ServiceInternalTrafficPolicyCluster, core.ServiceInternalTrafficPolicyLocal)
var supportedServiceIPFamily = sets.New(core.IPv4Protocol, core.IPv6Protocol)
var supportedServiceIPFamilyPolicy = sets.New(
core.IPFamilyPolicySingleStack,
core.IPFamilyPolicyPreferDualStack,
core.IPFamilyPolicyRequireDualStack)
// validateService tests if required fields/annotations of a Service are valid.
func validateService(service, oldService *core.Service) field.ErrorList {
metaPath := field.NewPath("metadata")
// Don't validate ObjectMeta here - that is handled in the ValidateServiceCreate/ValidateServiceUpdate
// functions which call ValidateObjectMeta and ValidateObjectMetaUpdate respectively.
var allErrs field.ErrorList
topologyHintsVal, topologyHintsSet := service.Annotations[core.DeprecatedAnnotationTopologyAwareHints]
topologyModeVal, topologyModeSet := service.Annotations[core.AnnotationTopologyMode]
if topologyModeSet && topologyHintsSet && topologyModeVal != topologyHintsVal {
message := fmt.Sprintf("must match annotations[%s] when both are specified", core.DeprecatedAnnotationTopologyAwareHints)
allErrs = append(allErrs, field.Invalid(metaPath.Child("annotations").Key(core.AnnotationTopologyMode), topologyModeVal, message))
}
specPath := field.NewPath("spec")
if len(service.Spec.Ports) == 0 && !isHeadlessService(service) && service.Spec.Type != core.ServiceTypeExternalName {
allErrs = append(allErrs, field.Required(specPath.Child("ports"), ""))
}
switch service.Spec.Type {
case core.ServiceTypeLoadBalancer:
if isHeadlessService(service) {
allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIPs").Index(0), service.Spec.ClusterIPs[0], "may not be set to 'None' for LoadBalancer services"))
}
case core.ServiceTypeNodePort:
if isHeadlessService(service) {
allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIPs").Index(0), service.Spec.ClusterIPs[0], "may not be set to 'None' for NodePort services"))
}
case core.ServiceTypeExternalName:
// must have len(.spec.ClusterIPs) == 0 // note: strategy sets ClusterIPs based on ClusterIP
if len(service.Spec.ClusterIPs) > 0 {
allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIPs"), "may not be set for ExternalName services"))
}
// must have nil families and nil policy
if len(service.Spec.IPFamilies) > 0 {
allErrs = append(allErrs, field.Forbidden(specPath.Child("ipFamilies"), "may not be set for ExternalName services"))
}
if service.Spec.IPFamilyPolicy != nil {
allErrs = append(allErrs, field.Forbidden(specPath.Child("ipFamilyPolicy"), "may not be set for ExternalName services"))
}
// The value (a CNAME) may have a trailing dot to denote it as fully qualified
cname := strings.TrimSuffix(service.Spec.ExternalName, ".")
if len(cname) > 0 {
allErrs = append(allErrs, ValidateDNS1123Subdomain(cname, specPath.Child("externalName"))...)
} else {
allErrs = append(allErrs, field.Required(specPath.Child("externalName"), ""))
}
}
allPortNames := sets.Set[string]{}
portsPath := specPath.Child("ports")
for i := range service.Spec.Ports {
portPath := portsPath.Index(i)
allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService(service), &allPortNames, portPath)...)
}
if service.Spec.Selector != nil {
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...)
}
if len(service.Spec.SessionAffinity) == 0 {
allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), ""))
} else if !supportedSessionAffinityType.Has(service.Spec.SessionAffinity) {
allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, sets.List(supportedSessionAffinityType)))
}
if service.Spec.SessionAffinity == core.ServiceAffinityClientIP {
allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...)
} else if service.Spec.SessionAffinity == core.ServiceAffinityNone {
if service.Spec.SessionAffinityConfig != nil {
allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", core.ServiceAffinityNone)))
}
}
// dualstack <-> ClusterIPs <-> ipfamilies
allErrs = append(allErrs, ValidateServiceClusterIPsRelatedFields(service, oldService)...)
// All new ExternalIPs must be valid and "non-special". (Existing ExternalIPs may
// have been validated against older rules, but if we allowed them before we can't
// reject them now.)
ipPath := specPath.Child("externalIPs")
var existingExternalIPs []string
if oldService != nil {
existingExternalIPs = oldService.Spec.ExternalIPs // +k8s:verify-mutation:reason=clone
}
for i, ip := range service.Spec.ExternalIPs {
idxPath := ipPath.Index(i)
if errs := IsValidIPForLegacyField(idxPath, ip, existingExternalIPs); len(errs) != 0 {
allErrs = append(allErrs, errs...)
} else {
// For historical reasons, this uses ValidateEndpointIP even
// though that is not exactly the appropriate set of checks.
allErrs = append(allErrs, ValidateEndpointIP(ip, idxPath)...)
}
}
if len(service.Spec.Type) == 0 {
allErrs = append(allErrs, field.Required(specPath.Child("type"), ""))
} else if !supportedServiceType.Has(service.Spec.Type) {
allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, sets.List(supportedServiceType)))
}
if service.Spec.Type == core.ServiceTypeClusterIP {
portsPath := specPath.Child("ports")
for i := range service.Spec.Ports {
portPath := portsPath.Index(i)
if service.Spec.Ports[i].NodePort != 0 {
allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'"))
}
}
}
// Check for duplicate NodePorts, considering (protocol,port) pairs
portsPath = specPath.Child("ports")
nodePorts := make(map[core.ServicePort]bool)
for i := range service.Spec.Ports {
port := &service.Spec.Ports[i]
if port.NodePort == 0 {
continue
}
portPath := portsPath.Index(i)
var key core.ServicePort
key.Protocol = port.Protocol
key.NodePort = port.NodePort
_, found := nodePorts[key]
if found {
allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort))
}
nodePorts[key] = true
}
// Check for duplicate Ports, considering (protocol,port) pairs
portsPath = specPath.Child("ports")
ports := make(map[core.ServicePort]bool)
for i, port := range service.Spec.Ports {
portPath := portsPath.Index(i)
key := core.ServicePort{Protocol: port.Protocol, Port: port.Port}
_, found := ports[key]
if found {
allErrs = append(allErrs, field.Duplicate(portPath, key))
}
ports[key] = true
}
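// Example (illustrative): duplicates above are keyed on the (protocol, port)
// pair, so exposing both TCP/53 and UDP/53 is valid, while two TCP/53
// entries produce a Duplicate error.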
// Validate SourceRanges field or annotation. Existing invalid CIDR values do not
// need to be fixed. Note that even with the tighter CIDR validation we still
// allow excess whitespace, because that is effectively part of the API.
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
fieldPath := specPath.Child("LoadBalancerSourceRanges")
if service.Spec.Type != core.ServiceTypeLoadBalancer {
allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'"))
}
var existingSourceRanges []string
if oldService != nil {
existingSourceRanges = make([]string, len(oldService.Spec.LoadBalancerSourceRanges))
for i, value := range oldService.Spec.LoadBalancerSourceRanges {
existingSourceRanges[i] = strings.TrimSpace(value)
}
}
for idx, value := range service.Spec.LoadBalancerSourceRanges {
// Note: due to a historical accident around transition from the
// annotation value, these values are allowed to be space-padded.
value = strings.TrimSpace(value)
allErrs = append(allErrs, IsValidCIDRForLegacyField(fieldPath.Index(idx), value, existingSourceRanges)...)
}
} else if val, annotationSet := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey]; annotationSet {
fieldPath := field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey)
if service.Spec.Type != core.ServiceTypeLoadBalancer {
allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'"))
}
if oldService == nil || oldService.Annotations[core.AnnotationLoadBalancerSourceRangesKey] != val {
val = strings.TrimSpace(val)
if val != "" {
cidrs := strings.Split(val, ",")
for _, value := range cidrs {
value = strings.TrimSpace(value)
allErrs = append(allErrs, IsValidCIDRForLegacyField(fieldPath, value, nil)...)
}
}
}
}
if service.Spec.AllocateLoadBalancerNodePorts != nil && service.Spec.Type != core.ServiceTypeLoadBalancer {
allErrs = append(allErrs, field.Forbidden(specPath.Child("allocateLoadBalancerNodePorts"), "may only be used when `type` is 'LoadBalancer'"))
}
if service.Spec.Type == core.ServiceTypeLoadBalancer && service.Spec.AllocateLoadBalancerNodePorts == nil {
allErrs = append(allErrs, field.Required(specPath.Child("allocateLoadBalancerNodePorts"), ""))
}
// validate LoadBalancerClass field
allErrs = append(allErrs, validateLoadBalancerClassField(nil, service)...)
// external traffic policy fields
allErrs = append(allErrs, validateServiceExternalTrafficPolicy(service)...)
// internal traffic policy field
allErrs = append(allErrs, validateServiceInternalTrafficFieldsValue(service)...)
// traffic distribution field
allErrs = append(allErrs, validateServiceTrafficDistribution(service)...)
return allErrs
}
func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.Set[string], fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if requireName && len(sp.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if len(sp.Name) != 0 {
allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...)
if allNames.Has(sp.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name))
} else {
allNames.Insert(sp.Name)
}
}
for _, msg := range validation.IsValidPortNum(int(sp.Port)) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg))
}
if len(sp.Protocol) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
} else if !supportedPortProtocols.Has(sp.Protocol) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, sets.List(supportedPortProtocols)))
}
allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...)
if sp.AppProtocol != nil {
allErrs = append(allErrs, ValidateQualifiedName(*sp.AppProtocol, fldPath.Child("appProtocol"))...)
}
// in the v1 API, targetPorts on headless services were tolerated.
// once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility.
//
// if isHeadlessService {
// if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) {
// allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None"))
// }
// }
return allErrs
}
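// Example (illustrative sketch, hypothetical values): when a Service exposes
// more than one port, every port needs a unique DNS-1123 label name:
//
//	[]core.ServicePort{
//		{Name: "http", Port: 80, Protocol: "TCP"},
//		{Name: "metrics", Port: 9090, Protocol: "TCP"},
//	}
//
// Omitting either name, or reusing "http" twice, fails validation here.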
var validExternalTrafficPolicies = sets.New(core.ServiceExternalTrafficPolicyCluster, core.ServiceExternalTrafficPolicyLocal)
func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
fldPath := field.NewPath("spec")
if !apiservice.ExternallyAccessible(service) {
if service.Spec.ExternalTrafficPolicy != "" {
allErrs = append(allErrs, field.Invalid(fldPath.Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
"may only be set for externally-accessible services"))
}
} else {
if service.Spec.ExternalTrafficPolicy == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("externalTrafficPolicy"), ""))
} else if !validExternalTrafficPolicies.Has(service.Spec.ExternalTrafficPolicy) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("externalTrafficPolicy"),
service.Spec.ExternalTrafficPolicy, sets.List(validExternalTrafficPolicies)))
}
}
if !apiservice.NeedsHealthCheck(service) {
if service.Spec.HealthCheckNodePort != 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort,
"may only be set when `type` is 'LoadBalancer' and `externalTrafficPolicy` is 'Local'"))
}
} else {
if service.Spec.HealthCheckNodePort == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("healthCheckNodePort"), ""))
} else {
for _, msg := range validation.IsValidPortNum(int(service.Spec.HealthCheckNodePort)) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, msg))
}
}
}
return allErrs
}
func validateServiceExternalTrafficFieldsUpdate(before, after *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
if apiservice.NeedsHealthCheck(before) && apiservice.NeedsHealthCheck(after) {
if after.Spec.HealthCheckNodePort != before.Spec.HealthCheckNodePort {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "healthCheckNodePort"), "field is immutable"))
}
}
return allErrs
}
// validateServiceInternalTrafficFieldsValue validates that the InternalTrafficPolicy-related
// spec fields have legal values.
func validateServiceInternalTrafficFieldsValue(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
if service.Spec.InternalTrafficPolicy == nil {
// We do not forbid internalTrafficPolicy on other Service types because of historical reasons.
// We did not check that before it went beta and we don't want to invalidate existing stored objects.
if service.Spec.Type == core.ServiceTypeNodePort ||
service.Spec.Type == core.ServiceTypeLoadBalancer || service.Spec.Type == core.ServiceTypeClusterIP {
allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
}
}
if service.Spec.InternalTrafficPolicy != nil && !supportedServiceInternalTrafficPolicy.Has(*service.Spec.InternalTrafficPolicy) {
allErrs = append(allErrs, field.NotSupported(field.NewPath("spec").Child("internalTrafficPolicy"), *service.Spec.InternalTrafficPolicy, sets.List(supportedServiceInternalTrafficPolicy)))
}
return allErrs
}
// validateServiceTrafficDistribution validates the values for the
// trafficDistribution field.
func validateServiceTrafficDistribution(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
if service.Spec.TrafficDistribution == nil {
return allErrs
}
var supportedTrafficDistribution []string
if !utilfeature.DefaultFeatureGate.Enabled(features.PreferSameTrafficDistribution) {
supportedTrafficDistribution = []string{
v1.ServiceTrafficDistributionPreferClose,
}
} else {
supportedTrafficDistribution = []string{
v1.ServiceTrafficDistributionPreferClose,
v1.ServiceTrafficDistributionPreferSameZone,
v1.ServiceTrafficDistributionPreferSameNode,
}
}
if !slices.Contains(supportedTrafficDistribution, *service.Spec.TrafficDistribution) {
allErrs = append(allErrs, field.NotSupported(field.NewPath("spec").Child("trafficDistribution"), *service.Spec.TrafficDistribution, supportedTrafficDistribution))
}
return allErrs
}
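// Example (illustrative): with the PreferSameTrafficDistribution feature gate
// disabled, only "PreferClose" is accepted, so setting
// spec.trafficDistribution to PreferSameNode yields a NotSupported error;
// with the gate enabled, PreferSameZone and PreferSameNode are also accepted.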
// ValidateServiceCreate validates Services as they are created.
func ValidateServiceCreate(service *core.Service) field.ErrorList {
metaPath := field.NewPath("metadata")
// KEP-5311 Relaxed validation for Services names
validateServiceNameFunc := ValidateServiceName
if utilfeature.DefaultFeatureGate.Enabled(features.RelaxedServiceNameValidation) {
validateServiceNameFunc = apimachineryvalidation.NameIsDNSLabel
}
allErrs := ValidateObjectMeta(&service.ObjectMeta, true, validateServiceNameFunc, metaPath)
return append(allErrs, validateService(service, nil)...)
}
// ValidateServiceUpdate tests if required fields in the service are set during an update
func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
// Users can upgrade (add another clusterIP or ipFamily) and
// can downgrade (remove the secondary clusterIP or ipFamily),
// but *CANNOT* change the primary/secondary clusterIP or ipFamily *UNLESS*
// they are changing from/to ExternalName.
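// For example (illustrative only): clusterIPs ["10.0.0.10"] may be upgraded
// to ["10.0.0.10", "fd00::10"] or downgraded back, but rewriting the primary
// IP to ["10.0.0.20"] is rejected unless the type is changing from/to
// ExternalName.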
upgradeDowngradeClusterIPsErrs := validateUpgradeDowngradeClusterIPs(oldService, service)
allErrs = append(allErrs, upgradeDowngradeClusterIPsErrs...)
upgradeDowngradeIPFamiliesErrs := validateUpgradeDowngradeIPFamilies(oldService, service)
allErrs = append(allErrs, upgradeDowngradeIPFamiliesErrs...)
upgradeDowngradeLoadBalancerClassErrs := validateLoadBalancerClassField(oldService, service)
allErrs = append(allErrs, upgradeDowngradeLoadBalancerClassErrs...)
allErrs = append(allErrs, validateServiceExternalTrafficFieldsUpdate(oldService, service)...)
return append(allErrs, validateService(service, oldService)...)
}
// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status.
func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, &oldService.Status.LoadBalancer, field.NewPath("status", "loadBalancer"), &service.Spec)...)
return allErrs
}
// ValidateReplicationController tests if required fields in the replication controller are set.
func ValidateReplicationController(controller *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, nil, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, &oldController.Spec, field.NewPath("spec"), opts)...)
return allErrs
}
// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set.
func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...)
return allErrs
}
func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...)
msg := "cannot be greater than status.replicas"
if status.FullyLabeledReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
}
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
}
return allErrs
}
// ValidateNonEmptySelector validates that the given selector is non-empty.
func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
selector := labels.Set(selectorMap).AsSelector()
if selector.Empty() {
allErrs = append(allErrs, field.Required(fldPath, ""))
}
return allErrs
}
// ValidatePodTemplateSpecForRC validates the given template and ensures that it is in accordance with the desired selector and replicas.
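//
// For example (illustrative only), a selector {"app": "web"} requires the
// template labels to include app=web; otherwise the template's
// metadata.labels are rejected as Invalid.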
func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
} else {
selector := labels.Set(selectorMap).AsSelector()
if !selector.Empty() {
// Verify that the RC selector matches the labels in template.
labels := labels.Set(template.Labels)
if !selector.Matches(labels) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
}
}
allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath, opts)...)
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if template.Spec.RestartPolicy != core.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []core.RestartPolicy{core.RestartPolicyAlways}))
}
if template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "activeDeadlineSeconds"), "activeDeadlineSeconds in ReplicationController is not supported"))
}
}
return allErrs
}
// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
func ValidateReplicationControllerSpec(spec, oldSpec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds")).MarkCoveredByDeclarative()...)
allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
if spec.Replicas == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("replicas"), "").MarkCoveredByDeclarative())
} else {
allErrs = append(allErrs, ValidateNonnegativeField(int64(*spec.Replicas), fldPath.Child("replicas")).MarkCoveredByDeclarative()...)
}
allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, fldPath.Child("template"), opts)...)
return allErrs
}
// ValidatePodTemplateSpec validates the spec of a pod template
func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"), opts)...)
allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, nil, fldPath.Child("spec"), opts)...)
allErrs = append(allErrs, validateSeccompAnnotationsAndFields(spec.ObjectMeta, &spec.Spec, fldPath.Child("spec"))...)
allErrs = append(allErrs, validateAppArmorAnnotationsAndFieldsMatchOnCreate(spec.ObjectMeta, &spec.Spec, fldPath.Child("spec"))...)
if len(spec.Spec.EphemeralContainers) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("spec", "ephemeralContainers"), "ephemeral containers not allowed in pod template"))
}
return allErrs
}
// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations have valid data
func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
taints, err := helper.GetTaintsFromNodeAnnotations(annotations)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error()))
return allErrs
}
if len(taints) > 0 {
allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...)
}
return allErrs
}
// validateNodeTaints tests if given taints have valid data.
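//
// For example (illustrative only), two taints that share the key "dedicated"
// and the effect NoSchedule are reported as Duplicate even if their values
// differ, because taints must be unique by <key, effect> pair.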
func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
uniqueTaints := map[core.TaintEffect]sets.Set[string]{}
for i, currTaint := range taints {
idxPath := fldPath.Index(i)
// validate the taint key
allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
// validate the taint value
if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
}
// validate the taint effect
allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
// validate if taint is unique by <key, effect>
if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) {
duplicatedError := field.Duplicate(idxPath, currTaint)
duplicatedError.Detail = "taints must be unique by key and effect pair"
allErrors = append(allErrors, duplicatedError)
continue
}
// add taint to uniqueTaints for the uniqueness check
if len(uniqueTaints[currTaint.Effect]) == 0 {
uniqueTaints[currTaint.Effect] = sets.Set[string]{}
}
uniqueTaints[currTaint.Effect].Insert(currTaint.Key)
}
return allErrors
}
func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if annotations[core.TaintsAnnotationKey] != "" {
allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...)
}
if annotations[core.PreferAvoidPodsAnnotationKey] != "" {
allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...)
}
return allErrs
}
// ValidateNode tests if required fields in the node are set.
func ValidateNode(node *core.Node) field.ErrorList {
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
if len(node.Spec.Taints) > 0 {
allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
}
// Only validate spec.
// All status fields are optional and can be updated later.
// That said, if specified, we need to ensure they are valid.
allErrs = append(allErrs, ValidateNodeResources(node)...)
allErrs = append(allErrs, validateNodeSwapStatus(node.Status.NodeInfo.Swap, fldPath.Child("nodeSwapStatus"))...)
// validate PodCIDRs only if we need to
if len(node.Spec.PodCIDRs) > 0 {
podCIDRsField := field.NewPath("spec", "podCIDRs")
// all PodCIDRs should be valid ones
for idx, value := range node.Spec.PodCIDRs {
allErrs = append(allErrs, IsValidCIDRForLegacyField(podCIDRsField.Index(idx), value, nil)...)
}
// if more than one PodCIDR is set, they must form a dual-stack pair
if len(node.Spec.PodCIDRs) > 1 {
dualStack, err := netutils.IsDualStackCIDRStrings(node.Spec.PodCIDRs)
if err != nil {
allErrs = append(allErrs, field.InternalError(podCIDRsField, fmt.Errorf("invalid PodCIDRs: failed dual-stack check: %v", err)))
}
if !dualStack || len(node.Spec.PodCIDRs) > 2 {
allErrs = append(allErrs, field.Invalid(podCIDRsField, node.Spec.PodCIDRs, "may specify no more than one CIDR for each IP family"))
}
}
}
return allErrs
}
// ValidateNodeResources is used to make sure a node has valid capacity and allocatable values.
func ValidateNodeResources(node *core.Node) field.ErrorList {
allErrs := field.ErrorList{}
// Validate resource quantities in capacity.
for k, v := range node.Status.Capacity {
resPath := field.NewPath("status", "capacity", string(k))
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
// Validate resource quantities in allocatable.
for k, v := range node.Status.Allocatable {
resPath := field.NewPath("status", "allocatable", string(k))
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
return allErrs
}
// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
// TODO: Enable the code once we have a better core object.status update model. Currently,
// anyone can update node status.
// if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) {
// allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty"))
// }
allErrs = append(allErrs, ValidateNodeResources(node)...)
// Validate no duplicate addresses in node status.
addresses := make(map[core.NodeAddress]bool)
for i, address := range node.Status.Addresses {
if _, ok := addresses[address]; ok {
allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address))
}
addresses[address] = true
}
// Allow the controller manager to assign a CIDR to a node if it doesn't have one.
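// For example (illustrative only): updating spec.podCIDRs from [] to
// ["10.244.0.0/24"] is accepted, but any later change to an already-set
// entry is rejected as Forbidden.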
if len(oldNode.Spec.PodCIDRs) > 0 {
// compare the entire slice
if len(oldNode.Spec.PodCIDRs) != len(node.Spec.PodCIDRs) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
} else {
for idx, value := range oldNode.Spec.PodCIDRs {
if value != node.Spec.PodCIDRs[idx] {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDRs"), "node updates may not change podCIDR except from \"\" to valid"))
}
}
}
}
// Allow controller manager updating provider ID when not set
if len(oldNode.Spec.ProviderID) > 0 && oldNode.Spec.ProviderID != node.Spec.ProviderID {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid"))
}
if node.Spec.ConfigSource != nil {
allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...)
}
if node.Status.Config != nil {
allErrs = append(allErrs, validateNodeConfigStatus(node.Status.Config, field.NewPath("status", "config"))...)
}
// update taints
if len(node.Spec.Taints) > 0 {
allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
}
if node.Spec.DoNotUseExternalID != oldNode.Spec.DoNotUseExternalID {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "externalID"), "may not be updated"))
}
// status and metadata are allowed to change (barring the restrictions above), so separately test the spec fields.
// spec only has a few fields, so check the ones we don't allow changing:
// 1. PodCIDRs - immutable after first set - checked above
// 2. ProviderID - immutable after first set - checked above
// 3. Unschedulable - allowed to change
// 4. Taints - allowed to change
// 5. ConfigSource - allowed to change (and checked above)
// 6. DoNotUseExternalID - immutable - checked above
return allErrs
}
// validation specific to Node.Spec.ConfigSource
// The ConfigSource field is deprecated and will not be used. The validation is kept in place
// for backward compatibility.
func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
count := int(0)
if source.ConfigMap != nil {
count++
allErrs = append(allErrs, validateConfigMapNodeConfigSourceSpec(source.ConfigMap, fldPath.Child("configMap"))...)
}
// add more subfields here in the future as they are added to NodeConfigSource
// exactly one reference subfield must be non-nil
if count != 1 {
allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
}
return allErrs
}
// validation specific to Node.Spec.ConfigSource.ConfigMap
// The ConfigSource field is deprecated and will not be used. The validation is kept in place
// for backward compatibility.
func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// uid and resourceVersion must not be set in spec
if string(source.UID) != "" {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("uid"), "uid must not be set in spec"))
}
if source.ResourceVersion != "" {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("resourceVersion"), "resourceVersion must not be set in spec"))
}
return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}
// validation specific to Node.Status.Config
func validateNodeConfigStatus(status *core.NodeConfigStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if status.Assigned != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Assigned, fldPath.Child("assigned"))...)
}
if status.Active != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Active, fldPath.Child("active"))...)
}
if status.LastKnownGood != nil {
allErrs = append(allErrs, validateNodeConfigSourceStatus(status.LastKnownGood, fldPath.Child("lastKnownGood"))...)
}
return allErrs
}
// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood)
func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
count := int(0)
if source.ConfigMap != nil {
count++
allErrs = append(allErrs, validateConfigMapNodeConfigSourceStatus(source.ConfigMap, fldPath.Child("configMap"))...)
}
// add more subfields here in the future as they are added to NodeConfigSource
// exactly one reference subfield must be non-nil
if count != 1 {
allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
}
return allErrs
}
// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap
func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// uid and resourceVersion must be set in status
if string(source.UID) == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("uid"), ""))
}
if source.ResourceVersion == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("resourceVersion"), ""))
}
return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
}
// common validation
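// For example (illustrative only; the names are hypothetical), a well-formed
// source references an existing ConfigMap such as namespace "kube-system",
// name "my-node-config", kubeletConfigKey "kubelet"; all three fields are
// required and each must satisfy its respective name rules.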
func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// validate target configmap namespace
if source.Namespace == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
} else {
for _, msg := range ValidateNameFunc(ValidateNamespaceName)(source.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), source.Namespace, msg))
}
}
// validate target configmap name
if source.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
for _, msg := range ValidateNameFunc(ValidateConfigMapName)(source.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), source.Name, msg))
}
}
// validate kubeletConfigKey against rules for configMap key names
if source.KubeletConfigKey == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), ""))
} else {
for _, msg := range validation.IsConfigMapKey(source.KubeletConfigKey) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("kubeletConfigKey"), source.KubeletConfigKey, msg))
}
}
return allErrs
}
// Validate compute resource type names.
// Refer to docs/design/resources.md for more details.
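//
// For example (illustrative only), "cpu" and "memory" pass as standard
// resource names, and a fully qualified name such as "example.com/gpu"
// bypasses the standard-name check; an unqualified, non-standard name such
// as "gpu" is rejected.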
func validateResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(string(value)) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified"))
}
}
return allErrs
}
// Validate container resource name
// Refer to docs/design/resources.md for more details.
func validateContainerResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(value, fldPath)
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardContainerResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers"))
}
} else if !helper.IsNativeResource(value) {
if !helper.IsExtendedResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard"))
}
}
return allErrs
}
// validatePodResourceName verifies that:
// 1. The resource name is a valid compute resource name for pod-level specification.
// 2. The resource is supported by the PodLevelResources feature.
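//
// For example (illustrative only), pod-level "cpu" and "memory" are
// typically supported, whereas a resource such as "example.com/device"
// yields a NotSupported error listing the supported pod-level resources.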
func validatePodResourceName(resourceName core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(resourceName, fldPath)
if len(allErrs) != 0 {
return allErrs
}
if !resourcehelper.IsSupportedPodLevelResource(v1.ResourceName(resourceName)) {
return append(allErrs, field.NotSupported(fldPath, resourceName, sets.List(resourcehelper.SupportedPodLevelResources())))
}
return allErrs
}
// ValidateResourceQuotaResourceName validates resource names that can go in a resource quota.
// Refer to docs/design/resources.md for more details.
func ValidateResourceQuotaResourceName(value core.ResourceName, fldPath *field.Path) field.ErrorList {
allErrs := validateResourceName(value, fldPath)
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardQuotaResourceName(value) {
return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource))
}
}
return allErrs
}
// Validate limit range types
func validateLimitRangeTypeName(value core.LimitType, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(string(value)) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(string(value), "/")) == 1 {
if !helper.IsStandardLimitRangeType(value) {
return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified"))
}
}
return allErrs
}
// Validate limit range resource name
// limit types other than Pod/Container may contain storage resources, not just cpu or memory
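//
// For example (illustrative only), a PersistentVolumeClaim limit may name
// "storage", whereas Pod and Container limits are restricted to standard
// container resources such as "cpu" and "memory".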
func validateLimitRangeResourceName(limitType core.LimitType, value core.ResourceName, fldPath *field.Path) field.ErrorList {
switch limitType {
case core.LimitTypePod, core.LimitTypeContainer:
return validateContainerResourceName(value, fldPath)
default:
return validateResourceName(value, fldPath)
}
}
// ValidateLimitRange tests if required fields in the LimitRange are set.
func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList {
allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata"))
// ensure resource names are properly qualified per docs/design/resources.md
limitTypeSet := map[core.LimitType]bool{}
fldPath := field.NewPath("spec", "limits")
for i := range limitRange.Spec.Limits {
idxPath := fldPath.Index(i)
limit := &limitRange.Spec.Limits[i]
allErrs = append(allErrs, validateLimitRangeTypeName(limit.Type, idxPath.Child("type"))...)
_, found := limitTypeSet[limit.Type]
if found {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type))
}
limitTypeSet[limit.Type] = true
keys := sets.Set[string]{}
min := map[string]resource.Quantity{}
max := map[string]resource.Quantity{}
defaults := map[string]resource.Quantity{}
defaultRequests := map[string]resource.Quantity{}
maxLimitRequestRatios := map[string]resource.Quantity{}
for k, q := range limit.Max {
allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("max").Key(string(k)))...)
keys.Insert(string(k))
max[string(k)] = q
}
for k, q := range limit.Min {
allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("min").Key(string(k)))...)
keys.Insert(string(k))
min[string(k)] = q
}
if limit.Type == core.LimitTypePod {
if len(limit.Default) > 0 {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'"))
}
if len(limit.DefaultRequest) > 0 {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'"))
}
} else {
for k, q := range limit.Default {
allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("default").Key(string(k)))...)
keys.Insert(string(k))
defaults[string(k)] = q
}
for k, q := range limit.DefaultRequest {
allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("defaultRequest").Key(string(k)))...)
keys.Insert(string(k))
defaultRequests[string(k)] = q
}
}
if limit.Type == core.LimitTypePersistentVolumeClaim {
_, minQuantityFound := limit.Min[core.ResourceStorage]
_, maxQuantityFound := limit.Max[core.ResourceStorage]
if !minQuantityFound && !maxQuantityFound {
allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided"))
}
}
for k, q := range limit.MaxLimitRequestRatio {
allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, k, idxPath.Child("maxLimitRequestRatio").Key(string(k)))...)
keys.Insert(string(k))
maxLimitRequestRatios[string(k)] = q
}
for k := range keys {
minQuantity, minQuantityFound := min[k]
maxQuantity, maxQuantityFound := max[k]
defaultQuantity, defaultQuantityFound := defaults[k]
defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
maxRatio, maxRatioFound := maxLimitRequestRatios[k]
if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
}
if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
}
if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
}
if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
}
if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
}
if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
}
if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String())))
}
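// Worked example (illustrative only): with min cpu = 500m, max cpu = 2, and
// maxLimitRequestRatio cpu = 8, the permitted ceiling is max/min = 4, so the
// declared ratio of 8 is rejected below.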
if maxRatioFound && minQuantityFound && maxQuantityFound {
maxRatioValue := float64(maxRatio.Value())
minQuantityValue := minQuantity.Value()
maxQuantityValue := maxQuantity.Value()
if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue {
maxRatioValue = float64(maxRatio.MilliValue()) / 1000
minQuantityValue = minQuantity.MilliValue()
maxQuantityValue = maxQuantity.MilliValue()
}
maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue)
if maxRatioValue > maxRatioLimit {
allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit)))
}
}
// for GPUs, huge pages, and other resources that do not allow overcommit,
// the default value and the defaultRequest value must match if both are specified
if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must be equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k)))
}
}
}
return allErrs
}
// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList {
allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
return allErrs
}
// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set.
func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
return allErrs
}
// ValidateSecret tests if required fields in the Secret are set.
func ValidateSecret(secret *core.Secret) field.ErrorList {
allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
dataPath := field.NewPath("data")
totalSize := 0
for key, value := range secret.Data {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
}
totalSize += len(value)
}
if totalSize > core.MaxSecretSize {
allErrs = append(allErrs, field.TooLong(dataPath, "" /*unused*/, core.MaxSecretSize))
}
switch secret.Type {
case core.SecretTypeServiceAccountToken:
// Only require Annotations[kubernetes.io/service-account.name]
// Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), ""))
}
case core.SecretTypeOpaque, "":
// no-op
case core.SecretTypeDockercfg:
dockercfgBytes, exists := secret.Data[core.DockerConfigKey]
if !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), ""))
break
}
// make sure that the content is well-formed JSON.
if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "<secret contents redacted>", err.Error()))
}
case core.SecretTypeDockerConfigJSON:
dockerConfigJSONBytes, exists := secret.Data[core.DockerConfigJSONKey]
if !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJSONKey), ""))
break
}
// make sure that the content is well-formed JSON.
if err := json.Unmarshal(dockerConfigJSONBytes, &map[string]interface{}{}); err != nil {
allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJSONKey), "<secret contents redacted>", err.Error()))
}
case core.SecretTypeBasicAuth:
_, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey]
_, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey]
// username or password might be empty, but the field must be present
if !usernameFieldExists && !passwordFieldExists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.BasicAuthUsernameKey), ""))
allErrs = append(allErrs, field.Required(dataPath.Key(core.BasicAuthPasswordKey), ""))
break
}
case core.SecretTypeSSHAuth:
if len(secret.Data[core.SSHAuthPrivateKey]) == 0 {
allErrs = append(allErrs, field.Required(dataPath.Key(core.SSHAuthPrivateKey), ""))
break
}
case core.SecretTypeTLS:
if _, exists := secret.Data[core.TLSCertKey]; !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), ""))
}
if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
}
default:
// no-op
}
return allErrs
}
// ValidateSecretUpdate tests if required fields in the Secret are set.
func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
if oldSecret.Immutable != nil && *oldSecret.Immutable {
if newSecret.Immutable == nil || !*newSecret.Immutable {
allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set"))
}
if !reflect.DeepEqual(newSecret.Data, oldSecret.Data) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("data"), "field is immutable when `immutable` is set"))
}
// We don't validate StringData, as it was already converted back to Data
// before validation is happening.
}
allErrs = append(allErrs, ValidateSecret(newSecret)...)
return allErrs
}
// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateConfigMapName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateConfigMap tests whether required fields in the ConfigMap are set.
func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
totalSize := 0
for key, value := range cfg.Data {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
// check if we have a duplicate key in the other bag
if _, isValue := cfg.BinaryData[key]; isValue {
msg := "duplicate of key present in binaryData"
allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
}
totalSize += len(value)
}
for key, value := range cfg.BinaryData {
for _, msg := range validation.IsConfigMapKey(key) {
allErrs = append(allErrs, field.Invalid(field.NewPath("binaryData").Key(key), key, msg))
}
totalSize += len(value)
}
if totalSize > core.MaxSecretSize {
// pass back "" to indicate that the error refers to the whole object.
allErrs = append(allErrs, field.TooLong(field.NewPath(""), "" /*unused*/, core.MaxSecretSize))
}
return allErrs
}
// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set.
func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...)
if oldCfg.Immutable != nil && *oldCfg.Immutable {
if newCfg.Immutable == nil || !*newCfg.Immutable {
allErrs = append(allErrs, field.Forbidden(field.NewPath("immutable"), "field is immutable when `immutable` is set"))
}
if !reflect.DeepEqual(newCfg.Data, oldCfg.Data) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("data"), "field is immutable when `immutable` is set"))
}
if !reflect.DeepEqual(newCfg.BinaryData, oldCfg.BinaryData) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("binaryData"), "field is immutable when `immutable` is set"))
}
}
allErrs = append(allErrs, ValidateConfigMap(newCfg)...)
return allErrs
}
func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList {
if quantity.Value() < 0 {
return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")}
}
return field.ErrorList{}
}
func validatePodResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
return validateResourceRequirements(requirements, validatePodResourceName, podClaimNames, fldPath, opts)
}
func ValidateContainerResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
return validateResourceRequirements(requirements, validateContainerResourceName, podClaimNames, fldPath, opts)
}
// validateResourceRequirements validates a resource requirements spec.
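//
// For example (illustrative only): requests cpu=2 with limits cpu=1 is
// rejected because the request exceeds the limit, and requests
// hugepages-2Mi=2Mi with limits hugepages-2Mi=4Mi is rejected because huge
// pages may not be overcommitted, so request and limit must be equal.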
func validateResourceRequirements(requirements *core.ResourceRequirements, resourceNameFn func(core.ResourceName, *field.Path) field.ErrorList, podClaimNames sets.Set[string], fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
limContainsCPUOrMemory := false
reqContainsCPUOrMemory := false
limContainsHugePages := false
reqContainsHugePages := false
supportedQoSComputeResources := sets.New(core.ResourceCPU, core.ResourceMemory)
for resourceName, quantity := range requirements.Limits {
fldPath := limPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...)
if helper.IsHugePageResourceName(resourceName) {
limContainsHugePages = true
if err := validateResourceQuantityHugePageValue(resourceName, quantity, opts); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), err.Error()))
}
}
if supportedQoSComputeResources.Has(resourceName) {
limContainsCPUOrMemory = true
}
}
for resourceName, quantity := range requirements.Requests {
fldPath := reqPath.Key(string(resourceName))
// Validate resource name.
allErrs = append(allErrs, resourceNameFn(resourceName, fldPath)...)
// Validate resource quantity.
allErrs = append(allErrs, ValidateResourceQuantityValue(resourceName, quantity, fldPath)...)
// Check that request <= limit.
limitQuantity, exists := requirements.Limits[resourceName]
if exists {
// For resources that do not allow overcommit, requests cannot exceed limits and also cannot be lower; they must be exactly equal.
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String())))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String())))
}
} else if !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
}
if helper.IsHugePageResourceName(resourceName) {
reqContainsHugePages = true
if err := validateResourceQuantityHugePageValue(resourceName, quantity, opts); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), err.Error()))
}
}
if supportedQoSComputeResources.Has(resourceName) {
reqContainsCPUOrMemory = true
}
}
if !limContainsCPUOrMemory && !reqContainsCPUOrMemory && (reqContainsHugePages || limContainsHugePages) {
allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
}
allErrs = append(allErrs, validateResourceClaimNames(requirements.Claims, podClaimNames, fldPath.Child("claims"))...)
return allErrs
}
// validateResourceClaimNames checks that the names in
// ResourceRequirements.Claims have a corresponding entry in
// PodSpec.ResourceClaims.
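//
// For example (illustrative only), with pod.spec.resourceClaims naming
// "gpu-claim" (a hypothetical name), a reference {Name: "gpu-claim"} is
// valid, repeating it is a Duplicate, and {Name: "other-claim"} is NotFound.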
func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.Set[string], fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
names := sets.Set[string]{}
for i, claim := range claims {
name := claim.Name
if name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), ""))
} else {
if names.Has(name) {
// The claim as a whole (i.e. all of its requests) was already referenced.
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
} else {
key := name
if claim.Request != "" {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Request, fldPath.Index(i).Child("request"))...)
key += "/" + claim.Request
}
if names.Has(key) {
// The exact same entry was already referenced.
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), key))
} else if claim.Request == "" {
// When referencing the claim as a whole, it
// overlaps with any earlier reference to a
// specific request in that claim. This
// cannot be checked with a map lookup;
// we need to iterate.
for key := range names {
index := strings.Index(key, "/")
if index < 0 {
continue
}
if key[0:index] == name {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
}
}
}
names.Insert(key)
}
if !podClaimNames.Has(name) {
// field.NotFound doesn't accept an
// explanation. Adding one here is more
// user-friendly.
error := field.NotFound(fldPath.Index(i), name)
error.Detail = "must be one of the names in pod.spec.resourceClaims"
if len(podClaimNames) == 0 {
error.Detail += " which is empty"
} else {
error.Detail += ": " + strings.Join(sets.List(podClaimNames), ", ")
}
allErrs = append(allErrs, error)
}
}
}
return allErrs
}
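// validateResourceQuantityHugePageValue checks that a huge page quantity is
// a positive integer multiple of the page size encoded in the resource name.
// For example (illustrative only), hugepages-2Mi = 4Mi is divisible, while
// hugepages-2Mi = 3Mi is not and is rejected unless
// opts.AllowIndivisibleHugePagesValues is set.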
func validateResourceQuantityHugePageValue(name core.ResourceName, quantity resource.Quantity, opts PodValidationOptions) error {
if !helper.IsHugePageResourceName(name) {
return nil
}
if !opts.AllowIndivisibleHugePagesValues && !helper.IsHugePageResourceValueDivisible(name, quantity) {
return fmt.Errorf("%s is not positive integer multiple of %s", quantity.String(), name)
}
return nil
}
// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for the set of scopes
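//
// For example (illustrative only), declaring both the Terminating and
// NotTerminating scopes on one quota is reported as conflicting, and a scope
// may not be combined with hard limits on resources it cannot constrain.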
func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(resourceQuotaSpec.Scopes) == 0 {
return allErrs
}
hardLimits := sets.New[core.ResourceName]()
for k := range resourceQuotaSpec.Hard {
hardLimits.Insert(k)
}
fldPath := fld.Child("scopes")
scopeSet := sets.New[core.ResourceQuotaScope]()
for _, scope := range resourceQuotaSpec.Scopes {
if !helper.IsStandardResourceQuotaScope(scope) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
}
for _, k := range sets.List(hardLimits) {
if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
}
}
scopeSet.Insert(scope)
}
invalidScopePairs := []sets.Set[core.ResourceQuotaScope]{
sets.New(core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort),
sets.New(core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating),
}
for _, invalidScopePair := range invalidScopePairs {
if scopeSet.HasAll(sets.List(invalidScopePair)...) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
}
}
return allErrs
}
// validateScopedResourceSelectorRequirement tests that the match expressions have valid data
func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
hardLimits := sets.New[core.ResourceName]()
for k := range resourceQuotaSpec.Hard {
hardLimits.Insert(k)
}
fldPath := fld.Child("matchExpressions")
scopeSet := sets.New[core.ResourceQuotaScope]()
for _, req := range resourceQuotaSpec.ScopeSelector.MatchExpressions {
if !helper.IsStandardResourceQuotaScope(req.ScopeName) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("scopeName"), req.ScopeName, "unsupported scope"))
}
for _, k := range sets.List(hardLimits) {
if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(req.ScopeName, k) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.ScopeSelector, "unsupported scope applied to resource"))
}
}
switch req.ScopeName {
case core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeCrossNamespacePodAffinity:
if req.Operator != core.ScopeSelectorOpExists {
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator,
"must be 'Exists' when scope is any of ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeBestEffort, ResourceQuotaScopeNotBestEffort or ResourceQuotaScopeCrossNamespacePodAffinity"))
}
}
switch req.Operator {
case core.ScopeSelectorOpIn, core.ScopeSelectorOpNotIn:
if len(req.Values) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"),
"must specify at least one value when `operator` is 'In' or 'NotIn' for scope selector"))
}
case core.ScopeSelectorOpExists, core.ScopeSelectorOpDoesNotExist:
if len(req.Values) != 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("values"), req.Values,
"must not specify values when `operator` is 'Exists' or 'DoesNotExist' for scope selector"))
}
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
}
scopeSet.Insert(req.ScopeName)
}
invalidScopePairs := []sets.Set[core.ResourceQuotaScope]{
sets.New(core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort),
sets.New(core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating),
}
for _, invalidScopePair := range invalidScopePairs {
if scopeSet.HasAll(sets.List(invalidScopePair)...) {
allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
}
}
return allErrs
}
// validateScopeSelector tests that the specified scope selector has valid data
func validateScopeSelector(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if resourceQuotaSpec.ScopeSelector == nil {
return allErrs
}
allErrs = append(allErrs, validateScopedResourceSelectorRequirement(resourceQuotaSpec, fld.Child("scopeSelector"))...)
return allErrs
}
// ValidateResourceQuota tests if required fields in the ResourceQuota are set.
func ValidateResourceQuota(resourceQuota *core.ResourceQuota) field.ErrorList {
allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...)
return allErrs
}
func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
fldPath := fld.Child("hard")
for k, v := range status.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...)
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
fldPath = fld.Child("used")
for k, v := range status.Used {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...)
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
return allErrs
}
func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
fldPath := fld.Child("hard")
for k, v := range resourceQuotaSpec.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...)
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...)
allErrs = append(allErrs, validateScopeSelector(resourceQuotaSpec, fld)...)
return allErrs
}
// ValidateResourceQuantityValue enforces that the specified quantity is valid for the specified resource
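//
// For example (illustrative only), integer-only resources such as "pods"
// must be whole numbers: a quantity of 1500m (i.e. 1.5) is rejected, and
// every quantity must be non-negative.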
func ValidateResourceQuantityValue(resource core.ResourceName, value resource.Quantity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...)
if helper.IsIntegerResourceName(resource) {
if value.MilliValue()%int64(1000) != int64(0) {
allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg))
}
}
return allErrs
}
// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make.
func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...)
// ensure scopes cannot change, and that resources are still valid for scope
fldPath := field.NewPath("spec", "scopes")
oldScopes := sets.New[string]()
newScopes := sets.New[string]()
for _, scope := range newResourceQuota.Spec.Scopes {
newScopes.Insert(string(scope))
}
for _, scope := range oldResourceQuota.Spec.Scopes {
oldScopes.Insert(string(scope))
}
if !oldScopes.Equal(newScopes) {
allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg))
}
return allErrs
}
// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make.
func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata"))
if len(newResourceQuota.ResourceVersion) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
}
fldPath := field.NewPath("status", "hard")
for k, v := range newResourceQuota.Status.Hard {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...)
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
fldPath = field.NewPath("status", "used")
for k, v := range newResourceQuota.Status.Used {
resPath := fldPath.Key(string(k))
allErrs = append(allErrs, ValidateResourceQuotaResourceName(k, resPath)...)
allErrs = append(allErrs, ValidateResourceQuantityValue(k, v, resPath)...)
}
return allErrs
}
// ValidateNamespace tests if required fields are set.
func ValidateNamespace(namespace *core.Namespace) field.ErrorList {
allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata"))
for i := range namespace.Spec.Finalizers {
allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...)
}
return allErrs
}
// Validate finalizer names
func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath)
allErrs = append(allErrs, validateKubeFinalizerName(stringValue, fldPath)...)
return allErrs
}
// validateKubeFinalizerName checks for "standard" names of legacy finalizers
func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(strings.Split(stringValue, "/")) == 1 {
if !helper.IsStandardFinalizerName(stringValue) {
return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified"))
}
}
return allErrs
}
// ValidateNamespaceUpdate tests to make sure a namespace update can be applied.
func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
return allErrs
}
// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make.
func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
if newNamespace.DeletionTimestamp.IsZero() {
if newNamespace.Status.Phase != core.NamespaceActive {
allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty"))
}
} else {
if newNamespace.Status.Phase != core.NamespaceTerminating {
allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty"))
}
}
return allErrs
}
// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make.
func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata"))
fldPath := field.NewPath("spec", "finalizers")
for i := range newNamespace.Spec.Finalizers {
idxPath := fldPath.Index(i)
allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...)
}
return allErrs
}
// ValidateEndpoints validates Endpoints on create and update.
func ValidateEndpoints(endpoints, oldEndpoints *core.Endpoints) field.ErrorList {
allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...)
subsetErrs := validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))
if len(subsetErrs) != 0 {
// If this is an update, and Subsets was unchanged, then ignore the
// validation errors, since apparently older versions of Kubernetes
// considered the data valid. (We only check this after getting a
// validation error since Endpoints may be large and DeepEqual is slow.)
if oldEndpoints != nil && apiequality.Semantic.DeepEqual(oldEndpoints.Subsets, endpoints.Subsets) {
subsetErrs = nil
}
}
allErrs = append(allErrs, subsetErrs...)
return allErrs
}
// ValidateEndpointsCreate validates Endpoints on create.
func ValidateEndpointsCreate(endpoints *core.Endpoints) field.ErrorList {
return ValidateEndpoints(endpoints, nil)
}
// ValidateEndpointsUpdate validates Endpoints on update. NodeName changes are
// allowed during update to accommodate the case where nodeIP or PodCIDR is
// reused. An existing endpoint IP will have a different nodeName if this
// happens.
func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateEndpoints(newEndpoints, oldEndpoints)...)
return allErrs
}
func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i := range subsets {
ss := &subsets[i]
idxPath := fldPath.Index(i)
// EndpointSubsets must include at least one endpoint address. For headless services, we allow endpoints without ports.
if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
// TODO: consider adding a RequiredOneOf() error for this and similar cases
allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
}
for addr := range ss.Addresses {
allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...)
}
for addr := range ss.NotReadyAddresses {
allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...)
}
for port := range ss.Ports {
allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...)
}
}
return allErrs
}
func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, IsValidIPForLegacyField(fldPath.Child("ip"), address.IP, nil)...)
if len(address.Hostname) > 0 {
allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...)
}
// During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update
if address.NodeName != nil {
for _, msg := range ValidateNodeName(*address.NodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg).WithOrigin("format=dns-label"))
}
}
allErrs = append(allErrs, ValidateEndpointIP(address.IP, fldPath.Child("ip"))...)
return allErrs
}
// ValidateEndpointIP is used to validate Endpoints and EndpointSlice addresses, and also
// (for historical reasons) external IPs. It disallows certain address types that don't
// make sense in those contexts. Note that this function is _almost_, but not exactly,
// equivalent to net.IP.IsGlobalUnicast(). (Unlike IsGlobalUnicast, it allows global
// multicast IPs, which is probably a bug.)
//
// This function should not be used for new validations; the exact set of IPs that do and
// don't make sense in a particular field is context-dependent (e.g., localhost makes
// sense in some places; unspecified IPs make sense in fields that are used as bind
// addresses rather than destination addresses).
func ValidateEndpointIP(ipAddress string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
ip := netutils.ParseIPSloppy(ipAddress)
if ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address").WithOrigin("format=ip-sloppy"))
return allErrs
}
if ip.IsUnspecified() {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, fmt.Sprintf("may not be unspecified (%v)", ipAddress)))
}
if ip.IsLoopback() {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8, ::1/128)"))
}
if ip.IsLinkLocalUnicast() {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16, fe80::/10)"))
}
if ip.IsLinkLocalMulticast() {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24, ff02::/10)"))
}
return allErrs.WithOrigin("format=endpoint-ip")
}
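// Illustrative behavior of ValidateEndpointIP (an editorial sketch; the addresses and
// fldPath are hypothetical):
//
//	ValidateEndpointIP("10.1.2.3", fldPath)    // ok: ordinary unicast address
//	ValidateEndpointIP("0.0.0.0", fldPath)     // error: unspecified
//	ValidateEndpointIP("127.0.0.1", fldPath)   // error: loopback range
//	ValidateEndpointIP("169.254.0.5", fldPath) // error: link-local range
//	ValidateEndpointIP("::1", fldPath)         // error: loopback range (IPv6)
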
func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if requireName && len(port.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if len(port.Name) != 0 {
allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...)
}
for _, msg := range validation.IsValidPortNum(int(port.Port)) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg).WithOrigin("portNum"))
}
if len(port.Protocol) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), ""))
} else if !supportedPortProtocols.Has(port.Protocol) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, sets.List(supportedPortProtocols)))
}
if port.AppProtocol != nil {
allErrs = append(allErrs, ValidateQualifiedName(*port.AppProtocol, fldPath.Child("appProtocol"))...)
}
return allErrs
}
// ValidateSecurityContext ensures the security context contains valid settings
func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path, hostUsers bool) field.ErrorList {
allErrs := field.ErrorList{}
// this should only be true for testing since SecurityContext is defaulted by the core
if sc == nil {
return allErrs
}
if sc.Privileged != nil {
if *sc.Privileged && !capabilities.Get().AllowPrivileged {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy"))
}
}
if sc.RunAsUser != nil {
for _, msg := range validation.IsValidUserID(*sc.RunAsUser) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, msg))
}
}
if sc.RunAsGroup != nil {
for _, msg := range validation.IsValidGroupID(*sc.RunAsGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *sc.RunAsGroup, msg))
}
}
if sc.ProcMount != nil {
if err := ValidateProcMountType(fldPath.Child("procMount"), *sc.ProcMount); err != nil {
allErrs = append(allErrs, err)
}
if hostUsers && *sc.ProcMount == core.UnmaskedProcMount {
allErrs = append(allErrs, field.Invalid(fldPath.Child("procMount"), sc.ProcMount, "`hostUsers` must be false to use `Unmasked`"))
}
}
allErrs = append(allErrs, validateSeccompProfileField(sc.SeccompProfile, fldPath.Child("seccompProfile"))...)
if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation {
if sc.Privileged != nil && *sc.Privileged {
allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true"))
}
if sc.Capabilities != nil {
for _, cap := range sc.Capabilities.Add {
if string(cap) == "CAP_SYS_ADMIN" {
allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN"))
}
}
}
}
allErrs = append(allErrs, validateWindowsSecurityContextOptions(sc.WindowsOptions, fldPath.Child("windowsOptions"))...)
allErrs = append(allErrs, ValidateAppArmorProfileField(sc.AppArmorProfile, fldPath.Child("appArmorProfile"))...)
return allErrs
}
// maxGMSACredentialSpecLength is the max length, in bytes, for the actual contents
// of a GMSA cred spec. In general, those shouldn't be more than a few hundred bytes,
// so we want to give plenty of room here while still providing an upper bound.
// The runAsUserName field will be used to execute the given container's entrypoint, and
// it can be formatted as "DOMAIN/USER", where the DOMAIN is optional, maxRunAsUserNameDomainLength
// is the max character length for the user's DOMAIN, and maxRunAsUserNameUserLength
// is the max character length for the USER itself. Both the DOMAIN and USER have their
// own restrictions, and more information about them can be found here:
// https://support.microsoft.com/en-us/help/909264/naming-conventions-in-active-directory-for-computers-domains-sites-and
// https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-2000-server/bb726984(v=technet.10)
const (
maxGMSACredentialSpecLengthInKiB = 64
maxGMSACredentialSpecLength = maxGMSACredentialSpecLengthInKiB * 1024
maxRunAsUserNameDomainLength = 256
maxRunAsUserNameUserLength = 104
)
var (
// control characters are not permitted in the runAsUserName field.
ctrlRegex = regexp.MustCompile(`[[:cntrl:]]+`)
// a valid NetBios Domain name cannot start with a dot, has at least 1 character,
// at most 15 characters, and it cannot contain the characters: \ / : * ? " < > |
validNetBiosRegex = regexp.MustCompile(`^[^\\/:\*\?"<>|\.][^\\/:\*\?"<>|]{0,14}$`)
// a valid DNS name contains only alphanumeric characters, dots, and dashes.
dnsLabelFormat = `[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?`
dnsSubdomainFormat = fmt.Sprintf(`^%s(?:\.%s)*$`, dnsLabelFormat, dnsLabelFormat)
validWindowsUserDomainDNSRegex = regexp.MustCompile(dnsSubdomainFormat)
// a username is invalid if it contains the characters: " / \ [ ] : ; | = , + * ? < > @
// or it contains only dots or spaces.
invalidUserNameCharsRegex = regexp.MustCompile(`["/\\:;|=,\+\*\?<>@\[\]]`)
invalidUserNameDotsSpacesRegex = regexp.MustCompile(`^[\. ]+$`)
)
func validateWindowsSecurityContextOptions(windowsOptions *core.WindowsSecurityContextOptions, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if windowsOptions == nil {
return allErrs
}
if windowsOptions.GMSACredentialSpecName != nil {
// gmsaCredentialSpecName must be the name of a custom resource
for _, msg := range validation.IsDNS1123Subdomain(*windowsOptions.GMSACredentialSpecName) {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpecName"), windowsOptions.GMSACredentialSpecName, msg))
}
}
if windowsOptions.GMSACredentialSpec != nil {
if l := len(*windowsOptions.GMSACredentialSpec); l == 0 {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, "gmsaCredentialSpec cannot be an empty string"))
} else if l > maxGMSACredentialSpecLength {
errMsg := fmt.Sprintf("gmsaCredentialSpec size must be under %d KiB", maxGMSACredentialSpecLengthInKiB)
allErrs = append(allErrs, field.Invalid(fieldPath.Child("gmsaCredentialSpec"), windowsOptions.GMSACredentialSpec, errMsg))
}
}
if windowsOptions.RunAsUserName != nil {
if l := len(*windowsOptions.RunAsUserName); l == 0 {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, "runAsUserName cannot be an empty string"))
} else if ctrlRegex.MatchString(*windowsOptions.RunAsUserName) {
errMsg := "runAsUserName cannot contain control characters"
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
} else if parts := strings.Split(*windowsOptions.RunAsUserName, "\\"); len(parts) > 2 {
errMsg := "runAsUserName cannot contain more than one backslash"
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
} else {
var (
hasDomain = false
domain = ""
user string
)
if len(parts) == 1 {
user = parts[0]
} else {
hasDomain = true
domain = parts[0]
user = parts[1]
}
if len(domain) >= maxRunAsUserNameDomainLength {
errMsg := fmt.Sprintf("runAsUserName's Domain length must be under %d characters", maxRunAsUserNameDomainLength)
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
}
if hasDomain && !(validNetBiosRegex.MatchString(domain) || validWindowsUserDomainDNSRegex.MatchString(domain)) {
errMsg := "runAsUserName's Domain doesn't match the NetBios nor the DNS format"
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
}
if l := len(user); l == 0 {
errMsg := "runAsUserName's User cannot be empty"
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
} else if l > maxRunAsUserNameUserLength {
errMsg := fmt.Sprintf("runAsUserName's User length must not be longer than %d characters", maxRunAsUserNameUserLength)
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
}
if invalidUserNameDotsSpacesRegex.MatchString(user) {
errMsg := `runAsUserName's User cannot contain only periods or spaces`
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
}
if invalidUserNameCharsRegex.MatchString(user) {
errMsg := `runAsUserName's User cannot contain the following characters: "/\:;|=,+*?<>@[]`
allErrs = append(allErrs, field.Invalid(fieldPath.Child("runAsUserName"), windowsOptions.RunAsUserName, errMsg))
}
}
}
return allErrs
}
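// Illustrative runAsUserName values (an editorial sketch; all example names are hypothetical):
//
//	ContainerUser            // ok: USER with no DOMAIN
//	EXAMPLE\ContainerUser    // ok: NetBIOS DOMAIN plus USER
//	example.com\svc-account  // ok: DNS DOMAIN plus USER
//	EXAMPLE\sub\user         // error: more than one backslash
//	EXAMPLE\. ..             // error: USER consisting only of dots and spaces
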
func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// Keep track of the total container count and the hostProcess container count for the validation below
containerCount := 0
hostProcessContainerCount := 0
var podHostProcess *bool
if podSpec.SecurityContext != nil && podSpec.SecurityContext.WindowsOptions != nil {
podHostProcess = podSpec.SecurityContext.WindowsOptions.HostProcess
}
hostNetwork := false
if podSpec.SecurityContext != nil {
hostNetwork = podSpec.SecurityContext.HostNetwork
}
podshelper.VisitContainersWithPath(podSpec, fieldPath, func(c *core.Container, cFieldPath *field.Path) bool {
containerCount++
var containerHostProcess *bool
if c.SecurityContext != nil && c.SecurityContext.WindowsOptions != nil {
containerHostProcess = c.SecurityContext.WindowsOptions.HostProcess
}
if podHostProcess != nil && containerHostProcess != nil && *podHostProcess != *containerHostProcess {
errMsg := fmt.Sprintf("pod hostProcess value must be identical if both are specified, was %v", *podHostProcess)
allErrs = append(allErrs, field.Invalid(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), *containerHostProcess, errMsg))
}
switch {
case containerHostProcess != nil && *containerHostProcess:
// Container explicitly sets hostProcess=true
hostProcessContainerCount++
case containerHostProcess == nil && podHostProcess != nil && *podHostProcess:
// Container inherits hostProcess=true from pod settings
hostProcessContainerCount++
}
return true
})
if hostProcessContainerCount > 0 {
// At present, if a Windows Pod contains any HostProcess containers then all containers must be
// HostProcess containers (explicitly set or inherited).
if hostProcessContainerCount != containerCount {
errMsg := "If pod contains any hostProcess containers then all containers must be HostProcess containers"
allErrs = append(allErrs, field.Invalid(fieldPath, "", errMsg))
}
// At present Windows Pods which contain HostProcess containers must also set HostNetwork.
if !hostNetwork {
errMsg := "hostNetwork must be true if pod contains any hostProcess containers"
allErrs = append(allErrs, field.Invalid(fieldPath.Child("hostNetwork"), hostNetwork, errMsg))
}
if !capabilities.Get().AllowPrivileged {
errMsg := "hostProcess containers are disallowed by cluster policy"
allErrs = append(allErrs, field.Forbidden(fieldPath, errMsg))
}
}
return allErrs
}
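// Illustrative outcomes of validateWindowsHostProcessPod (an editorial sketch; the pod
// shapes are hypothetical):
//
//	pod-level hostProcess=true, no per-container overrides -> all containers inherit; valid only with hostNetwork=true
//	one container hostProcess=true, another unset or false -> error: all containers must be HostProcess containers
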
// validateOS validates the OS field within pod spec
func validateOS(podSpec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
os := podSpec.OS
if os == nil {
return allErrs
}
if len(os.Name) == 0 {
return append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if !validOS.Has(os.Name) {
allErrs = append(allErrs, field.NotSupported(fldPath, os.Name, sets.List(validOS)))
}
return allErrs
}
var validLogStreams = sets.New[string](
core.LogStreamStdout,
core.LogStreamStderr,
core.LogStreamAll,
)
func ValidatePodLogOptions(opts *core.PodLogOptions, allowStreamSelection bool) field.ErrorList {
allErrs := field.ErrorList{}
if opts.TailLines != nil && *opts.TailLines < 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg))
}
if opts.LimitBytes != nil && *opts.LimitBytes < 1 {
allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0"))
}
switch {
case opts.SinceSeconds != nil && opts.SinceTime != nil:
allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified"))
case opts.SinceSeconds != nil:
if *opts.SinceSeconds < 1 {
allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0"))
}
}
if allowStreamSelection {
if opts.Stream == nil {
allErrs = append(allErrs, field.Required(field.NewPath("stream"), "must be specified"))
} else {
if !validLogStreams.Has(*opts.Stream) {
allErrs = append(allErrs, field.NotSupported(field.NewPath("stream"), *opts.Stream, validLogStreams.UnsortedList()))
}
if *opts.Stream != core.LogStreamAll && opts.TailLines != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "`tailLines` and specific `stream` are mutually exclusive for now"))
}
}
} else if opts.Stream != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("stream"), "may not be specified"))
}
return allErrs
}
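// Illustrative PodLogOptions combinations (an editorial sketch; the values are hypothetical):
//
//	TailLines=-1                             // error: must be non-negative
//	SinceSeconds=30 with SinceTime set       // error: at most one may be specified
//	Stream=LogStreamStdout with TailLines=10 // error when stream selection is allowed: mutually exclusive for now
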
var (
supportedLoadBalancerIPMode = sets.New(core.LoadBalancerIPModeVIP, core.LoadBalancerIPModeProxy)
)
// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus
func ValidateLoadBalancerStatus(status, oldStatus *core.LoadBalancerStatus, fldPath *field.Path, spec *core.ServiceSpec) field.ErrorList {
allErrs := field.ErrorList{}
ingrPath := fldPath.Child("ingress")
if !utilfeature.DefaultFeatureGate.Enabled(features.AllowServiceLBStatusOnNonLB) && spec.Type != core.ServiceTypeLoadBalancer && len(status.Ingress) != 0 {
allErrs = append(allErrs, field.Forbidden(ingrPath, "may only be used when `spec.type` is 'LoadBalancer'"))
} else {
var existingIngressIPs []string
if oldStatus != nil {
existingIngressIPs = make([]string, 0, len(oldStatus.Ingress))
for _, ingress := range oldStatus.Ingress {
if len(ingress.IP) > 0 {
existingIngressIPs = append(existingIngressIPs, ingress.IP)
}
}
}
for i, ingress := range status.Ingress {
idxPath := ingrPath.Index(i)
if len(ingress.IP) > 0 {
allErrs = append(allErrs, IsValidIPForLegacyField(idxPath.Child("ip"), ingress.IP, existingIngressIPs)...)
}
if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) && ingress.IPMode == nil {
if len(ingress.IP) > 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("ipMode"), "must be specified when `ip` is set"))
}
} else if ingress.IPMode != nil && len(ingress.IP) == 0 {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("ipMode"), "may not be specified when `ip` is not set"))
} else if ingress.IPMode != nil && !supportedLoadBalancerIPMode.Has(*ingress.IPMode) {
allErrs = append(allErrs, field.NotSupported(idxPath.Child("ipMode"), ingress.IPMode, sets.List(supportedLoadBalancerIPMode)))
}
if len(ingress.Hostname) > 0 {
for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
}
if isIP := netutils.ParseIPSloppy(ingress.Hostname) != nil; isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
}
}
}
}
return allErrs
}
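// Illustrative ingress entries (an editorial sketch; the addresses and the vip pointer
// are hypothetical):
//
//	{IP: "203.0.113.7", IPMode: nil} // error while the LoadBalancerIPMode gate is enabled: ipMode must be specified when ip is set
//	{IP: "", IPMode: &vip}           // error: ipMode may not be specified when ip is not set
//	{Hostname: "198.51.100.1"}       // error: hostname must be a DNS name, not an IP address
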
// validateVolumeNodeAffinity tests that the PersistentVolume.NodeAffinity has valid data
// returns:
// - true if volumeNodeAffinity is set
// - errorList if there are validation errors
func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, opts PersistentVolumeSpecValidationOptions, fldPath *field.Path) (bool, field.ErrorList) {
allErrs := field.ErrorList{}
if nodeAffinity == nil {
return false, allErrs
}
if nodeAffinity.Required != nil {
allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, opts.AllowInvalidLabelValueInRequiredNodeAffinity, fldPath.Child("required"))...)
} else {
allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints"))
}
return true, allErrs
}
// IsDecremented returns true if `update` was cleared (nil) while `old` was set,
// or if both are set and `update` is less than `old`.
func IsDecremented(update, old *int32) bool {
if update == nil && old != nil {
return true
}
if update == nil || old == nil {
return false
}
return *update < *old
}
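// Illustrative results of IsDecremented (an editorial sketch; ptr is a hypothetical
// helper returning a *int32):
//
//	IsDecremented(nil, ptr(3))    // true: the value was cleared
//	IsDecremented(ptr(2), ptr(3)) // true: 2 < 3
//	IsDecremented(ptr(3), ptr(3)) // false: unchanged
//	IsDecremented(ptr(3), nil)    // false: nothing to decrement from
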
// ValidateProcMountType tests that the argument is a valid ProcMountType.
func ValidateProcMountType(fldPath *field.Path, procMountType core.ProcMountType) *field.Error {
switch procMountType {
case core.DefaultProcMount, core.UnmaskedProcMount:
return nil
default:
return field.NotSupported(fldPath, procMountType, []core.ProcMountType{core.DefaultProcMount, core.UnmaskedProcMount})
}
}
var (
supportedScheduleActions = sets.New(core.DoNotSchedule, core.ScheduleAnyway)
)
// validateTopologySpreadConstraints validates given TopologySpreadConstraints.
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
for i, constraint := range constraints {
subFldPath := fldPath.Index(i)
if errs := ValidatePositiveField(int64(constraint.MaxSkew), subFldPath.Child("maxSkew")); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
if err := ValidateTopologyKey(subFldPath.Child("topologyKey"), constraint.TopologyKey); err != nil {
allErrs = append(allErrs, err)
}
if err := ValidateWhenUnsatisfiable(subFldPath.Child("whenUnsatisfiable"), constraint.WhenUnsatisfiable); err != nil {
allErrs = append(allErrs, err)
}
// tuple {topologyKey, whenUnsatisfiable} denotes one kind of spread constraint
if err := ValidateSpreadConstraintNotRepeat(subFldPath.Child("{topologyKey, whenUnsatisfiable}"), constraint, constraints[i+1:]); err != nil {
allErrs = append(allErrs, err)
}
allErrs = append(allErrs, validateMinDomains(subFldPath.Child("minDomains"), constraint.MinDomains, constraint.WhenUnsatisfiable)...)
if err := validateNodeInclusionPolicy(subFldPath.Child("nodeAffinityPolicy"), constraint.NodeAffinityPolicy); err != nil {
allErrs = append(allErrs, err)
}
if err := validateNodeInclusionPolicy(subFldPath.Child("nodeTaintsPolicy"), constraint.NodeTaintsPolicy); err != nil {
allErrs = append(allErrs, err)
}
// legacyValidationFunction is ValidateMatchLabelKeysInTopologySpread
// preferredValidationFunction is ValidateMatchLabelKeysAndMismatchLabelKeys
// OldPodViolatesMatchLabelKeysValidation==true means ValidateMatchLabelKeysAndMismatchLabelKeys failed, which means preferredValidation failed
// OldPodViolatesLegacyMatchLabelKeysValidation==true means ValidateMatchLabelKeysInTopologySpread failed, which means legacyValidation failed
if opts.AllowMatchLabelKeysInPodTopologySpread {
switch {
case opts.AllowMatchLabelKeysInPodTopologySpreadSelectorMerge && opts.OldPodViolatesMatchLabelKeysValidation:
// This case means that we want to use the preferredValidationFunction, but the old pod doesn't pass the preferredValidationFunction, so it must continue using the legacyValidationFunction.
// This is because we don't allow the fields to change.
allErrs = append(allErrs, ValidateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
case opts.AllowMatchLabelKeysInPodTopologySpreadSelectorMerge && !opts.OldPodViolatesMatchLabelKeysValidation:
// This case means we want to use the preferredValidationFunction and the old pod passes it, so we will continue requiring the preferredValidationFunction to pass.
allErrs = append(allErrs, ValidateMatchLabelKeysAndMismatchLabelKeys(subFldPath, constraint.MatchLabelKeys, nil, constraint.LabelSelector)...)
case !opts.AllowMatchLabelKeysInPodTopologySpreadSelectorMerge && opts.OldPodViolatesLegacyMatchLabelKeysValidation:
// This case means we want to use the legacyValidationFunction, but the old pod doesn't pass it, so it must continue using the preferredValidationFunction instead so that updates to other fields can happen.
// This allows us to enable the featuregate, then disable the featuregate and still be able to update the pod.
allErrs = append(allErrs, ValidateMatchLabelKeysAndMismatchLabelKeys(subFldPath, constraint.MatchLabelKeys, nil, constraint.LabelSelector)...)
case !opts.AllowMatchLabelKeysInPodTopologySpreadSelectorMerge && !opts.OldPodViolatesLegacyMatchLabelKeysValidation:
// This case means we want to use the legacyValidationFunction and the old pod passes it, so we will continue requiring the legacyValidationFunction to pass.
allErrs = append(allErrs, ValidateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
default:
// If we fall through, then we use the legacyValidationFunction because that's what we did prior to the featuregate(MatchLabelKeysInPodTopologySpreadSelectorMerge).
allErrs = append(allErrs, ValidateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
}
} else {
allErrs = append(allErrs, ValidateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
}
if !opts.AllowInvalidTopologySpreadConstraintLabelSelector {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...)
}
}
return allErrs
}
// validateMinDomains tests that the argument is a valid MinDomains.
func validateMinDomains(fldPath *field.Path, minDomains *int32, action core.UnsatisfiableConstraintAction) field.ErrorList {
if minDomains == nil {
return nil
}
var allErrs field.ErrorList
allErrs = append(allErrs, ValidatePositiveField(int64(*minDomains), fldPath)...)
// When MinDomains is non-nil, whenUnsatisfiable must be DoNotSchedule.
if action != core.DoNotSchedule {
allErrs = append(allErrs, field.Invalid(fldPath, minDomains, fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", core.DoNotSchedule, action)).WithOrigin("dependsOn"))
}
return allErrs
}
// ValidateTopologyKey tests that the argument is a valid TopologyKey.
func ValidateTopologyKey(fldPath *field.Path, topologyKey string) *field.Error {
if len(topologyKey) == 0 {
return field.Required(fldPath, "can not be empty")
}
return nil
}
// ValidateWhenUnsatisfiable tests that the argument is a valid UnsatisfiableConstraintAction.
func ValidateWhenUnsatisfiable(fldPath *field.Path, action core.UnsatisfiableConstraintAction) *field.Error {
if !supportedScheduleActions.Has(action) {
return field.NotSupported(fldPath, action, sets.List(supportedScheduleActions))
}
return nil
}
// ValidateSpreadConstraintNotRepeat tests whether `constraint` duplicates any of `restingConstraints`
// on the TopologyKey and WhenUnsatisfiable fields.
func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.TopologySpreadConstraint, restingConstraints []core.TopologySpreadConstraint) *field.Error {
for _, restingConstraint := range restingConstraints {
if constraint.TopologyKey == restingConstraint.TopologyKey &&
constraint.WhenUnsatisfiable == restingConstraint.WhenUnsatisfiable {
return field.Duplicate(fldPath, fmt.Sprintf("{%v, %v}", constraint.TopologyKey, constraint.WhenUnsatisfiable))
}
}
return nil
}
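// Illustrative duplicate detection (an editorial sketch; the constraints are hypothetical):
//
//	{topologyKey: "zone", whenUnsatisfiable: DoNotSchedule, maxSkew: 1}
//	{topologyKey: "zone", whenUnsatisfiable: DoNotSchedule, maxSkew: 2}  // Duplicate: same {topologyKey, whenUnsatisfiable} pair
//	{topologyKey: "zone", whenUnsatisfiable: ScheduleAnyway, maxSkew: 1} // ok: differs in whenUnsatisfiable
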
var (
supportedPodTopologySpreadNodePolicies = sets.New(core.NodeInclusionPolicyIgnore, core.NodeInclusionPolicyHonor)
)
// validateNodeInclusionPolicy tests that the argument is a valid NodeInclusionPolicy.
func validateNodeInclusionPolicy(fldPath *field.Path, policy *core.NodeInclusionPolicy) *field.Error {
if policy == nil {
return nil
}
if !supportedPodTopologySpreadNodePolicies.Has(*policy) {
return field.NotSupported(fldPath, policy, sets.List(supportedPodTopologySpreadNodePolicies))
}
return nil
}
// ValidateMatchLabelKeysAndMismatchLabelKeys checks if both matchLabelKeys and mismatchLabelKeys are valid.
// - validate that all matchLabelKeys and mismatchLabelKeys are valid label names.
// - validate that the user doesn't specify the same key in both matchLabelKeys and labelSelector.
// - validate that any matchLabelKeys are not duplicated with mismatchLabelKeys.
func ValidateMatchLabelKeysAndMismatchLabelKeys(fldPath *field.Path, matchLabelKeys, mismatchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList {
var allErrs field.ErrorList
// 1. validate that all matchLabelKeys and mismatchLabelKeys are valid label names.
allErrs = append(allErrs, validateLabelKeys(fldPath.Child("matchLabelKeys"), matchLabelKeys, labelSelector)...)
allErrs = append(allErrs, validateLabelKeys(fldPath.Child("mismatchLabelKeys"), mismatchLabelKeys, labelSelector)...)
// 2. validate that the user doesn't specify the same key in both matchLabelKeys and labelSelector.
// It doesn't make sense to have the labelselector with the key specified in matchLabelKeys
// because the matchLabelKeys will be `In` labelSelector which matches with only one value in the key
// and we cannot make any further filtering with that key.
// On the other hand, we may want to have labelSelector with the key specified in mismatchLabelKeys.
// because the mismatchLabelKeys will be `NotIn` labelSelector
// and we may want to filter Pods further with other labelSelector with that key.
// labelKeysMap is keyed by label key and valued by the index of label key in labelKeys.
if labelSelector != nil {
labelKeysMap := map[string]int{}
for i, key := range matchLabelKeys {
labelKeysMap[key] = i
}
labelSelectorKeys := sets.New[string]()
for key := range labelSelector.MatchLabels {
labelSelectorKeys.Insert(key)
}
for _, matchExpression := range labelSelector.MatchExpressions {
key := matchExpression.Key
if i, ok := labelKeysMap[key]; ok && labelSelectorKeys.Has(key) {
// Before this validation runs, the labelSelector has already had the selector created from matchLabelKeys merged into it.
// Here, we found a duplicate key: it appears in the labelSelector and is also specified in labelKeys,
// meaning that the same key is specified in both labelSelector and matchLabelKeys/mismatchLabelKeys.
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), key, "exists in both matchLabelKeys and labelSelector").WithOrigin("duplicatedLabelKeys"))
}
labelSelectorKeys.Insert(key)
}
}
// 3. validate that any matchLabelKeys are not duplicated with mismatchLabelKeys.
mismatchLabelKeysSet := sets.New(mismatchLabelKeys...)
for i, k := range matchLabelKeys {
if mismatchLabelKeysSet.Has(k) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("matchLabelKeys").Index(i), k, "exists in both matchLabelKeys and mismatchLabelKeys").WithOrigin("duplicatedMismatchLabelKeys"))
}
}
return allErrs
}
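// Illustrative inputs (an editorial sketch; the keys are hypothetical, and the second
// example assumes, as the code above expects, that matchLabelKeys have already been
// merged into the labelSelector):
//
//	matchLabelKeys ["app"] with mismatchLabelKeys ["app"]                // error: key exists in both lists
//	matchLabelKeys ["app"] with labelSelector matchLabels {"app": "web"} // error: key exists in both matchLabelKeys and labelSelector
//	matchLabelKeys ["app"] with mismatchLabelKeys ["tenant"]             // ok: disjoint keys
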
// ValidateMatchLabelKeysInTopologySpread tests that the elements are valid label names and are not already included in the labelSelector.
func ValidateMatchLabelKeysInTopologySpread(fldPath *field.Path, matchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList {
if len(matchLabelKeys) == 0 {
return nil
}
var allErrs field.ErrorList
labelSelectorKeys := sets.Set[string]{}
if labelSelector != nil {
for key := range labelSelector.MatchLabels {
labelSelectorKeys.Insert(key)
}
for _, matchExpression := range labelSelector.MatchExpressions {
labelSelectorKeys.Insert(matchExpression.Key)
}
} else {
allErrs = append(allErrs, field.Forbidden(fldPath, "must not be specified when labelSelector is not set"))
}
for i, key := range matchLabelKeys {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...)
if labelSelectorKeys.Has(key) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), key, "exists in both matchLabelKeys and labelSelector").WithOrigin("duplicatedLabelKeys"))
}
}
return allErrs
}
// validateLabelKeys tests that the label keys are valid label names.
// It's intended to be used for matchLabelKeys or mismatchLabelKeys.
func validateLabelKeys(fldPath *field.Path, labelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList {
if len(labelKeys) == 0 {
return nil
}
if labelSelector == nil {
return field.ErrorList{field.Forbidden(fldPath, "must not be specified when labelSelector is not set")}
}
var allErrs field.ErrorList
for i, key := range labelKeys {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...)
}
return allErrs
}
// ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,
// .spec.IPFamilies, and .spec.IPFamilyPolicy. This is exported because it is used
// during IP init and allocation.
func ValidateServiceClusterIPsRelatedFields(service, oldService *core.Service) field.ErrorList {
// ClusterIP, ClusterIPs, IPFamilyPolicy and IPFamilies are validated prior (all must be unset) for ExternalName service
if service.Spec.Type == core.ServiceTypeExternalName {
return field.ErrorList{}
}
allErrs := field.ErrorList{}
hasInvalidIPs := false
specPath := field.NewPath("spec")
clusterIPsField := specPath.Child("clusterIPs")
ipFamiliesField := specPath.Child("ipFamilies")
ipFamilyPolicyField := specPath.Child("ipFamilyPolicy")
// Make sure ClusterIP and ClusterIPs are synced. For most cases users can
// just manage one or the other and we'll handle the rest (see PrepareFor*
// in strategy).
if len(service.Spec.ClusterIP) != 0 {
// If ClusterIP is set, ClusterIPs[0] must match.
if len(service.Spec.ClusterIPs) == 0 {
allErrs = append(allErrs, field.Required(clusterIPsField, ""))
} else if service.Spec.ClusterIPs[0] != service.Spec.ClusterIP {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "first value must match `clusterIP`"))
}
} else { // ClusterIP == ""
// If ClusterIP is not set, ClusterIPs must also be unset.
if len(service.Spec.ClusterIPs) != 0 {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "must be empty when `clusterIP` is not specified"))
}
}
// ipFamilies stand-alone validation
// each entry must be either IPv4 or IPv6
seen := sets.Set[core.IPFamily]{}
for i, ipFamily := range service.Spec.IPFamilies {
if !supportedServiceIPFamily.Has(ipFamily) {
allErrs = append(allErrs, field.NotSupported(ipFamiliesField.Index(i), ipFamily, sets.List(supportedServiceIPFamily)))
}
// the no-duplicate check also ensures that ipFamilies is dual-stack (at most one entry per family), in any order
if seen.Has(ipFamily) {
allErrs = append(allErrs, field.Duplicate(ipFamiliesField.Index(i), ipFamily))
}
seen.Insert(ipFamily)
}
// ipFamilyPolicy stand-alone validation
// note: nil is ok, defaulted in alloc check registry/core/service/*
if service.Spec.IPFamilyPolicy != nil {
// must have a supported value
if !supportedServiceIPFamilyPolicy.Has(*(service.Spec.IPFamilyPolicy)) {
allErrs = append(allErrs, field.NotSupported(ipFamilyPolicyField, service.Spec.IPFamilyPolicy, sets.List(supportedServiceIPFamilyPolicy)))
}
}
var existingClusterIPs []string
if oldService != nil {
existingClusterIPs = oldService.Spec.ClusterIPs // +k8s:verify-mutation:reason=clone
}
// clusterIPs stand-alone validation:
// valid IPs, with None and empty-string handling;
// the duplicate check is done as part of the dual-stack validation below
for i, clusterIP := range service.Spec.ClusterIPs {
// 'None' is valid at the first location only, and only if it is the sole value (len(clusterIPs) == 1)
if i == 0 && clusterIP == core.ClusterIPNone {
if len(service.Spec.ClusterIPs) > 1 {
hasInvalidIPs = true
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "'None' must be the first and only value"))
}
continue
}
// is it a valid ip? (or was it at least previously considered valid?)
errorMessages := IsValidIPForLegacyField(clusterIPsField.Index(i), clusterIP, existingClusterIPs)
hasInvalidIPs = (len(errorMessages) != 0) || hasInvalidIPs
allErrs = append(allErrs, errorMessages...)
}
// max two
if len(service.Spec.ClusterIPs) > 2 {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "may only hold up to 2 values"))
}
// at this stage, an invalid IP or a misplaced None/empty string would skew the
// remaining error messages (bad indexes, dual-stack checks on already-bad IPs),
// so we stop here if clusterIPs validation produced errors
if hasInvalidIPs {
return allErrs
}
// must be dual-stack IPs if there is more than one IP
if len(service.Spec.ClusterIPs) > 1 /* meaning: it does not have a None or empty string */ {
dualStack, err := netutils.IsDualStackIPStrings(service.Spec.ClusterIPs)
if err != nil { // though we check for that earlier. safe > sorry
allErrs = append(allErrs, field.InternalError(clusterIPsField, fmt.Errorf("failed to check for dual stack with error: %v", err)))
}
// We only support one from each IP family (i.e. max two IPs in this list).
if !dualStack {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "may specify no more than one IP for each IP family"))
}
}
// match clusterIPs to their families, if they were provided
if !isHeadlessService(service) && len(service.Spec.ClusterIPs) > 0 && len(service.Spec.IPFamilies) > 0 {
for i, ip := range service.Spec.ClusterIPs {
if i > (len(service.Spec.IPFamilies) - 1) {
break // no more families to check
}
// 4=>6
if service.Spec.IPFamilies[i] == core.IPv4Protocol && netutils.IsIPv6String(ip) {
allErrs = append(allErrs, field.Invalid(clusterIPsField.Index(i), ip, fmt.Sprintf("expected an IPv4 value as indicated by `ipFamilies[%v]`", i)))
}
// 6=>4
if service.Spec.IPFamilies[i] == core.IPv6Protocol && !netutils.IsIPv6String(ip) {
allErrs = append(allErrs, field.Invalid(clusterIPsField.Index(i), ip, fmt.Sprintf("expected an IPv6 value as indicated by `ipFamilies[%v]`", i)))
}
}
}
return allErrs
}
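// Illustrative clusterIPs values (an editorial sketch; the addresses are hypothetical):
//
//	["10.0.0.10"]                         // ok: single stack
//	["10.0.0.10", "fd00::10"]             // ok: dual stack, one IP per family
//	["10.0.0.10", "10.0.0.11"]            // error: at most one IP per family
//	["None", "10.0.0.10"]                 // error: 'None' must be the first and only value
//	["10.0.0.10", "fd00::10", "fd00::11"] // error: may only hold up to 2 values
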
// specific validation for clusterIPs in cases of user upgrading or downgrading to/from dualstack
func validateUpgradeDowngradeClusterIPs(oldService, service *core.Service) field.ErrorList {
allErrs := make(field.ErrorList, 0)
// bail out early for ExternalName
if service.Spec.Type == core.ServiceTypeExternalName || oldService.Spec.Type == core.ServiceTypeExternalName {
return allErrs
}
newIsHeadless := isHeadlessService(service)
oldIsHeadless := isHeadlessService(oldService)
if oldIsHeadless && newIsHeadless {
return allErrs
}
switch {
// no change in ClusterIP lengths
// compare each
case len(oldService.Spec.ClusterIPs) == len(service.Spec.ClusterIPs):
for i, ip := range oldService.Spec.ClusterIPs {
if ip != service.Spec.ClusterIPs[i] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(i), service.Spec.ClusterIPs, "may not change once set"))
}
}
// something has been released (downgraded)
case len(oldService.Spec.ClusterIPs) > len(service.Spec.ClusterIPs):
// primary ClusterIP has been released
if len(service.Spec.ClusterIPs) == 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "primary clusterIP can not be unset"))
}
// test if primary clusterIP has changed
if len(oldService.Spec.ClusterIPs) > 0 &&
len(service.Spec.ClusterIPs) > 0 &&
service.Spec.ClusterIPs[0] != oldService.Spec.ClusterIPs[0] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "may not change once set"))
}
// test if secondary ClusterIP has been released. has this service been downgraded correctly?
// user *must* set IPFamilyPolicy == SingleStack
if len(service.Spec.ClusterIPs) == 1 {
if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be set to 'SingleStack' when releasing the secondary clusterIP"))
}
}
case len(oldService.Spec.ClusterIPs) < len(service.Spec.ClusterIPs):
// something has been added (upgraded)
// test if primary clusterIP has changed
if len(oldService.Spec.ClusterIPs) > 0 &&
service.Spec.ClusterIPs[0] != oldService.Spec.ClusterIPs[0] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "may not change once set"))
}
// we don't check for Policy == RequireDualStack here since the Validation/Creation funcs take care of it
}
return allErrs
}
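// Illustrative downgrade (an editorial sketch; the addresses are hypothetical):
//
//	old clusterIPs ["10.0.0.10", "fd00::10"], new clusterIPs ["10.0.0.10"]
//	  -> accepted only if the new ipFamilyPolicy is "SingleStack"
//	old clusterIPs ["10.0.0.10", "fd00::10"], new clusterIPs []
//	  -> error: the primary clusterIP can not be unset
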
// specific validation for ipFamilies in cases of user upgrading or downgrading to/from dualstack
func validateUpgradeDowngradeIPFamilies(oldService, service *core.Service) field.ErrorList {
allErrs := make(field.ErrorList, 0)
// bail out early for ExternalName
if service.Spec.Type == core.ServiceTypeExternalName || oldService.Spec.Type == core.ServiceTypeExternalName {
return allErrs
}
oldIsHeadless := isHeadlessService(oldService)
newIsHeadless := isHeadlessService(service)
// if changed to/from headless, then bail out
if newIsHeadless != oldIsHeadless {
return allErrs
}
// headless can change families
if newIsHeadless {
return allErrs
}
switch {
case len(oldService.Spec.IPFamilies) == len(service.Spec.IPFamilies):
// no change in IPFamilies lengths
// compare each
for i, ipFamily := range oldService.Spec.IPFamilies {
if ipFamily != service.Spec.IPFamilies[i] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(i), service.Spec.IPFamilies, "may not change once set"))
}
}
case len(oldService.Spec.IPFamilies) > len(service.Spec.IPFamilies):
// something has been released (downgraded)
// test if primary ipfamily has been released
if len(service.Spec.ClusterIPs) == 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.IPFamilies, "primary ipFamily can not be unset"))
}
// test if primary ipFamily has changed
if len(service.Spec.IPFamilies) > 0 &&
service.Spec.IPFamilies[0] != oldService.Spec.IPFamilies[0] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.ClusterIPs, "may not change once set"))
}
// test if secondary IPFamily has been released. has this service been downgraded correctly?
// user *must* set IPFamilyPolicy == SingleStack
if len(service.Spec.IPFamilies) == 1 {
if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be set to 'SingleStack' when releasing the secondary ipFamily"))
}
}
case len(oldService.Spec.IPFamilies) < len(service.Spec.IPFamilies):
// something has been added (upgraded)
// test if primary ipFamily has changed
if len(oldService.Spec.IPFamilies) > 0 &&
len(service.Spec.IPFamilies) > 0 &&
service.Spec.IPFamilies[0] != oldService.Spec.IPFamilies[0] {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilies").Index(0), service.Spec.ClusterIPs, "may not change once set"))
}
// we don't check for Policy == RequireDualStack here since the Validation/Creation funcs take care of it
}
return allErrs
}
func isHeadlessService(service *core.Service) bool {
return service != nil &&
len(service.Spec.ClusterIPs) == 1 &&
service.Spec.ClusterIPs[0] == core.ClusterIPNone
}
// validateLoadBalancerClassField validates the loadBalancerClass field
func validateLoadBalancerClassField(oldService, service *core.Service) field.ErrorList {
allErrs := make(field.ErrorList, 0)
if oldService != nil {
// validate update op
if isTypeLoadBalancer(oldService) && isTypeLoadBalancer(service) {
// old and new are both LoadBalancer
if !sameLoadBalancerClass(oldService, service) {
// can't change loadBalancerClass
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "loadBalancerClass"), service.Spec.LoadBalancerClass, "may not change once set"))
}
}
}
if isTypeLoadBalancer(service) {
// check LoadBalancerClass format
if service.Spec.LoadBalancerClass != nil {
allErrs = append(allErrs, ValidateQualifiedName(*service.Spec.LoadBalancerClass, field.NewPath("spec", "loadBalancerClass"))...)
}
} else {
// check if LoadBalancerClass set for non LoadBalancer type of service
if service.Spec.LoadBalancerClass != nil {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "loadBalancerClass"), "may only be used when `type` is 'LoadBalancer'"))
}
}
return allErrs
}
// isTypeLoadBalancer reports whether the service type is LoadBalancer
func isTypeLoadBalancer(service *core.Service) bool {
return service.Spec.Type == core.ServiceTypeLoadBalancer
}
// sameLoadBalancerClass reports whether two services have the same loadBalancerClass
func sameLoadBalancerClass(oldService, service *core.Service) bool {
if oldService.Spec.LoadBalancerClass == nil && service.Spec.LoadBalancerClass == nil {
return true
}
if oldService.Spec.LoadBalancerClass == nil || service.Spec.LoadBalancerClass == nil {
return false
}
return *oldService.Spec.LoadBalancerClass == *service.Spec.LoadBalancerClass
}
func ValidatePodAffinityTermSelector(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
labelSelectorValidationOptions := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: allowInvalidLabelValueInSelector}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, labelSelectorValidationOptions, fldPath.Child("labelSelector"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, labelSelectorValidationOptions, fldPath.Child("namespaceSelector"))...)
return allErrs
}
var betaToGALabel = map[string]string{
v1.LabelFailureDomainBetaZone: v1.LabelTopologyZone,
v1.LabelFailureDomainBetaRegion: v1.LabelTopologyRegion,
kubeletapis.LabelOS: v1.LabelOSStable,
kubeletapis.LabelArch: v1.LabelArchStable,
v1.LabelInstanceType: v1.LabelInstanceTypeStable,
}
var (
maskNodeSelectorLabelChangeEqualities conversion.Equalities
initMaskNodeSelectorLabelChangeEqualities sync.Once
)
func getMaskNodeSelectorLabelChangeEqualities() conversion.Equalities {
initMaskNodeSelectorLabelChangeEqualities.Do(func() {
var eqs = apiequality.Semantic.Copy()
err := eqs.AddFunc(
func(newReq, oldReq core.NodeSelectorRequirement) bool {
// allow newReq to change to a GA key
if oldReq.Key != newReq.Key && betaToGALabel[oldReq.Key] == newReq.Key {
oldReq.Key = newReq.Key // +k8s:verify-mutation:reason=clone
}
return apiequality.Semantic.DeepEqual(newReq, oldReq)
},
)
if err != nil {
panic(fmt.Errorf("failed to instantiate semantic equalities: %w", err))
}
maskNodeSelectorLabelChangeEqualities = eqs
})
return maskNodeSelectorLabelChangeEqualities
}
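// Illustrative masking (an editorial sketch; the requirement below is hypothetical):
//
//	old: {Key: "failure-domain.beta.kubernetes.io/zone", Operator: In, Values: ["us-east-1a"]}
//	new: {Key: "topology.kubernetes.io/zone",            Operator: In, Values: ["us-east-1a"]}
//	  -> treated as equal: only the key moved from the beta label to its GA equivalent
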
func validatePvNodeAffinity(newPvNodeAffinity, oldPvNodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if !getMaskNodeSelectorLabelChangeEqualities().DeepEqual(newPvNodeAffinity, oldPvNodeAffinity) {
allErrs = append(allErrs, field.Invalid(fldPath, newPvNodeAffinity, fieldImmutableErrorMsg+", except for updating from beta label to GA"))
}
return allErrs
}
func validateNodeSelectorMutation(fldPath *field.Path, newNodeSelector, oldNodeSelector map[string]string) field.ErrorList {
var allErrs field.ErrorList
// Validate no existing node selectors were deleted or mutated.
for k, v1 := range oldNodeSelector {
if v2, ok := newNodeSelector[k]; !ok || v1 != v2 {
allErrs = append(allErrs, field.Invalid(fldPath, newNodeSelector, "only additions to spec.nodeSelector are allowed (no mutations or deletions)"))
return allErrs
}
}
return allErrs
}
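// Illustrative nodeSelector updates (an editorial sketch; the labels are hypothetical):
//
//	old {"disk": "ssd"}, new {"disk": "ssd", "gpu": "true"} // ok: pure addition
//	old {"disk": "ssd"}, new {"disk": "hdd"}                // error: mutation
//	old {"disk": "ssd"}, new {}                             // error: deletion
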
func validateNodeAffinityMutation(nodeAffinityPath *field.Path, newNodeAffinity, oldNodeAffinity *core.NodeAffinity) field.ErrorList {
var allErrs field.ErrorList
// If old node affinity was nil, anything can be set.
if oldNodeAffinity == nil || oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return allErrs
}
oldTerms := oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
var newTerms []core.NodeSelectorTerm
if newNodeAffinity != nil && newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
newTerms = newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
}
// If there are no old terms, we can set the new terms to anything.
// If there are old terms, the number of terms may not change (no additions or deletions).
if len(oldTerms) > 0 && len(oldTerms) != len(newTerms) {
return append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms"), newTerms, "no additions/deletions to non-empty NodeSelectorTerms list are allowed"))
}
// For requiredDuringSchedulingIgnoredDuringExecution, if old NodeSelectorTerms
// was empty, anything can be set. If non-empty, only additions of NodeSelectorRequirements
// to matchExpressions or fieldExpressions are allowed.
for i := range oldTerms {
if !validateNodeSelectorTermHasOnlyAdditions(newTerms[i], oldTerms[i]) {
allErrs = append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms").Index(i), newTerms[i], "only additions are allowed (no mutations or deletions)"))
}
}
return allErrs
}
func validateNodeSelectorTermHasOnlyAdditions(newTerm, oldTerm core.NodeSelectorTerm) bool {
if len(oldTerm.MatchExpressions) == 0 && len(oldTerm.MatchFields) == 0 {
if len(newTerm.MatchExpressions) > 0 || len(newTerm.MatchFields) > 0 {
return false
}
}
// Validate MatchExpressions only has additions (no deletions or mutations)
if l := len(oldTerm.MatchExpressions); l > 0 {
if len(newTerm.MatchExpressions) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchExpressions[:l], oldTerm.MatchExpressions) {
return false
}
}
// Validate MatchFields only has additions (no deletions or mutations)
if l := len(oldTerm.MatchFields); l > 0 {
if len(newTerm.MatchFields) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchFields[:l], oldTerm.MatchFields) {
return false
}
}
return true
}
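// Illustrative term updates (an editorial sketch; A and B stand for hypothetical
// NodeSelectorRequirements):
//
//	old matchExpressions [A],    new [A, B] // true: pure addition
//	old matchExpressions [A],    new [B]    // false: mutation of an existing requirement
//	old matchExpressions [A, B], new [A]    // false: deletion
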
var validSupplementalGroupsPolicies = sets.New(core.SupplementalGroupsPolicyMerge, core.SupplementalGroupsPolicyStrict)
func validateSupplementalGroupsPolicy(supplementalGroupsPolicy *core.SupplementalGroupsPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if !validSupplementalGroupsPolicies.Has(*supplementalGroupsPolicy) {
allErrors = append(allErrors, field.NotSupported(fldPath, supplementalGroupsPolicy, sets.List(validSupplementalGroupsPolicies)))
}
return allErrors
}
func validateContainerStatusUsers(containerStatuses []core.ContainerStatus, fldPath *field.Path, podOS *core.PodOS) field.ErrorList {
allErrors := field.ErrorList{}
osName := core.Linux
if podOS != nil {
osName = podOS.Name
}
for i, containerStatus := range containerStatuses {
if containerStatus.User == nil {
continue
}
containerUser := containerStatus.User
switch osName {
case core.Windows:
if containerUser.Linux != nil {
allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("user").Child("linux"), "cannot be set for a windows pod"))
}
case core.Linux:
allErrors = append(allErrors, validateLinuxContainerUser(containerUser.Linux, fldPath.Index(i).Child("user").Child("linux"))...)
}
}
return allErrors
}
func validateContainerStatusNoAllocatedResourcesStatus(containerStatuses []core.ContainerStatus, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, containerStatus := range containerStatuses {
if len(containerStatus.AllocatedResourcesStatus) == 0 {
continue
}
allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("allocatedResourcesStatus"), "must not be specified in container status"))
}
return allErrors
}
// validateContainerStatusAllocatedResourcesStatus iterates over the allocated resources' health and validates:
// - resourceName matches one of resources in container's resource requirements
// - resourceID is not empty and unique
func validateContainerStatusAllocatedResourcesStatus(containerStatuses []core.ContainerStatus, fldPath *field.Path, containers []core.Container) field.ErrorList {
allErrors := field.ErrorList{}
for i, containerStatus := range containerStatuses {
if containerStatus.AllocatedResourcesStatus == nil {
continue
}
allocatedResources := containerStatus.AllocatedResourcesStatus
for j, allocatedResource := range allocatedResources {
var container core.Container
containerFound := false
// get container by name
for _, c := range containers {
if c.Name == containerStatus.Name {
containerFound = true
container = c
break
}
}
// ignore missing container, see https://github.com/kubernetes/kubernetes/issues/124915
if containerFound {
found := false
var errorStr string
if strings.HasPrefix(string(allocatedResource.Name), "claim:") {
// assume it is a claim name
errorStr = "must match one of the container's resource claims in a format 'claim:<claimName>/<request>' or 'claim:<claimName>' if request is empty"
for _, c := range container.Resources.Claims {
name := "claim:" + c.Name
if c.Request != "" {
name += "/" + c.Request
}
if name == string(allocatedResource.Name) {
found = true
break
}
}
} else {
// assume it is a resource name
errorStr = "must match one of the container's resource requests"
for resourceName := range container.Resources.Requests {
if resourceName == allocatedResource.Name {
found = true
break
}
}
}
if !found {
allErrors = append(allErrors, field.Invalid(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("name"), allocatedResource.Name, errorStr))
}
}
uniqueResources := sets.New[core.ResourceID]()
// check resource IDs are unique
for k, r := range allocatedResource.Resources {
var supportedResourceHealthValues = sets.New(
core.ResourceHealthStatusHealthy,
core.ResourceHealthStatusUnhealthy,
core.ResourceHealthStatusUnknown)
if !supportedResourceHealthValues.Has(r.Health) {
allErrors = append(allErrors, field.NotSupported(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("resources").Index(k).Child("health"), r.Health, sets.List(supportedResourceHealthValues)))
}
if uniqueResources.Has(r.ResourceID) {
allErrors = append(allErrors, field.Duplicate(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("resources").Index(k).Child("resourceID"), r.ResourceID))
} else {
uniqueResources.Insert(r.ResourceID)
}
}
}
}
return allErrors
}
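// Illustrative allocated resource names (an editorial sketch; the names are hypothetical):
//
//	"example.com/gpu"       // ok if the container requests that resource
//	"claim:gpu-claim/req0"  // ok if the container has a resource claim "gpu-claim" with request "req0"
//	"claim:missing-claim"   // error: must match one of the container's resource claims
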
func validateLinuxContainerUser(linuxContainerUser *core.LinuxContainerUser, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if linuxContainerUser == nil {
return allErrors
}
for _, msg := range validation.IsValidUserID(linuxContainerUser.UID) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("uid"), linuxContainerUser.UID, msg))
}
for _, msg := range validation.IsValidGroupID(linuxContainerUser.GID) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("gid"), linuxContainerUser.GID, msg))
}
for g, gid := range linuxContainerUser.SupplementalGroups {
for _, msg := range validation.IsValidGroupID(gid) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
}
}
return allErrors
}
func validateImageVolumeSource(imageVolume *core.ImageVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if opts.ResourceIsPod && len(imageVolume.Reference) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("reference"), ""))
}
allErrs = append(allErrs, validatePullPolicy(imageVolume.PullPolicy, fldPath.Child("pullPolicy"))...)
return allErrs
}
// isRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways.
func isRestartableInitContainer(initContainer *core.Container) bool {
if initContainer == nil || initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
}
// IsValidIPForLegacyField is a wrapper around validation.IsValidIPForLegacyField that
// handles setting strictValidation correctly. This is only for fields that use legacy IP
// address validation; use validation.IsValidIP for new fields.
func IsValidIPForLegacyField(fldPath *field.Path, value string, validOldIPs []string) field.ErrorList {
return validation.IsValidIPForLegacyField(fldPath, value, utilfeature.DefaultFeatureGate.Enabled(features.StrictIPCIDRValidation), validOldIPs)
}
// IsValidCIDRForLegacyField is a wrapper around validation.IsValidCIDRForLegacyField that
// handles setting strictValidation correctly. This is only for fields that use legacy CIDR
// value validation; use validation.IsValidCIDR for new fields.
func IsValidCIDRForLegacyField(fldPath *field.Path, value string, validOldCIDRs []string) field.ErrorList {
return validation.IsValidCIDRForLegacyField(fldPath, value, utilfeature.DefaultFeatureGate.Enabled(features.StrictIPCIDRValidation), validOldCIDRs)
}
func validateNodeSwapStatus(nodeSwapStatus *core.NodeSwapStatus, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if nodeSwapStatus == nil {
return allErrors
}
if nodeSwapStatus.Capacity != nil {
capacityFld := fldPath.Child("capacity")
errs := ValidatePositiveField(*nodeSwapStatus.Capacity, capacityFld)
if len(errs) > 0 {
allErrors = append(allErrors, errs...)
}
}
return allErrors
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package core
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource.
func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource {
if in == nil {
return nil
}
out := new(AWSElasticBlockStoreVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Affinity) DeepCopyInto(out *Affinity) {
*out = *in
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
*out = new(NodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.PodAffinity != nil {
in, out := &in.PodAffinity, &out.PodAffinity
*out = new(PodAffinity)
(*in).DeepCopyInto(*out)
}
if in.PodAntiAffinity != nil {
in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
*out = new(PodAntiAffinity)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity.
func (in *Affinity) DeepCopy() *Affinity {
if in == nil {
return nil
}
out := new(Affinity)
in.DeepCopyInto(out)
return out
}
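// exampleAffinityDeepCopy is a hypothetical illustration (not generated code):
// pointer fields are re-allocated by DeepCopyInto, so mutating the copy leaves
// the original untouched.
func exampleAffinityDeepCopy() bool {
	in := &Affinity{NodeAffinity: &NodeAffinity{}}
	out := in.DeepCopy()
	out.NodeAffinity = nil
	return in.NodeAffinity != nil // true: the original keeps its own pointer
}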
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppArmorProfile) DeepCopyInto(out *AppArmorProfile) {
*out = *in
if in.LocalhostProfile != nil {
in, out := &in.LocalhostProfile, &out.LocalhostProfile
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppArmorProfile.
func (in *AppArmorProfile) DeepCopy() *AppArmorProfile {
if in == nil {
return nil
}
out := new(AppArmorProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume.
func (in *AttachedVolume) DeepCopy() *AttachedVolume {
if in == nil {
return nil
}
out := new(AttachedVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AvoidPods) DeepCopyInto(out *AvoidPods) {
*out = *in
if in.PreferAvoidPods != nil {
in, out := &in.PreferAvoidPods, &out.PreferAvoidPods
*out = make([]PreferAvoidPodsEntry, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods.
func (in *AvoidPods) DeepCopy() *AvoidPods {
if in == nil {
return nil
}
out := new(AvoidPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) {
*out = *in
if in.CachingMode != nil {
in, out := &in.CachingMode, &out.CachingMode
*out = new(AzureDataDiskCachingMode)
**out = **in
}
if in.FSType != nil {
in, out := &in.FSType, &out.FSType
*out = new(string)
**out = **in
}
if in.ReadOnly != nil {
in, out := &in.ReadOnly, &out.ReadOnly
*out = new(bool)
**out = **in
}
if in.Kind != nil {
in, out := &in.Kind, &out.Kind
*out = new(AzureDataDiskKind)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource.
func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource {
if in == nil {
return nil
}
out := new(AzureDiskVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) {
*out = *in
if in.SecretNamespace != nil {
in, out := &in.SecretNamespace, &out.SecretNamespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource.
func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource {
if in == nil {
return nil
}
out := new(AzureFilePersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource.
func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource {
if in == nil {
return nil
}
out := new(AzureFileVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Binding) DeepCopyInto(out *Binding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Target = in.Target
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding.
func (in *Binding) DeepCopy() *Binding {
if in == nil {
return nil
}
out := new(Binding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Binding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
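// exampleDeepCopyObject is a hypothetical sketch: DeepCopyObject lets callers
// clone through the runtime.Object interface without knowing the concrete
// type, which is how client caches and informers avoid sharing mutable state.
func exampleDeepCopyObject(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}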
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
*out = *in
if in.VolumeAttributes != nil {
in, out := &in.VolumeAttributes, &out.VolumeAttributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ControllerPublishSecretRef != nil {
in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
*out = new(SecretReference)
**out = **in
}
if in.NodeStageSecretRef != nil {
in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
*out = new(SecretReference)
**out = **in
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
*out = new(SecretReference)
**out = **in
}
if in.ControllerExpandSecretRef != nil {
in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef
*out = new(SecretReference)
**out = **in
}
if in.NodeExpandSecretRef != nil {
in, out := &in.NodeExpandSecretRef, &out.NodeExpandSecretRef
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource.
func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource {
if in == nil {
return nil
}
out := new(CSIPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) {
*out = *in
if in.ReadOnly != nil {
in, out := &in.ReadOnly, &out.ReadOnly
*out = new(bool)
**out = **in
}
if in.FSType != nil {
in, out := &in.FSType, &out.FSType
*out = new(string)
**out = **in
}
if in.VolumeAttributes != nil {
in, out := &in.VolumeAttributes, &out.VolumeAttributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NodePublishSecretRef != nil {
in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource.
func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource {
if in == nil {
return nil
}
out := new(CSIVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Capabilities) DeepCopyInto(out *Capabilities) {
*out = *in
if in.Add != nil {
in, out := &in.Add, &out.Add
*out = make([]Capability, len(*in))
copy(*out, *in)
}
if in.Drop != nil {
in, out := &in.Drop, &out.Drop
*out = make([]Capability, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities.
func (in *Capabilities) DeepCopy() *Capabilities {
if in == nil {
return nil
}
out := new(Capabilities)
in.DeepCopyInto(out)
return out
}
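// Note on the generated patterns: slices of plain values (such as
// []Capability above) can be duplicated with the built-in copy, while slices
// whose elements contain pointers or maps (such as []PreferAvoidPodsEntry
// earlier) must be copied element-by-element via DeepCopyInto.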
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) {
*out = *in
if in.Monitors != nil {
in, out := &in.Monitors, &out.Monitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource.
func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource {
if in == nil {
return nil
}
out := new(CephFSPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) {
*out = *in
if in.Monitors != nil {
in, out := &in.Monitors, &out.Monitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource.
func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource {
if in == nil {
return nil
}
out := new(CephFSVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource.
func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource {
if in == nil {
return nil
}
out := new(CinderPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource.
func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
if in == nil {
return nil
}
out := new(CinderVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
*out = *in
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig.
func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
if in == nil {
return nil
}
out := new(ClientIPConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) {
*out = *in
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.SignerName != nil {
in, out := &in.SignerName, &out.SignerName
*out = new(string)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection.
func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection {
if in == nil {
return nil
}
out := new(ClusterTrustBundleProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition.
func (in *ComponentCondition) DeepCopy() *ComponentCondition {
if in == nil {
return nil
}
out := new(ComponentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ComponentCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus.
func (in *ComponentStatus) DeepCopy() *ComponentStatus {
if in == nil {
return nil
}
out := new(ComponentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ComponentStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ComponentStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList.
func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
if in == nil {
return nil
}
out := new(ComponentStatusList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Immutable != nil {
in, out := &in.Immutable, &out.Immutable
*out = new(bool)
**out = **in
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.BinaryData != nil {
in, out := &in.BinaryData, &out.BinaryData
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
var outVal []byte
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]byte, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap.
func (in *ConfigMap) DeepCopy() *ConfigMap {
if in == nil {
return nil
}
out := new(ConfigMap)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConfigMap) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
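// exampleConfigMapDeepCopy is a hypothetical illustration (not generated
// code): the BinaryData loop above preserves the nil/empty distinction for
// map values, so a nil []byte stays nil in the copy.
func exampleConfigMapDeepCopy() bool {
	in := &ConfigMap{BinaryData: map[string][]byte{"empty": nil}}
	out := in.DeepCopy()
	return out.BinaryData["empty"] == nil // true
}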
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource.
func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource {
if in == nil {
return nil
}
out := new(ConfigMapEnvSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector.
func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector {
if in == nil {
return nil
}
out := new(ConfigMapKeySelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ConfigMap, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList.
func (in *ConfigMapList) DeepCopy() *ConfigMapList {
if in == nil {
return nil
}
out := new(ConfigMapList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConfigMapList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource.
func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource {
if in == nil {
return nil
}
out := new(ConfigMapNodeConfigSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KeyToPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection.
func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection {
if in == nil {
return nil
}
out := new(ConfigMapProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KeyToPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
*out = new(int32)
**out = **in
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource.
func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource {
if in == nil {
return nil
}
out := new(ConfigMapVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Container) DeepCopyInto(out *Container) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]ContainerPort, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.RestartPolicy != nil {
in, out := &in.RestartPolicy, &out.RestartPolicy
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.RestartPolicyRules != nil {
in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
*out = make([]ContainerRestartRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]VolumeDevice, len(*in))
copy(*out, *in)
}
if in.LivenessProbe != nil {
in, out := &in.LivenessProbe, &out.LivenessProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.ReadinessProbe != nil {
in, out := &in.ReadinessProbe, &out.ReadinessProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.StartupProbe != nil {
in, out := &in.StartupProbe, &out.StartupProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(SecurityContext)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
func (in *Container) DeepCopy() *Container {
if in == nil {
return nil
}
out := new(Container)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest.
func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest {
if in == nil {
return nil
}
out := new(ContainerExtendedResourceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
*out = *in
if in.Names != nil {
in, out := &in.Names, &out.Names
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage.
func (in *ContainerImage) DeepCopy() *ContainerImage {
if in == nil {
return nil
}
out := new(ContainerImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerPort) DeepCopyInto(out *ContainerPort) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort.
func (in *ContainerPort) DeepCopy() *ContainerPort {
if in == nil {
return nil
}
out := new(ContainerPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy.
func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
if in == nil {
return nil
}
out := new(ContainerResizePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) {
*out = *in
if in.ExitCodes != nil {
in, out := &in.ExitCodes, &out.ExitCodes
*out = new(ContainerRestartRuleOnExitCodes)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule.
func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule {
if in == nil {
return nil
}
out := new(ContainerRestartRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]int32, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes.
func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes {
if in == nil {
return nil
}
out := new(ContainerRestartRuleOnExitCodes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerState) DeepCopyInto(out *ContainerState) {
*out = *in
if in.Waiting != nil {
in, out := &in.Waiting, &out.Waiting
*out = new(ContainerStateWaiting)
**out = **in
}
if in.Running != nil {
in, out := &in.Running, &out.Running
*out = new(ContainerStateRunning)
(*in).DeepCopyInto(*out)
}
if in.Terminated != nil {
in, out := &in.Terminated, &out.Terminated
*out = new(ContainerStateTerminated)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState.
func (in *ContainerState) DeepCopy() *ContainerState {
if in == nil {
return nil
}
out := new(ContainerState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) {
*out = *in
in.StartedAt.DeepCopyInto(&out.StartedAt)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning.
func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning {
if in == nil {
return nil
}
out := new(ContainerStateRunning)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) {
*out = *in
in.StartedAt.DeepCopyInto(&out.StartedAt)
in.FinishedAt.DeepCopyInto(&out.FinishedAt)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated.
func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated {
if in == nil {
return nil
}
out := new(ContainerStateTerminated)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting.
func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting {
if in == nil {
return nil
}
out := new(ContainerStateWaiting)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
*out = *in
in.State.DeepCopyInto(&out.State)
in.LastTerminationState.DeepCopyInto(&out.LastTerminationState)
if in.Started != nil {
in, out := &in.Started, &out.Started
*out = new(bool)
**out = **in
}
if in.AllocatedResources != nil {
in, out := &in.AllocatedResources, &out.AllocatedResources
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMountStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.User != nil {
in, out := &in.User, &out.User
*out = new(ContainerUser)
(*in).DeepCopyInto(*out)
}
if in.AllocatedResourcesStatus != nil {
in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
*out = make([]ResourceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StopSignal != nil {
in, out := &in.StopSignal, &out.StopSignal
*out = new(Signal)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus.
func (in *ContainerStatus) DeepCopy() *ContainerStatus {
if in == nil {
return nil
}
out := new(ContainerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
*out = *in
if in.Linux != nil {
in, out := &in.Linux, &out.Linux
*out = new(LinuxContainerUser)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
func (in *ContainerUser) DeepCopy() *ContainerUser {
if in == nil {
return nil
}
out := new(ContainerUser)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint.
func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint {
if in == nil {
return nil
}
out := new(DaemonEndpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DownwardAPIVolumeFile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection.
func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection {
if in == nil {
return nil
}
out := new(DownwardAPIProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) {
*out = *in
if in.FieldRef != nil {
in, out := &in.FieldRef, &out.FieldRef
*out = new(ObjectFieldSelector)
**out = **in
}
if in.ResourceFieldRef != nil {
in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
*out = new(ResourceFieldSelector)
(*in).DeepCopyInto(*out)
}
if in.Mode != nil {
in, out := &in.Mode, &out.Mode
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile.
func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile {
if in == nil {
return nil
}
out := new(DownwardAPIVolumeFile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DownwardAPIVolumeFile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource.
func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource {
if in == nil {
return nil
}
out := new(DownwardAPIVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
*out = *in
if in.SizeLimit != nil {
in, out := &in.SizeLimit, &out.SizeLimit
x := (*in).DeepCopy()
*out = &x
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource.
func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource {
if in == nil {
return nil
}
out := new(EmptyDirVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
*out = *in
if in.NodeName != nil {
in, out := &in.NodeName, &out.NodeName
*out = new(string)
**out = **in
}
if in.TargetRef != nil {
in, out := &in.TargetRef, &out.TargetRef
*out = new(ObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
func (in *EndpointAddress) DeepCopy() *EndpointAddress {
if in == nil {
return nil
}
out := new(EndpointAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
*out = *in
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
func (in *EndpointPort) DeepCopy() *EndpointPort {
if in == nil {
return nil
}
out := new(EndpointPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]EndpointAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NotReadyAddresses != nil {
in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
*out = make([]EndpointAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]EndpointPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset.
func (in *EndpointSubset) DeepCopy() *EndpointSubset {
if in == nil {
return nil
}
out := new(EndpointSubset)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoints) DeepCopyInto(out *Endpoints) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Subsets != nil {
in, out := &in.Subsets, &out.Subsets
*out = make([]EndpointSubset, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
func (in *Endpoints) DeepCopy() *Endpoints {
if in == nil {
return nil
}
out := new(Endpoints)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Endpoints) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Endpoints, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
func (in *EndpointsList) DeepCopy() *EndpointsList {
if in == nil {
return nil
}
out := new(EndpointsList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointsList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
*out = *in
if in.ConfigMapRef != nil {
in, out := &in.ConfigMapRef, &out.ConfigMapRef
*out = new(ConfigMapEnvSource)
(*in).DeepCopyInto(*out)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretEnvSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource.
func (in *EnvFromSource) DeepCopy() *EnvFromSource {
if in == nil {
return nil
}
out := new(EnvFromSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvVar) DeepCopyInto(out *EnvVar) {
*out = *in
if in.ValueFrom != nil {
in, out := &in.ValueFrom, &out.ValueFrom
*out = new(EnvVarSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
func (in *EnvVar) DeepCopy() *EnvVar {
if in == nil {
return nil
}
out := new(EnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
*out = *in
if in.FieldRef != nil {
in, out := &in.FieldRef, &out.FieldRef
*out = new(ObjectFieldSelector)
**out = **in
}
if in.ResourceFieldRef != nil {
in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
*out = new(ResourceFieldSelector)
(*in).DeepCopyInto(*out)
}
if in.ConfigMapKeyRef != nil {
in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
*out = new(ConfigMapKeySelector)
(*in).DeepCopyInto(*out)
}
if in.SecretKeyRef != nil {
in, out := &in.SecretKeyRef, &out.SecretKeyRef
*out = new(SecretKeySelector)
(*in).DeepCopyInto(*out)
}
if in.FileKeyRef != nil {
in, out := &in.FileKeyRef, &out.FileKeyRef
*out = new(FileKeySelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource.
func (in *EnvVarSource) DeepCopy() *EnvVarSource {
if in == nil {
return nil
}
out := new(EnvVarSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) {
*out = *in
in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer.
func (in *EphemeralContainer) DeepCopy() *EphemeralContainer {
if in == nil {
return nil
}
out := new(EphemeralContainer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]ContainerPort, len(*in))
copy(*out, *in)
}
if in.EnvFrom != nil {
in, out := &in.EnvFrom, &out.EnvFrom
*out = make([]EnvFromSource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]EnvVar, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.RestartPolicy != nil {
in, out := &in.RestartPolicy, &out.RestartPolicy
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.RestartPolicyRules != nil {
in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
*out = make([]ContainerRestartRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumeDevices != nil {
in, out := &in.VolumeDevices, &out.VolumeDevices
*out = make([]VolumeDevice, len(*in))
copy(*out, *in)
}
if in.LivenessProbe != nil {
in, out := &in.LivenessProbe, &out.LivenessProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.ReadinessProbe != nil {
in, out := &in.ReadinessProbe, &out.ReadinessProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.StartupProbe != nil {
in, out := &in.StartupProbe, &out.StartupProbe
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(SecurityContext)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon.
func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon {
if in == nil {
return nil
}
out := new(EphemeralContainerCommon)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
*out = *in
if in.VolumeClaimTemplate != nil {
in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
*out = new(PersistentVolumeClaimTemplate)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
if in == nil {
return nil
}
out := new(EphemeralVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Event) DeepCopyInto(out *Event) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.InvolvedObject = in.InvolvedObject
out.Source = in.Source
in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp)
in.LastTimestamp.DeepCopyInto(&out.LastTimestamp)
in.EventTime.DeepCopyInto(&out.EventTime)
if in.Series != nil {
in, out := &in.Series, &out.Series
*out = new(EventSeries)
(*in).DeepCopyInto(*out)
}
if in.Related != nil {
in, out := &in.Related, &out.Related
*out = new(ObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
func (in *Event) DeepCopy() *Event {
if in == nil {
return nil
}
out := new(Event)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Event) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventList) DeepCopyInto(out *EventList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Event, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
func (in *EventList) DeepCopy() *EventList {
if in == nil {
return nil
}
out := new(EventList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EventList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventSeries) DeepCopyInto(out *EventSeries) {
*out = *in
in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
func (in *EventSeries) DeepCopy() *EventSeries {
if in == nil {
return nil
}
out := new(EventSeries)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EventSource) DeepCopyInto(out *EventSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
func (in *EventSource) DeepCopy() *EventSource {
if in == nil {
return nil
}
out := new(EventSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecAction) DeepCopyInto(out *ExecAction) {
*out = *in
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction.
func (in *ExecAction) DeepCopy() *ExecAction {
if in == nil {
return nil
}
out := new(ExecAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
*out = *in
if in.TargetWWNs != nil {
in, out := &in.TargetWWNs, &out.TargetWWNs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Lun != nil {
in, out := &in.Lun, &out.Lun
*out = new(int32)
**out = **in
}
if in.WWIDs != nil {
in, out := &in.WWIDs, &out.WWIDs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource.
func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
if in == nil {
return nil
}
out := new(FCVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) {
*out = *in
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector.
func (in *FileKeySelector) DeepCopy() *FileKeySelector {
if in == nil {
return nil
}
out := new(FileKeySelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
if in.Options != nil {
in, out := &in.Options, &out.Options
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
if in == nil {
return nil
}
out := new(FlexPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
if in.Options != nil {
in, out := &in.Options, &out.Options
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource.
func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource {
if in == nil {
return nil
}
out := new(FlexVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource.
func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource {
if in == nil {
return nil
}
out := new(FlockerVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource.
func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource {
if in == nil {
return nil
}
out := new(GCEPersistentDiskVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCAction) DeepCopyInto(out *GRPCAction) {
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction.
func (in *GRPCAction) DeepCopy() *GRPCAction {
if in == nil {
return nil
}
out := new(GRPCAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource.
func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
if in == nil {
return nil
}
out := new(GitRepoVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
*out = *in
if in.EndpointsNamespace != nil {
in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
if in == nil {
return nil
}
out := new(GlusterfsPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource.
func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource {
if in == nil {
return nil
}
out := new(GlusterfsVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
*out = *in
out.Port = in.Port
if in.HTTPHeaders != nil {
in, out := &in.HTTPHeaders, &out.HTTPHeaders
*out = make([]HTTPHeader, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
if in == nil {
return nil
}
out := new(HTTPGetAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
func (in *HTTPHeader) DeepCopy() *HTTPHeader {
if in == nil {
return nil
}
out := new(HTTPHeader)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostAlias) DeepCopyInto(out *HostAlias) {
*out = *in
if in.Hostnames != nil {
in, out := &in.Hostnames, &out.Hostnames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias.
func (in *HostAlias) DeepCopy() *HostAlias {
if in == nil {
return nil
}
out := new(HostAlias)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostIP) DeepCopyInto(out *HostIP) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP.
func (in *HostIP) DeepCopy() *HostIP {
if in == nil {
return nil
}
out := new(HostIP)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
*out = *in
if in.Type != nil {
in, out := &in.Type, &out.Type
*out = new(HostPathType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource.
func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource {
if in == nil {
return nil
}
out := new(HostPathVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) {
*out = *in
if in.Portals != nil {
in, out := &in.Portals, &out.Portals
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
if in.InitiatorName != nil {
in, out := &in.InitiatorName, &out.InitiatorName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource.
func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource {
if in == nil {
return nil
}
out := new(ISCSIPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
*out = *in
if in.Portals != nil {
in, out := &in.Portals, &out.Portals
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
if in.InitiatorName != nil {
in, out := &in.InitiatorName, &out.InitiatorName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource.
func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
if in == nil {
return nil
}
out := new(ISCSIVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
if in == nil {
return nil
}
out := new(ImageVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
*out = *in
if in.Mode != nil {
in, out := &in.Mode, &out.Mode
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath.
func (in *KeyToPath) DeepCopy() *KeyToPath {
if in == nil {
return nil
}
out := new(KeyToPath)
in.DeepCopyInto(out)
return out
}
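// Illustrative sketch (not generated code): optional scalar fields are
// pointers, and the generated code reallocates them (*out = new(int32);
// **out = **in), so the copy never aliases the original's pointer.
// Hypothetical helper for demonstration only.
func examplePointerFieldDeepCopy() {
	mode := int32(0644)
	in := &KeyToPath{Key: "config", Path: "app/config.yaml", Mode: &mode}
	out := in.DeepCopy()
	*out.Mode = 0600 // mutates only the copy; *in.Mode is still 0644
}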
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
*out = *in
if in.PostStart != nil {
in, out := &in.PostStart, &out.PostStart
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
if in.PreStop != nil {
in, out := &in.PreStop, &out.PreStop
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
if in.StopSignal != nil {
in, out := &in.StopSignal, &out.StopSignal
*out = new(Signal)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle.
func (in *Lifecycle) DeepCopy() *Lifecycle {
if in == nil {
return nil
}
out := new(Lifecycle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
*out = new(ExecAction)
(*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
*out = new(HTTPGetAction)
(*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
*out = new(TCPSocketAction)
**out = **in
}
if in.Sleep != nil {
in, out := &in.Sleep, &out.Sleep
*out = new(SleepAction)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler.
func (in *LifecycleHandler) DeepCopy() *LifecycleHandler {
if in == nil {
return nil
}
out := new(LifecycleHandler)
in.DeepCopyInto(out)
return out
}
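// Illustrative sketch (not generated code): pointer fields whose element type
// itself holds reference members (here ExecAction, which carries a []string)
// are copied by allocating a new value and recursing via DeepCopyInto,
// instead of the shallow **out = **in used for flat element types.
// Hypothetical helper for demonstration only.
func exampleNestedDeepCopy() {
	in := &LifecycleHandler{Exec: &ExecAction{Command: []string{"sh", "-c", "echo done"}}}
	out := in.DeepCopy()
	out.Exec.Command[0] = "bash" // in.Exec.Command[0] is still "sh"
}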
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRange) DeepCopyInto(out *LimitRange) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange.
func (in *LimitRange) DeepCopy() *LimitRange {
if in == nil {
return nil
}
out := new(LimitRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LimitRange) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
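// Illustrative sketch (not generated code): DeepCopyObject exists so that
// *LimitRange (and every other top-level type in this file) satisfies
// runtime.Object, letting generic machinery copy objects without knowing
// their concrete type. Hypothetical helper for demonstration only.
func exampleDeepCopyObject() {
	var obj runtime.Object = &LimitRange{}
	copied := obj.DeepCopyObject() // dynamic type is still *LimitRange
	_, ok := copied.(*LimitRange)  // ok == true
	_ = ok
}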
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) {
*out = *in
if in.Max != nil {
in, out := &in.Max, &out.Max
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Min != nil {
in, out := &in.Min, &out.Min
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Default != nil {
in, out := &in.Default, &out.Default
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.DefaultRequest != nil {
in, out := &in.DefaultRequest, &out.DefaultRequest
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.MaxLimitRequestRatio != nil {
in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem.
func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
if in == nil {
return nil
}
out := new(LimitRangeItem)
in.DeepCopyInto(out)
return out
}
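// Illustrative sketch (not generated code): ResourceList is a
// map[ResourceName]resource.Quantity, so the generated code builds a new map
// and deep-copies every Quantity value; removing or mutating entries in the
// copy leaves the original intact. A zero Quantity literal is used here only
// to avoid extra imports; real callers would use resource.MustParse.
// Hypothetical helper for demonstration only.
func exampleResourceListDeepCopy() {
	in := &LimitRangeItem{
		Type: LimitTypeContainer,
		Max:  ResourceList{ResourceCPU: {}},
	}
	out := in.DeepCopy()
	delete(out.Max, ResourceCPU) // in.Max still contains the "cpu" key
}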
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]LimitRange, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList.
func (in *LimitRangeList) DeepCopy() *LimitRangeList {
if in == nil {
return nil
}
out := new(LimitRangeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LimitRangeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make([]LimitRangeItem, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec.
func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
if in == nil {
return nil
}
out := new(LimitRangeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
*out = *in
if in.SupplementalGroups != nil {
in, out := &in.SupplementalGroups, &out.SupplementalGroups
*out = make([]int64, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
if in == nil {
return nil
}
out := new(LinuxContainerUser)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *List) DeepCopyInto(out *List) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if (*in)[i] != nil {
(*out)[i] = (*in)[i].DeepCopyObject()
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
func (in *List) DeepCopy() *List {
if in == nil {
return nil
}
out := new(List)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
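// Illustrative sketch (not generated code): List.Items is a []runtime.Object,
// so the generated copy cannot call a typed DeepCopyInto and instead invokes
// DeepCopyObject on each non-nil element, preserving the dynamic type of
// every item. Hypothetical helper for demonstration only.
func exampleInterfaceSliceDeepCopy() {
	in := &List{Items: []runtime.Object{&Pod{}, nil}}
	out := in.DeepCopy()
	_, ok := out.Items[0].(*Pod) // ok == true: the dynamic type survives the copy
	_ = ok
	_ = in
}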
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
*out = *in
if in.IPMode != nil {
in, out := &in.IPMode, &out.IPMode
*out = new(LoadBalancerIPMode)
**out = **in
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]PortStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
if in == nil {
return nil
}
out := new(LoadBalancerIngress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]LoadBalancerIngress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
if in == nil {
return nil
}
out := new(LoadBalancerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
if in == nil {
return nil
}
out := new(LocalObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
*out = *in
if in.FSType != nil {
in, out := &in.FSType, &out.FSType
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
if in == nil {
return nil
}
out := new(LocalVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus.
func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus {
if in == nil {
return nil
}
out := new(ModifyVolumeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
if in == nil {
return nil
}
out := new(NFSVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespace) DeepCopyInto(out *Namespace) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
func (in *Namespace) DeepCopy() *Namespace {
if in == nil {
return nil
}
out := new(Namespace)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Namespace) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition.
func (in *NamespaceCondition) DeepCopy() *NamespaceCondition {
if in == nil {
return nil
}
out := new(NamespaceCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Namespace, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
func (in *NamespaceList) DeepCopy() *NamespaceList {
if in == nil {
return nil
}
out := new(NamespaceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NamespaceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
*out = *in
if in.Finalizers != nil {
in, out := &in.Finalizers, &out.Finalizers
*out = make([]FinalizerName, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
if in == nil {
return nil
}
out := new(NamespaceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NamespaceCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
if in == nil {
return nil
}
out := new(NamespaceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Node) DeepCopyInto(out *Node) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
func (in *Node) DeepCopy() *Node {
if in == nil {
return nil
}
out := new(Node)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Node) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
if in == nil {
return nil
}
out := new(NodeAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
*out = *in
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
*out = new(NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
*out = make([]PreferredSchedulingTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
func (in *NodeAffinity) DeepCopy() *NodeAffinity {
if in == nil {
return nil
}
out := new(NodeAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
*out = *in
in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
func (in *NodeCondition) DeepCopy() *NodeCondition {
if in == nil {
return nil
}
out := new(NodeCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
*out = *in
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
*out = new(ConfigMapNodeConfigSource)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
if in == nil {
return nil
}
out := new(NodeConfigSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
*out = *in
if in.Assigned != nil {
in, out := &in.Assigned, &out.Assigned
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
if in.Active != nil {
in, out := &in.Active, &out.Active
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
if in.LastKnownGood != nil {
in, out := &in.LastKnownGood, &out.LastKnownGood
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
if in == nil {
return nil
}
out := new(NodeConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
*out = *in
out.KubeletEndpoint = in.KubeletEndpoint
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
if in == nil {
return nil
}
out := new(NodeDaemonEndpoints)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
*out = *in
if in.SupplementalGroupsPolicy != nil {
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
func (in *NodeFeatures) DeepCopy() *NodeFeatures {
if in == nil {
return nil
}
out := new(NodeFeatures)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeList) DeepCopyInto(out *NodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Node, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
func (in *NodeList) DeepCopy() *NodeList {
if in == nil {
return nil
}
out := new(NodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
if in == nil {
return nil
}
out := new(NodeProxyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeRuntimeHandler) DeepCopyInto(out *NodeRuntimeHandler) {
*out = *in
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(NodeRuntimeHandlerFeatures)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandler.
func (in *NodeRuntimeHandler) DeepCopy() *NodeRuntimeHandler {
if in == nil {
return nil
}
out := new(NodeRuntimeHandler)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatures) {
*out = *in
if in.RecursiveReadOnlyMounts != nil {
in, out := &in.RecursiveReadOnlyMounts, &out.RecursiveReadOnlyMounts
*out = new(bool)
**out = **in
}
if in.UserNamespaces != nil {
in, out := &in.UserNamespaces, &out.UserNamespaces
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandlerFeatures.
func (in *NodeRuntimeHandlerFeatures) DeepCopy() *NodeRuntimeHandlerFeatures {
if in == nil {
return nil
}
out := new(NodeRuntimeHandlerFeatures)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
*out = *in
if in.NodeSelectorTerms != nil {
in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
*out = make([]NodeSelectorTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
func (in *NodeSelector) DeepCopy() *NodeSelector {
if in == nil {
return nil
}
out := new(NodeSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
if in == nil {
return nil
}
out := new(NodeSelectorRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
*out = *in
if in.MatchExpressions != nil {
in, out := &in.MatchExpressions, &out.MatchExpressions
*out = make([]NodeSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MatchFields != nil {
in, out := &in.MatchFields, &out.MatchFields
*out = make([]NodeSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
if in == nil {
return nil
}
out := new(NodeSelectorTerm)
in.DeepCopyInto(out)
return out
}
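// Illustrative sketch (not generated code): slices whose element type needs a
// deep copy are rebuilt element by element with DeepCopyInto into a
// preallocated destination, rather than with copy(), so nested slices such as
// Values are not shared. Hypothetical helper for demonstration only.
func exampleStructSliceDeepCopy() {
	in := &NodeSelectorTerm{
		MatchExpressions: []NodeSelectorRequirement{
			{Key: "kubernetes.io/os", Operator: NodeSelectorOpIn, Values: []string{"linux"}},
		},
	}
	out := in.DeepCopy()
	out.MatchExpressions[0].Values[0] = "windows" // the original still says "linux"
}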
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
*out = *in
if in.PodCIDRs != nil {
in, out := &in.PodCIDRs, &out.PodCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ConfigSource != nil {
in, out := &in.ConfigSource, &out.ConfigSource
*out = new(NodeConfigSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
if in == nil {
return nil
}
out := new(NodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = *in
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Allocatable != nil {
in, out := &in.Allocatable, &out.Allocatable
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NodeCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]NodeAddress, len(*in))
copy(*out, *in)
}
out.DaemonEndpoints = in.DaemonEndpoints
in.NodeInfo.DeepCopyInto(&out.NodeInfo)
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]ContainerImage, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.VolumesInUse != nil {
in, out := &in.VolumesInUse, &out.VolumesInUse
*out = make([]UniqueVolumeName, len(*in))
copy(*out, *in)
}
if in.VolumesAttached != nil {
in, out := &in.VolumesAttached, &out.VolumesAttached
*out = make([]AttachedVolume, len(*in))
copy(*out, *in)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(NodeConfigStatus)
(*in).DeepCopyInto(*out)
}
if in.RuntimeHandlers != nil {
in, out := &in.RuntimeHandlers, &out.RuntimeHandlers
*out = make([]NodeRuntimeHandler, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(NodeFeatures)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
if in == nil {
return nil
}
out := new(NodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) {
*out = *in
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus.
func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus {
if in == nil {
return nil
}
out := new(NodeSwapStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
*out = *in
if in.Swap != nil {
in, out := &in.Swap, &out.Swap
*out = new(NodeSwapStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
if in == nil {
return nil
}
out := new(NodeSystemInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
if in == nil {
return nil
}
out := new(ObjectFieldSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
func (in *ObjectReference) DeepCopy() *ObjectReference {
if in == nil {
return nil
}
out := new(ObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ObjectReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
func (in *PersistentVolume) DeepCopy() *PersistentVolume {
if in == nil {
return nil
}
out := new(PersistentVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PersistentVolume) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
if in == nil {
return nil
}
out := new(PersistentVolumeClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
*out = *in
if in.AccessModes != nil {
in, out := &in.AccessModes, &out.AccessModes
*out = make([]PersistentVolumeAccessMode, len(*in))
copy(*out, *in)
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Resources.DeepCopyInto(&out.Resources)
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
*out = new(string)
**out = **in
}
if in.VolumeMode != nil {
in, out := &in.VolumeMode, &out.VolumeMode
*out = new(PersistentVolumeMode)
**out = **in
}
if in.DataSource != nil {
in, out := &in.DataSource, &out.DataSource
*out = new(TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
if in.DataSourceRef != nil {
in, out := &in.DataSourceRef, &out.DataSourceRef
*out = new(TypedObjectReference)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
*out = *in
if in.AccessModes != nil {
in, out := &in.AccessModes, &out.AccessModes
*out = make([]PersistentVolumeAccessMode, len(*in))
copy(*out, *in)
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]PersistentVolumeClaimCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AllocatedResources != nil {
in, out := &in.AllocatedResources, &out.AllocatedResources
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.AllocatedResourceStatuses != nil {
in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses
*out = make(map[ResourceName]ClaimResourceStatus, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.CurrentVolumeAttributesClassName != nil {
in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName
*out = new(string)
**out = **in
}
if in.ModifyVolumeStatus != nil {
in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus
*out = new(ModifyVolumeStatus)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PersistentVolume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
if in == nil {
return nil
}
out := new(PersistentVolumeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
*out = *in
if in.GCEPersistentDisk != nil {
in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
*out = new(GCEPersistentDiskVolumeSource)
**out = **in
}
if in.AWSElasticBlockStore != nil {
in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
*out = new(AWSElasticBlockStoreVolumeSource)
**out = **in
}
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
*out = new(HostPathVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Glusterfs != nil {
in, out := &in.Glusterfs, &out.Glusterfs
*out = new(GlusterfsPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.NFS != nil {
in, out := &in.NFS, &out.NFS
*out = new(NFSVolumeSource)
**out = **in
}
if in.RBD != nil {
in, out := &in.RBD, &out.RBD
*out = new(RBDPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Quobyte != nil {
in, out := &in.Quobyte, &out.Quobyte
*out = new(QuobyteVolumeSource)
**out = **in
}
if in.ISCSI != nil {
in, out := &in.ISCSI, &out.ISCSI
*out = new(ISCSIPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.FlexVolume != nil {
in, out := &in.FlexVolume, &out.FlexVolume
*out = new(FlexPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Cinder != nil {
in, out := &in.Cinder, &out.Cinder
*out = new(CinderPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.CephFS != nil {
in, out := &in.CephFS, &out.CephFS
*out = new(CephFSPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.FC != nil {
in, out := &in.FC, &out.FC
*out = new(FCVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Flocker != nil {
in, out := &in.Flocker, &out.Flocker
*out = new(FlockerVolumeSource)
**out = **in
}
if in.AzureFile != nil {
in, out := &in.AzureFile, &out.AzureFile
*out = new(AzureFilePersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
**out = **in
}
if in.AzureDisk != nil {
in, out := &in.AzureDisk, &out.AzureDisk
*out = new(AzureDiskVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
}
if in.PortworxVolume != nil {
in, out := &in.PortworxVolume, &out.PortworxVolume
*out = new(PortworxVolumeSource)
**out = **in
}
if in.ScaleIO != nil {
in, out := &in.ScaleIO, &out.ScaleIO
*out = new(ScaleIOPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Local != nil {
in, out := &in.Local, &out.Local
*out = new(LocalVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.StorageOS != nil {
in, out := &in.StorageOS, &out.StorageOS
*out = new(StorageOSPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.CSI != nil {
in, out := &in.CSI, &out.CSI
*out = new(CSIPersistentVolumeSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
if in == nil {
return nil
}
out := new(PersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
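// Illustrative sketch (not generated code): PersistentVolumeSource is a
// union-style struct in which at most one pointer member is normally set; the
// generated copy simply walks every member and reallocates whichever ones are
// non-nil. The server name below is a placeholder. Hypothetical helper for
// demonstration only.
func exampleUnionDeepCopy() {
	in := &PersistentVolumeSource{
		NFS: &NFSVolumeSource{Server: "nfs.example.com", Path: "/export"},
	}
	out := in.DeepCopy()
	out.NFS.Path = "/other" // in.NFS.Path is unchanged; the pointer was reallocated
}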
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
*out = *in
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
if in.AccessModes != nil {
in, out := &in.AccessModes, &out.AccessModes
*out = make([]PersistentVolumeAccessMode, len(*in))
copy(*out, *in)
}
if in.ClaimRef != nil {
in, out := &in.ClaimRef, &out.ClaimRef
*out = new(ObjectReference)
**out = **in
}
if in.MountOptions != nil {
in, out := &in.MountOptions, &out.MountOptions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.VolumeMode != nil {
in, out := &in.VolumeMode, &out.VolumeMode
*out = new(PersistentVolumeMode)
**out = **in
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
if in == nil {
return nil
}
out := new(PersistentVolumeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
*out = *in
if in.LastPhaseTransitionTime != nil {
in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
if in == nil {
return nil
}
out := new(PersistentVolumeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
if in == nil {
return nil
}
out := new(PhotonPersistentDiskVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pod) DeepCopyInto(out *Pod) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
func (in *Pod) DeepCopy() *Pod {
if in == nil {
return nil
}
out := new(Pod)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Pod) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
*out = *in
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
*out = make([]PodAffinityTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
*out = make([]WeightedPodAffinityTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
func (in *PodAffinity) DeepCopy() *PodAffinity {
if in == nil {
return nil
}
out := new(PodAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MatchLabelKeys != nil {
in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MismatchLabelKeys != nil {
in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
if in == nil {
return nil
}
out := new(PodAffinityTerm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
*out = *in
if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
*out = make([]PodAffinityTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
*out = make([]WeightedPodAffinityTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
if in == nil {
return nil
}
out := new(PodAntiAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
if in == nil {
return nil
}
out := new(PodAttachOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) {
*out = *in
if in.MaxExpirationSeconds != nil {
in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection.
func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection {
if in == nil {
return nil
}
out := new(PodCertificateProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodCondition) DeepCopyInto(out *PodCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
func (in *PodCondition) DeepCopy() *PodCondition {
if in == nil {
return nil
}
out := new(PodCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
*out = *in
if in.Nameservers != nil {
in, out := &in.Nameservers, &out.Nameservers
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Searches != nil {
in, out := &in.Searches, &out.Searches
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Options != nil {
in, out := &in.Options, &out.Options
*out = make([]PodDNSConfigOption, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
if in == nil {
return nil
}
out := new(PodDNSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
if in == nil {
return nil
}
out := new(PodDNSConfigOption)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
func (in *PodExecOptions) DeepCopy() *PodExecOptions {
if in == nil {
return nil
}
out := new(PodExecOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodExecOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) {
*out = *in
if in.RequestMappings != nil {
in, out := &in.RequestMappings, &out.RequestMappings
*out = make([]ContainerExtendedResourceRequest, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus.
func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus {
if in == nil {
return nil
}
out := new(PodExtendedResourceClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodIP) DeepCopyInto(out *PodIP) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP.
func (in *PodIP) DeepCopy() *PodIP {
if in == nil {
return nil
}
out := new(PodIP)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodList) DeepCopyInto(out *PodList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Pod, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
func (in *PodList) DeepCopy() *PodList {
if in == nil {
return nil
}
out := new(PodList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.SinceSeconds != nil {
in, out := &in.SinceSeconds, &out.SinceSeconds
*out = new(int64)
**out = **in
}
if in.SinceTime != nil {
in, out := &in.SinceTime, &out.SinceTime
*out = (*in).DeepCopy()
}
if in.TailLines != nil {
in, out := &in.TailLines, &out.TailLines
*out = new(int64)
**out = **in
}
if in.LimitBytes != nil {
in, out := &in.LimitBytes, &out.LimitBytes
*out = new(int64)
**out = **in
}
if in.Stream != nil {
in, out := &in.Stream, &out.Stream
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
func (in *PodLogOptions) DeepCopy() *PodLogOptions {
if in == nil {
return nil
}
out := new(PodLogOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodLogOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodOS) DeepCopyInto(out *PodOS) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS.
func (in *PodOS) DeepCopy() *PodOS {
if in == nil {
return nil
}
out := new(PodOS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]int32, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
if in == nil {
return nil
}
out := new(PodPortForwardOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
if in == nil {
return nil
}
out := new(PodProxyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
if in == nil {
return nil
}
out := new(PodReadinessGate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
if in.ResourceClaimTemplateName != nil {
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
if in == nil {
return nil
}
out := new(PodResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus.
func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus {
if in == nil {
return nil
}
out := new(PodResourceClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate.
func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate {
if in == nil {
return nil
}
out := new(PodSchedulingGate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = *in
if in.ShareProcessNamespace != nil {
in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
*out = new(bool)
**out = **in
}
if in.HostUsers != nil {
in, out := &in.HostUsers, &out.HostUsers
*out = new(bool)
**out = **in
}
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
*out = new(SELinuxOptions)
**out = **in
}
if in.WindowsOptions != nil {
in, out := &in.WindowsOptions, &out.WindowsOptions
*out = new(WindowsSecurityContextOptions)
(*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
*out = new(int64)
**out = **in
}
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
*out = new(int64)
**out = **in
}
if in.RunAsNonRoot != nil {
in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
*out = new(bool)
**out = **in
}
if in.SupplementalGroups != nil {
in, out := &in.SupplementalGroups, &out.SupplementalGroups
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.SupplementalGroupsPolicy != nil {
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
*out = new(SupplementalGroupsPolicy)
**out = **in
}
if in.FSGroup != nil {
in, out := &in.FSGroup, &out.FSGroup
*out = new(int64)
**out = **in
}
if in.FSGroupChangePolicy != nil {
in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
*out = new(PodFSGroupChangePolicy)
**out = **in
}
if in.Sysctls != nil {
in, out := &in.Sysctls, &out.Sysctls
*out = make([]Sysctl, len(*in))
copy(*out, *in)
}
if in.SeccompProfile != nil {
in, out := &in.SeccompProfile, &out.SeccompProfile
*out = new(SeccompProfile)
(*in).DeepCopyInto(*out)
}
if in.AppArmorProfile != nil {
in, out := &in.AppArmorProfile, &out.AppArmorProfile
*out = new(AppArmorProfile)
(*in).DeepCopyInto(*out)
}
if in.SELinuxChangePolicy != nil {
in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy
*out = new(PodSELinuxChangePolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
if in == nil {
return nil
}
out := new(PodSecurityContext)
in.DeepCopyInto(out)
return out
}
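// examplePodSecurityContextDeepCopy is a hand-written illustrative sketch,
// not part of the deepcopy-gen output; the function name is hypothetical. It
// shows that pointer-to-struct fields like SELinuxOptions receive a fresh
// allocation, so the copy can be mutated without touching the source.
func examplePodSecurityContextDeepCopy() {
	orig := &PodSecurityContext{SELinuxOptions: &SELinuxOptions{Level: "s0"}}
	dup := orig.DeepCopy()
	dup.SELinuxOptions.Level = "s0:c1"
	_ = orig.SELinuxOptions.Level // still "s0": no shared *SELinuxOptions
}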
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSignature) DeepCopyInto(out *PodSignature) {
*out = *in
if in.PodController != nil {
in, out := &in.PodController, &out.PodController
*out = new(v1.OwnerReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
func (in *PodSignature) DeepCopy() *PodSignature {
if in == nil {
return nil
}
out := new(PodSignature)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = *in
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EphemeralContainers != nil {
in, out := &in.EphemeralContainers, &out.EphemeralContainers
*out = make([]EphemeralContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
*out = new(bool)
**out = **in
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(PodSecurityContext)
(*in).DeepCopyInto(*out)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.SetHostnameAsFQDN != nil {
in, out := &in.SetHostnameAsFQDN, &out.SetHostnameAsFQDN
*out = new(bool)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostAliases != nil {
in, out := &in.HostAliases, &out.HostAliases
*out = make([]HostAlias, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Priority != nil {
in, out := &in.Priority, &out.Priority
*out = new(int32)
**out = **in
}
if in.PreemptionPolicy != nil {
in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
*out = new(PreemptionPolicy)
**out = **in
}
if in.DNSConfig != nil {
in, out := &in.DNSConfig, &out.DNSConfig
*out = new(PodDNSConfig)
(*in).DeepCopyInto(*out)
}
if in.ReadinessGates != nil {
in, out := &in.ReadinessGates, &out.ReadinessGates
*out = make([]PodReadinessGate, len(*in))
copy(*out, *in)
}
if in.RuntimeClassName != nil {
in, out := &in.RuntimeClassName, &out.RuntimeClassName
*out = new(string)
**out = **in
}
if in.Overhead != nil {
in, out := &in.Overhead, &out.Overhead
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.EnableServiceLinks != nil {
in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
*out = new(bool)
**out = **in
}
if in.TopologySpreadConstraints != nil {
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
*out = make([]TopologySpreadConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OS != nil {
in, out := &in.OS, &out.OS
*out = new(PodOS)
**out = **in
}
if in.SchedulingGates != nil {
in, out := &in.SchedulingGates, &out.SchedulingGates
*out = make([]PodSchedulingGate, len(*in))
copy(*out, *in)
}
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
*out = make([]PodResourceClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.HostnameOverride != nil {
in, out := &in.HostnameOverride, &out.HostnameOverride
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
func (in *PodSpec) DeepCopy() *PodSpec {
if in == nil {
return nil
}
out := new(PodSpec)
in.DeepCopyInto(out)
return out
}
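// examplePodSpecDeepCopy is a hand-written illustrative sketch, not part of
// the deepcopy-gen output; the function name is hypothetical. It shows that
// slice and map fields are copied element by element, so edits to the copy's
// Containers and NodeSelector do not leak back into the source PodSpec.
func examplePodSpecDeepCopy() {
	orig := &PodSpec{
		Containers:   []Container{{Name: "app"}},
		NodeSelector: map[string]string{"disk": "ssd"},
	}
	dup := orig.DeepCopy()
	dup.Containers[0].Name = "sidecar"
	dup.NodeSelector["disk"] = "hdd"
	_ = orig.Containers[0].Name   // still "app"
	_ = orig.NodeSelector["disk"] // still "ssd"
}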
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodStatus) DeepCopyInto(out *PodStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]PodCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostIPs != nil {
in, out := &in.HostIPs, &out.HostIPs
*out = make([]HostIP, len(*in))
copy(*out, *in)
}
if in.PodIPs != nil {
in, out := &in.PodIPs, &out.PodIPs
*out = make([]PodIP, len(*in))
copy(*out, *in)
}
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.InitContainerStatuses != nil {
in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
*out = make([]ContainerStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ContainerStatuses != nil {
in, out := &in.ContainerStatuses, &out.ContainerStatuses
*out = make([]ContainerStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EphemeralContainerStatuses != nil {
in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses
*out = make([]ContainerStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourceClaimStatuses != nil {
in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses
*out = make([]PodResourceClaimStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtendedResourceClaimStatus != nil {
in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus
*out = new(PodExtendedResourceClaimStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
func (in *PodStatus) DeepCopy() *PodStatus {
if in == nil {
return nil
}
out := new(PodStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
func (in *PodStatusResult) DeepCopy() *PodStatusResult {
if in == nil {
return nil
}
out := new(PodStatusResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodStatusResult) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
func (in *PodTemplate) DeepCopy() *PodTemplate {
if in == nil {
return nil
}
out := new(PodTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
func (in *PodTemplateList) DeepCopy() *PodTemplateList {
if in == nil {
return nil
}
out := new(PodTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
if in == nil {
return nil
}
out := new(PodTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortStatus) DeepCopyInto(out *PortStatus) {
*out = *in
if in.Error != nil {
in, out := &in.Error, &out.Error
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
func (in *PortStatus) DeepCopy() *PortStatus {
if in == nil {
return nil
}
out := new(PortStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
if in == nil {
return nil
}
out := new(PortworxVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Preconditions) DeepCopyInto(out *Preconditions) {
*out = *in
if in.UID != nil {
in, out := &in.UID, &out.UID
*out = new(types.UID)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
func (in *Preconditions) DeepCopy() *Preconditions {
if in == nil {
return nil
}
out := new(Preconditions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
*out = *in
in.PodSignature.DeepCopyInto(&out.PodSignature)
in.EvictionTime.DeepCopyInto(&out.EvictionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
if in == nil {
return nil
}
out := new(PreferAvoidPodsEntry)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
*out = *in
in.Preference.DeepCopyInto(&out.Preference)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
if in == nil {
return nil
}
out := new(PreferredSchedulingTerm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Probe) DeepCopyInto(out *Probe) {
*out = *in
in.ProbeHandler.DeepCopyInto(&out.ProbeHandler)
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
func (in *Probe) DeepCopy() *Probe {
if in == nil {
return nil
}
out := new(Probe)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
*out = new(ExecAction)
(*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
*out = new(HTTPGetAction)
(*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
*out = new(TCPSocketAction)
**out = **in
}
if in.GRPC != nil {
in, out := &in.GRPC, &out.GRPC
*out = new(GRPCAction)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler.
func (in *ProbeHandler) DeepCopy() *ProbeHandler {
if in == nil {
return nil
}
out := new(ProbeHandler)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
*out = *in
if in.Sources != nil {
in, out := &in.Sources, &out.Sources
*out = make([]VolumeProjection, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
if in == nil {
return nil
}
out := new(ProjectedVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
if in == nil {
return nil
}
out := new(QuobyteVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
*out = *in
if in.CephMonitors != nil {
in, out := &in.CephMonitors, &out.CephMonitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
if in == nil {
return nil
}
out := new(RBDPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
*out = *in
if in.CephMonitors != nil {
in, out := &in.CephMonitors, &out.CephMonitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
if in == nil {
return nil
}
out := new(RBDVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
func (in *RangeAllocation) DeepCopy() *RangeAllocation {
if in == nil {
return nil
}
out := new(RangeAllocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RangeAllocation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
func (in *ReplicationController) DeepCopy() *ReplicationController {
if in == nil {
return nil
}
out := new(ReplicationController)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicationController) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
if in == nil {
return nil
}
out := new(ReplicationControllerCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicationController, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
if in == nil {
return nil
}
out := new(ReplicationControllerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(PodTemplateSpec)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
if in == nil {
return nil
}
out := new(ReplicationControllerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicationControllerCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
if in == nil {
return nil
}
out := new(ReplicationControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
if in == nil {
return nil
}
out := new(ResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
*out = *in
out.Divisor = in.Divisor.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
if in == nil {
return nil
}
out := new(ResourceFieldSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
func (in *ResourceHealth) DeepCopy() *ResourceHealth {
if in == nil {
return nil
}
out := new(ResourceHealth)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceList) DeepCopyInto(out *ResourceList) {
{
in := &in
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
func (in ResourceList) DeepCopy() ResourceList {
if in == nil {
return nil
}
out := new(ResourceList)
in.DeepCopyInto(out)
return *out
}
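// exampleResourceListDeepCopy is a hand-written illustrative sketch, not part
// of the deepcopy-gen output; the function name is hypothetical. ResourceList
// is a named map type, so DeepCopy uses a value receiver, tolerates nil, and
// returns an independent map whose Quantity values are themselves deep-copied.
func exampleResourceListDeepCopy() {
	var empty ResourceList
	_ = empty.DeepCopy() // nil in, nil out: safe on a nil map

	orig := ResourceList{ResourceCPU: {}}
	dup := orig.DeepCopy()
	delete(dup, ResourceCPU)
	_ = len(orig) // still 1: dup is a separate map
}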
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
func (in *ResourceQuota) DeepCopy() *ResourceQuota {
if in == nil {
return nil
}
out := new(ResourceQuota)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceQuota) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceQuota, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
if in == nil {
return nil
}
out := new(ResourceQuotaList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
*out = *in
if in.Hard != nil {
in, out := &in.Hard, &out.Hard
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Scopes != nil {
in, out := &in.Scopes, &out.Scopes
*out = make([]ResourceQuotaScope, len(*in))
copy(*out, *in)
}
if in.ScopeSelector != nil {
in, out := &in.ScopeSelector, &out.ScopeSelector
*out = new(ScopeSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
if in == nil {
return nil
}
out := new(ResourceQuotaSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
*out = *in
if in.Hard != nil {
in, out := &in.Hard, &out.Hard
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Used != nil {
in, out := &in.Used, &out.Used
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
if in == nil {
return nil
}
out := new(ResourceQuotaStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Claims != nil {
in, out := &in.Claims, &out.Claims
*out = make([]ResourceClaim, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
if in == nil {
return nil
}
out := new(ResourceRequirements)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceHealth, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
if in == nil {
return nil
}
out := new(ResourceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
if in == nil {
return nil
}
out := new(SELinuxOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
if in == nil {
return nil
}
out := new(ScaleIOPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
if in == nil {
return nil
}
out := new(ScaleIOVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
*out = *in
if in.MatchExpressions != nil {
in, out := &in.MatchExpressions, &out.MatchExpressions
*out = make([]ScopedResourceSelectorRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
func (in *ScopeSelector) DeepCopy() *ScopeSelector {
if in == nil {
return nil
}
out := new(ScopeSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
if in == nil {
return nil
}
out := new(ScopedResourceSelectorRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SeccompProfile) DeepCopyInto(out *SeccompProfile) {
*out = *in
if in.LocalhostProfile != nil {
in, out := &in.LocalhostProfile, &out.LocalhostProfile
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeccompProfile.
func (in *SeccompProfile) DeepCopy() *SeccompProfile {
if in == nil {
return nil
}
out := new(SeccompProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Secret) DeepCopyInto(out *Secret) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Immutable != nil {
in, out := &in.Immutable, &out.Immutable
*out = new(bool)
**out = **in
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make(map[string][]byte, len(*in))
for key, val := range *in {
var outVal []byte
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]byte, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
func (in *Secret) DeepCopy() *Secret {
if in == nil {
return nil
}
out := new(Secret)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Secret) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
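// exampleSecretDeepCopyObject is a hand-written illustrative sketch, not part
// of the deepcopy-gen output; the function name is hypothetical. It shows the
// runtime.Object round trip and that the Data map's byte slices are copied
// element-wise rather than aliased.
func exampleSecretDeepCopyObject() {
	orig := &Secret{Data: map[string][]byte{"token": []byte("abc")}}
	obj := orig.DeepCopyObject() // typed as runtime.Object
	dup, ok := obj.(*Secret)
	if !ok {
		return
	}
	dup.Data["token"][0] = 'x'
	_ = orig.Data["token"] // still "abc": the copy owns fresh backing arrays
}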
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
if in == nil {
return nil
}
out := new(SecretEnvSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
if in == nil {
return nil
}
out := new(SecretKeySelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretList) DeepCopyInto(out *SecretList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Secret, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
func (in *SecretList) DeepCopy() *SecretList {
if in == nil {
return nil
}
out := new(SecretList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SecretList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretProjection) DeepCopyInto(out *SecretProjection) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KeyToPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection.
func (in *SecretProjection) DeepCopy() *SecretProjection {
if in == nil {
return nil
}
out := new(SecretProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KeyToPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DefaultMode != nil {
in, out := &in.DefaultMode, &out.DefaultMode
*out = new(int32)
**out = **in
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource.
func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource {
if in == nil {
return nil
}
out := new(SecretVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
*out = *in
if in.Capabilities != nil {
in, out := &in.Capabilities, &out.Capabilities
*out = new(Capabilities)
(*in).DeepCopyInto(*out)
}
if in.Privileged != nil {
in, out := &in.Privileged, &out.Privileged
*out = new(bool)
**out = **in
}
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
*out = new(SELinuxOptions)
**out = **in
}
if in.WindowsOptions != nil {
in, out := &in.WindowsOptions, &out.WindowsOptions
*out = new(WindowsSecurityContextOptions)
(*in).DeepCopyInto(*out)
}
if in.RunAsUser != nil {
in, out := &in.RunAsUser, &out.RunAsUser
*out = new(int64)
**out = **in
}
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
*out = new(int64)
**out = **in
}
if in.RunAsNonRoot != nil {
in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
*out = new(bool)
**out = **in
}
if in.ReadOnlyRootFilesystem != nil {
in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
*out = new(bool)
**out = **in
}
if in.AllowPrivilegeEscalation != nil {
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
*out = new(bool)
**out = **in
}
if in.ProcMount != nil {
in, out := &in.ProcMount, &out.ProcMount
*out = new(ProcMountType)
**out = **in
}
if in.SeccompProfile != nil {
in, out := &in.SeccompProfile, &out.SeccompProfile
*out = new(SeccompProfile)
(*in).DeepCopyInto(*out)
}
if in.AppArmorProfile != nil {
in, out := &in.AppArmorProfile, &out.AppArmorProfile
*out = new(AppArmorProfile)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext.
func (in *SecurityContext) DeepCopy() *SecurityContext {
if in == nil {
return nil
}
out := new(SecurityContext)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerializedReference) DeepCopyInto(out *SerializedReference) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Reference = in.Reference
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference.
func (in *SerializedReference) DeepCopy() *SerializedReference {
if in == nil {
return nil
}
out := new(SerializedReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SerializedReference) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
if in == nil {
return nil
}
out := new(Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Service) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Secrets != nil {
in, out := &in.Secrets, &out.Secrets
*out = make([]ObjectReference, len(*in))
copy(*out, *in)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.AutomountServiceAccountToken != nil {
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount.
func (in *ServiceAccount) DeepCopy() *ServiceAccount {
if in == nil {
return nil
}
out := new(ServiceAccount)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceAccount) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ServiceAccount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList.
func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
if in == nil {
return nil
}
out := new(ServiceAccountList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection.
func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection {
if in == nil {
return nil
}
out := new(ServiceAccountTokenProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceList) DeepCopyInto(out *ServiceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Service, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
func (in *ServiceList) DeepCopy() *ServiceList {
if in == nil {
return nil
}
out := new(ServiceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServicePort) DeepCopyInto(out *ServicePort) {
*out = *in
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
out.TargetPort = in.TargetPort
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
func (in *ServicePort) DeepCopy() *ServicePort {
if in == nil {
return nil
}
out := new(ServicePort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions.
func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
if in == nil {
return nil
}
out := new(ServiceProxyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]ServicePort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ClusterIPs != nil {
in, out := &in.ClusterIPs, &out.ClusterIPs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IPFamilies != nil {
in, out := &in.IPFamilies, &out.IPFamilies
*out = make([]IPFamily, len(*in))
copy(*out, *in)
}
if in.IPFamilyPolicy != nil {
in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
*out = new(IPFamilyPolicy)
**out = **in
}
if in.ExternalIPs != nil {
in, out := &in.ExternalIPs, &out.ExternalIPs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SessionAffinityConfig != nil {
in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
*out = new(SessionAffinityConfig)
(*in).DeepCopyInto(*out)
}
if in.LoadBalancerSourceRanges != nil {
in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllocateLoadBalancerNodePorts != nil {
in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts
*out = new(bool)
**out = **in
}
if in.LoadBalancerClass != nil {
in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
*out = new(string)
**out = **in
}
if in.InternalTrafficPolicy != nil {
in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy
*out = new(ServiceInternalTrafficPolicy)
**out = **in
}
if in.TrafficDistribution != nil {
in, out := &in.TrafficDistribution, &out.TrafficDistribution
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
if in == nil {
return nil
}
out := new(ServiceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
*out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus.
func (in *ServiceStatus) DeepCopy() *ServiceStatus {
if in == nil {
return nil
}
out := new(ServiceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
*out = *in
if in.ClientIP != nil {
in, out := &in.ClientIP, &out.ClientIP
*out = new(ClientIPConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig.
func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
if in == nil {
return nil
}
out := new(SessionAffinityConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SleepAction) DeepCopyInto(out *SleepAction) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction.
func (in *SleepAction) DeepCopy() *SleepAction {
if in == nil {
return nil
}
out := new(SleepAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(ObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource.
func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource {
if in == nil {
return nil
}
out := new(StorageOSPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource.
func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource {
if in == nil {
return nil
}
out := new(StorageOSVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Sysctl) DeepCopyInto(out *Sysctl) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl.
func (in *Sysctl) DeepCopy() *Sysctl {
if in == nil {
return nil
}
out := new(Sysctl)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) {
*out = *in
out.Port = in.Port
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction.
func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
if in == nil {
return nil
}
out := new(TCPSocketAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
if in.TimeAdded != nil {
in, out := &in.TimeAdded, &out.TimeAdded
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
func (in *Taint) DeepCopy() *Taint {
if in == nil {
return nil
}
out := new(Taint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Toleration) DeepCopyInto(out *Toleration) {
*out = *in
if in.TolerationSeconds != nil {
in, out := &in.TolerationSeconds, &out.TolerationSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
func (in *Toleration) DeepCopy() *Toleration {
if in == nil {
return nil
}
out := new(Toleration)
in.DeepCopyInto(out)
return out
}
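// Illustrative usage (editorial sketch, not generated code): DeepCopy
// re-allocates pointer fields such as TolerationSeconds, so the copy can
// be mutated independently of the receiver:
//
//	secs := int64(300)
//	orig := &Toleration{TolerationSeconds: &secs}
//	dup := orig.DeepCopy()
//	*dup.TolerationSeconds = 600
//	// *orig.TolerationSeconds is still 300; the pointer was not shared.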
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) {
*out = *in
if in.Values != nil {
in, out := &in.Values, &out.Values
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement.
func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement {
if in == nil {
return nil
}
out := new(TopologySelectorLabelRequirement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) {
*out = *in
if in.MatchLabelExpressions != nil {
in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions
*out = make([]TopologySelectorLabelRequirement, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm.
func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
if in == nil {
return nil
}
out := new(TopologySelectorTerm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
*out = *in
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MinDomains != nil {
in, out := &in.MinDomains, &out.MinDomains
*out = new(int32)
**out = **in
}
if in.NodeAffinityPolicy != nil {
in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
*out = new(NodeInclusionPolicy)
**out = **in
}
if in.NodeTaintsPolicy != nil {
in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
*out = new(NodeInclusionPolicy)
**out = **in
}
if in.MatchLabelKeys != nil {
in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
if in == nil {
return nil
}
out := new(TopologySpreadConstraint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
if in == nil {
return nil
}
out := new(TypedLocalObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
if in.Namespace != nil {
in, out := &in.Namespace, &out.Namespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference.
func (in *TypedObjectReference) DeepCopy() *TypedObjectReference {
if in == nil {
return nil
}
out := new(TypedObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in
in.VolumeSource.DeepCopyInto(&out.VolumeSource)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
func (in *Volume) DeepCopy() *Volume {
if in == nil {
return nil
}
out := new(Volume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice.
func (in *VolumeDevice) DeepCopy() *VolumeDevice {
if in == nil {
return nil
}
out := new(VolumeDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
*out = *in
if in.RecursiveReadOnly != nil {
in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly
*out = new(RecursiveReadOnlyMode)
**out = **in
}
if in.MountPropagation != nil {
in, out := &in.MountPropagation, &out.MountPropagation
*out = new(MountPropagationMode)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount.
func (in *VolumeMount) DeepCopy() *VolumeMount {
if in == nil {
return nil
}
out := new(VolumeMount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeMountStatus) DeepCopyInto(out *VolumeMountStatus) {
*out = *in
if in.RecursiveReadOnly != nil {
in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly
*out = new(RecursiveReadOnlyMode)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountStatus.
func (in *VolumeMountStatus) DeepCopy() *VolumeMountStatus {
if in == nil {
return nil
}
out := new(VolumeMountStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
*out = *in
if in.Required != nil {
in, out := &in.Required, &out.Required
*out = new(NodeSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
if in == nil {
return nil
}
out := new(VolumeNodeAffinity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
*out = new(SecretProjection)
(*in).DeepCopyInto(*out)
}
if in.DownwardAPI != nil {
in, out := &in.DownwardAPI, &out.DownwardAPI
*out = new(DownwardAPIProjection)
(*in).DeepCopyInto(*out)
}
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
*out = new(ConfigMapProjection)
(*in).DeepCopyInto(*out)
}
if in.ServiceAccountToken != nil {
in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
*out = new(ServiceAccountTokenProjection)
**out = **in
}
if in.ClusterTrustBundle != nil {
in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle
*out = new(ClusterTrustBundleProjection)
(*in).DeepCopyInto(*out)
}
if in.PodCertificate != nil {
in, out := &in.PodCertificate, &out.PodCertificate
*out = new(PodCertificateProjection)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection.
func (in *VolumeProjection) DeepCopy() *VolumeProjection {
if in == nil {
return nil
}
out := new(VolumeProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements.
func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements {
if in == nil {
return nil
}
out := new(VolumeResourceRequirements)
in.DeepCopyInto(out)
return out
}
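// Illustrative usage (editorial sketch; assumes the resource.Quantity API
// from k8s.io/apimachinery/pkg/api/resource): map-typed fields such as
// Limits are re-allocated, and each Quantity value is itself deep-copied
// via val.DeepCopy():
//
//	in := &VolumeResourceRequirements{
//		Limits: ResourceList{ResourceCPU: resource.MustParse("1")},
//	}
//	out := in.DeepCopy()
//	// out.Limits is a fresh map; mutating it leaves in.Limits untouched.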
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = *in
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
*out = new(HostPathVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
*out = new(EmptyDirVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.GCEPersistentDisk != nil {
in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
*out = new(GCEPersistentDiskVolumeSource)
**out = **in
}
if in.AWSElasticBlockStore != nil {
in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
*out = new(AWSElasticBlockStoreVolumeSource)
**out = **in
}
if in.GitRepo != nil {
in, out := &in.GitRepo, &out.GitRepo
*out = new(GitRepoVolumeSource)
**out = **in
}
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
*out = new(SecretVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.NFS != nil {
in, out := &in.NFS, &out.NFS
*out = new(NFSVolumeSource)
**out = **in
}
if in.ISCSI != nil {
in, out := &in.ISCSI, &out.ISCSI
*out = new(ISCSIVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Glusterfs != nil {
in, out := &in.Glusterfs, &out.Glusterfs
*out = new(GlusterfsVolumeSource)
**out = **in
}
if in.PersistentVolumeClaim != nil {
in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
*out = new(PersistentVolumeClaimVolumeSource)
**out = **in
}
if in.RBD != nil {
in, out := &in.RBD, &out.RBD
*out = new(RBDVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Quobyte != nil {
in, out := &in.Quobyte, &out.Quobyte
*out = new(QuobyteVolumeSource)
**out = **in
}
if in.FlexVolume != nil {
in, out := &in.FlexVolume, &out.FlexVolume
*out = new(FlexVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Cinder != nil {
in, out := &in.Cinder, &out.Cinder
*out = new(CinderVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.CephFS != nil {
in, out := &in.CephFS, &out.CephFS
*out = new(CephFSVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Flocker != nil {
in, out := &in.Flocker, &out.Flocker
*out = new(FlockerVolumeSource)
**out = **in
}
if in.DownwardAPI != nil {
in, out := &in.DownwardAPI, &out.DownwardAPI
*out = new(DownwardAPIVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.FC != nil {
in, out := &in.FC, &out.FC
*out = new(FCVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.AzureFile != nil {
in, out := &in.AzureFile, &out.AzureFile
*out = new(AzureFileVolumeSource)
**out = **in
}
if in.ConfigMap != nil {
in, out := &in.ConfigMap, &out.ConfigMap
*out = new(ConfigMapVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
**out = **in
}
if in.AzureDisk != nil {
in, out := &in.AzureDisk, &out.AzureDisk
*out = new(AzureDiskVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.PhotonPersistentDisk != nil {
in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
*out = new(PhotonPersistentDiskVolumeSource)
**out = **in
}
if in.Projected != nil {
in, out := &in.Projected, &out.Projected
*out = new(ProjectedVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.PortworxVolume != nil {
in, out := &in.PortworxVolume, &out.PortworxVolume
*out = new(PortworxVolumeSource)
**out = **in
}
if in.ScaleIO != nil {
in, out := &in.ScaleIO, &out.ScaleIO
*out = new(ScaleIOVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.StorageOS != nil {
in, out := &in.StorageOS, &out.StorageOS
*out = new(StorageOSVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.CSI != nil {
in, out := &in.CSI, &out.CSI
*out = new(CSIVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Ephemeral != nil {
in, out := &in.Ephemeral, &out.Ephemeral
*out = new(EphemeralVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(ImageVolumeSource)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource.
func (in *VolumeSource) DeepCopy() *VolumeSource {
if in == nil {
return nil
}
out := new(VolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource.
func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource {
if in == nil {
return nil
}
out := new(VsphereVirtualDiskVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) {
*out = *in
in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm.
func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
if in == nil {
return nil
}
out := new(WeightedPodAffinityTerm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) {
*out = *in
if in.GMSACredentialSpecName != nil {
in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
*out = new(string)
**out = **in
}
if in.GMSACredentialSpec != nil {
in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec
*out = new(string)
**out = **in
}
if in.RunAsUserName != nil {
in, out := &in.RunAsUserName, &out.RunAsUserName
*out = new(string)
**out = **in
}
if in.HostProcess != nil {
in, out := &in.HostProcess, &out.HostProcess
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions.
func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions {
if in == nil {
return nil
}
out := new(WindowsSecurityContextOptions)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/discovery"
)
// Funcs returns the fuzzer functions for the discovery API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *discovery.EndpointSlice, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
addressTypes := []discovery.AddressType{discovery.AddressTypeIPv4, discovery.AddressTypeIPv6, discovery.AddressTypeFQDN}
obj.AddressType = addressTypes[c.Rand.Intn(len(addressTypes))]
for i, endpointPort := range obj.Ports {
if endpointPort.Name == nil {
emptyStr := ""
obj.Ports[i].Name = &emptyStr
}
if endpointPort.Protocol == nil {
protos := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP, api.ProtocolSCTP}
obj.Ports[i].Protocol = &protos[c.Rand.Intn(len(protos))]
}
}
},
}
}
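// Editorial sketch of how these funcs are typically consumed; the exact
// randfill method names below are an assumption, not part of this file:
//
//	filler := randfill.New().Funcs(Funcs(codecs)...)
//	obj := &discovery.EndpointSlice{}
//	filler.Fill(obj)
//	// obj.AddressType is one of IPv4/IPv6/FQDN, and every port has a
//	// non-nil Name and Protocol, as enforced by the custom func above.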
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the discovery API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/discovery"
v1 "k8s.io/kubernetes/pkg/apis/discovery/v1"
"k8s.io/kubernetes/pkg/apis/discovery/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(discovery.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
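// Editorial sketch: Install can also be applied to a private scheme, for
// example in tests that should not touch the global legacyscheme:
//
//	scheme := runtime.NewScheme()
//	Install(scheme)
//	// scheme now knows the internal, v1, and v1beta1 EndpointSlice types,
//	// with v1 preferred over v1beta1 for serialization.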
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "discovery.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
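// Editorial sketch: these helpers merely qualify bare names with the
// discovery group:
//
//	Kind("EndpointSlice")      // GroupKind{Group: "discovery.k8s.io", Kind: "EndpointSlice"}
//	Resource("endpointslices") // GroupResource{Group: "discovery.k8s.io", Resource: "endpointslices"}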
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group and version with a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&EndpointSlice{},
&EndpointSliceList{},
)
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/runtime"
)
var (
defaultPortName = ""
defaultProtocol = v1.ProtocolTCP
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_EndpointPort(obj *discoveryv1.EndpointPort) {
if obj.Name == nil {
obj.Name = &defaultPortName
}
if obj.Protocol == nil {
obj.Protocol = &defaultProtocol
}
}
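// Editorial sketch: defaulting only fills fields that are nil, so any
// caller-supplied values survive:
//
//	port := &discoveryv1.EndpointPort{}
//	SetDefaults_EndpointPort(port)
//	// *port.Name == "" and *port.Protocol == v1.ProtocolTCP.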
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
discoveryv1 "k8s.io/api/discovery/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "discovery.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &discoveryv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
discovery "k8s.io/kubernetes/pkg/apis/discovery"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*discoveryv1.Endpoint)(nil), (*discovery.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Endpoint_To_discovery_Endpoint(a.(*discoveryv1.Endpoint), b.(*discovery.Endpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.Endpoint)(nil), (*discoveryv1.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_Endpoint_To_v1_Endpoint(a.(*discovery.Endpoint), b.(*discoveryv1.Endpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.EndpointConditions)(nil), (*discovery.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointConditions_To_discovery_EndpointConditions(a.(*discoveryv1.EndpointConditions), b.(*discovery.EndpointConditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointConditions)(nil), (*discoveryv1.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointConditions_To_v1_EndpointConditions(a.(*discovery.EndpointConditions), b.(*discoveryv1.EndpointConditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.EndpointHints)(nil), (*discovery.EndpointHints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointHints_To_discovery_EndpointHints(a.(*discoveryv1.EndpointHints), b.(*discovery.EndpointHints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointHints)(nil), (*discoveryv1.EndpointHints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointHints_To_v1_EndpointHints(a.(*discovery.EndpointHints), b.(*discoveryv1.EndpointHints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.EndpointPort)(nil), (*discovery.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointPort_To_discovery_EndpointPort(a.(*discoveryv1.EndpointPort), b.(*discovery.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointPort)(nil), (*discoveryv1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointPort_To_v1_EndpointPort(a.(*discovery.EndpointPort), b.(*discoveryv1.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.EndpointSlice)(nil), (*discovery.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointSlice_To_discovery_EndpointSlice(a.(*discoveryv1.EndpointSlice), b.(*discovery.EndpointSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointSlice)(nil), (*discoveryv1.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointSlice_To_v1_EndpointSlice(a.(*discovery.EndpointSlice), b.(*discoveryv1.EndpointSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.EndpointSliceList)(nil), (*discovery.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EndpointSliceList_To_discovery_EndpointSliceList(a.(*discoveryv1.EndpointSliceList), b.(*discovery.EndpointSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointSliceList)(nil), (*discoveryv1.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointSliceList_To_v1_EndpointSliceList(a.(*discovery.EndpointSliceList), b.(*discoveryv1.EndpointSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.ForNode)(nil), (*discovery.ForNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ForNode_To_discovery_ForNode(a.(*discoveryv1.ForNode), b.(*discovery.ForNode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.ForNode)(nil), (*discoveryv1.ForNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_ForNode_To_v1_ForNode(a.(*discovery.ForNode), b.(*discoveryv1.ForNode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1.ForZone)(nil), (*discovery.ForZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ForZone_To_discovery_ForZone(a.(*discoveryv1.ForZone), b.(*discovery.ForZone), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.ForZone)(nil), (*discoveryv1.ForZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_ForZone_To_v1_ForZone(a.(*discovery.ForZone), b.(*discoveryv1.ForZone), scope)
}); err != nil {
return err
}
return nil
}
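// Editorial sketch (assumed typical wiring, not part of the generated
// code): once registered, conversions run through the scheme's generic
// Convert entry point:
//
//	var internal discovery.EndpointSlice
//	if err := scheme.Convert(versionedSlice, &internal, nil); err != nil {
//		// handle the conversion failure
//	}
//
// where scheme is a *runtime.Scheme that RegisterConversions has been
// applied to, and versionedSlice is a *discoveryv1.EndpointSlice.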
func autoConvert_v1_Endpoint_To_discovery_Endpoint(in *discoveryv1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error {
out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses))
if err := Convert_v1_EndpointConditions_To_discovery_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil {
return err
}
out.Hostname = (*string)(unsafe.Pointer(in.Hostname))
out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef))
out.DeprecatedTopology = *(*map[string]string)(unsafe.Pointer(&in.DeprecatedTopology))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.Zone = (*string)(unsafe.Pointer(in.Zone))
out.Hints = (*discovery.EndpointHints)(unsafe.Pointer(in.Hints))
return nil
}
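// Editorial note: conversion-gen emits the unsafe.Pointer casts above only
// when it has proven that the internal and versioned field types share an
// identical memory layout, so each "conversion" is a zero-copy alias. The
// output therefore shares memory with the input: for example, mutating
// in.DeprecatedTopology after conversion is visible via
// out.DeprecatedTopology.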
// Convert_v1_Endpoint_To_discovery_Endpoint is an autogenerated conversion function.
func Convert_v1_Endpoint_To_discovery_Endpoint(in *discoveryv1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error {
return autoConvert_v1_Endpoint_To_discovery_Endpoint(in, out, s)
}
func autoConvert_discovery_Endpoint_To_v1_Endpoint(in *discovery.Endpoint, out *discoveryv1.Endpoint, s conversion.Scope) error {
out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses))
if err := Convert_discovery_EndpointConditions_To_v1_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil {
return err
}
out.Hostname = (*string)(unsafe.Pointer(in.Hostname))
out.TargetRef = (*corev1.ObjectReference)(unsafe.Pointer(in.TargetRef))
out.DeprecatedTopology = *(*map[string]string)(unsafe.Pointer(&in.DeprecatedTopology))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.Zone = (*string)(unsafe.Pointer(in.Zone))
out.Hints = (*discoveryv1.EndpointHints)(unsafe.Pointer(in.Hints))
return nil
}
// Convert_discovery_Endpoint_To_v1_Endpoint is an autogenerated conversion function.
func Convert_discovery_Endpoint_To_v1_Endpoint(in *discovery.Endpoint, out *discoveryv1.Endpoint, s conversion.Scope) error {
return autoConvert_discovery_Endpoint_To_v1_Endpoint(in, out, s)
}
func autoConvert_v1_EndpointConditions_To_discovery_EndpointConditions(in *discoveryv1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error {
out.Ready = (*bool)(unsafe.Pointer(in.Ready))
out.Serving = (*bool)(unsafe.Pointer(in.Serving))
out.Terminating = (*bool)(unsafe.Pointer(in.Terminating))
return nil
}
// Convert_v1_EndpointConditions_To_discovery_EndpointConditions is an autogenerated conversion function.
func Convert_v1_EndpointConditions_To_discovery_EndpointConditions(in *discoveryv1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error {
return autoConvert_v1_EndpointConditions_To_discovery_EndpointConditions(in, out, s)
}
func autoConvert_discovery_EndpointConditions_To_v1_EndpointConditions(in *discovery.EndpointConditions, out *discoveryv1.EndpointConditions, s conversion.Scope) error {
out.Ready = (*bool)(unsafe.Pointer(in.Ready))
out.Serving = (*bool)(unsafe.Pointer(in.Serving))
out.Terminating = (*bool)(unsafe.Pointer(in.Terminating))
return nil
}
// Convert_discovery_EndpointConditions_To_v1_EndpointConditions is an autogenerated conversion function.
func Convert_discovery_EndpointConditions_To_v1_EndpointConditions(in *discovery.EndpointConditions, out *discoveryv1.EndpointConditions, s conversion.Scope) error {
return autoConvert_discovery_EndpointConditions_To_v1_EndpointConditions(in, out, s)
}
func autoConvert_v1_EndpointHints_To_discovery_EndpointHints(in *discoveryv1.EndpointHints, out *discovery.EndpointHints, s conversion.Scope) error {
out.ForZones = *(*[]discovery.ForZone)(unsafe.Pointer(&in.ForZones))
out.ForNodes = *(*[]discovery.ForNode)(unsafe.Pointer(&in.ForNodes))
return nil
}
// Convert_v1_EndpointHints_To_discovery_EndpointHints is an autogenerated conversion function.
func Convert_v1_EndpointHints_To_discovery_EndpointHints(in *discoveryv1.EndpointHints, out *discovery.EndpointHints, s conversion.Scope) error {
return autoConvert_v1_EndpointHints_To_discovery_EndpointHints(in, out, s)
}
func autoConvert_discovery_EndpointHints_To_v1_EndpointHints(in *discovery.EndpointHints, out *discoveryv1.EndpointHints, s conversion.Scope) error {
out.ForZones = *(*[]discoveryv1.ForZone)(unsafe.Pointer(&in.ForZones))
out.ForNodes = *(*[]discoveryv1.ForNode)(unsafe.Pointer(&in.ForNodes))
return nil
}
// Convert_discovery_EndpointHints_To_v1_EndpointHints is an autogenerated conversion function.
func Convert_discovery_EndpointHints_To_v1_EndpointHints(in *discovery.EndpointHints, out *discoveryv1.EndpointHints, s conversion.Scope) error {
return autoConvert_discovery_EndpointHints_To_v1_EndpointHints(in, out, s)
}
func autoConvert_v1_EndpointPort_To_discovery_EndpointPort(in *discoveryv1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*int32)(unsafe.Pointer(in.Port))
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_v1_EndpointPort_To_discovery_EndpointPort is an autogenerated conversion function.
func Convert_v1_EndpointPort_To_discovery_EndpointPort(in *discoveryv1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error {
return autoConvert_v1_EndpointPort_To_discovery_EndpointPort(in, out, s)
}
func autoConvert_discovery_EndpointPort_To_v1_EndpointPort(in *discovery.EndpointPort, out *discoveryv1.EndpointPort, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.Protocol = (*corev1.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*int32)(unsafe.Pointer(in.Port))
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_discovery_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function.
func Convert_discovery_EndpointPort_To_v1_EndpointPort(in *discovery.EndpointPort, out *discoveryv1.EndpointPort, s conversion.Scope) error {
return autoConvert_discovery_EndpointPort_To_v1_EndpointPort(in, out, s)
}
func autoConvert_v1_EndpointSlice_To_discovery_EndpointSlice(in *discoveryv1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.AddressType = discovery.AddressType(in.AddressType)
out.Endpoints = *(*[]discovery.Endpoint)(unsafe.Pointer(&in.Endpoints))
out.Ports = *(*[]discovery.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1_EndpointSlice_To_discovery_EndpointSlice is an autogenerated conversion function.
func Convert_v1_EndpointSlice_To_discovery_EndpointSlice(in *discoveryv1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error {
return autoConvert_v1_EndpointSlice_To_discovery_EndpointSlice(in, out, s)
}
func autoConvert_discovery_EndpointSlice_To_v1_EndpointSlice(in *discovery.EndpointSlice, out *discoveryv1.EndpointSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.AddressType = discoveryv1.AddressType(in.AddressType)
out.Endpoints = *(*[]discoveryv1.Endpoint)(unsafe.Pointer(&in.Endpoints))
out.Ports = *(*[]discoveryv1.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_discovery_EndpointSlice_To_v1_EndpointSlice is an autogenerated conversion function.
func Convert_discovery_EndpointSlice_To_v1_EndpointSlice(in *discovery.EndpointSlice, out *discoveryv1.EndpointSlice, s conversion.Scope) error {
return autoConvert_discovery_EndpointSlice_To_v1_EndpointSlice(in, out, s)
}
func autoConvert_v1_EndpointSliceList_To_discovery_EndpointSliceList(in *discoveryv1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]discovery.EndpointSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_EndpointSliceList_To_discovery_EndpointSliceList is an autogenerated conversion function.
func Convert_v1_EndpointSliceList_To_discovery_EndpointSliceList(in *discoveryv1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error {
return autoConvert_v1_EndpointSliceList_To_discovery_EndpointSliceList(in, out, s)
}
func autoConvert_discovery_EndpointSliceList_To_v1_EndpointSliceList(in *discovery.EndpointSliceList, out *discoveryv1.EndpointSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]discoveryv1.EndpointSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_discovery_EndpointSliceList_To_v1_EndpointSliceList is an autogenerated conversion function.
func Convert_discovery_EndpointSliceList_To_v1_EndpointSliceList(in *discovery.EndpointSliceList, out *discoveryv1.EndpointSliceList, s conversion.Scope) error {
return autoConvert_discovery_EndpointSliceList_To_v1_EndpointSliceList(in, out, s)
}
func autoConvert_v1_ForNode_To_discovery_ForNode(in *discoveryv1.ForNode, out *discovery.ForNode, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_ForNode_To_discovery_ForNode is an autogenerated conversion function.
func Convert_v1_ForNode_To_discovery_ForNode(in *discoveryv1.ForNode, out *discovery.ForNode, s conversion.Scope) error {
return autoConvert_v1_ForNode_To_discovery_ForNode(in, out, s)
}
func autoConvert_discovery_ForNode_To_v1_ForNode(in *discovery.ForNode, out *discoveryv1.ForNode, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_discovery_ForNode_To_v1_ForNode is an autogenerated conversion function.
func Convert_discovery_ForNode_To_v1_ForNode(in *discovery.ForNode, out *discoveryv1.ForNode, s conversion.Scope) error {
return autoConvert_discovery_ForNode_To_v1_ForNode(in, out, s)
}
func autoConvert_v1_ForZone_To_discovery_ForZone(in *discoveryv1.ForZone, out *discovery.ForZone, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_ForZone_To_discovery_ForZone is an autogenerated conversion function.
func Convert_v1_ForZone_To_discovery_ForZone(in *discoveryv1.ForZone, out *discovery.ForZone, s conversion.Scope) error {
return autoConvert_v1_ForZone_To_discovery_ForZone(in, out, s)
}
func autoConvert_discovery_ForZone_To_v1_ForZone(in *discovery.ForZone, out *discoveryv1.ForZone, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_discovery_ForZone_To_v1_ForZone is an autogenerated conversion function.
func Convert_discovery_ForZone_To_v1_ForZone(in *discovery.ForZone, out *discoveryv1.ForZone, s conversion.Scope) error {
return autoConvert_discovery_ForZone_To_v1_ForZone(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
discoveryv1 "k8s.io/api/discovery/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&discoveryv1.EndpointSlice{}, func(obj interface{}) { SetObjectDefaults_EndpointSlice(obj.(*discoveryv1.EndpointSlice)) })
scheme.AddTypeDefaultingFunc(&discoveryv1.EndpointSliceList{}, func(obj interface{}) { SetObjectDefaults_EndpointSliceList(obj.(*discoveryv1.EndpointSliceList)) })
return nil
}
func SetObjectDefaults_EndpointSlice(in *discoveryv1.EndpointSlice) {
for i := range in.Ports {
a := &in.Ports[i]
SetDefaults_EndpointPort(a)
}
}
func SetObjectDefaults_EndpointSliceList(in *discoveryv1.EndpointSliceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_EndpointSlice(a)
}
}
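// Editorial sketch: the object defaulters walk nested fields, so
// defaulting a slice also defaults every one of its ports:
//
//	slice := &discoveryv1.EndpointSlice{Ports: make([]discoveryv1.EndpointPort, 1)}
//	SetObjectDefaults_EndpointSlice(slice)
//	// slice.Ports[0].Name and slice.Ports[0].Protocol are now non-nil.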
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/discovery"
)
func Convert_v1beta1_Endpoint_To_discovery_Endpoint(in *v1beta1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error {
if err := autoConvert_v1beta1_Endpoint_To_discovery_Endpoint(in, out, s); err != nil {
return err
}
if in.Topology != nil {
// Copy Topology into DeprecatedTopology
out.DeprecatedTopology = make(map[string]string, len(in.Topology))
for k, v := range in.Topology {
out.DeprecatedTopology[k] = v
}
// Move zone from the topology map into a field
if zone, ok := in.Topology[corev1.LabelTopologyZone]; ok {
out.Zone = &zone
delete(out.DeprecatedTopology, corev1.LabelTopologyZone)
}
// Remove hostname from the topology map ONLY IF it is the same value as
// NodeName. This preserves the (rather odd) ability to have different
// values for topology[hostname] and NodeName in v1beta1, without showing
// duplicate values in v1.
if node, ok := in.Topology[corev1.LabelHostname]; ok {
if out.NodeName != nil && node == *out.NodeName {
delete(out.DeprecatedTopology, corev1.LabelHostname)
}
}
// If zone and hostname were the only keys in the map, or the topology
// map was empty, set DeprecatedTopology to nil.
if len(out.DeprecatedTopology) == 0 {
out.DeprecatedTopology = nil
}
}
return nil
}
func Convert_discovery_Endpoint_To_v1beta1_Endpoint(in *discovery.Endpoint, out *v1beta1.Endpoint, s conversion.Scope) error {
if err := autoConvert_discovery_Endpoint_To_v1beta1_Endpoint(in, out, s); err != nil {
return err
}
// If there is no deprecated topology, zone, or node name, no manual conversion is necessary
if in.DeprecatedTopology == nil && in.Zone == nil && in.NodeName == nil {
return nil
}
// Copy DeprecatedTopology into Topology
out.Topology = make(map[string]string, len(in.DeprecatedTopology))
for k, v := range in.DeprecatedTopology {
out.Topology[k] = v
}
// Add zone field into the topology map
if in.Zone != nil {
out.Topology[corev1.LabelTopologyZone] = *in.Zone
}
// Add hostname into the topology map ONLY IF it is not already present.
// This preserves the (rather odd) ability to have different values for
// topology[hostname] and NodeName in v1beta1.
if in.NodeName != nil && out.Topology[corev1.LabelHostname] == "" {
out.Topology[corev1.LabelHostname] = *in.NodeName
}
return nil
}
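// Editorial sketch of the zone round-trip these two functions implement
// (passing a nil conversion.Scope is an assumption, adequate here because
// the scope is unused by this field-level conversion):
//
//	in := &v1beta1.Endpoint{Topology: map[string]string{
//		corev1.LabelTopologyZone: "us-east-1a",
//	}}
//	var out discovery.Endpoint
//	_ = Convert_v1beta1_Endpoint_To_discovery_Endpoint(in, &out, nil)
//	// *out.Zone == "us-east-1a" and out.DeprecatedTopology == nil,
//	// because the zone key was lifted into the dedicated field and the
//	// emptied map was dropped.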
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1 "k8s.io/api/core/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
var (
defaultPortName = ""
defaultProtocol = v1.ProtocolTCP
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_EndpointPort(obj *discoveryv1beta1.EndpointPort) {
if obj.Name == nil {
obj.Name = &defaultPortName
}
if obj.Protocol == nil {
obj.Protocol = &defaultProtocol
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "discovery.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &discoveryv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
discovery "k8s.io/kubernetes/pkg/apis/discovery"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.EndpointConditions)(nil), (*discovery.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(a.(*discoveryv1beta1.EndpointConditions), b.(*discovery.EndpointConditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointConditions)(nil), (*discoveryv1beta1.EndpointConditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(a.(*discovery.EndpointConditions), b.(*discoveryv1beta1.EndpointConditions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.EndpointHints)(nil), (*discovery.EndpointHints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EndpointHints_To_discovery_EndpointHints(a.(*discoveryv1beta1.EndpointHints), b.(*discovery.EndpointHints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointHints)(nil), (*discoveryv1beta1.EndpointHints)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointHints_To_v1beta1_EndpointHints(a.(*discovery.EndpointHints), b.(*discoveryv1beta1.EndpointHints), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.EndpointPort)(nil), (*discovery.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EndpointPort_To_discovery_EndpointPort(a.(*discoveryv1beta1.EndpointPort), b.(*discovery.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointPort)(nil), (*discoveryv1beta1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointPort_To_v1beta1_EndpointPort(a.(*discovery.EndpointPort), b.(*discoveryv1beta1.EndpointPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.EndpointSlice)(nil), (*discovery.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(a.(*discoveryv1beta1.EndpointSlice), b.(*discovery.EndpointSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointSlice)(nil), (*discoveryv1beta1.EndpointSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(a.(*discovery.EndpointSlice), b.(*discoveryv1beta1.EndpointSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.EndpointSliceList)(nil), (*discovery.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(a.(*discoveryv1beta1.EndpointSliceList), b.(*discovery.EndpointSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.EndpointSliceList)(nil), (*discoveryv1beta1.EndpointSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(a.(*discovery.EndpointSliceList), b.(*discoveryv1beta1.EndpointSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.ForNode)(nil), (*discovery.ForNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ForNode_To_discovery_ForNode(a.(*discoveryv1beta1.ForNode), b.(*discovery.ForNode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.ForNode)(nil), (*discoveryv1beta1.ForNode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_ForNode_To_v1beta1_ForNode(a.(*discovery.ForNode), b.(*discoveryv1beta1.ForNode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discoveryv1beta1.ForZone)(nil), (*discovery.ForZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ForZone_To_discovery_ForZone(a.(*discoveryv1beta1.ForZone), b.(*discovery.ForZone), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*discovery.ForZone)(nil), (*discoveryv1beta1.ForZone)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_ForZone_To_v1beta1_ForZone(a.(*discovery.ForZone), b.(*discoveryv1beta1.ForZone), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*discovery.Endpoint)(nil), (*discoveryv1beta1.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_discovery_Endpoint_To_v1beta1_Endpoint(a.(*discovery.Endpoint), b.(*discoveryv1beta1.Endpoint), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*discoveryv1beta1.Endpoint)(nil), (*discovery.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Endpoint_To_discovery_Endpoint(a.(*discoveryv1beta1.Endpoint), b.(*discovery.Endpoint), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_Endpoint_To_discovery_Endpoint(in *discoveryv1beta1.Endpoint, out *discovery.Endpoint, s conversion.Scope) error {
out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses))
if err := Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil {
return err
}
out.Hostname = (*string)(unsafe.Pointer(in.Hostname))
out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef))
// WARNING: in.Topology requires manual conversion: does not exist in peer-type
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.Hints = (*discovery.EndpointHints)(unsafe.Pointer(in.Hints))
return nil
}
func autoConvert_discovery_Endpoint_To_v1beta1_Endpoint(in *discovery.Endpoint, out *discoveryv1beta1.Endpoint, s conversion.Scope) error {
out.Addresses = *(*[]string)(unsafe.Pointer(&in.Addresses))
if err := Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(&in.Conditions, &out.Conditions, s); err != nil {
return err
}
out.Hostname = (*string)(unsafe.Pointer(in.Hostname))
out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef))
// WARNING: in.DeprecatedTopology requires manual conversion: does not exist in peer-type
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
// WARNING: in.Zone requires manual conversion: does not exist in peer-type
out.Hints = (*discoveryv1beta1.EndpointHints)(unsafe.Pointer(in.Hints))
return nil
}
func autoConvert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in *discoveryv1beta1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error {
out.Ready = (*bool)(unsafe.Pointer(in.Ready))
out.Serving = (*bool)(unsafe.Pointer(in.Serving))
out.Terminating = (*bool)(unsafe.Pointer(in.Terminating))
return nil
}
// Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions is an autogenerated conversion function.
func Convert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in *discoveryv1beta1.EndpointConditions, out *discovery.EndpointConditions, s conversion.Scope) error {
return autoConvert_v1beta1_EndpointConditions_To_discovery_EndpointConditions(in, out, s)
}
func autoConvert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in *discovery.EndpointConditions, out *discoveryv1beta1.EndpointConditions, s conversion.Scope) error {
out.Ready = (*bool)(unsafe.Pointer(in.Ready))
out.Serving = (*bool)(unsafe.Pointer(in.Serving))
out.Terminating = (*bool)(unsafe.Pointer(in.Terminating))
return nil
}
// Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions is an autogenerated conversion function.
func Convert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in *discovery.EndpointConditions, out *discoveryv1beta1.EndpointConditions, s conversion.Scope) error {
return autoConvert_discovery_EndpointConditions_To_v1beta1_EndpointConditions(in, out, s)
}
func autoConvert_v1beta1_EndpointHints_To_discovery_EndpointHints(in *discoveryv1beta1.EndpointHints, out *discovery.EndpointHints, s conversion.Scope) error {
out.ForZones = *(*[]discovery.ForZone)(unsafe.Pointer(&in.ForZones))
out.ForNodes = *(*[]discovery.ForNode)(unsafe.Pointer(&in.ForNodes))
return nil
}
// Convert_v1beta1_EndpointHints_To_discovery_EndpointHints is an autogenerated conversion function.
func Convert_v1beta1_EndpointHints_To_discovery_EndpointHints(in *discoveryv1beta1.EndpointHints, out *discovery.EndpointHints, s conversion.Scope) error {
return autoConvert_v1beta1_EndpointHints_To_discovery_EndpointHints(in, out, s)
}
func autoConvert_discovery_EndpointHints_To_v1beta1_EndpointHints(in *discovery.EndpointHints, out *discoveryv1beta1.EndpointHints, s conversion.Scope) error {
out.ForZones = *(*[]discoveryv1beta1.ForZone)(unsafe.Pointer(&in.ForZones))
out.ForNodes = *(*[]discoveryv1beta1.ForNode)(unsafe.Pointer(&in.ForNodes))
return nil
}
// Convert_discovery_EndpointHints_To_v1beta1_EndpointHints is an autogenerated conversion function.
func Convert_discovery_EndpointHints_To_v1beta1_EndpointHints(in *discovery.EndpointHints, out *discoveryv1beta1.EndpointHints, s conversion.Scope) error {
return autoConvert_discovery_EndpointHints_To_v1beta1_EndpointHints(in, out, s)
}
func autoConvert_v1beta1_EndpointPort_To_discovery_EndpointPort(in *discoveryv1beta1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*int32)(unsafe.Pointer(in.Port))
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_v1beta1_EndpointPort_To_discovery_EndpointPort is an autogenerated conversion function.
func Convert_v1beta1_EndpointPort_To_discovery_EndpointPort(in *discoveryv1beta1.EndpointPort, out *discovery.EndpointPort, s conversion.Scope) error {
return autoConvert_v1beta1_EndpointPort_To_discovery_EndpointPort(in, out, s)
}
func autoConvert_discovery_EndpointPort_To_v1beta1_EndpointPort(in *discovery.EndpointPort, out *discoveryv1beta1.EndpointPort, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.Protocol = (*v1.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*int32)(unsafe.Pointer(in.Port))
out.AppProtocol = (*string)(unsafe.Pointer(in.AppProtocol))
return nil
}
// Convert_discovery_EndpointPort_To_v1beta1_EndpointPort is an autogenerated conversion function.
func Convert_discovery_EndpointPort_To_v1beta1_EndpointPort(in *discovery.EndpointPort, out *discoveryv1beta1.EndpointPort, s conversion.Scope) error {
return autoConvert_discovery_EndpointPort_To_v1beta1_EndpointPort(in, out, s)
}
func autoConvert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in *discoveryv1beta1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.AddressType = discovery.AddressType(in.AddressType)
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]discovery.Endpoint, len(*in))
for i := range *in {
if err := Convert_v1beta1_Endpoint_To_discovery_Endpoint(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Endpoints = nil
}
out.Ports = *(*[]discovery.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice is an autogenerated conversion function.
func Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in *discoveryv1beta1.EndpointSlice, out *discovery.EndpointSlice, s conversion.Scope) error {
return autoConvert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(in, out, s)
}
func autoConvert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in *discovery.EndpointSlice, out *discoveryv1beta1.EndpointSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.AddressType = discoveryv1beta1.AddressType(in.AddressType)
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]discoveryv1beta1.Endpoint, len(*in))
for i := range *in {
if err := Convert_discovery_Endpoint_To_v1beta1_Endpoint(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Endpoints = nil
}
out.Ports = *(*[]discoveryv1beta1.EndpointPort)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice is an autogenerated conversion function.
func Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in *discovery.EndpointSlice, out *discoveryv1beta1.EndpointSlice, s conversion.Scope) error {
return autoConvert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(in, out, s)
}
func autoConvert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in *discoveryv1beta1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]discovery.EndpointSlice, len(*in))
for i := range *in {
if err := Convert_v1beta1_EndpointSlice_To_discovery_EndpointSlice(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList is an autogenerated conversion function.
func Convert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in *discoveryv1beta1.EndpointSliceList, out *discovery.EndpointSliceList, s conversion.Scope) error {
return autoConvert_v1beta1_EndpointSliceList_To_discovery_EndpointSliceList(in, out, s)
}
func autoConvert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in *discovery.EndpointSliceList, out *discoveryv1beta1.EndpointSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]discoveryv1beta1.EndpointSlice, len(*in))
for i := range *in {
if err := Convert_discovery_EndpointSlice_To_v1beta1_EndpointSlice(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList is an autogenerated conversion function.
func Convert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in *discovery.EndpointSliceList, out *discoveryv1beta1.EndpointSliceList, s conversion.Scope) error {
return autoConvert_discovery_EndpointSliceList_To_v1beta1_EndpointSliceList(in, out, s)
}
func autoConvert_v1beta1_ForNode_To_discovery_ForNode(in *discoveryv1beta1.ForNode, out *discovery.ForNode, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta1_ForNode_To_discovery_ForNode is an autogenerated conversion function.
func Convert_v1beta1_ForNode_To_discovery_ForNode(in *discoveryv1beta1.ForNode, out *discovery.ForNode, s conversion.Scope) error {
return autoConvert_v1beta1_ForNode_To_discovery_ForNode(in, out, s)
}
func autoConvert_discovery_ForNode_To_v1beta1_ForNode(in *discovery.ForNode, out *discoveryv1beta1.ForNode, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_discovery_ForNode_To_v1beta1_ForNode is an autogenerated conversion function.
func Convert_discovery_ForNode_To_v1beta1_ForNode(in *discovery.ForNode, out *discoveryv1beta1.ForNode, s conversion.Scope) error {
return autoConvert_discovery_ForNode_To_v1beta1_ForNode(in, out, s)
}
func autoConvert_v1beta1_ForZone_To_discovery_ForZone(in *discoveryv1beta1.ForZone, out *discovery.ForZone, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta1_ForZone_To_discovery_ForZone is an autogenerated conversion function.
func Convert_v1beta1_ForZone_To_discovery_ForZone(in *discoveryv1beta1.ForZone, out *discovery.ForZone, s conversion.Scope) error {
return autoConvert_v1beta1_ForZone_To_discovery_ForZone(in, out, s)
}
func autoConvert_discovery_ForZone_To_v1beta1_ForZone(in *discovery.ForZone, out *discoveryv1beta1.ForZone, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_discovery_ForZone_To_v1beta1_ForZone is an autogenerated conversion function.
func Convert_discovery_ForZone_To_v1beta1_ForZone(in *discovery.ForZone, out *discoveryv1beta1.ForZone, s conversion.Scope) error {
return autoConvert_discovery_ForZone_To_v1beta1_ForZone(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&discoveryv1beta1.EndpointSlice{}, func(obj interface{}) { SetObjectDefaults_EndpointSlice(obj.(*discoveryv1beta1.EndpointSlice)) })
scheme.AddTypeDefaultingFunc(&discoveryv1beta1.EndpointSliceList{}, func(obj interface{}) { SetObjectDefaults_EndpointSliceList(obj.(*discoveryv1beta1.EndpointSliceList)) })
return nil
}
func SetObjectDefaults_EndpointSlice(in *discoveryv1beta1.EndpointSlice) {
for i := range in.Ports {
a := &in.Ports[i]
SetDefaults_EndpointPort(a)
}
}
func SetObjectDefaults_EndpointSliceList(in *discoveryv1beta1.EndpointSliceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_EndpointSlice(a)
}
}
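// exampleDefaultEndpointSlice is an illustrative sketch, not generated code:
// it shows one way the defaulters above are typically applied, via a scheme.
// The sample values are assumptions for illustration only.
func exampleDefaultEndpointSlice() *discoveryv1beta1.EndpointSlice {
	scheme := runtime.NewScheme()
	if err := RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	slice := &discoveryv1beta1.EndpointSlice{
		Ports: []discoveryv1beta1.EndpointPort{{}},
	}
	// Default invokes the registered SetObjectDefaults_EndpointSlice, which
	// in turn defaults each port via SetDefaults_EndpointPort.
	scheme.Default(slice)
	return slice
}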
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package discovery
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Conditions.DeepCopyInto(&out.Conditions)
if in.Hostname != nil {
in, out := &in.Hostname, &out.Hostname
*out = new(string)
**out = **in
}
if in.TargetRef != nil {
in, out := &in.TargetRef, &out.TargetRef
*out = new(core.ObjectReference)
**out = **in
}
if in.DeprecatedTopology != nil {
in, out := &in.DeprecatedTopology, &out.DeprecatedTopology
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NodeName != nil {
in, out := &in.NodeName, &out.NodeName
*out = new(string)
**out = **in
}
if in.Zone != nil {
in, out := &in.Zone, &out.Zone
*out = new(string)
**out = **in
}
if in.Hints != nil {
in, out := &in.Hints, &out.Hints
*out = new(EndpointHints)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (in *Endpoint) DeepCopy() *Endpoint {
if in == nil {
return nil
}
out := new(Endpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
*out = *in
if in.Ready != nil {
in, out := &in.Ready, &out.Ready
*out = new(bool)
**out = **in
}
if in.Serving != nil {
in, out := &in.Serving, &out.Serving
*out = new(bool)
**out = **in
}
if in.Terminating != nil {
in, out := &in.Terminating, &out.Terminating
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
func (in *EndpointConditions) DeepCopy() *EndpointConditions {
if in == nil {
return nil
}
out := new(EndpointConditions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
*out = *in
if in.ForZones != nil {
in, out := &in.ForZones, &out.ForZones
*out = make([]ForZone, len(*in))
copy(*out, *in)
}
if in.ForNodes != nil {
in, out := &in.ForNodes, &out.ForNodes
*out = make([]ForNode, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointHints.
func (in *EndpointHints) DeepCopy() *EndpointHints {
if in == nil {
return nil
}
out := new(EndpointHints)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
*out = *in
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(core.Protocol)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(int32)
**out = **in
}
if in.AppProtocol != nil {
in, out := &in.AppProtocol, &out.AppProtocol
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
func (in *EndpointPort) DeepCopy() *EndpointPort {
if in == nil {
return nil
}
out := new(EndpointPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]Endpoint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]EndpointPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
func (in *EndpointSlice) DeepCopy() *EndpointSlice {
if in == nil {
return nil
}
out := new(EndpointSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]EndpointSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
if in == nil {
return nil
}
out := new(EndpointSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ForNode) DeepCopyInto(out *ForNode) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForNode.
func (in *ForNode) DeepCopy() *ForNode {
if in == nil {
return nil
}
out := new(ForNode)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ForZone) DeepCopyInto(out *ForZone) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForZone.
func (in *ForZone) DeepCopy() *ForZone {
if in == nil {
return nil
}
out := new(ForZone)
in.DeepCopyInto(out)
return out
}
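// exampleDeepCopyThroughInterface is an illustrative sketch, not generated
// code: DeepCopyObject is what lets these types satisfy runtime.Object, so a
// caller holding only the interface can still take an independent copy.
func exampleDeepCopyThroughInterface() {
	var obj runtime.Object = &EndpointSlice{AddressType: AddressType("IPv4")}
	copied := obj.DeepCopyObject().(*EndpointSlice)
	// Mutating the copy leaves the original untouched.
	copied.AddressType = AddressType("IPv6")
	_ = obj
}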
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the events API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/events"
"k8s.io/kubernetes/pkg/apis/events/v1"
"k8s.io/kubernetes/pkg/apis/events/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(events.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
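// exampleInstallIntoFreshScheme is an illustrative sketch, not part of the
// original file: Install can target any scheme, not only legacyscheme.Scheme.
func exampleInstallIntoFreshScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	Install(scheme)
	// scheme now recognizes events.k8s.io in both v1 and v1beta1, with v1
	// preferred for serialization.
	return scheme
}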
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/core"
)
// GroupName is the group name used in this package
const GroupName = "events.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&core.Event{},
&core.EventList{},
)
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
v1 "k8s.io/api/events/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
k8s_api "k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func Convert_v1_Event_To_core_Event(in *v1.Event, out *k8s_api.Event, s conversion.Scope) error {
if err := autoConvert_v1_Event_To_core_Event(in, out, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_v1_ObjectReference_To_core_ObjectReference(&in.Regarding, &out.InvolvedObject, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_v1_EventSource_To_core_EventSource(&in.DeprecatedSource, &out.Source, s); err != nil {
return err
}
out.Message = in.Note
out.FirstTimestamp = in.DeprecatedFirstTimestamp
out.LastTimestamp = in.DeprecatedLastTimestamp
out.Count = in.DeprecatedCount
return nil
}
func Convert_core_Event_To_v1_Event(in *k8s_api.Event, out *v1.Event, s conversion.Scope) error {
if err := autoConvert_core_Event_To_v1_Event(in, out, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.Regarding, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.DeprecatedSource, s); err != nil {
return err
}
out.Note = in.Message
out.DeprecatedFirstTimestamp = in.FirstTimestamp
out.DeprecatedLastTimestamp = in.LastTimestamp
out.DeprecatedCount = in.Count
return nil
}
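// exampleEventConversion is an illustrative sketch, not part of the original
// file: the manual conversions above carry the renamed fields across, e.g.
// Note on the events.k8s.io side maps to Message internally. The sample
// value is an assumption for illustration only.
func exampleEventConversion() (*k8s_api.Event, error) {
	in := &v1.Event{Note: "Pulled image"}
	out := &k8s_api.Event{}
	if err := Convert_v1_Event_To_core_Event(in, out, nil); err != nil {
		return nil, err
	}
	// out.Message == "Pulled image"
	return out, nil
}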
func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error {
mapping := map[string]string{
"reason": "reason",
"regarding.kind": "involvedObject.kind", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.namespace": "involvedObject.namespace", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.name": "involvedObject.name", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.uid": "involvedObject.uid", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.apiVersion": "involvedObject.apiVersion", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.resourceVersion": "involvedObject.resourceVersion", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.fieldPath": "involvedObject.fieldPath", // map events.k8s.io field to fieldset returned by ToSelectableFields
"reportingController": "reportingComponent", // map events.k8s.io field to fieldset returned by ToSelectableFields
"type": "type",
"metadata.namespace": "metadata.namespace",
"metadata.name": "metadata.name",
}
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"),
func(label, value string) (string, string, error) {
mappedLabel, ok := mapping[label]
if !ok {
return "", "", fmt.Errorf("field label not supported: %s", label)
}
return mappedLabel, value, nil
},
)
}
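// exampleFieldLabelConversion is an illustrative sketch, not part of the
// original file: once the conversions above are registered, selector labels
// expressed in events.k8s.io terms resolve to the internal fieldset.
func exampleFieldLabelConversion() (string, string, error) {
	scheme := runtime.NewScheme()
	if err := AddFieldLabelConversionsForEvent(scheme); err != nil {
		return "", "", err
	}
	// Returns "involvedObject.name", "my-pod", nil.
	return scheme.ConvertFieldLabel(SchemeGroupVersion.WithKind("Event"), "regarding.name", "my-pod")
}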
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
eventsv1 "k8s.io/api/events/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "events.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &eventsv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults, AddFieldLabelConversionsForEvent)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
eventsv1 "k8s.io/api/events/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*eventsv1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EventList_To_core_EventList(a.(*eventsv1.EventList), b.(*core.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*eventsv1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventList_To_v1_EventList(a.(*core.EventList), b.(*eventsv1.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*eventsv1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_EventSeries_To_core_EventSeries(a.(*eventsv1.EventSeries), b.(*core.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*eventsv1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventSeries_To_v1_EventSeries(a.(*core.EventSeries), b.(*eventsv1.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.Event)(nil), (*eventsv1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Event_To_v1_Event(a.(*core.Event), b.(*eventsv1.Event), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*eventsv1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Event_To_core_Event(a.(*eventsv1.Event), b.(*core.Event), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_Event_To_core_Event(in *eventsv1.Event, out *core.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.EventTime = in.EventTime
out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
out.Action = in.Action
out.Reason = in.Reason
// WARNING: in.Regarding requires manual conversion: does not exist in peer-type
out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related))
// WARNING: in.Note requires manual conversion: does not exist in peer-type
out.Type = in.Type
// WARNING: in.DeprecatedSource requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedFirstTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedLastTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedCount requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_core_Event_To_v1_Event(in *core.Event, out *eventsv1.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
// WARNING: in.InvolvedObject requires manual conversion: does not exist in peer-type
out.Reason = in.Reason
// WARNING: in.Message requires manual conversion: does not exist in peer-type
// WARNING: in.Source requires manual conversion: does not exist in peer-type
// WARNING: in.FirstTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.LastTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.Count requires manual conversion: does not exist in peer-type
out.Type = in.Type
out.EventTime = in.EventTime
out.Series = (*eventsv1.EventSeries)(unsafe.Pointer(in.Series))
out.Action = in.Action
out.Related = (*corev1.ObjectReference)(unsafe.Pointer(in.Related))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
return nil
}
func autoConvert_v1_EventList_To_core_EventList(in *eventsv1.EventList, out *core.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Event, len(*in))
for i := range *in {
if err := Convert_v1_Event_To_core_Event(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function.
func Convert_v1_EventList_To_core_EventList(in *eventsv1.EventList, out *core.EventList, s conversion.Scope) error {
return autoConvert_v1_EventList_To_core_EventList(in, out, s)
}
func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *eventsv1.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]eventsv1.Event, len(*in))
for i := range *in {
if err := Convert_core_Event_To_v1_Event(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function.
func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *eventsv1.EventList, s conversion.Scope) error {
return autoConvert_core_EventList_To_v1_EventList(in, out, s)
}
func autoConvert_v1_EventSeries_To_core_EventSeries(in *eventsv1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function.
func Convert_v1_EventSeries_To_core_EventSeries(in *eventsv1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s)
}
func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *eventsv1.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function.
func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *eventsv1.EventSeries, s conversion.Scope) error {
return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
v1beta1 "k8s.io/api/events/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
k8s_api "k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
func Convert_v1beta1_Event_To_core_Event(in *v1beta1.Event, out *k8s_api.Event, s conversion.Scope) error {
if err := autoConvert_v1beta1_Event_To_core_Event(in, out, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_v1_ObjectReference_To_core_ObjectReference(&in.Regarding, &out.InvolvedObject, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_v1_EventSource_To_core_EventSource(&in.DeprecatedSource, &out.Source, s); err != nil {
return err
}
out.Message = in.Note
out.FirstTimestamp = in.DeprecatedFirstTimestamp
out.LastTimestamp = in.DeprecatedLastTimestamp
out.Count = in.DeprecatedCount
return nil
}
func Convert_core_Event_To_v1beta1_Event(in *k8s_api.Event, out *v1beta1.Event, s conversion.Scope) error {
if err := autoConvert_core_Event_To_v1beta1_Event(in, out, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.Regarding, s); err != nil {
return err
}
if err := k8s_api_v1.Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.DeprecatedSource, s); err != nil {
return err
}
out.Note = in.Message
out.DeprecatedFirstTimestamp = in.FirstTimestamp
out.DeprecatedLastTimestamp = in.LastTimestamp
out.DeprecatedCount = in.Count
return nil
}
func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error {
mapping := map[string]string{
"reason": "reason",
"regarding.kind": "involvedObject.kind", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.namespace": "involvedObject.namespace", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.name": "involvedObject.name", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.uid": "involvedObject.uid", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.apiVersion": "involvedObject.apiVersion", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.resourceVersion": "involvedObject.resourceVersion", // map events.k8s.io field to fieldset returned by ToSelectableFields
"regarding.fieldPath": "involvedObject.fieldPath", // map events.k8s.io field to fieldset returned by ToSelectableFields
"reportingController": "reportingComponent", // map events.k8s.io field to fieldset returned by ToSelectableFields
"type": "type",
"metadata.namespace": "metadata.namespace",
"metadata.name": "metadata.name",
}
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"),
func(label, value string) (string, string, error) {
mappedLabel, ok := mapping[label]
if !ok {
return "", "", fmt.Errorf("field label not supported: %s", label)
}
return mappedLabel, value, nil
},
)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
eventsv1beta1 "k8s.io/api/events/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "events.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &eventsv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults, AddFieldLabelConversionsForEvent)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*eventsv1beta1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EventList_To_core_EventList(a.(*eventsv1beta1.EventList), b.(*core.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*eventsv1beta1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventList_To_v1beta1_EventList(a.(*core.EventList), b.(*eventsv1beta1.EventList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*eventsv1beta1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_EventSeries_To_core_EventSeries(a.(*eventsv1beta1.EventSeries), b.(*core.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*eventsv1beta1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_EventSeries_To_v1beta1_EventSeries(a.(*core.EventSeries), b.(*eventsv1beta1.EventSeries), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.Event)(nil), (*eventsv1beta1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Event_To_v1beta1_Event(a.(*core.Event), b.(*eventsv1beta1.Event), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*eventsv1beta1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Event_To_core_Event(a.(*eventsv1beta1.Event), b.(*core.Event), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_Event_To_core_Event(in *eventsv1beta1.Event, out *core.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.EventTime = in.EventTime
out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
out.Action = in.Action
out.Reason = in.Reason
// WARNING: in.Regarding requires manual conversion: does not exist in peer-type
out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related))
// WARNING: in.Note requires manual conversion: does not exist in peer-type
out.Type = in.Type
// WARNING: in.DeprecatedSource requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedFirstTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedLastTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.DeprecatedCount requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_core_Event_To_v1beta1_Event(in *core.Event, out *eventsv1beta1.Event, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
// WARNING: in.InvolvedObject requires manual conversion: does not exist in peer-type
out.Reason = in.Reason
// WARNING: in.Message requires manual conversion: does not exist in peer-type
// WARNING: in.Source requires manual conversion: does not exist in peer-type
// WARNING: in.FirstTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.LastTimestamp requires manual conversion: does not exist in peer-type
// WARNING: in.Count requires manual conversion: does not exist in peer-type
out.Type = in.Type
out.EventTime = in.EventTime
out.Series = (*eventsv1beta1.EventSeries)(unsafe.Pointer(in.Series))
out.Action = in.Action
out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related))
out.ReportingController = in.ReportingController
out.ReportingInstance = in.ReportingInstance
return nil
}
func autoConvert_v1beta1_EventList_To_core_EventList(in *eventsv1beta1.EventList, out *core.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]core.Event, len(*in))
for i := range *in {
if err := Convert_v1beta1_Event_To_core_Event(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_EventList_To_core_EventList is an autogenerated conversion function.
func Convert_v1beta1_EventList_To_core_EventList(in *eventsv1beta1.EventList, out *core.EventList, s conversion.Scope) error {
return autoConvert_v1beta1_EventList_To_core_EventList(in, out, s)
}
func autoConvert_core_EventList_To_v1beta1_EventList(in *core.EventList, out *eventsv1beta1.EventList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]eventsv1beta1.Event, len(*in))
for i := range *in {
if err := Convert_core_Event_To_v1beta1_Event(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_core_EventList_To_v1beta1_EventList is an autogenerated conversion function.
func Convert_core_EventList_To_v1beta1_EventList(in *core.EventList, out *eventsv1beta1.EventList, s conversion.Scope) error {
return autoConvert_core_EventList_To_v1beta1_EventList(in, out, s)
}
func autoConvert_v1beta1_EventSeries_To_core_EventSeries(in *eventsv1beta1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_v1beta1_EventSeries_To_core_EventSeries is an autogenerated conversion function.
func Convert_v1beta1_EventSeries_To_core_EventSeries(in *eventsv1beta1.EventSeries, out *core.EventSeries, s conversion.Scope) error {
return autoConvert_v1beta1_EventSeries_To_core_EventSeries(in, out, s)
}
func autoConvert_core_EventSeries_To_v1beta1_EventSeries(in *core.EventSeries, out *eventsv1beta1.EventSeries, s conversion.Scope) error {
out.Count = in.Count
out.LastObservedTime = in.LastObservedTime
return nil
}
// Convert_core_EventSeries_To_v1beta1_EventSeries is an autogenerated conversion function.
func Convert_core_EventSeries_To_v1beta1_EventSeries(in *core.EventSeries, out *eventsv1beta1.EventSeries, s conversion.Scope) error {
return autoConvert_core_EventSeries_To_v1beta1_EventSeries(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
// Funcs returns the fuzzer functions for the extensions API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the experimental API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(extensions.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion))
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package extensions
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
)
// GroupName is the group name used in this package
const GroupName = "extensions"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// Builds a new Scheme of known types.
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&apps.Deployment{},
&apps.DeploymentList{},
&apps.DeploymentRollback{},
&apps.DaemonSetList{},
&apps.DaemonSet{},
&networking.Ingress{},
&networking.IngressList{},
&apps.ReplicaSet{},
&apps.ReplicaSetList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},
)
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
)
func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error {
out.Replicas = int32(in.Replicas)
out.TargetSelector = in.Selector
out.Selector = nil
selector, err := metav1.ParseToLabelSelector(in.Selector)
if err != nil {
return fmt.Errorf("failed to parse selector: %v", err)
}
if len(selector.MatchExpressions) == 0 {
out.Selector = selector.MatchLabels
}
return nil
}
func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
if in.TargetSelector != "" {
out.Selector = in.TargetSelector
} else if in.Selector != nil {
set := labels.Set{}
for key, val := range in.Selector {
set[key] = val
}
out.Selector = labels.SelectorFromSet(set).String()
} else {
out.Selector = ""
}
return nil
}
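// exampleScaleStatusConversion is an illustrative sketch, not part of the
// original file: a pure equality-based selector survives into both the
// TargetSelector string and the map form. The sample values are assumptions.
func exampleScaleStatusConversion() (*extensionsv1beta1.ScaleStatus, error) {
	in := &autoscaling.ScaleStatus{Replicas: 3, Selector: "app=web"}
	out := &extensionsv1beta1.ScaleStatus{}
	if err := Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in, out, nil); err != nil {
		return nil, err
	}
	// out.TargetSelector == "app=web"
	// out.Selector == map[string]string{"app": "web"}
	return out, nil
}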
func Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *extensionsv1beta1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in, out, s); err != nil {
return err
}
if out.Ingress == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ingress = make([]networking.NetworkPolicyIngressRule, 0)
}
if out.Egress == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Egress = make([]networking.NetworkPolicyEgressRule, 0)
}
return nil
}
func Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *extensionsv1beta1.NetworkPolicySpec, s conversion.Scope) error {
if err := autoConvert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s); err != nil {
return err
}
if out.Ingress == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ingress = make([]extensionsv1beta1.NetworkPolicyIngressRule, 0)
}
if out.Egress == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Egress = make([]extensionsv1beta1.NetworkPolicyEgressRule, 0)
}
return nil
}
func Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *extensionsv1beta1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error {
if err := autoConvert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in, out, s); err != nil {
return err
}
if out.Ports == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ports = make([]networking.NetworkPolicyPort, 0)
}
return nil
}
func Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *extensionsv1beta1.NetworkPolicyIngressRule, s conversion.Scope) error {
if err := autoConvert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s); err != nil {
return err
}
if out.Ports == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ports = make([]extensionsv1beta1.NetworkPolicyPort, 0)
}
return nil
}
func Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *extensionsv1beta1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error {
if err := autoConvert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in, out, s); err != nil {
return err
}
if out.Ports == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ports = make([]networking.NetworkPolicyPort, 0)
}
if out.To == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.To = make([]networking.NetworkPolicyPeer, 0)
}
return nil
}
func Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *extensionsv1beta1.NetworkPolicyEgressRule, s conversion.Scope) error {
if err := autoConvert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(in, out, s); err != nil {
return err
}
if out.Ports == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.Ports = make([]extensionsv1beta1.NetworkPolicyPort, 0)
}
if out.To == nil {
// Produce a zero-length non-nil slice for compatibility with previous manual conversion.
out.To = make([]extensionsv1beta1.NetworkPolicyPeer, 0)
}
return nil
}
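// exampleNetworkPolicySpecConversion is an illustrative sketch, not part of
// the original file: nil rule slices come out as non-nil, zero-length slices,
// matching the compatibility notes in the conversions above.
func exampleNetworkPolicySpecConversion() (*networking.NetworkPolicySpec, error) {
	in := &extensionsv1beta1.NetworkPolicySpec{}
	out := &networking.NetworkPolicySpec{}
	if err := Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in, out, nil); err != nil {
		return nil, err
	}
	// out.Ingress != nil && len(out.Ingress) == 0, and likewise out.Egress.
	return out, nil
}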
func Convert_v1beta1_IPBlock_To_networking_IPBlock(in *extensionsv1beta1.IPBlock, out *networking.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = make([]string, len(in.Except))
copy(out.Except, in.Except)
return nil
}
func Convert_networking_IPBlock_To_v1beta1_IPBlock(in *networking.IPBlock, out *extensionsv1beta1.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = make([]string, len(in.Except))
copy(out.Except, in.Except)
return nil
}
func Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in *extensionsv1beta1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
if err := autoConvert_v1beta1_IngressBackend_To_networking_IngressBackend(in, out, s); err != nil {
return err
}
if len(in.ServiceName) > 0 || in.ServicePort.IntVal != 0 || in.ServicePort.StrVal != "" || in.ServicePort.Type == intstr.String {
out.Service = &networking.IngressServiceBackend{}
out.Service.Name = in.ServiceName
out.Service.Port.Name = in.ServicePort.StrVal
out.Service.Port.Number = in.ServicePort.IntVal
}
return nil
}
func Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking.IngressBackend, out *extensionsv1beta1.IngressBackend, s conversion.Scope) error {
if err := autoConvert_networking_IngressBackend_To_v1beta1_IngressBackend(in, out, s); err != nil {
return err
}
if in.Service != nil {
out.ServiceName = in.Service.Name
if len(in.Service.Port.Name) > 0 {
out.ServicePort = intstr.FromString(in.Service.Port.Name)
} else {
out.ServicePort = intstr.FromInt32(in.Service.Port.Number)
}
}
return nil
}
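// exampleIngressBackendConversion is an illustrative sketch, not part of the
// original file: the legacy ServiceName/ServicePort pair is folded into the
// structured Service backend. The sample values are assumptions.
func exampleIngressBackendConversion() (*networking.IngressBackend, error) {
	in := &extensionsv1beta1.IngressBackend{
		ServiceName: "web",
		ServicePort: intstr.FromInt32(80),
	}
	out := &networking.IngressBackend{}
	if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in, out, nil); err != nil {
		return nil, err
	}
	// out.Service.Name == "web" and out.Service.Port.Number == 80.
	return out, nil
}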
func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *extensionsv1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in, out, s); err != nil {
return err
}
if in.Backend != nil {
out.DefaultBackend = &networking.IngressBackend{}
if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in.Backend, out.DefaultBackend, s); err != nil {
return err
}
}
return nil
}
func Convert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *extensionsv1beta1.IngressSpec, s conversion.Scope) error {
if err := autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in, out, s); err != nil {
return err
}
if in.DefaultBackend != nil {
out.Backend = &extensionsv1beta1.IngressBackend{}
if err := Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in.DefaultBackend, out.Backend, s); err != nil {
return err
}
}
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"math"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_DaemonSet(obj *extensionsv1beta1.DaemonSet) {
labels := obj.Spec.Template.Labels
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{
MatchLabels: labels,
}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = extensionsv1beta1.OnDeleteDaemonSetStrategyType
}
if updateStrategy.Type == extensionsv1beta1.RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := extensionsv1beta1.RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
maxUnavailable := intstr.FromInt32(1)
updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if updateStrategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 0.
maxSurge := intstr.FromInt32(0)
updateStrategy.RollingUpdate.MaxSurge = &maxSurge
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
func SetDefaults_Deployment(obj *extensionsv1beta1.Deployment) {
// Default labels and selector to labels from pod template spec.
labels := obj.Spec.Template.Labels
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
// Set extensionsv1beta1.DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Default extensionsv1beta1.DeploymentStrategyType to RollingUpdate.
if strategy.Type == "" {
strategy.Type = extensionsv1beta1.RollingUpdateDeploymentStrategyType
}
if strategy.Type == extensionsv1beta1.RollingUpdateDeploymentStrategyType || strategy.RollingUpdate != nil {
if strategy.RollingUpdate == nil {
rollingUpdate := extensionsv1beta1.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
maxUnavailable := intstr.FromInt32(1)
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 1.
maxSurge := intstr.FromInt32(1)
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
// Set extensionsv1beta1.DeploymentSpec.ProgressDeadlineSeconds to MaxInt32,
// which has the same meaning as unset.
if obj.Spec.ProgressDeadlineSeconds == nil {
obj.Spec.ProgressDeadlineSeconds = new(int32)
*obj.Spec.ProgressDeadlineSeconds = math.MaxInt32
}
// Set extensionsv1beta1.DeploymentSpec.RevisionHistoryLimit to MaxInt32,
// which has the same meaning as unset.
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = math.MaxInt32
}
}
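// Illustrative use of the defaulter above; the expected values follow
// directly from the function body:
//
//	d := &extensionsv1beta1.Deployment{}
//	SetDefaults_Deployment(d)
//	fmt.Println(*d.Spec.Replicas)                              // 1
//	fmt.Println(d.Spec.Strategy.Type)                          // RollingUpdate
//	fmt.Println(d.Spec.Strategy.RollingUpdate.MaxSurge.IntVal) // 1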
func SetDefaults_ReplicaSet(obj *extensionsv1beta1.ReplicaSet) {
labels := obj.Spec.Template.Labels
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{
MatchLabels: labels,
}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
func SetDefaults_NetworkPolicy(obj *extensionsv1beta1.NetworkPolicy) {
// Default any undefined Protocol fields to TCP.
// Iterate by index: ranging by value would set Protocol on per-iteration
// copies and leave the actual slice elements untouched.
for i := range obj.Spec.Ingress {
for j := range obj.Spec.Ingress[i].Ports {
if obj.Spec.Ingress[i].Ports[j].Protocol == nil {
proto := v1.ProtocolTCP
obj.Spec.Ingress[i].Ports[j].Protocol = &proto
}
}
}
if len(obj.Spec.PolicyTypes) == 0 {
// Any policy that does not specify policyTypes implies at least "Ingress".
obj.Spec.PolicyTypes = []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress}
if len(obj.Spec.Egress) != 0 {
obj.Spec.PolicyTypes = append(obj.Spec.PolicyTypes, extensionsv1beta1.PolicyTypeEgress)
}
}
}
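// The index-based loop above matters: ranging by value yields per-iteration
// copies, and writes to a copy are silently lost. A minimal demonstration of
// the pitfall this avoids:
//
//	ports := []struct{ Protocol *string }{{}}
//	for _, p := range ports {
//		tcp := "TCP"
//		p.Protocol = &tcp // mutates only the loop copy
//	}
//	fmt.Println(ports[0].Protocol) // <nil>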
func SetDefaults_HTTPIngressPath(obj *extensionsv1beta1.HTTPIngressPath) {
var defaultPathType = extensionsv1beta1.PathTypeImplementationSpecific
if obj.PathType == nil {
obj.PathType = &defaultPathType
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "extensions"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &extensionsv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
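// Typical wiring for this package's helpers, sketched with error handling
// elided (illustrative only):
//
//	scheme := runtime.NewScheme()
//	_ = AddToScheme(scheme) // runs every function registered on the builder,
//	                        // including addDefaultingFuncs and the generated conversions
//	gr := Resource("deployments") // schema.GroupResource{Group: "extensions", Resource: "deployments"}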
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
apps "k8s.io/kubernetes/pkg/apis/apps"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
core "k8s.io/kubernetes/pkg/apis/core"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
networking "k8s.io/kubernetes/pkg/apis/networking"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSet_To_apps_DaemonSet(a.(*extensionsv1beta1.DaemonSet), b.(*apps.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*extensionsv1beta1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSet_To_v1beta1_DaemonSet(a.(*apps.DaemonSet), b.(*extensionsv1beta1.DaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*extensionsv1beta1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*extensionsv1beta1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*extensionsv1beta1.DaemonSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(a.(*extensionsv1beta1.DaemonSetList), b.(*apps.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*extensionsv1beta1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(a.(*apps.DaemonSetList), b.(*extensionsv1beta1.DaemonSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*extensionsv1beta1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*extensionsv1beta1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*extensionsv1beta1.DaemonSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*extensionsv1beta1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*extensionsv1beta1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*extensionsv1beta1.DaemonSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*extensionsv1beta1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*extensionsv1beta1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*extensionsv1beta1.DaemonSetUpdateStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*extensionsv1beta1.Deployment), b.(*apps.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*extensionsv1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*extensionsv1beta1.Deployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*extensionsv1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*extensionsv1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*extensionsv1beta1.DeploymentCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*extensionsv1beta1.DeploymentList), b.(*apps.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*extensionsv1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*extensionsv1beta1.DeploymentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*extensionsv1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*extensionsv1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*extensionsv1beta1.DeploymentRollback), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*extensionsv1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*extensionsv1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*extensionsv1beta1.DeploymentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*extensionsv1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*extensionsv1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*extensionsv1beta1.DeploymentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*extensionsv1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*extensionsv1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*extensionsv1beta1.DeploymentStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.HTTPIngressPath)(nil), (*networking.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(a.(*extensionsv1beta1.HTTPIngressPath), b.(*networking.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressPath)(nil), (*extensionsv1beta1.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(a.(*networking.HTTPIngressPath), b.(*extensionsv1beta1.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.HTTPIngressRuleValue)(nil), (*networking.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(a.(*extensionsv1beta1.HTTPIngressRuleValue), b.(*networking.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressRuleValue)(nil), (*extensionsv1beta1.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(a.(*networking.HTTPIngressRuleValue), b.(*extensionsv1beta1.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.Ingress)(nil), (*networking.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Ingress_To_networking_Ingress(a.(*extensionsv1beta1.Ingress), b.(*networking.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.Ingress)(nil), (*extensionsv1beta1.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_Ingress_To_v1beta1_Ingress(a.(*networking.Ingress), b.(*extensionsv1beta1.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressList)(nil), (*networking.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressList_To_networking_IngressList(a.(*extensionsv1beta1.IngressList), b.(*networking.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressList)(nil), (*extensionsv1beta1.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressList_To_v1beta1_IngressList(a.(*networking.IngressList), b.(*extensionsv1beta1.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressLoadBalancerIngress)(nil), (*networking.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(a.(*extensionsv1beta1.IngressLoadBalancerIngress), b.(*networking.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerIngress)(nil), (*extensionsv1beta1.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(a.(*networking.IngressLoadBalancerIngress), b.(*extensionsv1beta1.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressLoadBalancerStatus)(nil), (*networking.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(a.(*extensionsv1beta1.IngressLoadBalancerStatus), b.(*networking.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerStatus)(nil), (*extensionsv1beta1.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(a.(*networking.IngressLoadBalancerStatus), b.(*extensionsv1beta1.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressPortStatus)(nil), (*networking.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(a.(*extensionsv1beta1.IngressPortStatus), b.(*networking.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressPortStatus)(nil), (*extensionsv1beta1.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(a.(*networking.IngressPortStatus), b.(*extensionsv1beta1.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressRule)(nil), (*networking.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressRule_To_networking_IngressRule(a.(*extensionsv1beta1.IngressRule), b.(*networking.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRule)(nil), (*extensionsv1beta1.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRule_To_v1beta1_IngressRule(a.(*networking.IngressRule), b.(*extensionsv1beta1.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressRuleValue)(nil), (*networking.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(a.(*extensionsv1beta1.IngressRuleValue), b.(*networking.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRuleValue)(nil), (*extensionsv1beta1.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(a.(*networking.IngressRuleValue), b.(*extensionsv1beta1.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressStatus)(nil), (*networking.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressStatus_To_networking_IngressStatus(a.(*extensionsv1beta1.IngressStatus), b.(*networking.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressStatus)(nil), (*extensionsv1beta1.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressStatus_To_v1beta1_IngressStatus(a.(*networking.IngressStatus), b.(*extensionsv1beta1.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.IngressTLS)(nil), (*networking.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressTLS_To_networking_IngressTLS(a.(*extensionsv1beta1.IngressTLS), b.(*networking.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressTLS)(nil), (*extensionsv1beta1.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressTLS_To_v1beta1_IngressTLS(a.(*networking.IngressTLS), b.(*extensionsv1beta1.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.NetworkPolicy)(nil), (*networking.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(a.(*extensionsv1beta1.NetworkPolicy), b.(*networking.NetworkPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicy)(nil), (*extensionsv1beta1.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(a.(*networking.NetworkPolicy), b.(*extensionsv1beta1.NetworkPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.NetworkPolicyList)(nil), (*networking.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(a.(*extensionsv1beta1.NetworkPolicyList), b.(*networking.NetworkPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyList)(nil), (*extensionsv1beta1.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(a.(*networking.NetworkPolicyList), b.(*extensionsv1beta1.NetworkPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.NetworkPolicyPeer)(nil), (*networking.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(a.(*extensionsv1beta1.NetworkPolicyPeer), b.(*networking.NetworkPolicyPeer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPeer)(nil), (*extensionsv1beta1.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(a.(*networking.NetworkPolicyPeer), b.(*extensionsv1beta1.NetworkPolicyPeer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.NetworkPolicyPort)(nil), (*networking.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(a.(*extensionsv1beta1.NetworkPolicyPort), b.(*networking.NetworkPolicyPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPort)(nil), (*extensionsv1beta1.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(a.(*networking.NetworkPolicyPort), b.(*extensionsv1beta1.NetworkPolicyPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(a.(*extensionsv1beta1.ReplicaSet), b.(*apps.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*extensionsv1beta1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(a.(*apps.ReplicaSet), b.(*extensionsv1beta1.ReplicaSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*extensionsv1beta1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*extensionsv1beta1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*extensionsv1beta1.ReplicaSetCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(a.(*extensionsv1beta1.ReplicaSetList), b.(*apps.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*extensionsv1beta1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*extensionsv1beta1.ReplicaSetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*extensionsv1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*extensionsv1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*extensionsv1beta1.ReplicaSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*extensionsv1beta1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*extensionsv1beta1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*extensionsv1beta1.ReplicaSetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*extensionsv1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*extensionsv1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*extensionsv1beta1.RollbackConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*extensionsv1beta1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*extensionsv1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*extensionsv1beta1.RollingUpdateDaemonSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*extensionsv1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*extensionsv1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*extensionsv1beta1.RollingUpdateDeployment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.Scale)(nil), (*autoscaling.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Scale_To_autoscaling_Scale(a.(*extensionsv1beta1.Scale), b.(*autoscaling.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.Scale)(nil), (*extensionsv1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_Scale_To_v1beta1_Scale(a.(*autoscaling.Scale), b.(*extensionsv1beta1.Scale), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*extensionsv1beta1.ScaleSpec)(nil), (*autoscaling.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(a.(*extensionsv1beta1.ScaleSpec), b.(*autoscaling.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*autoscaling.ScaleSpec)(nil), (*extensionsv1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(a.(*autoscaling.ScaleSpec), b.(*extensionsv1beta1.ScaleSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*extensionsv1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*extensionsv1beta1.ScaleStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.IPBlock)(nil), (*extensionsv1beta1.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPBlock_To_v1beta1_IPBlock(a.(*networking.IPBlock), b.(*extensionsv1beta1.IPBlock), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.IngressBackend)(nil), (*extensionsv1beta1.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressBackend_To_v1beta1_IngressBackend(a.(*networking.IngressBackend), b.(*extensionsv1beta1.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.IngressSpec)(nil), (*extensionsv1beta1.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressSpec_To_v1beta1_IngressSpec(a.(*networking.IngressSpec), b.(*extensionsv1beta1.IngressSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.NetworkPolicyEgressRule)(nil), (*extensionsv1beta1.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(a.(*networking.NetworkPolicyEgressRule), b.(*extensionsv1beta1.NetworkPolicyEgressRule), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.NetworkPolicyIngressRule)(nil), (*extensionsv1beta1.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(a.(*networking.NetworkPolicyIngressRule), b.(*extensionsv1beta1.NetworkPolicyIngressRule), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.NetworkPolicySpec)(nil), (*extensionsv1beta1.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(a.(*networking.NetworkPolicySpec), b.(*extensionsv1beta1.NetworkPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.IPBlock)(nil), (*networking.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IPBlock_To_networking_IPBlock(a.(*extensionsv1beta1.IPBlock), b.(*networking.IPBlock), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.IngressBackend)(nil), (*networking.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressBackend_To_networking_IngressBackend(a.(*extensionsv1beta1.IngressBackend), b.(*networking.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.IngressSpec)(nil), (*networking.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressSpec_To_networking_IngressSpec(a.(*extensionsv1beta1.IngressSpec), b.(*networking.IngressSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.NetworkPolicyEgressRule)(nil), (*networking.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(a.(*extensionsv1beta1.NetworkPolicyEgressRule), b.(*networking.NetworkPolicyEgressRule), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.NetworkPolicyIngressRule)(nil), (*networking.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(a.(*extensionsv1beta1.NetworkPolicyIngressRule), b.(*networking.NetworkPolicyIngressRule), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.NetworkPolicySpec)(nil), (*networking.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(a.(*extensionsv1beta1.NetworkPolicySpec), b.(*networking.NetworkPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*extensionsv1beta1.ScaleStatus)(nil), (*autoscaling.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(a.(*extensionsv1beta1.ScaleStatus), b.(*autoscaling.ScaleStatus), scope)
}); err != nil {
return err
}
return nil
}
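// After registration, conversions are normally dispatched through the scheme
// rather than called directly; a sketch with error handling elided:
//
//	scheme := runtime.NewScheme()
//	_ = RegisterConversions(scheme)
//	in := &extensionsv1beta1.Deployment{}
//	out := &apps.Deployment{}
//	_ = scheme.Convert(in, out, nil) // dispatches to Convert_v1beta1_Deployment_To_apps_Deployment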
func autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in *extensionsv1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DaemonSet_To_apps_DaemonSet is an autogenerated conversion function.
func Convert_v1beta1_DaemonSet_To_apps_DaemonSet(in *extensionsv1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in, out, s)
}
func autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *extensionsv1beta1.DaemonSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion function.
func Convert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *extensionsv1beta1.DaemonSet, s conversion.Scope) error {
return autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in, out, s)
}
func autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *extensionsv1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
out.Type = apps.DaemonSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *extensionsv1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s)
}
func autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *extensionsv1beta1.DaemonSetCondition, s conversion.Scope) error {
out.Type = extensionsv1beta1.DaemonSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function.
func Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *extensionsv1beta1.DaemonSetCondition, s conversion.Scope) error {
return autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s)
}
func autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *extensionsv1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.DaemonSet, len(*in))
for i := range *in {
if err := Convert_v1beta1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *extensionsv1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in, out, s)
}
func autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *extensionsv1beta1.DaemonSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensionsv1beta1.DaemonSet, len(*in))
for i := range *in {
if err := Convert_apps_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function.
func Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *extensionsv1beta1.DaemonSetList, s conversion.Scope) error {
return autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s)
}
func autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *extensionsv1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.TemplateGeneration = in.TemplateGeneration
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
// Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *extensionsv1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in, out, s)
}
func autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *extensionsv1beta1.DaemonSetSpec, s conversion.Scope) error {
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.TemplateGeneration = in.TemplateGeneration
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
return nil
}
// Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function.
func Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *extensionsv1beta1.DaemonSetSpec, s conversion.Scope) error {
return autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s)
}
func autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *extensionsv1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *extensionsv1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s)
}
func autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *extensionsv1beta1.DaemonSetStatus, s conversion.Scope) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
out.ObservedGeneration = in.ObservedGeneration
out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
out.NumberAvailable = in.NumberAvailable
out.NumberUnavailable = in.NumberUnavailable
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
out.Conditions = *(*[]extensionsv1beta1.DaemonSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function.
func Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *extensionsv1beta1.DaemonSetStatus, s conversion.Scope) error {
return autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s)
}
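// The unsafe.Pointer casts in the status conversions reinterpret a pointer or
// slice header in place instead of copying element by element; conversion-gen
// emits this form only when the source and destination types share an
// identical memory layout. A toy illustration of the pattern:
//
//	type internalCond struct{ Reason string }
//	type v1beta1Cond struct{ Reason string }
//	in := []internalCond{{Reason: "ok"}}
//	out := *(*[]v1beta1Cond)(unsafe.Pointer(&in))
//	fmt.Println(out[0].Reason) // "ok", sharing the same backing array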
func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *extensionsv1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = apps.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDaemonSet)
if err := Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *extensionsv1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *extensionsv1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error {
out.Type = extensionsv1beta1.DaemonSetUpdateStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(extensionsv1beta1.RollingUpdateDaemonSet)
if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy is an autogenerated conversion function.
func Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *extensionsv1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error {
return autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s)
}
func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *extensionsv1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function.
func Convert_v1beta1_Deployment_To_apps_Deployment(in *extensionsv1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error {
return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s)
}
func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *extensionsv1beta1.Deployment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function.
func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *extensionsv1beta1.Deployment, s conversion.Scope) error {
return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s)
}
func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *extensionsv1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
out.Type = apps.DeploymentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function.
func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *extensionsv1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s)
}
func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *extensionsv1beta1.DeploymentCondition, s conversion.Scope) error {
out.Type = extensionsv1beta1.DeploymentConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastUpdateTime = in.LastUpdateTime
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function.
func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *extensionsv1beta1.DeploymentCondition, s conversion.Scope) error {
return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s)
}
func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *extensionsv1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.Deployment, len(*in))
for i := range *in {
if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function.
func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *extensionsv1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s)
}
func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *extensionsv1beta1.DeploymentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensionsv1beta1.Deployment, len(*in))
for i := range *in {
if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function.
func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *extensionsv1beta1.DeploymentList, s conversion.Scope) error {
return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
}
func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *extensionsv1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
out.Name = in.Name
out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function.
func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *extensionsv1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s)
}
func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *extensionsv1beta1.DeploymentRollback, s conversion.Scope) error {
out.Name = in.Name
out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
return err
}
return nil
}
// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function.
func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *extensionsv1beta1.DeploymentRollback, s conversion.Scope) error {
return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
}
func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec is an autogenerated conversion function.
func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in, out, s)
}
func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
out.Paused = in.Paused
out.RollbackTo = (*extensionsv1beta1.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
return nil
}
// Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec is an autogenerated conversion function.
func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error {
return autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in, out, s)
}
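// Replicas shows the pointer/value mismatch between API versions: the
// versioned type uses *int32 so an unset field can be detected and defaulted,
// while the internal type uses a plain int32. The metav1 helpers bridge the
// two: Convert_Pointer_int32_To_int32 treats a nil pointer as zero, and
// Convert_int32_To_Pointer_int32 always allocates a fresh pointer on the way
// back.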
func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *extensionsv1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function.
func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *extensionsv1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s)
}
func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *extensionsv1beta1.DeploymentStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.Conditions = *(*[]extensionsv1beta1.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
return nil
}
// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function.
func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *extensionsv1beta1.DeploymentStatus, s conversion.Scope) error {
return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
}
func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
out.Type = apps.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(apps.RollingUpdateDeployment)
if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy is an autogenerated conversion function.
func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in, out, s)
}
func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error {
out.Type = extensionsv1beta1.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(extensionsv1beta1.RollingUpdateDeployment)
if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
// Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy is an autogenerated conversion function.
func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error {
return autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in, out, s)
}
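// The RollingUpdate handling above is the generated pattern for optional
// struct pointers: shadow in/out with the field addresses, allocate the
// destination with new(...), recurse into the element conversion, and mirror
// nil explicitly on the else branch.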
func autoConvert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in *extensionsv1beta1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*networking.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath is an autogenerated conversion function.
func Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in *extensionsv1beta1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in, out, s)
}
func autoConvert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *networking.HTTPIngressPath, out *extensionsv1beta1.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*extensionsv1beta1.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_networking_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath is an autogenerated conversion function.
func Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *networking.HTTPIngressPath, out *extensionsv1beta1.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s)
}
func autoConvert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *extensionsv1beta1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]networking.HTTPIngressPath, len(*in))
for i := range *in {
if err := Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Paths = nil
}
return nil
}
// Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *extensionsv1beta1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in, out, s)
}
func autoConvert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *extensionsv1beta1.HTTPIngressRuleValue, s conversion.Scope) error {
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]extensionsv1beta1.HTTPIngressPath, len(*in))
for i := range *in {
if err := Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Paths = nil
}
return nil
}
// Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *extensionsv1beta1.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s)
}
func autoConvert_v1beta1_IPBlock_To_networking_IPBlock(in *extensionsv1beta1.IPBlock, out *networking.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = *(*[]string)(unsafe.Pointer(&in.Except))
return nil
}
func autoConvert_networking_IPBlock_To_v1beta1_IPBlock(in *networking.IPBlock, out *extensionsv1beta1.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = *(*[]string)(unsafe.Pointer(&in.Except))
return nil
}
func autoConvert_v1beta1_Ingress_To_networking_Ingress(in *extensionsv1beta1.Ingress, out *networking.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_IngressSpec_To_networking_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_IngressStatus_To_networking_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Ingress_To_networking_Ingress is an autogenerated conversion function.
func Convert_v1beta1_Ingress_To_networking_Ingress(in *extensionsv1beta1.Ingress, out *networking.Ingress, s conversion.Scope) error {
return autoConvert_v1beta1_Ingress_To_networking_Ingress(in, out, s)
}
func autoConvert_networking_Ingress_To_v1beta1_Ingress(in *networking.Ingress, out *extensionsv1beta1.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_networking_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_networking_Ingress_To_v1beta1_Ingress is an autogenerated conversion function.
func Convert_networking_Ingress_To_v1beta1_Ingress(in *networking.Ingress, out *extensionsv1beta1.Ingress, s conversion.Scope) error {
return autoConvert_networking_Ingress_To_v1beta1_Ingress(in, out, s)
}
func autoConvert_v1beta1_IngressBackend_To_networking_IngressBackend(in *extensionsv1beta1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
// WARNING: in.ServiceName requires manual conversion: does not exist in peer-type
// WARNING: in.ServicePort requires manual conversion: does not exist in peer-type
out.Resource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
func autoConvert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking.IngressBackend, out *extensionsv1beta1.IngressBackend, s conversion.Scope) error {
// WARNING: in.Service requires manual conversion: does not exist in peer-type
out.Resource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
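// Neither IngressBackend autoConvert above gets an exported wrapper: the
// extensions/v1beta1 type addresses the backend as ServiceName/ServicePort,
// while the internal networking type nests them in a Service struct, so the
// generator can only flag the fields with WARNING comments and leave the
// mapping to hand-written Convert_ functions defined elsewhere in the package.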
func autoConvert_v1beta1_IngressList_To_networking_IngressList(in *extensionsv1beta1.IngressList, out *networking.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]networking.Ingress, len(*in))
for i := range *in {
if err := Convert_v1beta1_Ingress_To_networking_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_IngressList_To_networking_IngressList is an autogenerated conversion function.
func Convert_v1beta1_IngressList_To_networking_IngressList(in *extensionsv1beta1.IngressList, out *networking.IngressList, s conversion.Scope) error {
return autoConvert_v1beta1_IngressList_To_networking_IngressList(in, out, s)
}
func autoConvert_networking_IngressList_To_v1beta1_IngressList(in *networking.IngressList, out *extensionsv1beta1.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensionsv1beta1.Ingress, len(*in))
for i := range *in {
if err := Convert_networking_Ingress_To_v1beta1_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_networking_IngressList_To_v1beta1_IngressList is an autogenerated conversion function.
func Convert_networking_IngressList_To_v1beta1_IngressList(in *networking.IngressList, out *extensionsv1beta1.IngressList, s conversion.Scope) error {
return autoConvert_networking_IngressList_To_v1beta1_IngressList(in, out, s)
}
func autoConvert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *extensionsv1beta1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]networking.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *extensionsv1beta1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *extensionsv1beta1.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]extensionsv1beta1.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *extensionsv1beta1.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *extensionsv1beta1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]networking.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *extensionsv1beta1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *extensionsv1beta1.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]extensionsv1beta1.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *extensionsv1beta1.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in *extensionsv1beta1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = core.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in *extensionsv1beta1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in, out, s)
}
func autoConvert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in *networking.IngressPortStatus, out *extensionsv1beta1.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = v1.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus is an autogenerated conversion function.
func Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in *networking.IngressPortStatus, out *extensionsv1beta1.IngressPortStatus, s conversion.Scope) error {
return autoConvert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in, out, s)
}
func autoConvert_v1beta1_IngressRule_To_networking_IngressRule(in *extensionsv1beta1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IngressRule_To_networking_IngressRule is an autogenerated conversion function.
func Convert_v1beta1_IngressRule_To_networking_IngressRule(in *extensionsv1beta1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
return autoConvert_v1beta1_IngressRule_To_networking_IngressRule(in, out, s)
}
func autoConvert_networking_IngressRule_To_v1beta1_IngressRule(in *networking.IngressRule, out *extensionsv1beta1.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressRule_To_v1beta1_IngressRule is an autogenerated conversion function.
func Convert_networking_IngressRule_To_v1beta1_IngressRule(in *networking.IngressRule, out *extensionsv1beta1.IngressRule, s conversion.Scope) error {
return autoConvert_networking_IngressRule_To_v1beta1_IngressRule(in, out, s)
}
func autoConvert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in *extensionsv1beta1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(networking.HTTPIngressRuleValue)
if err := Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(*in, *out, s); err != nil {
return err
}
} else {
out.HTTP = nil
}
return nil
}
// Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue is an autogenerated conversion function.
func Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in *extensionsv1beta1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
return autoConvert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in, out, s)
}
func autoConvert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in *networking.IngressRuleValue, out *extensionsv1beta1.IngressRuleValue, s conversion.Scope) error {
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(extensionsv1beta1.HTTPIngressRuleValue)
if err := Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(*in, *out, s); err != nil {
return err
}
} else {
out.HTTP = nil
}
return nil
}
// Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue is an autogenerated conversion function.
func Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in *networking.IngressRuleValue, out *extensionsv1beta1.IngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s)
}
func autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in *extensionsv1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
// WARNING: in.Backend requires manual conversion: does not exist in peer-type
out.TLS = *(*[]networking.IngressTLS)(unsafe.Pointer(&in.TLS))
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]networking.IngressRule, len(*in))
for i := range *in {
if err := Convert_v1beta1_IngressRule_To_networking_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
return nil
}
func autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *extensionsv1beta1.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
// WARNING: in.DefaultBackend requires manual conversion: does not exist in peer-type
out.TLS = *(*[]extensionsv1beta1.IngressTLS)(unsafe.Pointer(&in.TLS))
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]extensionsv1beta1.IngressRule, len(*in))
for i := range *in {
if err := Convert_networking_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
return nil
}
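// As with IngressBackend, the IngressSpec pair is generated without exported
// wrappers: the default backend field was renamed between the two APIs
// (Backend in extensions/v1beta1, DefaultBackend in networking), so the
// hand-written conversions must carry that field across after calling the
// autoConvert_* functions above.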
func autoConvert_v1beta1_IngressStatus_To_networking_IngressStatus(in *extensionsv1beta1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
if err := Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IngressStatus_To_networking_IngressStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressStatus_To_networking_IngressStatus(in *extensionsv1beta1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressStatus_To_networking_IngressStatus(in, out, s)
}
func autoConvert_networking_IngressStatus_To_v1beta1_IngressStatus(in *networking.IngressStatus, out *extensionsv1beta1.IngressStatus, s conversion.Scope) error {
if err := Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressStatus_To_v1beta1_IngressStatus is an autogenerated conversion function.
func Convert_networking_IngressStatus_To_v1beta1_IngressStatus(in *networking.IngressStatus, out *extensionsv1beta1.IngressStatus, s conversion.Scope) error {
return autoConvert_networking_IngressStatus_To_v1beta1_IngressStatus(in, out, s)
}
func autoConvert_v1beta1_IngressTLS_To_networking_IngressTLS(in *extensionsv1beta1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_v1beta1_IngressTLS_To_networking_IngressTLS is an autogenerated conversion function.
func Convert_v1beta1_IngressTLS_To_networking_IngressTLS(in *extensionsv1beta1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
return autoConvert_v1beta1_IngressTLS_To_networking_IngressTLS(in, out, s)
}
func autoConvert_networking_IngressTLS_To_v1beta1_IngressTLS(in *networking.IngressTLS, out *extensionsv1beta1.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_networking_IngressTLS_To_v1beta1_IngressTLS is an autogenerated conversion function.
func Convert_networking_IngressTLS_To_v1beta1_IngressTLS(in *networking.IngressTLS, out *extensionsv1beta1.IngressTLS, s conversion.Scope) error {
return autoConvert_networking_IngressTLS_To_v1beta1_IngressTLS(in, out, s)
}
func autoConvert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(in *extensionsv1beta1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy is an autogenerated conversion function.
func Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(in *extensionsv1beta1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error {
return autoConvert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(in, out, s)
}
func autoConvert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(in *networking.NetworkPolicy, out *extensionsv1beta1.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy is an autogenerated conversion function.
func Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(in *networking.NetworkPolicy, out *extensionsv1beta1.NetworkPolicy, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s)
}
func autoConvert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *extensionsv1beta1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
if in.To != nil {
in, out := &in.To, &out.To
*out = make([]networking.NetworkPolicyPeer, len(*in))
for i := range *in {
if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.To = nil
}
return nil
}
func autoConvert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *extensionsv1beta1.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = *(*[]extensionsv1beta1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
if in.To != nil {
in, out := &in.To, &out.To
*out = make([]extensionsv1beta1.NetworkPolicyPeer, len(*in))
for i := range *in {
if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.To = nil
}
return nil
}
func autoConvert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *extensionsv1beta1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
if in.From != nil {
in, out := &in.From, &out.From
*out = make([]networking.NetworkPolicyPeer, len(*in))
for i := range *in {
if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.From = nil
}
return nil
}
func autoConvert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *extensionsv1beta1.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = *(*[]extensionsv1beta1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
if in.From != nil {
in, out := &in.From, &out.From
*out = make([]extensionsv1beta1.NetworkPolicyPeer, len(*in))
for i := range *in {
if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.From = nil
}
return nil
}
func autoConvert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(in *extensionsv1beta1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]networking.NetworkPolicy, len(*in))
for i := range *in {
if err := Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList is an autogenerated conversion function.
func Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(in *extensionsv1beta1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error {
return autoConvert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(in, out, s)
}
func autoConvert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *networking.NetworkPolicyList, out *extensionsv1beta1.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensionsv1beta1.NetworkPolicy, len(*in))
for i := range *in {
if err := Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList is an autogenerated conversion function.
func Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *networking.NetworkPolicyList, out *extensionsv1beta1.NetworkPolicyList, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s)
}
func autoConvert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *extensionsv1beta1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error {
out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
if in.IPBlock != nil {
in, out := &in.IPBlock, &out.IPBlock
*out = new(networking.IPBlock)
if err := Convert_v1beta1_IPBlock_To_networking_IPBlock(*in, *out, s); err != nil {
return err
}
} else {
out.IPBlock = nil
}
return nil
}
// Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer is an autogenerated conversion function.
func Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *extensionsv1beta1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error {
return autoConvert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in, out, s)
}
func autoConvert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *extensionsv1beta1.NetworkPolicyPeer, s conversion.Scope) error {
out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
if in.IPBlock != nil {
in, out := &in.IPBlock, &out.IPBlock
*out = new(extensionsv1beta1.IPBlock)
if err := Convert_networking_IPBlock_To_v1beta1_IPBlock(*in, *out, s); err != nil {
return err
}
} else {
out.IPBlock = nil
}
return nil
}
// Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer is an autogenerated conversion function.
func Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *extensionsv1beta1.NetworkPolicyPeer, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s)
}
func autoConvert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *extensionsv1beta1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error {
out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port))
out.EndPort = (*int32)(unsafe.Pointer(in.EndPort))
return nil
}
// Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort is an autogenerated conversion function.
func Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *extensionsv1beta1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error {
return autoConvert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in, out, s)
}
func autoConvert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *extensionsv1beta1.NetworkPolicyPort, s conversion.Scope) error {
out.Protocol = (*v1.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port))
out.EndPort = (*int32)(unsafe.Pointer(in.EndPort))
return nil
}
// Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort is an autogenerated conversion function.
func Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *extensionsv1beta1.NetworkPolicyPort, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s)
}
func autoConvert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *extensionsv1beta1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error {
out.PodSelector = in.PodSelector
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]networking.NetworkPolicyIngressRule, len(*in))
for i := range *in {
if err := Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Ingress = nil
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]networking.NetworkPolicyEgressRule, len(*in))
for i := range *in {
if err := Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Egress = nil
}
out.PolicyTypes = *(*[]networking.PolicyType)(unsafe.Pointer(&in.PolicyTypes))
return nil
}
func autoConvert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *extensionsv1beta1.NetworkPolicySpec, s conversion.Scope) error {
out.PodSelector = in.PodSelector
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]extensionsv1beta1.NetworkPolicyIngressRule, len(*in))
for i := range *in {
if err := Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Ingress = nil
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]extensionsv1beta1.NetworkPolicyEgressRule, len(*in))
for i := range *in {
if err := Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Egress = nil
}
out.PolicyTypes = *(*[]extensionsv1beta1.PolicyType)(unsafe.Pointer(&in.PolicyTypes))
return nil
}
func autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *extensionsv1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function.
func Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *extensionsv1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
return autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in, out, s)
}
func autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *extensionsv1beta1.ReplicaSet, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function.
func Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *extensionsv1beta1.ReplicaSet, s conversion.Scope) error {
return autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s)
}
func autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *extensionsv1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
out.Type = apps.ReplicaSetConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function.
func Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *extensionsv1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s)
}
func autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *extensionsv1beta1.ReplicaSetCondition, s conversion.Scope) error {
out.Type = extensionsv1beta1.ReplicaSetConditionType(in.Type)
out.Status = v1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition is an autogenerated conversion function.
func Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *extensionsv1beta1.ReplicaSetCondition, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s)
}
func autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *extensionsv1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]apps.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function.
func Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *extensionsv1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
return autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s)
}
func autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *extensionsv1beta1.ReplicaSetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]extensionsv1beta1.ReplicaSet, len(*in))
for i := range *in {
if err := Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function.
func Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *extensionsv1beta1.ReplicaSetList, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s)
}
func autoConvert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec is an autogenerated conversion function.
func Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in, out, s)
}
func autoConvert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
// Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec is an autogenerated conversion function.
func Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in, out, s)
}
func autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *extensionsv1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function.
func Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *extensionsv1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s)
}
func autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *extensionsv1beta1.ReplicaSetStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
out.FullyLabeledReplicas = in.FullyLabeledReplicas
out.ReadyReplicas = in.ReadyReplicas
out.AvailableReplicas = in.AvailableReplicas
out.TerminatingReplicas = (*int32)(unsafe.Pointer(in.TerminatingReplicas))
out.ObservedGeneration = in.ObservedGeneration
out.Conditions = *(*[]extensionsv1beta1.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function.
func Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *extensionsv1beta1.ReplicaSetStatus, s conversion.Scope) error {
return autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s)
}
func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *extensionsv1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function.
func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *extensionsv1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s)
}
func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *extensionsv1beta1.RollbackConfig, s conversion.Scope) error {
out.Revision = in.Revision
return nil
}
// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *extensionsv1beta1.RollbackConfig, s conversion.Scope) error {
return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
}
func autoConvert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet is an autogenerated conversion function.
func Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in, out, s)
}
func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in, out, s)
}
func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error {
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxUnavailable, &out.MaxUnavailable, s); err != nil {
return err
}
if err := metav1.Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(&in.MaxSurge, &out.MaxSurge, s); err != nil {
return err
}
return nil
}
// Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment is an autogenerated conversion function.
func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error {
return autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in, out, s)
}
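// MaxUnavailable and MaxSurge are *intstr.IntOrString in the versioned API
// but plain intstr.IntOrString internally, so the RollingUpdate conversions
// route through the metav1 IntOrString pointer helpers instead of an unsafe
// cast; as with Replicas, the pointer form is what lets defaulting
// distinguish "unset" from an explicit zero.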
func autoConvert_v1beta1_Scale_To_autoscaling_Scale(in *extensionsv1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Scale_To_autoscaling_Scale is an autogenerated conversion function.
func Convert_v1beta1_Scale_To_autoscaling_Scale(in *extensionsv1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error {
return autoConvert_v1beta1_Scale_To_autoscaling_Scale(in, out, s)
}
func autoConvert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *extensionsv1beta1.Scale, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_autoscaling_Scale_To_v1beta1_Scale is an autogenerated conversion function.
func Convert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *extensionsv1beta1.Scale, s conversion.Scope) error {
return autoConvert_autoscaling_Scale_To_v1beta1_Scale(in, out, s)
}
func autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *extensionsv1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *extensionsv1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s)
}
func autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *extensionsv1beta1.ScaleSpec, s conversion.Scope) error {
out.Replicas = in.Replicas
return nil
}
// Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
func Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *extensionsv1beta1.ScaleSpec, s conversion.Scope) error {
return autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
}
func autoConvert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string)
// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
// WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string)
return nil
}
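// The ScaleStatus pair is also wrapper-less: the v1beta1 Selector is a
// map[string]string while the internal autoscaling Selector is a serialized
// label-selector string, and TargetSelector exists only on the v1beta1 side.
// The hand-written conversion elsewhere in the package typically serializes
// the map via the labels package rather than copying it field-for-field.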
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*extensionsv1beta1.DaemonSet)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*extensionsv1beta1.DaemonSetList)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*extensionsv1beta1.Deployment)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*extensionsv1beta1.DeploymentList)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.Ingress{}, func(obj interface{}) { SetObjectDefaults_Ingress(obj.(*extensionsv1beta1.Ingress)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.IngressList{}, func(obj interface{}) { SetObjectDefaults_IngressList(obj.(*extensionsv1beta1.IngressList)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*extensionsv1beta1.NetworkPolicy)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*extensionsv1beta1.NetworkPolicyList)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*extensionsv1beta1.ReplicaSet)) })
scheme.AddTypeDefaultingFunc(&extensionsv1beta1.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*extensionsv1beta1.ReplicaSetList)) })
return nil
}
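// A scheme wired this way runs the generated defaulters through
// runtime.Scheme.Default. A minimal sketch of a caller (not part of this
// generated file):
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	d := &extensionsv1beta1.Deployment{}
//	scheme.Default(d) // invokes SetObjectDefaults_Deployment(d)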
func SetObjectDefaults_DaemonSet(in *extensionsv1beta1.DaemonSet) {
SetDefaults_DaemonSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
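// The volume loop above mixes two defaulting styles: it calls the
// hand-written corev1.SetDefaults_* helpers where one exists, and inlines
// simple literal defaults (ISCSI interface "default", RBD pool "rbd", the
// Azure caching mode, and so on) directly at the call site, presumably where
// the default was declared as a marker on the API type rather than in a
// helper function.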
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DaemonSetList(in *extensionsv1beta1.DaemonSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_DaemonSet(a)
}
}
func SetObjectDefaults_Deployment(in *extensionsv1beta1.Deployment) {
SetDefaults_Deployment(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_DeploymentList(in *extensionsv1beta1.DeploymentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Deployment(a)
}
}
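// Illustrative sketch (not part of the generated code): how the defaulting
// walk above behaves. It reuses this package's import aliases
// (extensionsv1beta1 for k8s.io/api/extensions/v1beta1, v1 for
// k8s.io/api/core/v1); the expected values follow directly from the branches
// in SetObjectDefaults_Deployment.
func exampleDeploymentDefaulting() {
	in := &extensionsv1beta1.Deployment{}
	in.Spec.Template.Spec.Containers = []v1.Container{
		{Name: "app", Ports: []v1.ContainerPort{{ContainerPort: 8080}}},
	}
	SetObjectDefaults_Deployment(in)
	// The container loop fills the empty port protocol:
	//   in.Spec.Template.Spec.Containers[0].Ports[0].Protocol == "TCP"
}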
func SetObjectDefaults_Ingress(in *extensionsv1beta1.Ingress) {
for i := range in.Spec.Rules {
a := &in.Spec.Rules[i]
if a.IngressRuleValue.HTTP != nil {
for j := range a.IngressRuleValue.HTTP.Paths {
b := &a.IngressRuleValue.HTTP.Paths[j]
SetDefaults_HTTPIngressPath(b)
}
}
}
}
func SetObjectDefaults_IngressList(in *extensionsv1beta1.IngressList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Ingress(a)
}
}
func SetObjectDefaults_NetworkPolicy(in *extensionsv1beta1.NetworkPolicy) {
SetDefaults_NetworkPolicy(in)
}
func SetObjectDefaults_NetworkPolicyList(in *extensionsv1beta1.NetworkPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_NetworkPolicy(a)
}
}
func SetObjectDefaults_ReplicaSet(in *extensionsv1beta1.ReplicaSet) {
SetDefaults_ReplicaSet(in)
corev1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
corev1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
corev1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
corev1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
corev1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
corev1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
corev1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
}
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
corev1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.LivenessProbe.ProbeHandler.GRPC != nil {
if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.StartupProbe != nil {
corev1.SetDefaults_Probe(a.StartupProbe)
if a.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
}
if a.StartupProbe.ProbeHandler.GRPC != nil {
if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.EphemeralContainers {
a := &in.Spec.Template.Spec.EphemeralContainers[i]
corev1.SetDefaults_EphemeralContainer(a)
for j := range a.EphemeralContainerCommon.Ports {
b := &a.EphemeralContainerCommon.Ports[j]
if b.Protocol == "" {
b.Protocol = "TCP"
}
}
for j := range a.EphemeralContainerCommon.Env {
b := &a.EphemeralContainerCommon.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
corev1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
if b.ValueFrom.FileKeyRef != nil {
if b.ValueFrom.FileKeyRef.Optional == nil {
var ptrVar1 bool = false
b.ValueFrom.FileKeyRef.Optional = &ptrVar1
}
}
}
}
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Limits)
corev1.SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
if a.EphemeralContainerCommon.LivenessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.ReadinessProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.StartupProbe != nil {
corev1.SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
}
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
var ptrVar1 string = ""
a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
}
}
}
if a.EphemeralContainerCommon.Lifecycle != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart != nil {
if a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PostStart.HTTPGet)
}
}
if a.EphemeralContainerCommon.Lifecycle.PreStop != nil {
if a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet != nil {
corev1.SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.Lifecycle.PreStop.HTTPGet)
}
}
}
}
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Overhead)
if in.Spec.Template.Spec.Resources != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Limits)
corev1.SetDefaults_ResourceList(&in.Spec.Template.Spec.Resources.Requests)
}
}
func SetObjectDefaults_ReplicaSetList(in *extensionsv1beta1.ReplicaSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ReplicaSet(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
"k8s.io/utils/ptr"
)
// Funcs returns the fuzzer functions for the flowcontrol API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *flowcontrol.LimitedPriorityLevelConfiguration, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
// NOTE: leaving NominalConcurrencyShares at zero would make the roundtrip
// test (from internal to v1beta2, v1beta1, and back) fail, because those
// versions default a zero value to a non-zero one
if obj.NominalConcurrencyShares == 0 {
obj.NominalConcurrencyShares = int32(1)
}
if obj.LendablePercent == nil {
obj.LendablePercent = ptr.To(int32(0))
}
},
func(obj *flowcontrol.ExemptPriorityLevelConfiguration, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
if obj.NominalConcurrencyShares == nil {
obj.NominalConcurrencyShares = ptr.To(int32(0))
}
if obj.LendablePercent == nil {
obj.LendablePercent = ptr.To(int32(0))
}
},
}
}
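// Illustrative sketch (not part of the file above): how Funcs is typically fed
// into a round-trip fuzzer. FuzzerFor and MergeFuzzerFuncs live in
// k8s.io/apimachinery/pkg/api/apitesting/fuzzer (assumed imported here as
// apitestingfuzzer, with metafuzzer for the meta/v1 funcs and math/rand for
// the seed); the exact return type of FuzzerFor depends on the apimachinery
// release, so treat this as an assumption-laden outline, not test code.
func exampleFlowcontrolFuzzer(codecs runtimeserializer.CodecFactory) {
	filler := apitestingfuzzer.FuzzerFor(
		apitestingfuzzer.MergeFuzzerFuncs(metafuzzer.Funcs, Funcs),
		rand.NewSource(1), // deterministic seed for reproducible failures
		codecs,
	)
	obj := &flowcontrol.LimitedPriorityLevelConfiguration{}
	filler.Fill(obj)
	// Per the custom func above, obj.NominalConcurrencyShares != 0 and
	// obj.LendablePercent != nil, so internal -> v1beta2/v1beta1 -> internal
	// round trips reproduce the object.
}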
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the flowcontrol API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
flowcontrolv1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1"
flowcontrolv1beta1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta2"
flowcontrolv1beta3 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta3"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(flowcontrol.AddToScheme(scheme))
utilruntime.Must(flowcontrolv1beta1.AddToScheme(scheme))
utilruntime.Must(flowcontrolv1beta2.AddToScheme(scheme))
utilruntime.Must(flowcontrolv1beta3.AddToScheme(scheme))
utilruntime.Must(flowcontrolv1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(flowcontrolv1.SchemeGroupVersion, flowcontrolv1beta3.SchemeGroupVersion,
flowcontrolv1beta2.SchemeGroupVersion, flowcontrolv1beta1.SchemeGroupVersion))
}
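// Illustrative sketch (not part of the file above): Install works against any
// scheme, not only legacyscheme.Scheme. PrioritizedVersionsForGroup is an
// existing runtime.Scheme method; the comment on the result follows from the
// SetVersionPriority call above.
func exampleInstall() {
	scheme := runtime.NewScheme()
	Install(scheme)
	versions := scheme.PrioritizedVersionsForGroup(flowcontrol.GroupName)
	// versions[0] is the v1 group version: encoding prefers v1, then
	// v1beta3, v1beta2, v1beta1, in the order registered above.
	_ = versions
}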
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the name of the API group
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder installs the API group into a scheme
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds the API to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&FlowSchema{},
&FlowSchemaList{},
&PriorityLevelConfiguration{},
&PriorityLevelConfigurationList{},
)
return nil
}
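// Illustrative sketch (not part of the file above): Kind and Resource simply
// qualify bare names with the flowcontrol group, which is how callers build
// the GroupKind and GroupResource values used in error messages and REST
// mappings.
func exampleQualifiers() {
	gk := Kind("FlowSchema")      // {flowcontrol.apiserver.k8s.io, FlowSchema}
	gr := Resource("flowschemas") // {flowcontrol.apiserver.k8s.io, flowschemas}
	_, _ = gk, gr
}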
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/flowcontrol/v1"
"k8s.io/utils/ptr"
)
// Default settings for flow-schema
const (
FlowSchemaDefaultMatchingPrecedence int32 = 1000
)
// Default settings for priority-level-configuration
const (
PriorityLevelConfigurationDefaultHandSize int32 = 8
PriorityLevelConfigurationDefaultQueues int32 = 64
PriorityLevelConfigurationDefaultQueueLengthLimit int32 = 50
PriorityLevelConfigurationDefaultNominalConcurrencyShares int32 = 30
)
// SetDefaults_FlowSchemaSpec sets default values for a FlowSchemaSpec
func SetDefaults_FlowSchemaSpec(spec *v1.FlowSchemaSpec) {
if spec.MatchingPrecedence == 0 {
spec.MatchingPrecedence = FlowSchemaDefaultMatchingPrecedence
}
}
// SetDefaults_ExemptPriorityLevelConfiguration sets default values for an ExemptPriorityLevelConfiguration
func SetDefaults_ExemptPriorityLevelConfiguration(eplc *v1.ExemptPriorityLevelConfiguration) {
if eplc.NominalConcurrencyShares == nil {
eplc.NominalConcurrencyShares = new(int32)
*eplc.NominalConcurrencyShares = 0
}
if eplc.LendablePercent == nil {
eplc.LendablePercent = new(int32)
*eplc.LendablePercent = 0
}
}
// SetDefaults_LimitedPriorityLevelConfiguration sets default values for a LimitedPriorityLevelConfiguration
func SetDefaults_LimitedPriorityLevelConfiguration(lplc *v1.LimitedPriorityLevelConfiguration) {
if lplc.NominalConcurrencyShares == nil {
lplc.NominalConcurrencyShares = ptr.To(PriorityLevelConfigurationDefaultNominalConcurrencyShares)
}
if lplc.LendablePercent == nil {
lplc.LendablePercent = new(int32)
*lplc.LendablePercent = 0
}
}
// SetDefaults_QueuingConfiguration sets default values for a QueuingConfiguration
func SetDefaults_QueuingConfiguration(cfg *v1.QueuingConfiguration) {
if cfg.HandSize == 0 {
cfg.HandSize = PriorityLevelConfigurationDefaultHandSize
}
if cfg.Queues == 0 {
cfg.Queues = PriorityLevelConfigurationDefaultQueues
}
if cfg.QueueLengthLimit == 0 {
cfg.QueueLengthLimit = PriorityLevelConfigurationDefaultQueueLengthLimit
}
}
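// Illustrative sketch (not part of the file above): all of the setters here
// only replace zero values, so explicit settings survive defaulting. It uses
// this file's v1 alias for k8s.io/api/flowcontrol/v1.
func exampleQueuingDefaults() {
	cfg := &v1.QueuingConfiguration{Queues: 16}
	SetDefaults_QueuingConfiguration(cfg)
	// cfg.Queues == 16 (explicit value kept), cfg.HandSize == 8 and
	// cfg.QueueLengthLimit == 50 (filled from the constants above).
}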
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
flowcontrolv1 "k8s.io/api/flowcontrol/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &flowcontrolv1.SchemeBuilder
// AddToScheme adds the API to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
flowcontrolv1 "k8s.io/api/flowcontrol/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
flowcontrol "k8s.io/kubernetes/pkg/apis/flowcontrol"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.ExemptPriorityLevelConfiguration)(nil), (*flowcontrol.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(a.(*flowcontrolv1.ExemptPriorityLevelConfiguration), b.(*flowcontrol.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ExemptPriorityLevelConfiguration)(nil), (*flowcontrolv1.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1_ExemptPriorityLevelConfiguration(a.(*flowcontrol.ExemptPriorityLevelConfiguration), b.(*flowcontrolv1.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowDistinguisherMethod)(nil), (*flowcontrol.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(a.(*flowcontrolv1.FlowDistinguisherMethod), b.(*flowcontrol.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowDistinguisherMethod)(nil), (*flowcontrolv1.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowDistinguisherMethod_To_v1_FlowDistinguisherMethod(a.(*flowcontrol.FlowDistinguisherMethod), b.(*flowcontrolv1.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowSchema)(nil), (*flowcontrol.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowSchema_To_flowcontrol_FlowSchema(a.(*flowcontrolv1.FlowSchema), b.(*flowcontrol.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchema)(nil), (*flowcontrolv1.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchema_To_v1_FlowSchema(a.(*flowcontrol.FlowSchema), b.(*flowcontrolv1.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowSchemaCondition)(nil), (*flowcontrol.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(a.(*flowcontrolv1.FlowSchemaCondition), b.(*flowcontrol.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaCondition)(nil), (*flowcontrolv1.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaCondition_To_v1_FlowSchemaCondition(a.(*flowcontrol.FlowSchemaCondition), b.(*flowcontrolv1.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowSchemaList)(nil), (*flowcontrol.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowSchemaList_To_flowcontrol_FlowSchemaList(a.(*flowcontrolv1.FlowSchemaList), b.(*flowcontrol.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaList)(nil), (*flowcontrolv1.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaList_To_v1_FlowSchemaList(a.(*flowcontrol.FlowSchemaList), b.(*flowcontrolv1.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowSchemaSpec)(nil), (*flowcontrol.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(a.(*flowcontrolv1.FlowSchemaSpec), b.(*flowcontrol.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaSpec)(nil), (*flowcontrolv1.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec(a.(*flowcontrol.FlowSchemaSpec), b.(*flowcontrolv1.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.FlowSchemaStatus)(nil), (*flowcontrol.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(a.(*flowcontrolv1.FlowSchemaStatus), b.(*flowcontrol.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaStatus)(nil), (*flowcontrolv1.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus(a.(*flowcontrol.FlowSchemaStatus), b.(*flowcontrolv1.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.GroupSubject)(nil), (*flowcontrol.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupSubject_To_flowcontrol_GroupSubject(a.(*flowcontrolv1.GroupSubject), b.(*flowcontrol.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.GroupSubject)(nil), (*flowcontrolv1.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_GroupSubject_To_v1_GroupSubject(a.(*flowcontrol.GroupSubject), b.(*flowcontrolv1.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.LimitResponse)(nil), (*flowcontrol.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitResponse_To_flowcontrol_LimitResponse(a.(*flowcontrolv1.LimitResponse), b.(*flowcontrol.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitResponse)(nil), (*flowcontrolv1.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitResponse_To_v1_LimitResponse(a.(*flowcontrol.LimitResponse), b.(*flowcontrolv1.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.LimitedPriorityLevelConfiguration)(nil), (*flowcontrol.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(a.(*flowcontrolv1.LimitedPriorityLevelConfiguration), b.(*flowcontrol.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitedPriorityLevelConfiguration)(nil), (*flowcontrolv1.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration(a.(*flowcontrol.LimitedPriorityLevelConfiguration), b.(*flowcontrolv1.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.NonResourcePolicyRule)(nil), (*flowcontrol.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(a.(*flowcontrolv1.NonResourcePolicyRule), b.(*flowcontrol.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.NonResourcePolicyRule)(nil), (*flowcontrolv1.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_NonResourcePolicyRule_To_v1_NonResourcePolicyRule(a.(*flowcontrol.NonResourcePolicyRule), b.(*flowcontrolv1.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PolicyRulesWithSubjects)(nil), (*flowcontrol.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(a.(*flowcontrolv1.PolicyRulesWithSubjects), b.(*flowcontrol.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PolicyRulesWithSubjects)(nil), (*flowcontrolv1.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PolicyRulesWithSubjects_To_v1_PolicyRulesWithSubjects(a.(*flowcontrol.PolicyRulesWithSubjects), b.(*flowcontrolv1.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfiguration)(nil), (*flowcontrol.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(a.(*flowcontrolv1.PriorityLevelConfiguration), b.(*flowcontrol.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfiguration)(nil), (*flowcontrolv1.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration(a.(*flowcontrol.PriorityLevelConfiguration), b.(*flowcontrolv1.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfigurationCondition)(nil), (*flowcontrol.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(a.(*flowcontrolv1.PriorityLevelConfigurationCondition), b.(*flowcontrol.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationCondition)(nil), (*flowcontrolv1.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1_PriorityLevelConfigurationCondition(a.(*flowcontrol.PriorityLevelConfigurationCondition), b.(*flowcontrolv1.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfigurationList)(nil), (*flowcontrol.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(a.(*flowcontrolv1.PriorityLevelConfigurationList), b.(*flowcontrol.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationList)(nil), (*flowcontrolv1.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationList_To_v1_PriorityLevelConfigurationList(a.(*flowcontrol.PriorityLevelConfigurationList), b.(*flowcontrolv1.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfigurationReference)(nil), (*flowcontrol.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(a.(*flowcontrolv1.PriorityLevelConfigurationReference), b.(*flowcontrol.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationReference)(nil), (*flowcontrolv1.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference(a.(*flowcontrol.PriorityLevelConfigurationReference), b.(*flowcontrolv1.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfigurationSpec)(nil), (*flowcontrol.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(a.(*flowcontrolv1.PriorityLevelConfigurationSpec), b.(*flowcontrol.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationSpec)(nil), (*flowcontrolv1.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec(a.(*flowcontrol.PriorityLevelConfigurationSpec), b.(*flowcontrolv1.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.PriorityLevelConfigurationStatus)(nil), (*flowcontrol.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(a.(*flowcontrolv1.PriorityLevelConfigurationStatus), b.(*flowcontrol.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationStatus)(nil), (*flowcontrolv1.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus(a.(*flowcontrol.PriorityLevelConfigurationStatus), b.(*flowcontrolv1.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.QueuingConfiguration)(nil), (*flowcontrol.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(a.(*flowcontrolv1.QueuingConfiguration), b.(*flowcontrol.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.QueuingConfiguration)(nil), (*flowcontrolv1.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_QueuingConfiguration_To_v1_QueuingConfiguration(a.(*flowcontrol.QueuingConfiguration), b.(*flowcontrolv1.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.ResourcePolicyRule)(nil), (*flowcontrol.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(a.(*flowcontrolv1.ResourcePolicyRule), b.(*flowcontrol.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ResourcePolicyRule)(nil), (*flowcontrolv1.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ResourcePolicyRule_To_v1_ResourcePolicyRule(a.(*flowcontrol.ResourcePolicyRule), b.(*flowcontrolv1.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.ServiceAccountSubject)(nil), (*flowcontrol.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(a.(*flowcontrolv1.ServiceAccountSubject), b.(*flowcontrol.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ServiceAccountSubject)(nil), (*flowcontrolv1.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ServiceAccountSubject_To_v1_ServiceAccountSubject(a.(*flowcontrol.ServiceAccountSubject), b.(*flowcontrolv1.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.Subject)(nil), (*flowcontrol.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Subject_To_flowcontrol_Subject(a.(*flowcontrolv1.Subject), b.(*flowcontrol.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.Subject)(nil), (*flowcontrolv1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_Subject_To_v1_Subject(a.(*flowcontrol.Subject), b.(*flowcontrolv1.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1.UserSubject)(nil), (*flowcontrol.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_UserSubject_To_flowcontrol_UserSubject(a.(*flowcontrolv1.UserSubject), b.(*flowcontrol.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.UserSubject)(nil), (*flowcontrolv1.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_UserSubject_To_v1_UserSubject(a.(*flowcontrol.UserSubject), b.(*flowcontrolv1.UserSubject), scope)
}); err != nil {
return err
}
return nil
}
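// Illustrative sketch (not part of the generated code): RegisterConversions is
// wired into localSchemeBuilder in init above, so callers normally convert
// through a scheme rather than invoking the Convert_* functions directly.
// Scheme.Convert is an existing runtime.Scheme method.
func exampleConvert(scheme *runtime.Scheme) error {
	in := &flowcontrolv1.FlowSchema{}
	out := &flowcontrol.FlowSchema{}
	// Dispatches to Convert_v1_FlowSchema_To_flowcontrol_FlowSchema below.
	return scheme.Convert(in, out, nil)
}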
func autoConvert_v1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_v1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1_ExemptPriorityLevelConfiguration(in, out, s)
}
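// Note on the unsafe.Pointer casts above: conversion-gen emits a direct
// reinterpretation when the source and destination structs share an
// identical memory layout, avoiding a field-by-field copy. A hypothetical
// manual equivalent of the v1-to-internal direction, for illustration:
func exampleManualExemptConversion(in *flowcontrolv1.ExemptPriorityLevelConfiguration) *flowcontrol.ExemptPriorityLevelConfiguration {
	if in == nil {
		return nil
	}
	// Copies the two *int32 fields individually; the generated cast instead
	// reinterprets the whole struct in place.
	return &flowcontrol.ExemptPriorityLevelConfiguration{
		NominalConcurrencyShares: in.NominalConcurrencyShares,
		LendablePercent:          in.LendablePercent,
	}
}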
func autoConvert_v1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrol.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_v1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_v1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_v1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrolv1.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_flowcontrol_FlowDistinguisherMethod_To_v1_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_flowcontrol_FlowDistinguisherMethod_To_v1_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_v1_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_FlowSchema_To_flowcontrol_FlowSchema is an autogenerated conversion function.
func Convert_v1_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
return autoConvert_v1_FlowSchema_To_flowcontrol_FlowSchema(in, out, s)
}
func autoConvert_flowcontrol_FlowSchema_To_v1_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_FlowSchema_To_v1_FlowSchema is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchema_To_v1_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1.FlowSchema, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchema_To_v1_FlowSchema(in, out, s)
}
func autoConvert_v1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrol.FlowSchemaConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition is an autogenerated conversion function.
func Convert_v1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_v1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaCondition_To_v1_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrolv1.FlowSchemaConditionType(in.Type)
out.Status = flowcontrolv1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_FlowSchemaCondition_To_v1_FlowSchemaCondition is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaCondition_To_v1_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaCondition_To_v1_FlowSchemaCondition(in, out, s)
}
func autoConvert_v1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrol.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_FlowSchemaList_To_flowcontrol_FlowSchemaList is an autogenerated conversion function.
func Convert_v1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
return autoConvert_v1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaList_To_v1_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrolv1.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_flowcontrol_FlowSchemaList_To_v1_FlowSchemaList is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaList_To_v1_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1.FlowSchemaList, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaList_To_v1_FlowSchemaList(in, out, s)
}
func autoConvert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrol.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrol.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec is an autogenerated conversion function.
func Convert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_v1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrolv1.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrolv1.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaSpec_To_v1_FlowSchemaSpec(in, out, s)
}
func autoConvert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus is an autogenerated conversion function.
func Convert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_v1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaStatus_To_v1_FlowSchemaStatus(in, out, s)
}
func autoConvert_v1_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_GroupSubject_To_flowcontrol_GroupSubject is an autogenerated conversion function.
func Convert_v1_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
return autoConvert_v1_GroupSubject_To_flowcontrol_GroupSubject(in, out, s)
}
func autoConvert_flowcontrol_GroupSubject_To_v1_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_GroupSubject_To_v1_GroupSubject is an autogenerated conversion function.
func Convert_flowcontrol_GroupSubject_To_v1_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1.GroupSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_GroupSubject_To_v1_GroupSubject(in, out, s)
}
func autoConvert_v1_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrol.LimitResponseType(in.Type)
out.Queuing = (*flowcontrol.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_v1_LimitResponse_To_flowcontrol_LimitResponse is an autogenerated conversion function.
func Convert_v1_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
return autoConvert_v1_LimitResponse_To_flowcontrol_LimitResponse(in, out, s)
}
func autoConvert_flowcontrol_LimitResponse_To_v1_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrolv1.LimitResponseType(in.Type)
out.Queuing = (*flowcontrolv1.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_flowcontrol_LimitResponse_To_v1_LimitResponse is an autogenerated conversion function.
func Convert_flowcontrol_LimitResponse_To_v1_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1.LimitResponse, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitResponse_To_v1_LimitResponse(in, out, s)
}
func autoConvert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.NominalConcurrencyShares, &out.NominalConcurrencyShares, s); err != nil {
return err
}
if err := Convert_v1_LimitResponse_To_flowcontrol_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
// Convert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.NominalConcurrencyShares, &out.NominalConcurrencyShares, s); err != nil {
return err
}
if err := Convert_flowcontrol_LimitResponse_To_v1_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
// Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration(in, out, s)
}
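// Illustrative note (an assumption about the apimachinery helpers, not part
// of the generated output): the metav1 pointer conversions used above
// dereference a non-nil *int32 and fall back to zero, while the reverse
// direction always allocates a fresh pointer. A hypothetical inline
// equivalent:
func examplePointerInt32Semantics(in *int32) (value int32, back *int32) {
	if in != nil {
		value = *in // non-nil pointer: take the pointed-to value
	} // nil pointer: value stays 0
	back = new(int32)
	*back = value // value-to-pointer conversion yields a non-nil pointer
	return value, back
}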
func autoConvert_v1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_v1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_NonResourcePolicyRule_To_v1_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_flowcontrol_NonResourcePolicyRule_To_v1_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_NonResourcePolicyRule_To_v1_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_NonResourcePolicyRule_To_v1_NonResourcePolicyRule(in, out, s)
}
func autoConvert_v1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrol.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrol.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrol.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_v1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_v1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_v1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrolv1.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrolv1.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrolv1.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_flowcontrol_PolicyRulesWithSubjects_To_v1_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_flowcontrol_PolicyRulesWithSubjects_To_v1_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrolv1.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrolv1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_v1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrol.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_v1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrolv1.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_flowcontrol_PriorityLevelConfiguration_To_v1_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationList_To_v1_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationList_To_v1_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1_PriorityLevelConfigurationList(in, out, s)
}
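// Why an element-wise loop here instead of the unsafe slice cast used for
// FlowSchemaList: PriorityLevelConfiguration is not layout-identical across
// versions, because LimitedPriorityLevelConfiguration stores
// NominalConcurrencyShares as *int32 in v1 but as int32 internally, so each
// item must be converted field by field.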
func autoConvert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrol.LimitedPriorityLevelConfiguration)
if err := Convert_v1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrol.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrolv1.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrolv1.LimitedPriorityLevelConfiguration)
if err := Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrolv1.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_v1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_v1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_v1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration is an autogenerated conversion function.
func Convert_v1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_v1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in, out, s)
}
func autoConvert_flowcontrol_QueuingConfiguration_To_v1_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_flowcontrol_QueuingConfiguration_To_v1_QueuingConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_QueuingConfiguration_To_v1_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_QueuingConfiguration_To_v1_QueuingConfiguration(in, out, s)
}
func autoConvert_v1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_v1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule is an autogenerated conversion function.
func Convert_v1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_ResourcePolicyRule_To_v1_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_flowcontrol_ResourcePolicyRule_To_v1_ResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_ResourcePolicyRule_To_v1_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_ResourcePolicyRule_To_v1_ResourcePolicyRule(in, out, s)
}
func autoConvert_v1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject is an autogenerated conversion function.
func Convert_v1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_v1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in, out, s)
}
func autoConvert_flowcontrol_ServiceAccountSubject_To_v1_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_flowcontrol_ServiceAccountSubject_To_v1_ServiceAccountSubject is an autogenerated conversion function.
func Convert_flowcontrol_ServiceAccountSubject_To_v1_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_ServiceAccountSubject_To_v1_ServiceAccountSubject(in, out, s)
}
func autoConvert_v1_Subject_To_flowcontrol_Subject(in *flowcontrolv1.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
out.Kind = flowcontrol.SubjectKind(in.Kind)
out.User = (*flowcontrol.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrol.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrol.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_v1_Subject_To_flowcontrol_Subject is an autogenerated conversion function.
func Convert_v1_Subject_To_flowcontrol_Subject(in *flowcontrolv1.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
return autoConvert_v1_Subject_To_flowcontrol_Subject(in, out, s)
}
func autoConvert_flowcontrol_Subject_To_v1_Subject(in *flowcontrol.Subject, out *flowcontrolv1.Subject, s conversion.Scope) error {
out.Kind = flowcontrolv1.SubjectKind(in.Kind)
out.User = (*flowcontrolv1.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrolv1.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrolv1.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_flowcontrol_Subject_To_v1_Subject is an autogenerated conversion function.
func Convert_flowcontrol_Subject_To_v1_Subject(in *flowcontrol.Subject, out *flowcontrolv1.Subject, s conversion.Scope) error {
return autoConvert_flowcontrol_Subject_To_v1_Subject(in, out, s)
}
func autoConvert_v1_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_UserSubject_To_flowcontrol_UserSubject is an autogenerated conversion function.
func Convert_v1_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
return autoConvert_v1_UserSubject_To_flowcontrol_UserSubject(in, out, s)
}
func autoConvert_flowcontrol_UserSubject_To_v1_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_UserSubject_To_v1_UserSubject is an autogenerated conversion function.
func Convert_flowcontrol_UserSubject_To_v1_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1.UserSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_UserSubject_To_v1_UserSubject(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
flowcontrolv1 "k8s.io/api/flowcontrol/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&flowcontrolv1.FlowSchema{}, func(obj interface{}) { SetObjectDefaults_FlowSchema(obj.(*flowcontrolv1.FlowSchema)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1.FlowSchemaList{}, func(obj interface{}) { SetObjectDefaults_FlowSchemaList(obj.(*flowcontrolv1.FlowSchemaList)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1.PriorityLevelConfiguration{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfiguration(obj.(*flowcontrolv1.PriorityLevelConfiguration))
})
scheme.AddTypeDefaultingFunc(&flowcontrolv1.PriorityLevelConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfigurationList(obj.(*flowcontrolv1.PriorityLevelConfigurationList))
})
return nil
}
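// Hypothetical usage sketch (not generated output): after RegisterDefaults
// runs, callers normally trigger these functions via runtime.Scheme.Default
// rather than calling SetObjectDefaults_* directly.
func exampleApplyDefaults(scheme *runtime.Scheme, fs *flowcontrolv1.FlowSchema) {
	// Default invokes the type-defaulting function registered above for
	// *flowcontrolv1.FlowSchema.
	scheme.Default(fs)
}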
func SetObjectDefaults_FlowSchema(in *flowcontrolv1.FlowSchema) {
SetDefaults_FlowSchemaSpec(&in.Spec)
}
func SetObjectDefaults_FlowSchemaList(in *flowcontrolv1.FlowSchemaList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_FlowSchema(a)
}
}
func SetObjectDefaults_PriorityLevelConfiguration(in *flowcontrolv1.PriorityLevelConfiguration) {
if in.Spec.Limited != nil {
SetDefaults_LimitedPriorityLevelConfiguration(in.Spec.Limited)
if in.Spec.Limited.LimitResponse.Queuing != nil {
SetDefaults_QueuingConfiguration(in.Spec.Limited.LimitResponse.Queuing)
}
}
if in.Spec.Exempt != nil {
SetDefaults_ExemptPriorityLevelConfiguration(in.Spec.Exempt)
}
}
func SetObjectDefaults_PriorityLevelConfigurationList(in *flowcontrolv1.PriorityLevelConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityLevelConfiguration(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/api/flowcontrol/v1beta1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
)
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3, so this hand-written
// conversion copies the v1beta1 field into the renamed internal field after
// the generated conversion runs.
func Convert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *v1beta1.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in, out, nil); err != nil {
return err
}
out.NominalConcurrencyShares = in.AssuredConcurrencyShares
return nil
}
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3, so this hand-written
// conversion restores the v1beta1 field from the renamed internal field
// after the generated conversion runs.
func Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *v1beta1.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(in, out, nil); err != nil {
return err
}
out.AssuredConcurrencyShares = in.NominalConcurrencyShares
return nil
}
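// Hypothetical round-trip sketch (not part of this file): the pair of
// hand-written conversions above preserves the deprecated field across the
// internal type. Assuming zero values for the remaining fields:
func exampleAssuredSharesRoundTrip() (int32, error) {
	in := v1beta1.LimitedPriorityLevelConfiguration{AssuredConcurrencyShares: 5}
	var internal flowcontrol.LimitedPriorityLevelConfiguration
	if err := Convert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(&in, &internal, nil); err != nil {
		return 0, err
	}
	// internal.NominalConcurrencyShares is now 5.
	var out v1beta1.LimitedPriorityLevelConfiguration
	if err := Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(&internal, &out, nil); err != nil {
		return 0, err
	}
	return out.AssuredConcurrencyShares, nil // 5 again
}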
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/api/flowcontrol/v1beta1"
)
// Default settings for flow-schema
const (
FlowSchemaDefaultMatchingPrecedence int32 = 1000
)
// Default settings for priority-level-configuration
const (
PriorityLevelConfigurationDefaultHandSize int32 = 8
PriorityLevelConfigurationDefaultQueues int32 = 64
PriorityLevelConfigurationDefaultQueueLengthLimit int32 = 50
PriorityLevelConfigurationDefaultAssuredConcurrencyShares int32 = 30
)
// SetDefaults_FlowSchemaSpec sets default values for a flow schema spec
func SetDefaults_FlowSchemaSpec(spec *v1beta1.FlowSchemaSpec) {
if spec.MatchingPrecedence == 0 {
spec.MatchingPrecedence = FlowSchemaDefaultMatchingPrecedence
}
}
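// SetDefaults_ExemptPriorityLevelConfiguration sets default values for an
// exempt priority level configuration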
func SetDefaults_ExemptPriorityLevelConfiguration(eplc *v1beta1.ExemptPriorityLevelConfiguration) {
if eplc.NominalConcurrencyShares == nil {
eplc.NominalConcurrencyShares = new(int32)
*eplc.NominalConcurrencyShares = 0
}
if eplc.LendablePercent == nil {
eplc.LendablePercent = new(int32)
*eplc.LendablePercent = 0
}
}
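// SetDefaults_LimitedPriorityLevelConfiguration sets default values for a
// limited priority level configuration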
func SetDefaults_LimitedPriorityLevelConfiguration(lplc *v1beta1.LimitedPriorityLevelConfiguration) {
if lplc.AssuredConcurrencyShares == 0 {
lplc.AssuredConcurrencyShares = PriorityLevelConfigurationDefaultAssuredConcurrencyShares
}
if lplc.LendablePercent == nil {
lplc.LendablePercent = new(int32)
*lplc.LendablePercent = 0
}
}
// SetDefaults_QueuingConfiguration sets default values for a queuing configuration
func SetDefaults_QueuingConfiguration(cfg *v1beta1.QueuingConfiguration) {
if cfg.HandSize == 0 {
cfg.HandSize = PriorityLevelConfigurationDefaultHandSize
}
if cfg.Queues == 0 {
cfg.Queues = PriorityLevelConfigurationDefaultQueues
}
if cfg.QueueLengthLimit == 0 {
cfg.QueueLengthLimit = PriorityLevelConfigurationDefaultQueueLengthLimit
}
}
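// Illustrative sketch (not part of this file): applying the defaulters above
// to a zero-valued queuing configuration fills in the documented constants.
func exampleQueuingDefaults() v1beta1.QueuingConfiguration {
	cfg := v1beta1.QueuingConfiguration{}
	SetDefaults_QueuingConfiguration(&cfg)
	// cfg is now {Queues: 64, HandSize: 8, QueueLengthLimit: 50}.
	return cfg
}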
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &flowcontrolv1beta1.SchemeBuilder
// AddToScheme adds the API types in this package to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
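// Hypothetical usage sketch (not part of this file; assumes
// k8s.io/apimachinery/pkg/runtime is imported as runtime): consumers wire
// these types into a scheme through AddToScheme, which also runs the
// defaulter registration queued in init above.
func exampleBuildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}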
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
flowcontrol "k8s.io/kubernetes/pkg/apis/flowcontrol"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.ExemptPriorityLevelConfiguration)(nil), (*flowcontrol.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(a.(*flowcontrolv1beta1.ExemptPriorityLevelConfiguration), b.(*flowcontrol.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ExemptPriorityLevelConfiguration)(nil), (*flowcontrolv1beta1.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta1_ExemptPriorityLevelConfiguration(a.(*flowcontrol.ExemptPriorityLevelConfiguration), b.(*flowcontrolv1beta1.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowDistinguisherMethod)(nil), (*flowcontrol.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(a.(*flowcontrolv1beta1.FlowDistinguisherMethod), b.(*flowcontrol.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowDistinguisherMethod)(nil), (*flowcontrolv1beta1.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta1_FlowDistinguisherMethod(a.(*flowcontrol.FlowDistinguisherMethod), b.(*flowcontrolv1beta1.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowSchema)(nil), (*flowcontrol.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowSchema_To_flowcontrol_FlowSchema(a.(*flowcontrolv1beta1.FlowSchema), b.(*flowcontrol.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchema)(nil), (*flowcontrolv1beta1.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchema_To_v1beta1_FlowSchema(a.(*flowcontrol.FlowSchema), b.(*flowcontrolv1beta1.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowSchemaCondition)(nil), (*flowcontrol.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(a.(*flowcontrolv1beta1.FlowSchemaCondition), b.(*flowcontrol.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaCondition)(nil), (*flowcontrolv1beta1.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaCondition_To_v1beta1_FlowSchemaCondition(a.(*flowcontrol.FlowSchemaCondition), b.(*flowcontrolv1beta1.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowSchemaList)(nil), (*flowcontrol.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowSchemaList_To_flowcontrol_FlowSchemaList(a.(*flowcontrolv1beta1.FlowSchemaList), b.(*flowcontrol.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaList)(nil), (*flowcontrolv1beta1.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaList_To_v1beta1_FlowSchemaList(a.(*flowcontrol.FlowSchemaList), b.(*flowcontrolv1beta1.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowSchemaSpec)(nil), (*flowcontrol.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(a.(*flowcontrolv1beta1.FlowSchemaSpec), b.(*flowcontrol.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaSpec)(nil), (*flowcontrolv1beta1.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec(a.(*flowcontrol.FlowSchemaSpec), b.(*flowcontrolv1beta1.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.FlowSchemaStatus)(nil), (*flowcontrol.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(a.(*flowcontrolv1beta1.FlowSchemaStatus), b.(*flowcontrol.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaStatus)(nil), (*flowcontrolv1beta1.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus(a.(*flowcontrol.FlowSchemaStatus), b.(*flowcontrolv1beta1.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.GroupSubject)(nil), (*flowcontrol.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_GroupSubject_To_flowcontrol_GroupSubject(a.(*flowcontrolv1beta1.GroupSubject), b.(*flowcontrol.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.GroupSubject)(nil), (*flowcontrolv1beta1.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_GroupSubject_To_v1beta1_GroupSubject(a.(*flowcontrol.GroupSubject), b.(*flowcontrolv1beta1.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.LimitResponse)(nil), (*flowcontrol.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse(a.(*flowcontrolv1beta1.LimitResponse), b.(*flowcontrol.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitResponse)(nil), (*flowcontrolv1beta1.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse(a.(*flowcontrol.LimitResponse), b.(*flowcontrolv1beta1.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.NonResourcePolicyRule)(nil), (*flowcontrol.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(a.(*flowcontrolv1beta1.NonResourcePolicyRule), b.(*flowcontrol.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.NonResourcePolicyRule)(nil), (*flowcontrolv1beta1.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_NonResourcePolicyRule_To_v1beta1_NonResourcePolicyRule(a.(*flowcontrol.NonResourcePolicyRule), b.(*flowcontrolv1beta1.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PolicyRulesWithSubjects)(nil), (*flowcontrol.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(a.(*flowcontrolv1beta1.PolicyRulesWithSubjects), b.(*flowcontrol.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PolicyRulesWithSubjects)(nil), (*flowcontrolv1beta1.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta1_PolicyRulesWithSubjects(a.(*flowcontrol.PolicyRulesWithSubjects), b.(*flowcontrolv1beta1.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfiguration)(nil), (*flowcontrol.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(a.(*flowcontrolv1beta1.PriorityLevelConfiguration), b.(*flowcontrol.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfiguration)(nil), (*flowcontrolv1beta1.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration(a.(*flowcontrol.PriorityLevelConfiguration), b.(*flowcontrolv1beta1.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfigurationCondition)(nil), (*flowcontrol.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(a.(*flowcontrolv1beta1.PriorityLevelConfigurationCondition), b.(*flowcontrol.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationCondition)(nil), (*flowcontrolv1beta1.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta1_PriorityLevelConfigurationCondition(a.(*flowcontrol.PriorityLevelConfigurationCondition), b.(*flowcontrolv1beta1.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfigurationList)(nil), (*flowcontrol.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(a.(*flowcontrolv1beta1.PriorityLevelConfigurationList), b.(*flowcontrol.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationList)(nil), (*flowcontrolv1beta1.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta1_PriorityLevelConfigurationList(a.(*flowcontrol.PriorityLevelConfigurationList), b.(*flowcontrolv1beta1.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfigurationReference)(nil), (*flowcontrol.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(a.(*flowcontrolv1beta1.PriorityLevelConfigurationReference), b.(*flowcontrol.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationReference)(nil), (*flowcontrolv1beta1.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference(a.(*flowcontrol.PriorityLevelConfigurationReference), b.(*flowcontrolv1beta1.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfigurationSpec)(nil), (*flowcontrol.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(a.(*flowcontrolv1beta1.PriorityLevelConfigurationSpec), b.(*flowcontrol.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationSpec)(nil), (*flowcontrolv1beta1.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec(a.(*flowcontrol.PriorityLevelConfigurationSpec), b.(*flowcontrolv1beta1.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.PriorityLevelConfigurationStatus)(nil), (*flowcontrol.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(a.(*flowcontrolv1beta1.PriorityLevelConfigurationStatus), b.(*flowcontrol.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationStatus)(nil), (*flowcontrolv1beta1.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus(a.(*flowcontrol.PriorityLevelConfigurationStatus), b.(*flowcontrolv1beta1.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.QueuingConfiguration)(nil), (*flowcontrol.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(a.(*flowcontrolv1beta1.QueuingConfiguration), b.(*flowcontrol.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.QueuingConfiguration)(nil), (*flowcontrolv1beta1.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_QueuingConfiguration_To_v1beta1_QueuingConfiguration(a.(*flowcontrol.QueuingConfiguration), b.(*flowcontrolv1beta1.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.ResourcePolicyRule)(nil), (*flowcontrol.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(a.(*flowcontrolv1beta1.ResourcePolicyRule), b.(*flowcontrol.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ResourcePolicyRule)(nil), (*flowcontrolv1beta1.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ResourcePolicyRule_To_v1beta1_ResourcePolicyRule(a.(*flowcontrol.ResourcePolicyRule), b.(*flowcontrolv1beta1.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.ServiceAccountSubject)(nil), (*flowcontrol.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(a.(*flowcontrolv1beta1.ServiceAccountSubject), b.(*flowcontrol.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ServiceAccountSubject)(nil), (*flowcontrolv1beta1.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ServiceAccountSubject_To_v1beta1_ServiceAccountSubject(a.(*flowcontrol.ServiceAccountSubject), b.(*flowcontrolv1beta1.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.Subject)(nil), (*flowcontrol.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Subject_To_flowcontrol_Subject(a.(*flowcontrolv1beta1.Subject), b.(*flowcontrol.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.Subject)(nil), (*flowcontrolv1beta1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_Subject_To_v1beta1_Subject(a.(*flowcontrol.Subject), b.(*flowcontrolv1beta1.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta1.UserSubject)(nil), (*flowcontrol.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_UserSubject_To_flowcontrol_UserSubject(a.(*flowcontrolv1beta1.UserSubject), b.(*flowcontrol.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.UserSubject)(nil), (*flowcontrolv1beta1.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_UserSubject_To_v1beta1_UserSubject(a.(*flowcontrol.UserSubject), b.(*flowcontrolv1beta1.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrol.LimitedPriorityLevelConfiguration)(nil), (*flowcontrolv1beta1.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(a.(*flowcontrol.LimitedPriorityLevelConfiguration), b.(*flowcontrolv1beta1.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrolv1beta1.LimitedPriorityLevelConfiguration)(nil), (*flowcontrol.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(a.(*flowcontrolv1beta1.LimitedPriorityLevelConfiguration), b.(*flowcontrol.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
return nil
}
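// A minimal usage sketch (illustrative, not part of the generated API):
// RegisterConversions is normally invoked indirectly via the scheme builder,
// after which a runtime.Scheme can convert between the versioned and internal
// types. The variable names below are hypothetical.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	versioned := &flowcontrolv1beta1.FlowSchema{}
//	internal := &flowcontrol.FlowSchema{}
//	// Convert the versioned object into its internal representation.
//	if err := scheme.Convert(versioned, internal, nil); err != nil {
//		panic(err)
//	}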
func autoConvert_v1beta1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta1.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
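// Note on the unsafe.Pointer casts above: conversion-gen emits them when it
// has verified that the source and destination structs share an identical
// memory layout, so the pointer can be reinterpreted in place instead of
// copying field by field. Only types whose fields diverge between versions
// (see the WARNING comments further down) fall back to hand-written
// conversions.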
// Convert_v1beta1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta1.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta1_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta1.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta1_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta1_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta1.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta1_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1beta1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta1.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrol.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_v1beta1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_v1beta1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta1.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_v1beta1_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta1_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta1.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrolv1beta1.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta1_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta1_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta1.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta1_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_v1beta1_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta1.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_FlowSchema_To_flowcontrol_FlowSchema is an autogenerated conversion function.
func Convert_v1beta1_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta1.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
return autoConvert_v1beta1_FlowSchema_To_flowcontrol_FlowSchema(in, out, s)
}
func autoConvert_flowcontrol_FlowSchema_To_v1beta1_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta1.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_FlowSchema_To_v1beta1_FlowSchema is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchema_To_v1beta1_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta1.FlowSchema, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchema_To_v1beta1_FlowSchema(in, out, s)
}
func autoConvert_v1beta1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta1.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrol.FlowSchemaConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition is an autogenerated conversion function.
func Convert_v1beta1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta1.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_v1beta1_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta1_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta1.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta1.FlowSchemaConditionType(in.Type)
out.Status = flowcontrolv1beta1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_FlowSchemaCondition_To_v1beta1_FlowSchemaCondition is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaCondition_To_v1beta1_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta1.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta1_FlowSchemaCondition(in, out, s)
}
func autoConvert_v1beta1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta1.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrol.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_FlowSchemaList_To_flowcontrol_FlowSchemaList is an autogenerated conversion function.
func Convert_v1beta1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta1.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
return autoConvert_v1beta1_FlowSchemaList_To_flowcontrol_FlowSchemaList(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaList_To_v1beta1_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta1.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrolv1beta1.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_flowcontrol_FlowSchemaList_To_v1beta1_FlowSchemaList is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaList_To_v1beta1_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta1.FlowSchemaList, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaList_To_v1beta1_FlowSchemaList(in, out, s)
}
func autoConvert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta1.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrol.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrol.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec is an autogenerated conversion function.
func Convert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta1.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_v1beta1_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta1.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrolv1beta1.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrolv1beta1.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta1.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta1_FlowSchemaSpec(in, out, s)
}
func autoConvert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta1.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus is an autogenerated conversion function.
func Convert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta1.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_v1beta1_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta1.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta1.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta1.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta1_FlowSchemaStatus(in, out, s)
}
func autoConvert_v1beta1_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta1.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta1_GroupSubject_To_flowcontrol_GroupSubject is an autogenerated conversion function.
func Convert_v1beta1_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta1.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
return autoConvert_v1beta1_GroupSubject_To_flowcontrol_GroupSubject(in, out, s)
}
func autoConvert_flowcontrol_GroupSubject_To_v1beta1_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta1.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_GroupSubject_To_v1beta1_GroupSubject is an autogenerated conversion function.
func Convert_flowcontrol_GroupSubject_To_v1beta1_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta1.GroupSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_GroupSubject_To_v1beta1_GroupSubject(in, out, s)
}
func autoConvert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta1.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrol.LimitResponseType(in.Type)
out.Queuing = (*flowcontrol.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse is an autogenerated conversion function.
func Convert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta1.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
return autoConvert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse(in, out, s)
}
func autoConvert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta1.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrolv1beta1.LimitResponseType(in.Type)
out.Queuing = (*flowcontrolv1beta1.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse is an autogenerated conversion function.
func Convert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta1.LimitResponse, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse(in, out, s)
}
func autoConvert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1beta1.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
// WARNING: in.AssuredConcurrencyShares requires manual conversion: does not exist in peer-type
if err := Convert_v1beta1_LimitResponse_To_flowcontrol_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
func autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1beta1.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
// WARNING: in.NominalConcurrencyShares requires manual conversion: does not exist in peer-type
if err := Convert_flowcontrol_LimitResponse_To_v1beta1_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
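// The two autoConvert functions above cannot be fully generated because
// LimitedPriorityLevelConfiguration renamed AssuredConcurrencyShares to
// NominalConcurrencyShares between API versions. The hand-written
// Convert_..._LimitedPriorityLevelConfiguration functions registered via
// AddConversionFunc in RegisterConversions copy the renamed field after
// delegating here, much like the v1beta2 conversion.go that appears later in
// this document.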
func autoConvert_v1beta1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta1.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1beta1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta1.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta1_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta1_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta1.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_flowcontrol_NonResourcePolicyRule_To_v1beta1_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_NonResourcePolicyRule_To_v1beta1_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta1.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta1_NonResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta1.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrol.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrol.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrol.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_v1beta1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_v1beta1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta1.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_v1beta1_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta1_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta1.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrolv1beta1.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrolv1beta1.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrolv1beta1.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta1_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta1_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta1.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta1_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1beta1.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1beta1.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1beta1.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1beta1.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1beta1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta1.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta1.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta1_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta1.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta1.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrolv1beta1.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta1_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta1_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta1.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta1_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_v1beta1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta1.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrol.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_v1beta1_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta1.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta1_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta1.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrolv1beta1.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta1_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta1_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta1_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta1.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta1_PriorityLevelConfigurationList(in, out, s)
}
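// Unlike the FlowSchemaList conversions above, the
// PriorityLevelConfigurationList conversions copy Items element by element:
// the element type requires a hand-written conversion (the
// AssuredConcurrencyShares rename), so the whole slice cannot be
// reinterpreted with a single unsafe.Pointer cast.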
func autoConvert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta1.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta1.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta1.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta1.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta1_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta1.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrol.LimitedPriorityLevelConfiguration)
if err := Convert_v1beta1_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrol.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta1.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta1.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrolv1beta1.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrolv1beta1.LimitedPriorityLevelConfiguration)
if err := Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta1_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrolv1beta1.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta1.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta1_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta1.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta1.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta1.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta1.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta1.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta1_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_v1beta1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta1.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_v1beta1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration is an autogenerated conversion function.
func Convert_v1beta1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta1.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in, out, s)
}
func autoConvert_flowcontrol_QueuingConfiguration_To_v1beta1_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta1.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_flowcontrol_QueuingConfiguration_To_v1beta1_QueuingConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_QueuingConfiguration_To_v1beta1_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta1.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_QueuingConfiguration_To_v1beta1_QueuingConfiguration(in, out, s)
}
func autoConvert_v1beta1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta1.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_v1beta1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta1.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta1_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta1_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta1.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_flowcontrol_ResourcePolicyRule_To_v1beta1_ResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_ResourcePolicyRule_To_v1beta1_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta1.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta1_ResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta1.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1beta1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject is an autogenerated conversion function.
func Convert_v1beta1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta1.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in, out, s)
}
func autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta1_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta1.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_flowcontrol_ServiceAccountSubject_To_v1beta1_ServiceAccountSubject is an autogenerated conversion function.
func Convert_flowcontrol_ServiceAccountSubject_To_v1beta1_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta1.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta1_ServiceAccountSubject(in, out, s)
}
func autoConvert_v1beta1_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta1.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
out.Kind = flowcontrol.SubjectKind(in.Kind)
out.User = (*flowcontrol.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrol.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrol.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_v1beta1_Subject_To_flowcontrol_Subject is an autogenerated conversion function.
func Convert_v1beta1_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta1.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
return autoConvert_v1beta1_Subject_To_flowcontrol_Subject(in, out, s)
}
func autoConvert_flowcontrol_Subject_To_v1beta1_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta1.Subject, s conversion.Scope) error {
out.Kind = flowcontrolv1beta1.SubjectKind(in.Kind)
out.User = (*flowcontrolv1beta1.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrolv1beta1.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrolv1beta1.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_flowcontrol_Subject_To_v1beta1_Subject is an autogenerated conversion function.
func Convert_flowcontrol_Subject_To_v1beta1_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta1.Subject, s conversion.Scope) error {
return autoConvert_flowcontrol_Subject_To_v1beta1_Subject(in, out, s)
}
func autoConvert_v1beta1_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta1.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta1_UserSubject_To_flowcontrol_UserSubject is an autogenerated conversion function.
func Convert_v1beta1_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta1.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
return autoConvert_v1beta1_UserSubject_To_flowcontrol_UserSubject(in, out, s)
}
func autoConvert_flowcontrol_UserSubject_To_v1beta1_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta1.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_UserSubject_To_v1beta1_UserSubject is an autogenerated conversion function.
func Convert_flowcontrol_UserSubject_To_v1beta1_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta1.UserSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_UserSubject_To_v1beta1_UserSubject(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta1.FlowSchema{}, func(obj interface{}) { SetObjectDefaults_FlowSchema(obj.(*flowcontrolv1beta1.FlowSchema)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta1.FlowSchemaList{}, func(obj interface{}) { SetObjectDefaults_FlowSchemaList(obj.(*flowcontrolv1beta1.FlowSchemaList)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta1.PriorityLevelConfiguration{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfiguration(obj.(*flowcontrolv1beta1.PriorityLevelConfiguration))
})
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta1.PriorityLevelConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfigurationList(obj.(*flowcontrolv1beta1.PriorityLevelConfigurationList))
})
return nil
}
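// A minimal usage sketch (illustrative): once RegisterDefaults has run,
// runtime.Scheme.Default applies the matching defaulter to an object. The
// variable names are hypothetical.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	fs := &flowcontrolv1beta1.FlowSchema{}
//	scheme.Default(fs) // fs.Spec now carries the registered defaults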
func SetObjectDefaults_FlowSchema(in *flowcontrolv1beta1.FlowSchema) {
SetDefaults_FlowSchemaSpec(&in.Spec)
}
func SetObjectDefaults_FlowSchemaList(in *flowcontrolv1beta1.FlowSchemaList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_FlowSchema(a)
}
}
func SetObjectDefaults_PriorityLevelConfiguration(in *flowcontrolv1beta1.PriorityLevelConfiguration) {
if in.Spec.Limited != nil {
SetDefaults_LimitedPriorityLevelConfiguration(in.Spec.Limited)
if in.Spec.Limited.LimitResponse.Queuing != nil {
SetDefaults_QueuingConfiguration(in.Spec.Limited.LimitResponse.Queuing)
}
}
if in.Spec.Exempt != nil {
SetDefaults_ExemptPriorityLevelConfiguration(in.Spec.Exempt)
}
}
func SetObjectDefaults_PriorityLevelConfigurationList(in *flowcontrolv1beta1.PriorityLevelConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityLevelConfiguration(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/flowcontrol/v1beta2"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
)
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3.
func Convert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *v1beta2.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in, out, nil); err != nil {
return err
}
out.NominalConcurrencyShares = in.AssuredConcurrencyShares
return nil
}
// LimitedPriorityLevelConfiguration.AssuredConcurrencyShares has been
// renamed to NominalConcurrencyShares in v1beta3.
func Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *v1beta2.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in, out, nil); err != nil {
return err
}
out.AssuredConcurrencyShares = in.NominalConcurrencyShares
return nil
}
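// An illustrative round trip under these conversions (hypothetical values):
//
//	in := &flowcontrol.LimitedPriorityLevelConfiguration{NominalConcurrencyShares: 30}
//	out := &v1beta2.LimitedPriorityLevelConfiguration{}
//	_ = Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in, out, nil)
//	// out.AssuredConcurrencyShares == 30: the internal field surfaces under
//	// its pre-v1beta3 name in the v1beta2 API.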
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/flowcontrol/v1beta2"
)
// Default settings for flow-schema
const (
FlowSchemaDefaultMatchingPrecedence int32 = 1000
)
// Default settings for priority-level-configuration
const (
PriorityLevelConfigurationDefaultHandSize int32 = 8
PriorityLevelConfigurationDefaultQueues int32 = 64
PriorityLevelConfigurationDefaultQueueLengthLimit int32 = 50
PriorityLevelConfigurationDefaultAssuredConcurrencyShares int32 = 30
)
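// Because these versioned fields are plain int32 rather than pointers, the
// zero value doubles as "unset": the SetDefaults_* functions below key off
// == 0 (or == nil for the pointer fields) when deciding whether to apply one
// of the constants above.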
// SetDefaults_FlowSchemaSpec sets default values for a FlowSchemaSpec
func SetDefaults_FlowSchemaSpec(spec *v1beta2.FlowSchemaSpec) {
if spec.MatchingPrecedence == 0 {
spec.MatchingPrecedence = FlowSchemaDefaultMatchingPrecedence
}
}
func SetDefaults_ExemptPriorityLevelConfiguration(eplc *v1beta2.ExemptPriorityLevelConfiguration) {
if eplc.NominalConcurrencyShares == nil {
eplc.NominalConcurrencyShares = new(int32)
*eplc.NominalConcurrencyShares = 0
}
if eplc.LendablePercent == nil {
eplc.LendablePercent = new(int32)
*eplc.LendablePercent = 0
}
}
func SetDefaults_LimitedPriorityLevelConfiguration(lplc *v1beta2.LimitedPriorityLevelConfiguration) {
if lplc.AssuredConcurrencyShares == 0 {
lplc.AssuredConcurrencyShares = PriorityLevelConfigurationDefaultAssuredConcurrencyShares
}
if lplc.LendablePercent == nil {
lplc.LendablePercent = new(int32)
*lplc.LendablePercent = 0
}
}
// SetDefaults_QueuingConfiguration sets default values for a QueuingConfiguration
func SetDefaults_QueuingConfiguration(cfg *v1beta2.QueuingConfiguration) {
if cfg.HandSize == 0 {
cfg.HandSize = PriorityLevelConfigurationDefaultHandSize
}
if cfg.Queues == 0 {
cfg.Queues = PriorityLevelConfigurationDefaultQueues
}
if cfg.QueueLengthLimit == 0 {
cfg.QueueLengthLimit = PriorityLevelConfigurationDefaultQueueLengthLimit
}
}
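// A usage sketch (illustrative): fields already set by the user are
// preserved, and only zero-valued fields are filled in.
//
//	cfg := &v1beta2.QueuingConfiguration{Queues: 16}
//	SetDefaults_QueuingConfiguration(cfg)
//	// cfg.Queues stays 16; cfg.HandSize becomes 8 and
//	// cfg.QueueLengthLimit becomes 50.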
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
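// For example (illustrative):
//
//	gr := Resource("flowschemas")
//	// gr == schema.GroupResource{Group: "flowcontrol.apiserver.k8s.io",
//	//                            Resource: "flowschemas"}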
var (
localSchemeBuilder = &flowcontrolv1beta2.SchemeBuilder
// AddToScheme adds the API types of this package to the given scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta2
import (
unsafe "unsafe"
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
flowcontrol "k8s.io/kubernetes/pkg/apis/flowcontrol"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.ExemptPriorityLevelConfiguration)(nil), (*flowcontrol.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(a.(*flowcontrolv1beta2.ExemptPriorityLevelConfiguration), b.(*flowcontrol.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ExemptPriorityLevelConfiguration)(nil), (*flowcontrolv1beta2.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta2_ExemptPriorityLevelConfiguration(a.(*flowcontrol.ExemptPriorityLevelConfiguration), b.(*flowcontrolv1beta2.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowDistinguisherMethod)(nil), (*flowcontrol.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(a.(*flowcontrolv1beta2.FlowDistinguisherMethod), b.(*flowcontrol.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowDistinguisherMethod)(nil), (*flowcontrolv1beta2.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta2_FlowDistinguisherMethod(a.(*flowcontrol.FlowDistinguisherMethod), b.(*flowcontrolv1beta2.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowSchema)(nil), (*flowcontrol.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowSchema_To_flowcontrol_FlowSchema(a.(*flowcontrolv1beta2.FlowSchema), b.(*flowcontrol.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchema)(nil), (*flowcontrolv1beta2.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchema_To_v1beta2_FlowSchema(a.(*flowcontrol.FlowSchema), b.(*flowcontrolv1beta2.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowSchemaCondition)(nil), (*flowcontrol.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(a.(*flowcontrolv1beta2.FlowSchemaCondition), b.(*flowcontrol.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaCondition)(nil), (*flowcontrolv1beta2.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaCondition_To_v1beta2_FlowSchemaCondition(a.(*flowcontrol.FlowSchemaCondition), b.(*flowcontrolv1beta2.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowSchemaList)(nil), (*flowcontrol.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowSchemaList_To_flowcontrol_FlowSchemaList(a.(*flowcontrolv1beta2.FlowSchemaList), b.(*flowcontrol.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaList)(nil), (*flowcontrolv1beta2.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaList_To_v1beta2_FlowSchemaList(a.(*flowcontrol.FlowSchemaList), b.(*flowcontrolv1beta2.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowSchemaSpec)(nil), (*flowcontrol.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(a.(*flowcontrolv1beta2.FlowSchemaSpec), b.(*flowcontrol.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaSpec)(nil), (*flowcontrolv1beta2.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec(a.(*flowcontrol.FlowSchemaSpec), b.(*flowcontrolv1beta2.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.FlowSchemaStatus)(nil), (*flowcontrol.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(a.(*flowcontrolv1beta2.FlowSchemaStatus), b.(*flowcontrol.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaStatus)(nil), (*flowcontrolv1beta2.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus(a.(*flowcontrol.FlowSchemaStatus), b.(*flowcontrolv1beta2.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.GroupSubject)(nil), (*flowcontrol.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_GroupSubject_To_flowcontrol_GroupSubject(a.(*flowcontrolv1beta2.GroupSubject), b.(*flowcontrol.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.GroupSubject)(nil), (*flowcontrolv1beta2.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_GroupSubject_To_v1beta2_GroupSubject(a.(*flowcontrol.GroupSubject), b.(*flowcontrolv1beta2.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.LimitResponse)(nil), (*flowcontrol.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse(a.(*flowcontrolv1beta2.LimitResponse), b.(*flowcontrol.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitResponse)(nil), (*flowcontrolv1beta2.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse(a.(*flowcontrol.LimitResponse), b.(*flowcontrolv1beta2.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.NonResourcePolicyRule)(nil), (*flowcontrol.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(a.(*flowcontrolv1beta2.NonResourcePolicyRule), b.(*flowcontrol.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.NonResourcePolicyRule)(nil), (*flowcontrolv1beta2.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_NonResourcePolicyRule_To_v1beta2_NonResourcePolicyRule(a.(*flowcontrol.NonResourcePolicyRule), b.(*flowcontrolv1beta2.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PolicyRulesWithSubjects)(nil), (*flowcontrol.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(a.(*flowcontrolv1beta2.PolicyRulesWithSubjects), b.(*flowcontrol.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PolicyRulesWithSubjects)(nil), (*flowcontrolv1beta2.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta2_PolicyRulesWithSubjects(a.(*flowcontrol.PolicyRulesWithSubjects), b.(*flowcontrolv1beta2.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfiguration)(nil), (*flowcontrol.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(a.(*flowcontrolv1beta2.PriorityLevelConfiguration), b.(*flowcontrol.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfiguration)(nil), (*flowcontrolv1beta2.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration(a.(*flowcontrol.PriorityLevelConfiguration), b.(*flowcontrolv1beta2.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfigurationCondition)(nil), (*flowcontrol.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(a.(*flowcontrolv1beta2.PriorityLevelConfigurationCondition), b.(*flowcontrol.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationCondition)(nil), (*flowcontrolv1beta2.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta2_PriorityLevelConfigurationCondition(a.(*flowcontrol.PriorityLevelConfigurationCondition), b.(*flowcontrolv1beta2.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfigurationList)(nil), (*flowcontrol.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(a.(*flowcontrolv1beta2.PriorityLevelConfigurationList), b.(*flowcontrol.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationList)(nil), (*flowcontrolv1beta2.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta2_PriorityLevelConfigurationList(a.(*flowcontrol.PriorityLevelConfigurationList), b.(*flowcontrolv1beta2.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfigurationReference)(nil), (*flowcontrol.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(a.(*flowcontrolv1beta2.PriorityLevelConfigurationReference), b.(*flowcontrol.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationReference)(nil), (*flowcontrolv1beta2.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference(a.(*flowcontrol.PriorityLevelConfigurationReference), b.(*flowcontrolv1beta2.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfigurationSpec)(nil), (*flowcontrol.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(a.(*flowcontrolv1beta2.PriorityLevelConfigurationSpec), b.(*flowcontrol.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationSpec)(nil), (*flowcontrolv1beta2.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec(a.(*flowcontrol.PriorityLevelConfigurationSpec), b.(*flowcontrolv1beta2.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.PriorityLevelConfigurationStatus)(nil), (*flowcontrol.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(a.(*flowcontrolv1beta2.PriorityLevelConfigurationStatus), b.(*flowcontrol.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationStatus)(nil), (*flowcontrolv1beta2.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus(a.(*flowcontrol.PriorityLevelConfigurationStatus), b.(*flowcontrolv1beta2.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.QueuingConfiguration)(nil), (*flowcontrol.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(a.(*flowcontrolv1beta2.QueuingConfiguration), b.(*flowcontrol.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.QueuingConfiguration)(nil), (*flowcontrolv1beta2.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_QueuingConfiguration_To_v1beta2_QueuingConfiguration(a.(*flowcontrol.QueuingConfiguration), b.(*flowcontrolv1beta2.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.ResourcePolicyRule)(nil), (*flowcontrol.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(a.(*flowcontrolv1beta2.ResourcePolicyRule), b.(*flowcontrol.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ResourcePolicyRule)(nil), (*flowcontrolv1beta2.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ResourcePolicyRule_To_v1beta2_ResourcePolicyRule(a.(*flowcontrol.ResourcePolicyRule), b.(*flowcontrolv1beta2.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.ServiceAccountSubject)(nil), (*flowcontrol.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(a.(*flowcontrolv1beta2.ServiceAccountSubject), b.(*flowcontrol.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ServiceAccountSubject)(nil), (*flowcontrolv1beta2.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ServiceAccountSubject_To_v1beta2_ServiceAccountSubject(a.(*flowcontrol.ServiceAccountSubject), b.(*flowcontrolv1beta2.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.Subject)(nil), (*flowcontrol.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Subject_To_flowcontrol_Subject(a.(*flowcontrolv1beta2.Subject), b.(*flowcontrol.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.Subject)(nil), (*flowcontrolv1beta2.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_Subject_To_v1beta2_Subject(a.(*flowcontrol.Subject), b.(*flowcontrolv1beta2.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta2.UserSubject)(nil), (*flowcontrol.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_UserSubject_To_flowcontrol_UserSubject(a.(*flowcontrolv1beta2.UserSubject), b.(*flowcontrol.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.UserSubject)(nil), (*flowcontrolv1beta2.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_UserSubject_To_v1beta2_UserSubject(a.(*flowcontrol.UserSubject), b.(*flowcontrolv1beta2.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrol.LimitedPriorityLevelConfiguration)(nil), (*flowcontrolv1beta2.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(a.(*flowcontrol.LimitedPriorityLevelConfiguration), b.(*flowcontrolv1beta2.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrolv1beta2.LimitedPriorityLevelConfiguration)(nil), (*flowcontrol.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(a.(*flowcontrolv1beta2.LimitedPriorityLevelConfiguration), b.(*flowcontrol.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
return nil
}
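// Illustrative sketch, not part of the generated output: once the conversions
// above are registered (the enclosing registration function is assumed here to
// be the standard conversion-gen RegisterConversions(s *runtime.Scheme) error),
// callers convert between versioned and internal types through the scheme:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	versioned := &flowcontrolv1beta2.FlowSchema{}
//	internal := &flowcontrol.FlowSchema{}
//	if err := scheme.Convert(versioned, internal, nil); err != nil {
//		panic(err)
//	}
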
func autoConvert_v1beta2_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta2.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_v1beta2_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta2_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta2.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta2_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta2.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta2_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta2_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta2.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta2_ExemptPriorityLevelConfiguration(in, out, s)
}
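// Note on the unsafe.Pointer casts above and below: conversion-gen emits them
// whenever the source and destination fields have identical memory layouts
// (here, *int32 on both sides), so the converted object aliases the original
// memory instead of allocating and copying. The casts are a generator
// optimization, not hand-written shortcuts.
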
func autoConvert_v1beta2_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta2.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrol.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_v1beta2_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_v1beta2_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta2.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_v1beta2_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta2_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta2.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrolv1beta2.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta2_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta2_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta2.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta2_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_v1beta2_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta2.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_FlowSchema_To_flowcontrol_FlowSchema is an autogenerated conversion function.
func Convert_v1beta2_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta2.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
return autoConvert_v1beta2_FlowSchema_To_flowcontrol_FlowSchema(in, out, s)
}
func autoConvert_flowcontrol_FlowSchema_To_v1beta2_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta2.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_FlowSchema_To_v1beta2_FlowSchema is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchema_To_v1beta2_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta2.FlowSchema, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchema_To_v1beta2_FlowSchema(in, out, s)
}
func autoConvert_v1beta2_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta2.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrol.FlowSchemaConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition is an autogenerated conversion function.
func Convert_v1beta2_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta2.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_v1beta2_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta2_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta2.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta2.FlowSchemaConditionType(in.Type)
out.Status = flowcontrolv1beta2.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_FlowSchemaCondition_To_v1beta2_FlowSchemaCondition is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaCondition_To_v1beta2_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta2.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta2_FlowSchemaCondition(in, out, s)
}
func autoConvert_v1beta2_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta2.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrol.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_FlowSchemaList_To_flowcontrol_FlowSchemaList is an autogenerated conversion function.
func Convert_v1beta2_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta2.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
return autoConvert_v1beta2_FlowSchemaList_To_flowcontrol_FlowSchemaList(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaList_To_v1beta2_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta2.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrolv1beta2.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_flowcontrol_FlowSchemaList_To_v1beta2_FlowSchemaList is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaList_To_v1beta2_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta2.FlowSchemaList, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaList_To_v1beta2_FlowSchemaList(in, out, s)
}
func autoConvert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta2.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrol.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrol.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec is an autogenerated conversion function.
func Convert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta2.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_v1beta2_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta2.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrolv1beta2.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrolv1beta2.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta2.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta2_FlowSchemaSpec(in, out, s)
}
func autoConvert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta2.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus is an autogenerated conversion function.
func Convert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta2.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_v1beta2_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta2.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta2.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta2.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta2_FlowSchemaStatus(in, out, s)
}
func autoConvert_v1beta2_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta2.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta2_GroupSubject_To_flowcontrol_GroupSubject is an autogenerated conversion function.
func Convert_v1beta2_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta2.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
return autoConvert_v1beta2_GroupSubject_To_flowcontrol_GroupSubject(in, out, s)
}
func autoConvert_flowcontrol_GroupSubject_To_v1beta2_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta2.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_GroupSubject_To_v1beta2_GroupSubject is an autogenerated conversion function.
func Convert_flowcontrol_GroupSubject_To_v1beta2_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta2.GroupSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_GroupSubject_To_v1beta2_GroupSubject(in, out, s)
}
func autoConvert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta2.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrol.LimitResponseType(in.Type)
out.Queuing = (*flowcontrol.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse is an autogenerated conversion function.
func Convert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta2.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
return autoConvert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse(in, out, s)
}
func autoConvert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta2.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrolv1beta2.LimitResponseType(in.Type)
out.Queuing = (*flowcontrolv1beta2.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse is an autogenerated conversion function.
func Convert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta2.LimitResponse, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse(in, out, s)
}
func autoConvert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1beta2.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
// WARNING: in.AssuredConcurrencyShares requires manual conversion: does not exist in peer-type
if err := Convert_v1beta2_LimitResponse_To_flowcontrol_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
func autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1beta2.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
// WARNING: in.NominalConcurrencyShares requires manual conversion: does not exist in peer-type
if err := Convert_flowcontrol_LimitResponse_To_v1beta2_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
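// The WARNING comments above mark a field rename between API versions:
// v1beta2 names the field AssuredConcurrencyShares, while the internal type
// names it NominalConcurrencyShares. conversion-gen cannot pair differently
// named fields, so the copy is left to the hand-written
// Convert_..._LimitedPriorityLevelConfiguration functions registered with
// AddConversionFunc earlier in this file.
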
func autoConvert_v1beta2_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta2.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1beta2_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta2_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta2.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta2_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta2_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta2.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_flowcontrol_NonResourcePolicyRule_To_v1beta2_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_NonResourcePolicyRule_To_v1beta2_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta2.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta2_NonResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta2_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta2.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrol.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrol.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrol.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_v1beta2_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_v1beta2_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta2.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_v1beta2_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta2_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta2.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrolv1beta2.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrolv1beta2.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrolv1beta2.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta2_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta2_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta2.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta2_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1beta2.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1beta2.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1beta2.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1beta2.PriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1beta2_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta2.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta2_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta2.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta2_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta2.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta2.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrolv1beta2.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta2_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta2_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta2.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta2_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_v1beta2_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta2.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrol.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_v1beta2_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta2_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta2.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta2_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta2.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrolv1beta2.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta2_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta2_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta2_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta2.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta2_PriorityLevelConfigurationList(in, out, s)
}
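// Unlike FlowSchemaList above, which converts its Items slice with a single
// unsafe.Pointer cast, the two PriorityLevelConfigurationList conversions use
// an explicit loop: their items transitively contain
// LimitedPriorityLevelConfiguration, whose layout differs between v1beta2 and
// the internal type because of the AssuredConcurrencyShares rename, so a
// zero-copy cast would be unsound and the generator falls back to
// element-by-element conversion.
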
func autoConvert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta2.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta2.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta2.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta2.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta2_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta2.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrol.LimitedPriorityLevelConfiguration)
if err := Convert_v1beta2_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrol.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta2.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta2.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrolv1beta2.PriorityLevelEnablement(in.Type)
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(flowcontrolv1beta2.LimitedPriorityLevelConfiguration)
if err := Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta2_LimitedPriorityLevelConfiguration(*in, *out, s); err != nil {
return err
}
} else {
out.Limited = nil
}
out.Exempt = (*flowcontrolv1beta2.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta2.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta2_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta2.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta2.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_v1beta2_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta2.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta2.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta2.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta2_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_v1beta2_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta2.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_v1beta2_QueuingConfiguration_To_flowcontrol_QueuingConfiguration is an autogenerated conversion function.
func Convert_v1beta2_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta2.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in, out, s)
}
func autoConvert_flowcontrol_QueuingConfiguration_To_v1beta2_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta2.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_flowcontrol_QueuingConfiguration_To_v1beta2_QueuingConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_QueuingConfiguration_To_v1beta2_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta2.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_QueuingConfiguration_To_v1beta2_QueuingConfiguration(in, out, s)
}
func autoConvert_v1beta2_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta2.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_v1beta2_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta2_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta2.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta2_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta2_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta2.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_flowcontrol_ResourcePolicyRule_To_v1beta2_ResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_ResourcePolicyRule_To_v1beta2_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta2.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta2_ResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta2_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta2.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1beta2_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject is an autogenerated conversion function.
func Convert_v1beta2_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta2.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_v1beta2_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in, out, s)
}
func autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta2_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta2.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_flowcontrol_ServiceAccountSubject_To_v1beta2_ServiceAccountSubject is an autogenerated conversion function.
func Convert_flowcontrol_ServiceAccountSubject_To_v1beta2_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta2.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta2_ServiceAccountSubject(in, out, s)
}
func autoConvert_v1beta2_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta2.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
out.Kind = flowcontrol.SubjectKind(in.Kind)
out.User = (*flowcontrol.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrol.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrol.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_v1beta2_Subject_To_flowcontrol_Subject is an autogenerated conversion function.
func Convert_v1beta2_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta2.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
return autoConvert_v1beta2_Subject_To_flowcontrol_Subject(in, out, s)
}
func autoConvert_flowcontrol_Subject_To_v1beta2_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta2.Subject, s conversion.Scope) error {
out.Kind = flowcontrolv1beta2.SubjectKind(in.Kind)
out.User = (*flowcontrolv1beta2.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrolv1beta2.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrolv1beta2.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_flowcontrol_Subject_To_v1beta2_Subject is an autogenerated conversion function.
func Convert_flowcontrol_Subject_To_v1beta2_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta2.Subject, s conversion.Scope) error {
return autoConvert_flowcontrol_Subject_To_v1beta2_Subject(in, out, s)
}
func autoConvert_v1beta2_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta2.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta2_UserSubject_To_flowcontrol_UserSubject is an autogenerated conversion function.
func Convert_v1beta2_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta2.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
return autoConvert_v1beta2_UserSubject_To_flowcontrol_UserSubject(in, out, s)
}
func autoConvert_flowcontrol_UserSubject_To_v1beta2_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta2.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_UserSubject_To_v1beta2_UserSubject is an autogenerated conversion function.
func Convert_flowcontrol_UserSubject_To_v1beta2_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta2.UserSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_UserSubject_To_v1beta2_UserSubject(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta2
import (
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta2.FlowSchema{}, func(obj interface{}) { SetObjectDefaults_FlowSchema(obj.(*flowcontrolv1beta2.FlowSchema)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta2.FlowSchemaList{}, func(obj interface{}) { SetObjectDefaults_FlowSchemaList(obj.(*flowcontrolv1beta2.FlowSchemaList)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta2.PriorityLevelConfiguration{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfiguration(obj.(*flowcontrolv1beta2.PriorityLevelConfiguration))
})
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta2.PriorityLevelConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfigurationList(obj.(*flowcontrolv1beta2.PriorityLevelConfigurationList))
})
return nil
}
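// Illustrative sketch, not part of the generated output: with the defaulters
// registered, a runtime.Scheme applies them through its Default method:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	fs := &flowcontrolv1beta2.FlowSchema{}
//	scheme.Default(fs) // invokes SetObjectDefaults_FlowSchema
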
func SetObjectDefaults_FlowSchema(in *flowcontrolv1beta2.FlowSchema) {
SetDefaults_FlowSchemaSpec(&in.Spec)
}
func SetObjectDefaults_FlowSchemaList(in *flowcontrolv1beta2.FlowSchemaList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_FlowSchema(a)
}
}
func SetObjectDefaults_PriorityLevelConfiguration(in *flowcontrolv1beta2.PriorityLevelConfiguration) {
if in.Spec.Limited != nil {
SetDefaults_LimitedPriorityLevelConfiguration(in.Spec.Limited)
if in.Spec.Limited.LimitResponse.Queuing != nil {
SetDefaults_QueuingConfiguration(in.Spec.Limited.LimitResponse.Queuing)
}
}
if in.Spec.Exempt != nil {
SetDefaults_ExemptPriorityLevelConfiguration(in.Spec.Exempt)
}
}
func SetObjectDefaults_PriorityLevelConfigurationList(in *flowcontrolv1beta2.PriorityLevelConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityLevelConfiguration(a)
}
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
"k8s.io/api/flowcontrol/v1beta3"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/flowcontrol"
)
func Convert_v1beta3_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *v1beta3.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_v1beta3_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in, out, s); err != nil {
return err
}
// during v1beta3 -> internal conversion:
// - remove the roundtrip annotation for the 'NominalConcurrencyShares' field
// - make sure we don't mutate the source (v1beta3) object's annotations
annotations, copied := dropPriorityLevelConcurrencyShareDefaultAnnotation(out.ObjectMeta.Annotations)
if copied {
out.ObjectMeta.Annotations = annotations
}
return nil
}
func Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta3_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *v1beta3.PriorityLevelConfiguration, s conversion.Scope) error {
if err := autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta3_PriorityLevelConfiguration(in, out, s); err != nil {
return err
}
// during internal -> v1beta3 conversion:
// - add the roundtrip annotation for the 'NominalConcurrencyShares' field,
//   if and only if the 'NominalConcurrencyShares' field has a value of zero.
// - make sure we don't mutate the source (internal) object's annotations
if limited := in.Spec.Limited; limited != nil && limited.NominalConcurrencyShares == 0 {
annotations, copied := addPriorityLevelConcurrencyShareDefaultAnnotation(out.ObjectMeta.Annotations)
if copied {
out.ObjectMeta.Annotations = annotations
}
}
return nil
}
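// Taken together, the two conversions above make an explicit zero value of
// spec.limited.nominalConcurrencyShares survive a round trip: internal ->
// v1beta3 attaches the PriorityLevelPreserveZeroConcurrencySharesKey
// annotation when the field is zero, and v1beta3 -> internal strips it again,
// so the marker never leaks into the internal representation.
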
func dropPriorityLevelConcurrencyShareDefaultAnnotation(in map[string]string) (map[string]string, bool) {
if _, ok := in[v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey]; !ok {
return in, false
}
out := copyStringMap(in)
delete(out, v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey)
return out, true
}
func addPriorityLevelConcurrencyShareDefaultAnnotation(in map[string]string) (map[string]string, bool) {
if _, ok := in[v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey]; ok {
return in, false
}
out := copyStringMap(in)
out[v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey] = ""
return out, true
}
// copyStringMap returns a copy of the input map.
// If input is nil, an empty map is returned.
func copyStringMap(in map[string]string) map[string]string {
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
"k8s.io/api/flowcontrol/v1beta3"
"k8s.io/utils/ptr"
)
// Default settings for flow-schema
const (
FlowSchemaDefaultMatchingPrecedence int32 = 1000
)
// Default settings for priority-level-configuration
const (
PriorityLevelConfigurationDefaultHandSize int32 = 8
PriorityLevelConfigurationDefaultQueues int32 = 64
PriorityLevelConfigurationDefaultQueueLengthLimit int32 = 50
PriorityLevelConfigurationDefaultNominalConcurrencyShares int32 = 30
)
// SetDefaults_FlowSchemaSpec sets default values for a flow schema spec
func SetDefaults_FlowSchemaSpec(spec *v1beta3.FlowSchemaSpec) {
if spec.MatchingPrecedence == 0 {
spec.MatchingPrecedence = FlowSchemaDefaultMatchingPrecedence
}
}
// SetDefaults_PriorityLevelConfiguration sets the default values for a
// PriorityLevelConfiguration object. Since we need to inspect the presence
// of the roundtrip annotation to determine whether the user explicitly
// specified a zero value for the 'NominalConcurrencyShares' field, the
// defaulting logic needs access to the annotations field.
func SetDefaults_PriorityLevelConfiguration(in *v1beta3.PriorityLevelConfiguration) {
if limited := in.Spec.Limited; limited != nil {
// for v1beta3, we apply a default value to the NominalConcurrencyShares
// field only when:
// a) NominalConcurrencyShares == 0, and
// b) the roundtrip annotation is not set
if _, ok := in.Annotations[v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey]; !ok && limited.NominalConcurrencyShares == 0 {
limited.NominalConcurrencyShares = PriorityLevelConfigurationDefaultNominalConcurrencyShares
}
}
}
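// A minimal sketch of the rule above, written as a hypothetical test in this
// package (assumes an additional import of "testing"): an unannotated zero is
// defaulted to 30, while the roundtrip annotation preserves the explicit zero.
func TestNominalConcurrencySharesDefaulting(t *testing.T) {
	newPLC := func(annotations map[string]string) *v1beta3.PriorityLevelConfiguration {
		plc := &v1beta3.PriorityLevelConfiguration{
			Spec: v1beta3.PriorityLevelConfigurationSpec{
				Type:    v1beta3.PriorityLevelEnablementLimited,
				Limited: &v1beta3.LimitedPriorityLevelConfiguration{},
			},
		}
		plc.Annotations = annotations
		return plc
	}
	// Conditions a) and b) both hold: zero value, no annotation => default applies.
	defaulted := newPLC(nil)
	SetDefaults_PriorityLevelConfiguration(defaulted)
	if got := defaulted.Spec.Limited.NominalConcurrencyShares; got != PriorityLevelConfigurationDefaultNominalConcurrencyShares {
		t.Errorf("expected %d, got %d", PriorityLevelConfigurationDefaultNominalConcurrencyShares, got)
	}
	// With the roundtrip annotation set, the explicit zero survives defaulting.
	preserved := newPLC(map[string]string{
		v1beta3.PriorityLevelPreserveZeroConcurrencySharesKey: "",
	})
	SetDefaults_PriorityLevelConfiguration(preserved)
	if got := preserved.Spec.Limited.NominalConcurrencyShares; got != 0 {
		t.Errorf("expected the zero value to be preserved, got %d", got)
	}
}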
func SetDefaults_ExemptPriorityLevelConfiguration(eplc *v1beta3.ExemptPriorityLevelConfiguration) {
if eplc.NominalConcurrencyShares == nil {
eplc.NominalConcurrencyShares = ptr.To(int32(0))
}
if eplc.LendablePercent == nil {
eplc.LendablePercent = ptr.To(int32(0))
}
}
func SetDefaults_LimitedPriorityLevelConfiguration(in *v1beta3.LimitedPriorityLevelConfiguration) {
if in.LendablePercent == nil {
in.LendablePercent = ptr.To(int32(0))
}
}
func SetDefaults_QueuingConfiguration(cfg *v1beta3.QueuingConfiguration) {
if cfg.HandSize == 0 {
cfg.HandSize = PriorityLevelConfigurationDefaultHandSize
}
if cfg.Queues == 0 {
cfg.Queues = PriorityLevelConfigurationDefaultQueues
}
if cfg.QueueLengthLimit == 0 {
cfg.QueueLengthLimit = PriorityLevelConfigurationDefaultQueueLengthLimit
}
}
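// A minimal sketch of the queuing defaults (hypothetical snippet, assuming an
// additional import of "fmt"): an all-zero QueuingConfiguration picks up the
// shuffle-sharding parameters defined in the constants above.
func exampleQueuingDefaults() {
	cfg := &v1beta3.QueuingConfiguration{}
	SetDefaults_QueuingConfiguration(cfg)
	// Prints: queues=64 handSize=8 queueLengthLimit=50
	fmt.Printf("queues=%d handSize=%d queueLengthLimit=%d\n",
		cfg.Queues, cfg.HandSize, cfg.QueueLengthLimit)
}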
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "flowcontrol.apiserver.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta3"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &flowcontrolv1beta3.SchemeBuilder
// AddToScheme adds the types in this package to the given scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
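// A minimal sketch of how this registration is typically consumed
// (hypothetical snippet, assuming an additional import of
// runtime "k8s.io/apimachinery/pkg/runtime"): AddToScheme installs both the
// generated functions and the manually registered ones, after which
// scheme.Default applies this package's defaulting logic.
func exampleSchemeUsage() error {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return err
	}
	plc := &flowcontrolv1beta3.PriorityLevelConfiguration{}
	// Runs the defaulting functions wired up via RegisterDefaults in init().
	scheme.Default(plc)
	return nil
}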
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta3
import (
unsafe "unsafe"
flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
flowcontrol "k8s.io/kubernetes/pkg/apis/flowcontrol"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.ExemptPriorityLevelConfiguration)(nil), (*flowcontrol.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(a.(*flowcontrolv1beta3.ExemptPriorityLevelConfiguration), b.(*flowcontrol.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ExemptPriorityLevelConfiguration)(nil), (*flowcontrolv1beta3.ExemptPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta3_ExemptPriorityLevelConfiguration(a.(*flowcontrol.ExemptPriorityLevelConfiguration), b.(*flowcontrolv1beta3.ExemptPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowDistinguisherMethod)(nil), (*flowcontrol.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(a.(*flowcontrolv1beta3.FlowDistinguisherMethod), b.(*flowcontrol.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowDistinguisherMethod)(nil), (*flowcontrolv1beta3.FlowDistinguisherMethod)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta3_FlowDistinguisherMethod(a.(*flowcontrol.FlowDistinguisherMethod), b.(*flowcontrolv1beta3.FlowDistinguisherMethod), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowSchema)(nil), (*flowcontrol.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema(a.(*flowcontrolv1beta3.FlowSchema), b.(*flowcontrol.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchema)(nil), (*flowcontrolv1beta3.FlowSchema)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchema_To_v1beta3_FlowSchema(a.(*flowcontrol.FlowSchema), b.(*flowcontrolv1beta3.FlowSchema), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowSchemaCondition)(nil), (*flowcontrol.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(a.(*flowcontrolv1beta3.FlowSchemaCondition), b.(*flowcontrol.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaCondition)(nil), (*flowcontrolv1beta3.FlowSchemaCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaCondition_To_v1beta3_FlowSchemaCondition(a.(*flowcontrol.FlowSchemaCondition), b.(*flowcontrolv1beta3.FlowSchemaCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowSchemaList)(nil), (*flowcontrol.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowSchemaList_To_flowcontrol_FlowSchemaList(a.(*flowcontrolv1beta3.FlowSchemaList), b.(*flowcontrol.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaList)(nil), (*flowcontrolv1beta3.FlowSchemaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaList_To_v1beta3_FlowSchemaList(a.(*flowcontrol.FlowSchemaList), b.(*flowcontrolv1beta3.FlowSchemaList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowSchemaSpec)(nil), (*flowcontrol.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(a.(*flowcontrolv1beta3.FlowSchemaSpec), b.(*flowcontrol.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaSpec)(nil), (*flowcontrolv1beta3.FlowSchemaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec(a.(*flowcontrol.FlowSchemaSpec), b.(*flowcontrolv1beta3.FlowSchemaSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.FlowSchemaStatus)(nil), (*flowcontrol.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(a.(*flowcontrolv1beta3.FlowSchemaStatus), b.(*flowcontrol.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.FlowSchemaStatus)(nil), (*flowcontrolv1beta3.FlowSchemaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus(a.(*flowcontrol.FlowSchemaStatus), b.(*flowcontrolv1beta3.FlowSchemaStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.GroupSubject)(nil), (*flowcontrol.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_GroupSubject_To_flowcontrol_GroupSubject(a.(*flowcontrolv1beta3.GroupSubject), b.(*flowcontrol.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.GroupSubject)(nil), (*flowcontrolv1beta3.GroupSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_GroupSubject_To_v1beta3_GroupSubject(a.(*flowcontrol.GroupSubject), b.(*flowcontrolv1beta3.GroupSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.LimitResponse)(nil), (*flowcontrol.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse(a.(*flowcontrolv1beta3.LimitResponse), b.(*flowcontrol.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitResponse)(nil), (*flowcontrolv1beta3.LimitResponse)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse(a.(*flowcontrol.LimitResponse), b.(*flowcontrolv1beta3.LimitResponse), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.LimitedPriorityLevelConfiguration)(nil), (*flowcontrol.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(a.(*flowcontrolv1beta3.LimitedPriorityLevelConfiguration), b.(*flowcontrol.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.LimitedPriorityLevelConfiguration)(nil), (*flowcontrolv1beta3.LimitedPriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta3_LimitedPriorityLevelConfiguration(a.(*flowcontrol.LimitedPriorityLevelConfiguration), b.(*flowcontrolv1beta3.LimitedPriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.NonResourcePolicyRule)(nil), (*flowcontrol.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(a.(*flowcontrolv1beta3.NonResourcePolicyRule), b.(*flowcontrol.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.NonResourcePolicyRule)(nil), (*flowcontrolv1beta3.NonResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_NonResourcePolicyRule_To_v1beta3_NonResourcePolicyRule(a.(*flowcontrol.NonResourcePolicyRule), b.(*flowcontrolv1beta3.NonResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PolicyRulesWithSubjects)(nil), (*flowcontrol.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(a.(*flowcontrolv1beta3.PolicyRulesWithSubjects), b.(*flowcontrol.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PolicyRulesWithSubjects)(nil), (*flowcontrolv1beta3.PolicyRulesWithSubjects)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta3_PolicyRulesWithSubjects(a.(*flowcontrol.PolicyRulesWithSubjects), b.(*flowcontrolv1beta3.PolicyRulesWithSubjects), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PriorityLevelConfigurationCondition)(nil), (*flowcontrol.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(a.(*flowcontrolv1beta3.PriorityLevelConfigurationCondition), b.(*flowcontrol.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationCondition)(nil), (*flowcontrolv1beta3.PriorityLevelConfigurationCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta3_PriorityLevelConfigurationCondition(a.(*flowcontrol.PriorityLevelConfigurationCondition), b.(*flowcontrolv1beta3.PriorityLevelConfigurationCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PriorityLevelConfigurationList)(nil), (*flowcontrol.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(a.(*flowcontrolv1beta3.PriorityLevelConfigurationList), b.(*flowcontrol.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationList)(nil), (*flowcontrolv1beta3.PriorityLevelConfigurationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta3_PriorityLevelConfigurationList(a.(*flowcontrol.PriorityLevelConfigurationList), b.(*flowcontrolv1beta3.PriorityLevelConfigurationList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PriorityLevelConfigurationReference)(nil), (*flowcontrol.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(a.(*flowcontrolv1beta3.PriorityLevelConfigurationReference), b.(*flowcontrol.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationReference)(nil), (*flowcontrolv1beta3.PriorityLevelConfigurationReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference(a.(*flowcontrol.PriorityLevelConfigurationReference), b.(*flowcontrolv1beta3.PriorityLevelConfigurationReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PriorityLevelConfigurationSpec)(nil), (*flowcontrol.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(a.(*flowcontrolv1beta3.PriorityLevelConfigurationSpec), b.(*flowcontrol.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationSpec)(nil), (*flowcontrolv1beta3.PriorityLevelConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec(a.(*flowcontrol.PriorityLevelConfigurationSpec), b.(*flowcontrolv1beta3.PriorityLevelConfigurationSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.PriorityLevelConfigurationStatus)(nil), (*flowcontrol.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(a.(*flowcontrolv1beta3.PriorityLevelConfigurationStatus), b.(*flowcontrol.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.PriorityLevelConfigurationStatus)(nil), (*flowcontrolv1beta3.PriorityLevelConfigurationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus(a.(*flowcontrol.PriorityLevelConfigurationStatus), b.(*flowcontrolv1beta3.PriorityLevelConfigurationStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.QueuingConfiguration)(nil), (*flowcontrol.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(a.(*flowcontrolv1beta3.QueuingConfiguration), b.(*flowcontrol.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.QueuingConfiguration)(nil), (*flowcontrolv1beta3.QueuingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_QueuingConfiguration_To_v1beta3_QueuingConfiguration(a.(*flowcontrol.QueuingConfiguration), b.(*flowcontrolv1beta3.QueuingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.ResourcePolicyRule)(nil), (*flowcontrol.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(a.(*flowcontrolv1beta3.ResourcePolicyRule), b.(*flowcontrol.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ResourcePolicyRule)(nil), (*flowcontrolv1beta3.ResourcePolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ResourcePolicyRule_To_v1beta3_ResourcePolicyRule(a.(*flowcontrol.ResourcePolicyRule), b.(*flowcontrolv1beta3.ResourcePolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.ServiceAccountSubject)(nil), (*flowcontrol.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(a.(*flowcontrolv1beta3.ServiceAccountSubject), b.(*flowcontrol.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.ServiceAccountSubject)(nil), (*flowcontrolv1beta3.ServiceAccountSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_ServiceAccountSubject_To_v1beta3_ServiceAccountSubject(a.(*flowcontrol.ServiceAccountSubject), b.(*flowcontrolv1beta3.ServiceAccountSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.Subject)(nil), (*flowcontrol.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_Subject_To_flowcontrol_Subject(a.(*flowcontrolv1beta3.Subject), b.(*flowcontrol.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.Subject)(nil), (*flowcontrolv1beta3.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_Subject_To_v1beta3_Subject(a.(*flowcontrol.Subject), b.(*flowcontrolv1beta3.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrolv1beta3.UserSubject)(nil), (*flowcontrol.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_UserSubject_To_flowcontrol_UserSubject(a.(*flowcontrolv1beta3.UserSubject), b.(*flowcontrol.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*flowcontrol.UserSubject)(nil), (*flowcontrolv1beta3.UserSubject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_UserSubject_To_v1beta3_UserSubject(a.(*flowcontrol.UserSubject), b.(*flowcontrolv1beta3.UserSubject), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrol.PriorityLevelConfiguration)(nil), (*flowcontrolv1beta3.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta3_PriorityLevelConfiguration(a.(*flowcontrol.PriorityLevelConfiguration), b.(*flowcontrolv1beta3.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*flowcontrolv1beta3.PriorityLevelConfiguration)(nil), (*flowcontrol.PriorityLevelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta3_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(a.(*flowcontrolv1beta3.PriorityLevelConfiguration), b.(*flowcontrol.PriorityLevelConfiguration), scope)
}); err != nil {
return err
}
return nil
}
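// A minimal sketch of exercising these registrations (hypothetical snippet;
// all imports it needs are already present in this file): once
// RegisterConversions has populated a scheme, scheme.Convert dispatches to
// the matching registered function pair, here the FlowSchema conversion.
func exampleConvertViaScheme() error {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return err
	}
	in := &flowcontrolv1beta3.FlowSchema{}
	out := &flowcontrol.FlowSchema{}
	// Dispatches to Convert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema.
	return scheme.Convert(in, out, nil)
}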
func autoConvert_v1beta3_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta3.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_v1beta3_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta3_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in *flowcontrolv1beta3.ExemptPriorityLevelConfiguration, out *flowcontrol.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta3_ExemptPriorityLevelConfiguration_To_flowcontrol_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta3_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta3.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = (*int32)(unsafe.Pointer(in.NominalConcurrencyShares))
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
return nil
}
// Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta3_ExemptPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta3_ExemptPriorityLevelConfiguration(in *flowcontrol.ExemptPriorityLevelConfiguration, out *flowcontrolv1beta3.ExemptPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_ExemptPriorityLevelConfiguration_To_v1beta3_ExemptPriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1beta3_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta3.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrol.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_v1beta3_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_v1beta3_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in *flowcontrolv1beta3.FlowDistinguisherMethod, out *flowcontrol.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_v1beta3_FlowDistinguisherMethod_To_flowcontrol_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta3_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta3.FlowDistinguisherMethod, s conversion.Scope) error {
out.Type = flowcontrolv1beta3.FlowDistinguisherMethodType(in.Type)
return nil
}
// Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta3_FlowDistinguisherMethod is an autogenerated conversion function.
func Convert_flowcontrol_FlowDistinguisherMethod_To_v1beta3_FlowDistinguisherMethod(in *flowcontrol.FlowDistinguisherMethod, out *flowcontrolv1beta3.FlowDistinguisherMethod, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowDistinguisherMethod_To_v1beta3_FlowDistinguisherMethod(in, out, s)
}
func autoConvert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta3.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema is an autogenerated conversion function.
func Convert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema(in *flowcontrolv1beta3.FlowSchema, out *flowcontrol.FlowSchema, s conversion.Scope) error {
return autoConvert_v1beta3_FlowSchema_To_flowcontrol_FlowSchema(in, out, s)
}
func autoConvert_flowcontrol_FlowSchema_To_v1beta3_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta3.FlowSchema, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_flowcontrol_FlowSchema_To_v1beta3_FlowSchema is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchema_To_v1beta3_FlowSchema(in *flowcontrol.FlowSchema, out *flowcontrolv1beta3.FlowSchema, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchema_To_v1beta3_FlowSchema(in, out, s)
}
func autoConvert_v1beta3_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta3.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrol.FlowSchemaConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta3_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition is an autogenerated conversion function.
func Convert_v1beta3_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in *flowcontrolv1beta3.FlowSchemaCondition, out *flowcontrol.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_v1beta3_FlowSchemaCondition_To_flowcontrol_FlowSchemaCondition(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta3_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta3.FlowSchemaCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta3.FlowSchemaConditionType(in.Type)
out.Status = flowcontrolv1beta3.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_FlowSchemaCondition_To_v1beta3_FlowSchemaCondition is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaCondition_To_v1beta3_FlowSchemaCondition(in *flowcontrol.FlowSchemaCondition, out *flowcontrolv1beta3.FlowSchemaCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaCondition_To_v1beta3_FlowSchemaCondition(in, out, s)
}
func autoConvert_v1beta3_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta3.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrol.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta3_FlowSchemaList_To_flowcontrol_FlowSchemaList is an autogenerated conversion function.
func Convert_v1beta3_FlowSchemaList_To_flowcontrol_FlowSchemaList(in *flowcontrolv1beta3.FlowSchemaList, out *flowcontrol.FlowSchemaList, s conversion.Scope) error {
return autoConvert_v1beta3_FlowSchemaList_To_flowcontrol_FlowSchemaList(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaList_To_v1beta3_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta3.FlowSchemaList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]flowcontrolv1beta3.FlowSchema)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_flowcontrol_FlowSchemaList_To_v1beta3_FlowSchemaList is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaList_To_v1beta3_FlowSchemaList(in *flowcontrol.FlowSchemaList, out *flowcontrolv1beta3.FlowSchemaList, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaList_To_v1beta3_FlowSchemaList(in, out, s)
}
func autoConvert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta3.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrol.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrol.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec is an autogenerated conversion function.
func Convert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in *flowcontrolv1beta3.FlowSchemaSpec, out *flowcontrol.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_v1beta3_FlowSchemaSpec_To_flowcontrol_FlowSchemaSpec(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta3.FlowSchemaSpec, s conversion.Scope) error {
if err := Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference(&in.PriorityLevelConfiguration, &out.PriorityLevelConfiguration, s); err != nil {
return err
}
out.MatchingPrecedence = in.MatchingPrecedence
out.DistinguisherMethod = (*flowcontrolv1beta3.FlowDistinguisherMethod)(unsafe.Pointer(in.DistinguisherMethod))
out.Rules = *(*[]flowcontrolv1beta3.PolicyRulesWithSubjects)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec(in *flowcontrol.FlowSchemaSpec, out *flowcontrolv1beta3.FlowSchemaSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaSpec_To_v1beta3_FlowSchemaSpec(in, out, s)
}
func autoConvert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta3.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus is an autogenerated conversion function.
func Convert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in *flowcontrolv1beta3.FlowSchemaStatus, out *flowcontrol.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_v1beta3_FlowSchemaStatus_To_flowcontrol_FlowSchemaStatus(in, out, s)
}
func autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta3.FlowSchemaStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta3.FlowSchemaCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus is an autogenerated conversion function.
func Convert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus(in *flowcontrol.FlowSchemaStatus, out *flowcontrolv1beta3.FlowSchemaStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_FlowSchemaStatus_To_v1beta3_FlowSchemaStatus(in, out, s)
}
func autoConvert_v1beta3_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta3.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta3_GroupSubject_To_flowcontrol_GroupSubject is an autogenerated conversion function.
func Convert_v1beta3_GroupSubject_To_flowcontrol_GroupSubject(in *flowcontrolv1beta3.GroupSubject, out *flowcontrol.GroupSubject, s conversion.Scope) error {
return autoConvert_v1beta3_GroupSubject_To_flowcontrol_GroupSubject(in, out, s)
}
func autoConvert_flowcontrol_GroupSubject_To_v1beta3_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta3.GroupSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_GroupSubject_To_v1beta3_GroupSubject is an autogenerated conversion function.
func Convert_flowcontrol_GroupSubject_To_v1beta3_GroupSubject(in *flowcontrol.GroupSubject, out *flowcontrolv1beta3.GroupSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_GroupSubject_To_v1beta3_GroupSubject(in, out, s)
}
func autoConvert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta3.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrol.LimitResponseType(in.Type)
out.Queuing = (*flowcontrol.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse is an autogenerated conversion function.
func Convert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse(in *flowcontrolv1beta3.LimitResponse, out *flowcontrol.LimitResponse, s conversion.Scope) error {
return autoConvert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse(in, out, s)
}
func autoConvert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta3.LimitResponse, s conversion.Scope) error {
out.Type = flowcontrolv1beta3.LimitResponseType(in.Type)
out.Queuing = (*flowcontrolv1beta3.QueuingConfiguration)(unsafe.Pointer(in.Queuing))
return nil
}
// Convert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse is an autogenerated conversion function.
func Convert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse(in *flowcontrol.LimitResponse, out *flowcontrolv1beta3.LimitResponse, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse(in, out, s)
}
func autoConvert_v1beta3_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1beta3.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = in.NominalConcurrencyShares
if err := Convert_v1beta3_LimitResponse_To_flowcontrol_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
// Convert_v1beta3_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_v1beta3_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in *flowcontrolv1beta3.LimitedPriorityLevelConfiguration, out *flowcontrol.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_v1beta3_LimitedPriorityLevelConfiguration_To_flowcontrol_LimitedPriorityLevelConfiguration(in, out, s)
}
func autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta3_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1beta3.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
out.NominalConcurrencyShares = in.NominalConcurrencyShares
if err := Convert_flowcontrol_LimitResponse_To_v1beta3_LimitResponse(&in.LimitResponse, &out.LimitResponse, s); err != nil {
return err
}
out.LendablePercent = (*int32)(unsafe.Pointer(in.LendablePercent))
out.BorrowingLimitPercent = (*int32)(unsafe.Pointer(in.BorrowingLimitPercent))
return nil
}
// Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta3_LimitedPriorityLevelConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta3_LimitedPriorityLevelConfiguration(in *flowcontrol.LimitedPriorityLevelConfiguration, out *flowcontrolv1beta3.LimitedPriorityLevelConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_LimitedPriorityLevelConfiguration_To_v1beta3_LimitedPriorityLevelConfiguration(in, out, s)
}
func autoConvert_v1beta3_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta3.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1beta3_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta3_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in *flowcontrolv1beta3.NonResourcePolicyRule, out *flowcontrol.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta3_NonResourcePolicyRule_To_flowcontrol_NonResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta3_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta3.NonResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_flowcontrol_NonResourcePolicyRule_To_v1beta3_NonResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_NonResourcePolicyRule_To_v1beta3_NonResourcePolicyRule(in *flowcontrol.NonResourcePolicyRule, out *flowcontrolv1beta3.NonResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_NonResourcePolicyRule_To_v1beta3_NonResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta3_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta3.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrol.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrol.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrol.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_v1beta3_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_v1beta3_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in *flowcontrolv1beta3.PolicyRulesWithSubjects, out *flowcontrol.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_v1beta3_PolicyRulesWithSubjects_To_flowcontrol_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta3_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta3.PolicyRulesWithSubjects, s conversion.Scope) error {
out.Subjects = *(*[]flowcontrolv1beta3.Subject)(unsafe.Pointer(&in.Subjects))
out.ResourceRules = *(*[]flowcontrolv1beta3.ResourcePolicyRule)(unsafe.Pointer(&in.ResourceRules))
out.NonResourceRules = *(*[]flowcontrolv1beta3.NonResourcePolicyRule)(unsafe.Pointer(&in.NonResourceRules))
return nil
}
// Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta3_PolicyRulesWithSubjects is an autogenerated conversion function.
func Convert_flowcontrol_PolicyRulesWithSubjects_To_v1beta3_PolicyRulesWithSubjects(in *flowcontrol.PolicyRulesWithSubjects, out *flowcontrolv1beta3.PolicyRulesWithSubjects, s conversion.Scope) error {
return autoConvert_flowcontrol_PolicyRulesWithSubjects_To_v1beta3_PolicyRulesWithSubjects(in, out, s)
}
func autoConvert_v1beta3_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(in *flowcontrolv1beta3.PriorityLevelConfiguration, out *flowcontrol.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_flowcontrol_PriorityLevelConfiguration_To_v1beta3_PriorityLevelConfiguration(in *flowcontrol.PriorityLevelConfiguration, out *flowcontrolv1beta3.PriorityLevelConfiguration, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
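// Note that only the autoConvert halves are emitted for
// PriorityLevelConfiguration: the public Convert_* entry points for this type
// are hand-written in conversion.go above, so that they can manage the
// roundtrip annotation around the generated field-by-field conversion.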
func autoConvert_v1beta3_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta3.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrol.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_v1beta3_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_v1beta3_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in *flowcontrolv1beta3.PriorityLevelConfigurationCondition, out *flowcontrol.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_v1beta3_PriorityLevelConfigurationCondition_To_flowcontrol_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta3_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta3.PriorityLevelConfigurationCondition, s conversion.Scope) error {
out.Type = flowcontrolv1beta3.PriorityLevelConfigurationConditionType(in.Type)
out.Status = flowcontrolv1beta3.ConditionStatus(in.Status)
out.LastTransitionTime = in.LastTransitionTime
out.Reason = in.Reason
out.Message = in.Message
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta3_PriorityLevelConfigurationCondition is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta3_PriorityLevelConfigurationCondition(in *flowcontrol.PriorityLevelConfigurationCondition, out *flowcontrolv1beta3.PriorityLevelConfigurationCondition, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationCondition_To_v1beta3_PriorityLevelConfigurationCondition(in, out, s)
}
func autoConvert_v1beta3_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta3.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrol.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_v1beta3_PriorityLevelConfiguration_To_flowcontrol_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta3_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_v1beta3_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in *flowcontrolv1beta3.PriorityLevelConfigurationList, out *flowcontrol.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_v1beta3_PriorityLevelConfigurationList_To_flowcontrol_PriorityLevelConfigurationList(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta3_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta3.PriorityLevelConfigurationList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]flowcontrolv1beta3.PriorityLevelConfiguration, len(*in))
for i := range *in {
if err := Convert_flowcontrol_PriorityLevelConfiguration_To_v1beta3_PriorityLevelConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta3_PriorityLevelConfigurationList is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationList_To_v1beta3_PriorityLevelConfigurationList(in *flowcontrol.PriorityLevelConfigurationList, out *flowcontrolv1beta3.PriorityLevelConfigurationList, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationList_To_v1beta3_PriorityLevelConfigurationList(in, out, s)
}
func autoConvert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta3.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in *flowcontrolv1beta3.PriorityLevelConfigurationReference, out *flowcontrol.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_v1beta3_PriorityLevelConfigurationReference_To_flowcontrol_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta3.PriorityLevelConfigurationReference, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference(in *flowcontrol.PriorityLevelConfigurationReference, out *flowcontrolv1beta3.PriorityLevelConfigurationReference, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationReference_To_v1beta3_PriorityLevelConfigurationReference(in, out, s)
}
func autoConvert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta3.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrol.PriorityLevelEnablement(in.Type)
out.Limited = (*flowcontrol.LimitedPriorityLevelConfiguration)(unsafe.Pointer(in.Limited))
out.Exempt = (*flowcontrol.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in *flowcontrolv1beta3.PriorityLevelConfigurationSpec, out *flowcontrol.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_v1beta3_PriorityLevelConfigurationSpec_To_flowcontrol_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta3.PriorityLevelConfigurationSpec, s conversion.Scope) error {
out.Type = flowcontrolv1beta3.PriorityLevelEnablement(in.Type)
out.Limited = (*flowcontrolv1beta3.LimitedPriorityLevelConfiguration)(unsafe.Pointer(in.Limited))
out.Exempt = (*flowcontrolv1beta3.ExemptPriorityLevelConfiguration)(unsafe.Pointer(in.Exempt))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec(in *flowcontrol.PriorityLevelConfigurationSpec, out *flowcontrolv1beta3.PriorityLevelConfigurationSpec, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationSpec_To_v1beta3_PriorityLevelConfigurationSpec(in, out, s)
}
func autoConvert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta3.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrol.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in *flowcontrolv1beta3.PriorityLevelConfigurationStatus, out *flowcontrol.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_v1beta3_PriorityLevelConfigurationStatus_To_flowcontrol_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta3.PriorityLevelConfigurationStatus, s conversion.Scope) error {
out.Conditions = *(*[]flowcontrolv1beta3.PriorityLevelConfigurationCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus is an autogenerated conversion function.
func Convert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus(in *flowcontrol.PriorityLevelConfigurationStatus, out *flowcontrolv1beta3.PriorityLevelConfigurationStatus, s conversion.Scope) error {
return autoConvert_flowcontrol_PriorityLevelConfigurationStatus_To_v1beta3_PriorityLevelConfigurationStatus(in, out, s)
}
func autoConvert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta3.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration is an autogenerated conversion function.
func Convert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in *flowcontrolv1beta3.QueuingConfiguration, out *flowcontrol.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in, out, s)
}
func autoConvert_flowcontrol_QueuingConfiguration_To_v1beta3_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta3.QueuingConfiguration, s conversion.Scope) error {
out.Queues = in.Queues
out.HandSize = in.HandSize
out.QueueLengthLimit = in.QueueLengthLimit
return nil
}
// Convert_flowcontrol_QueuingConfiguration_To_v1beta3_QueuingConfiguration is an autogenerated conversion function.
func Convert_flowcontrol_QueuingConfiguration_To_v1beta3_QueuingConfiguration(in *flowcontrol.QueuingConfiguration, out *flowcontrolv1beta3.QueuingConfiguration, s conversion.Scope) error {
return autoConvert_flowcontrol_QueuingConfiguration_To_v1beta3_QueuingConfiguration(in, out, s)
}
func autoConvert_v1beta3_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta3.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_v1beta3_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule is an autogenerated conversion function.
func Convert_v1beta3_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in *flowcontrolv1beta3.ResourcePolicyRule, out *flowcontrol.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_v1beta3_ResourcePolicyRule_To_flowcontrol_ResourcePolicyRule(in, out, s)
}
func autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta3_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta3.ResourcePolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ClusterScope = in.ClusterScope
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
return nil
}
// Convert_flowcontrol_ResourcePolicyRule_To_v1beta3_ResourcePolicyRule is an autogenerated conversion function.
func Convert_flowcontrol_ResourcePolicyRule_To_v1beta3_ResourcePolicyRule(in *flowcontrol.ResourcePolicyRule, out *flowcontrolv1beta3.ResourcePolicyRule, s conversion.Scope) error {
return autoConvert_flowcontrol_ResourcePolicyRule_To_v1beta3_ResourcePolicyRule(in, out, s)
}
func autoConvert_v1beta3_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta3.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1beta3_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject is an autogenerated conversion function.
func Convert_v1beta3_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in *flowcontrolv1beta3.ServiceAccountSubject, out *flowcontrol.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_v1beta3_ServiceAccountSubject_To_flowcontrol_ServiceAccountSubject(in, out, s)
}
func autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta3_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta3.ServiceAccountSubject, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_flowcontrol_ServiceAccountSubject_To_v1beta3_ServiceAccountSubject is an autogenerated conversion function.
func Convert_flowcontrol_ServiceAccountSubject_To_v1beta3_ServiceAccountSubject(in *flowcontrol.ServiceAccountSubject, out *flowcontrolv1beta3.ServiceAccountSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_ServiceAccountSubject_To_v1beta3_ServiceAccountSubject(in, out, s)
}
func autoConvert_v1beta3_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta3.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
out.Kind = flowcontrol.SubjectKind(in.Kind)
out.User = (*flowcontrol.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrol.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrol.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_v1beta3_Subject_To_flowcontrol_Subject is an autogenerated conversion function.
func Convert_v1beta3_Subject_To_flowcontrol_Subject(in *flowcontrolv1beta3.Subject, out *flowcontrol.Subject, s conversion.Scope) error {
return autoConvert_v1beta3_Subject_To_flowcontrol_Subject(in, out, s)
}
func autoConvert_flowcontrol_Subject_To_v1beta3_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta3.Subject, s conversion.Scope) error {
out.Kind = flowcontrolv1beta3.SubjectKind(in.Kind)
out.User = (*flowcontrolv1beta3.UserSubject)(unsafe.Pointer(in.User))
out.Group = (*flowcontrolv1beta3.GroupSubject)(unsafe.Pointer(in.Group))
out.ServiceAccount = (*flowcontrolv1beta3.ServiceAccountSubject)(unsafe.Pointer(in.ServiceAccount))
return nil
}
// Convert_flowcontrol_Subject_To_v1beta3_Subject is an autogenerated conversion function.
func Convert_flowcontrol_Subject_To_v1beta3_Subject(in *flowcontrol.Subject, out *flowcontrolv1beta3.Subject, s conversion.Scope) error {
return autoConvert_flowcontrol_Subject_To_v1beta3_Subject(in, out, s)
}
func autoConvert_v1beta3_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta3.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1beta3_UserSubject_To_flowcontrol_UserSubject is an autogenerated conversion function.
func Convert_v1beta3_UserSubject_To_flowcontrol_UserSubject(in *flowcontrolv1beta3.UserSubject, out *flowcontrol.UserSubject, s conversion.Scope) error {
return autoConvert_v1beta3_UserSubject_To_flowcontrol_UserSubject(in, out, s)
}
func autoConvert_flowcontrol_UserSubject_To_v1beta3_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta3.UserSubject, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_flowcontrol_UserSubject_To_v1beta3_UserSubject is an autogenerated conversion function.
func Convert_flowcontrol_UserSubject_To_v1beta3_UserSubject(in *flowcontrol.UserSubject, out *flowcontrolv1beta3.UserSubject, s conversion.Scope) error {
return autoConvert_flowcontrol_UserSubject_To_v1beta3_UserSubject(in, out, s)
}
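// Illustrative sketch, not part of the generated file above: calling one of
// the generated conversion functions directly. Passing a nil conversion.Scope
// is an assumption that holds for QueuingConfiguration because its generated
// converter never touches the scope; in normal operation the scope is
// supplied by runtime.Scheme.Convert.
package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	"k8s.io/kubernetes/pkg/apis/flowcontrol"
	v1beta3 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta3"
)

func main() {
	in := &flowcontrolv1beta3.QueuingConfiguration{Queues: 64, HandSize: 6, QueueLengthLimit: 50}
	out := &flowcontrol.QueuingConfiguration{}
	// Field-for-field copy from the versioned type into the internal type.
	if err := v1beta3.Convert_v1beta3_QueuingConfiguration_To_flowcontrol_QueuingConfiguration(in, out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("queues=%d handSize=%d limit=%d\n", out.Queues, out.HandSize, out.QueueLengthLimit)
}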
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta3
import (
flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta3.FlowSchema{}, func(obj interface{}) { SetObjectDefaults_FlowSchema(obj.(*flowcontrolv1beta3.FlowSchema)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta3.FlowSchemaList{}, func(obj interface{}) { SetObjectDefaults_FlowSchemaList(obj.(*flowcontrolv1beta3.FlowSchemaList)) })
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta3.PriorityLevelConfiguration{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfiguration(obj.(*flowcontrolv1beta3.PriorityLevelConfiguration))
})
scheme.AddTypeDefaultingFunc(&flowcontrolv1beta3.PriorityLevelConfigurationList{}, func(obj interface{}) {
SetObjectDefaults_PriorityLevelConfigurationList(obj.(*flowcontrolv1beta3.PriorityLevelConfigurationList))
})
return nil
}
func SetObjectDefaults_FlowSchema(in *flowcontrolv1beta3.FlowSchema) {
SetDefaults_FlowSchemaSpec(&in.Spec)
}
func SetObjectDefaults_FlowSchemaList(in *flowcontrolv1beta3.FlowSchemaList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_FlowSchema(a)
}
}
func SetObjectDefaults_PriorityLevelConfiguration(in *flowcontrolv1beta3.PriorityLevelConfiguration) {
SetDefaults_PriorityLevelConfiguration(in)
if in.Spec.Limited != nil {
SetDefaults_LimitedPriorityLevelConfiguration(in.Spec.Limited)
if in.Spec.Limited.LimitResponse.Queuing != nil {
SetDefaults_QueuingConfiguration(in.Spec.Limited.LimitResponse.Queuing)
}
}
if in.Spec.Exempt != nil {
SetDefaults_ExemptPriorityLevelConfiguration(in.Spec.Exempt)
}
}
func SetObjectDefaults_PriorityLevelConfigurationList(in *flowcontrolv1beta3.PriorityLevelConfigurationList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityLevelConfiguration(a)
}
}
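// Illustrative sketch, not part of the generated file above: applying the
// generated defaulters through a runtime.Scheme. RegisterDefaults wires the
// SetObjectDefaults_* functions into scheme.Default, so callers normally go
// through the scheme rather than invoking them directly.
package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	"k8s.io/apimachinery/pkg/runtime"
	v1beta3 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1beta3"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1beta3.RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	obj := &flowcontrolv1beta3.FlowSchema{}
	// Default looks up the registered defaulting func for the concrete type
	// and calls SetObjectDefaults_FlowSchema, which cascades into the spec.
	scheme.Default(obj)
	fmt.Printf("defaulted spec: %+v\n", obj.Spec)
}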
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package flowcontrol
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) {
*out = *in
if in.NominalConcurrencyShares != nil {
in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
*out = new(int32)
**out = **in
}
if in.LendablePercent != nil {
in, out := &in.LendablePercent, &out.LendablePercent
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration.
func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration {
if in == nil {
return nil
}
out := new(ExemptPriorityLevelConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod.
func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod {
if in == nil {
return nil
}
out := new(FlowDistinguisherMethod)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchema) DeepCopyInto(out *FlowSchema) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema.
func (in *FlowSchema) DeepCopy() *FlowSchema {
if in == nil {
return nil
}
out := new(FlowSchema)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FlowSchema) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition.
func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition {
if in == nil {
return nil
}
out := new(FlowSchemaCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FlowSchema, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList.
func (in *FlowSchemaList) DeepCopy() *FlowSchemaList {
if in == nil {
return nil
}
out := new(FlowSchemaList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FlowSchemaList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) {
*out = *in
out.PriorityLevelConfiguration = in.PriorityLevelConfiguration
if in.DistinguisherMethod != nil {
in, out := &in.DistinguisherMethod, &out.DistinguisherMethod
*out = new(FlowDistinguisherMethod)
**out = **in
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRulesWithSubjects, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec.
func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec {
if in == nil {
return nil
}
out := new(FlowSchemaSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]FlowSchemaCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus.
func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus {
if in == nil {
return nil
}
out := new(FlowSchemaStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupSubject) DeepCopyInto(out *GroupSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject.
func (in *GroupSubject) DeepCopy() *GroupSubject {
if in == nil {
return nil
}
out := new(GroupSubject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitResponse) DeepCopyInto(out *LimitResponse) {
*out = *in
if in.Queuing != nil {
in, out := &in.Queuing, &out.Queuing
*out = new(QueuingConfiguration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse.
func (in *LimitResponse) DeepCopy() *LimitResponse {
if in == nil {
return nil
}
out := new(LimitResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) {
*out = *in
in.LimitResponse.DeepCopyInto(&out.LimitResponse)
if in.LendablePercent != nil {
in, out := &in.LendablePercent, &out.LendablePercent
*out = new(int32)
**out = **in
}
if in.BorrowingLimitPercent != nil {
in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration.
func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration {
if in == nil {
return nil
}
out := new(LimitedPriorityLevelConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLs != nil {
in, out := &in.NonResourceURLs, &out.NonResourceURLs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule.
func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule {
if in == nil {
return nil
}
out := new(NonResourcePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) {
*out = *in
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourceRules != nil {
in, out := &in.ResourceRules, &out.ResourceRules
*out = make([]ResourcePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NonResourceRules != nil {
in, out := &in.NonResourceRules, &out.NonResourceRules
*out = make([]NonResourcePolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects.
func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects {
if in == nil {
return nil
}
out := new(PolicyRulesWithSubjects)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration.
func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration {
if in == nil {
return nil
}
out := new(PriorityLevelConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition.
func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PriorityLevelConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList.
func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference.
func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) {
*out = *in
if in.Limited != nil {
in, out := &in.Limited, &out.Limited
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
if in.Exempt != nil {
in, out := &in.Exempt, &out.Exempt
*out = new(ExemptPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec.
func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]PriorityLevelConfigurationCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus.
func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus {
if in == nil {
return nil
}
out := new(PriorityLevelConfigurationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration.
func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration {
if in == nil {
return nil
}
out := new(QueuingConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule.
func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule {
if in == nil {
return nil
}
out := new(ResourcePolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject.
func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject {
if in == nil {
return nil
}
out := new(ServiceAccountSubject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subject) DeepCopyInto(out *Subject) {
*out = *in
if in.User != nil {
in, out := &in.User, &out.User
*out = new(UserSubject)
**out = **in
}
if in.Group != nil {
in, out := &in.Group, &out.Group
*out = new(GroupSubject)
**out = **in
}
if in.ServiceAccount != nil {
in, out := &in.ServiceAccount, &out.ServiceAccount
*out = new(ServiceAccountSubject)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
func (in *Subject) DeepCopy() *Subject {
if in == nil {
return nil
}
out := new(Subject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserSubject) DeepCopyInto(out *UserSubject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject.
func (in *UserSubject) DeepCopy() *UserSubject {
if in == nil {
return nil
}
out := new(UserSubject)
in.DeepCopyInto(out)
return out
}
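// Illustrative sketch, not part of the generated file above: the deepcopy
// contract shown on Subject. Pointer fields such as User are re-allocated by
// DeepCopyInto, so mutating the copy can never alias the original. The
// SubjectKindUser constant is assumed to be defined in this package's types.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/flowcontrol"
)

func main() {
	orig := &flowcontrol.Subject{
		Kind: flowcontrol.SubjectKindUser,
		User: &flowcontrol.UserSubject{Name: "alice"},
	}
	cp := orig.DeepCopy()
	cp.User.Name = "bob"
	// The copy owns its own UserSubject, so the original is untouched.
	fmt.Println(orig.User.Name, cp.User.Name) // alice bob
}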
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the experimental API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/imagepolicy"
"k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(imagepolicy.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
}
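// Illustrative sketch, not part of the file above: Install normally runs via
// this package's init against legacyscheme.Scheme, but it can also populate a
// private scheme, e.g. in tests.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/imagepolicy/install"
)

func main() {
	scheme := runtime.NewScheme()
	install.Install(scheme)
	// Both the internal version and v1alpha1 are now registered.
	for gvk := range scheme.AllKnownTypes() {
		if gvk.Group == "imagepolicy.k8s.io" {
			fmt.Println(gvk)
		}
	}
}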
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package imagepolicy
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "imagepolicy.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder collects the functions that add this group's types to a scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&ImageReview{},
)
// metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
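// Illustrative sketch, not part of the file above: Kind and Resource qualify
// bare names with the imagepolicy group, which is how callers build
// group-qualified lookups and "not found" errors.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/imagepolicy"
)

func main() {
	fmt.Println(imagepolicy.Kind("ImageReview"))      // ImageReview.imagepolicy.k8s.io
	fmt.Println(imagepolicy.Resource("imagereviews")) // imagereviews.imagepolicy.k8s.io
}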
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "imagepolicy.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &imagepolicyv1alpha1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
imagepolicy "k8s.io/kubernetes/pkg/apis/imagepolicy"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*imagepolicyv1alpha1.ImageReview)(nil), (*imagepolicy.ImageReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(a.(*imagepolicyv1alpha1.ImageReview), b.(*imagepolicy.ImageReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicy.ImageReview)(nil), (*imagepolicyv1alpha1.ImageReview)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(a.(*imagepolicy.ImageReview), b.(*imagepolicyv1alpha1.ImageReview), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicyv1alpha1.ImageReviewContainerSpec)(nil), (*imagepolicy.ImageReviewContainerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(a.(*imagepolicyv1alpha1.ImageReviewContainerSpec), b.(*imagepolicy.ImageReviewContainerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicy.ImageReviewContainerSpec)(nil), (*imagepolicyv1alpha1.ImageReviewContainerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(a.(*imagepolicy.ImageReviewContainerSpec), b.(*imagepolicyv1alpha1.ImageReviewContainerSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicyv1alpha1.ImageReviewSpec)(nil), (*imagepolicy.ImageReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(a.(*imagepolicyv1alpha1.ImageReviewSpec), b.(*imagepolicy.ImageReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicy.ImageReviewSpec)(nil), (*imagepolicyv1alpha1.ImageReviewSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(a.(*imagepolicy.ImageReviewSpec), b.(*imagepolicyv1alpha1.ImageReviewSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicyv1alpha1.ImageReviewStatus)(nil), (*imagepolicy.ImageReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(a.(*imagepolicyv1alpha1.ImageReviewStatus), b.(*imagepolicy.ImageReviewStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*imagepolicy.ImageReviewStatus)(nil), (*imagepolicyv1alpha1.ImageReviewStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(a.(*imagepolicy.ImageReviewStatus), b.(*imagepolicyv1alpha1.ImageReviewStatus), scope)
}); err != nil {
return err
}
return nil
}
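// Illustrative sketch, not part of the generated file above: once
// RegisterConversions has populated a scheme, conversions are driven through
// runtime.Scheme.Convert, which dispatches to the generated functions
// registered above. Building a bare scheme like this is an assumption made to
// keep the example self-contained.
package main

import (
	"fmt"

	imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/imagepolicy"
	v1alpha1 "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1alpha1.RegisterConversions(scheme); err != nil {
		panic(err)
	}
	in := &imagepolicyv1alpha1.ImageReviewStatus{Allowed: true, Reason: "trusted registry"}
	out := &imagepolicy.ImageReviewStatus{}
	// Convert finds the registered generated function for this type pair.
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Allowed, out.Reason) // true trusted registry
}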
func autoConvert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in *imagepolicyv1alpha1.ImageReview, out *imagepolicy.ImageReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ImageReview_To_imagepolicy_ImageReview is an autogenerated conversion function.
func Convert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in *imagepolicyv1alpha1.ImageReview, out *imagepolicy.ImageReview, s conversion.Scope) error {
return autoConvert_v1alpha1_ImageReview_To_imagepolicy_ImageReview(in, out, s)
}
func autoConvert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in *imagepolicy.ImageReview, out *imagepolicyv1alpha1.ImageReview, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_imagepolicy_ImageReview_To_v1alpha1_ImageReview is an autogenerated conversion function.
func Convert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in *imagepolicy.ImageReview, out *imagepolicyv1alpha1.ImageReview, s conversion.Scope) error {
return autoConvert_imagepolicy_ImageReview_To_v1alpha1_ImageReview(in, out, s)
}
func autoConvert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in *imagepolicyv1alpha1.ImageReviewContainerSpec, out *imagepolicy.ImageReviewContainerSpec, s conversion.Scope) error {
out.Image = in.Image
return nil
}
// Convert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec is an autogenerated conversion function.
func Convert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in *imagepolicyv1alpha1.ImageReviewContainerSpec, out *imagepolicy.ImageReviewContainerSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ImageReviewContainerSpec_To_imagepolicy_ImageReviewContainerSpec(in, out, s)
}
func autoConvert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in *imagepolicy.ImageReviewContainerSpec, out *imagepolicyv1alpha1.ImageReviewContainerSpec, s conversion.Scope) error {
out.Image = in.Image
return nil
}
// Convert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec is an autogenerated conversion function.
func Convert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in *imagepolicy.ImageReviewContainerSpec, out *imagepolicyv1alpha1.ImageReviewContainerSpec, s conversion.Scope) error {
return autoConvert_imagepolicy_ImageReviewContainerSpec_To_v1alpha1_ImageReviewContainerSpec(in, out, s)
}
func autoConvert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in *imagepolicyv1alpha1.ImageReviewSpec, out *imagepolicy.ImageReviewSpec, s conversion.Scope) error {
out.Containers = *(*[]imagepolicy.ImageReviewContainerSpec)(unsafe.Pointer(&in.Containers))
out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
out.Namespace = in.Namespace
return nil
}
// Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec is an autogenerated conversion function.
func Convert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in *imagepolicyv1alpha1.ImageReviewSpec, out *imagepolicy.ImageReviewSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_ImageReviewSpec_To_imagepolicy_ImageReviewSpec(in, out, s)
}
func autoConvert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in *imagepolicy.ImageReviewSpec, out *imagepolicyv1alpha1.ImageReviewSpec, s conversion.Scope) error {
out.Containers = *(*[]imagepolicyv1alpha1.ImageReviewContainerSpec)(unsafe.Pointer(&in.Containers))
out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
out.Namespace = in.Namespace
return nil
}
// Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec is an autogenerated conversion function.
func Convert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in *imagepolicy.ImageReviewSpec, out *imagepolicyv1alpha1.ImageReviewSpec, s conversion.Scope) error {
return autoConvert_imagepolicy_ImageReviewSpec_To_v1alpha1_ImageReviewSpec(in, out, s)
}
func autoConvert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in *imagepolicyv1alpha1.ImageReviewStatus, out *imagepolicy.ImageReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Reason = in.Reason
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
return nil
}
// Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus is an autogenerated conversion function.
func Convert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in *imagepolicyv1alpha1.ImageReviewStatus, out *imagepolicy.ImageReviewStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_ImageReviewStatus_To_imagepolicy_ImageReviewStatus(in, out, s)
}
func autoConvert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in *imagepolicy.ImageReviewStatus, out *imagepolicyv1alpha1.ImageReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Reason = in.Reason
out.AuditAnnotations = *(*map[string]string)(unsafe.Pointer(&in.AuditAnnotations))
return nil
}
// Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus is an autogenerated conversion function.
func Convert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in *imagepolicy.ImageReviewStatus, out *imagepolicyv1alpha1.ImageReviewStatus, s conversion.Scope) error {
return autoConvert_imagepolicy_ImageReviewStatus_To_v1alpha1_ImageReviewStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package imagepolicy
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageReview) DeepCopyInto(out *ImageReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview.
func (in *ImageReview) DeepCopy() *ImageReview {
if in == nil {
return nil
}
out := new(ImageReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec.
func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec {
if in == nil {
return nil
}
out := new(ImageReviewContainerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) {
*out = *in
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]ImageReviewContainerSpec, len(*in))
copy(*out, *in)
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec.
func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec {
if in == nil {
return nil
}
out := new(ImageReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) {
*out = *in
if in.AuditAnnotations != nil {
in, out := &in.AuditAnnotations, &out.AuditAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus.
func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus {
if in == nil {
return nil
}
out := new(ImageReviewStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"net/netip"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/utils/ptr"
"sigs.k8s.io/randfill"
)
// Funcs returns the fuzzer functions for the networking api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(np *networking.NetworkPolicyPeer, c randfill.Continue) {
c.FillNoCustom(np) // fuzz self without calling this function again
// TODO: Implement a fuzzer to generate valid keys, values and operators for
// selector requirements.
if np.IPBlock != nil {
np.IPBlock = &networking.IPBlock{
CIDR: "192.168.1.0/24",
Except: []string{"192.168.1.1/24", "192.168.1.2/24"},
}
}
},
func(np *networking.NetworkPolicy, c randfill.Continue) {
c.FillNoCustom(np) // fuzz self without calling this function again
// TODO: Implement a fuzzer to generate valid keys, values and operators for
// selector requirements.
if len(np.Spec.PolicyTypes) == 0 {
np.Spec.PolicyTypes = []networking.PolicyType{networking.PolicyTypeIngress}
}
},
func(path *networking.HTTPIngressPath, c randfill.Continue) {
c.FillNoCustom(path) // fuzz self without calling this function again
pathTypes := []networking.PathType{networking.PathTypeExact, networking.PathTypePrefix, networking.PathTypeImplementationSpecific}
path.PathType = &pathTypes[c.Rand.Intn(len(pathTypes))]
},
func(p *networking.ServiceBackendPort, c randfill.Continue) {
c.FillNoCustom(p)
// clear one of the fields
if c.Bool() {
p.Name = ""
if p.Number == 0 {
p.Number = 1
}
} else {
p.Number = 0
if p.Name == "" {
p.Name = "portname"
}
}
},
func(p *networking.IngressClass, c randfill.Continue) {
c.FillNoCustom(p) // fuzz self without calling this function again
// default Parameters to Cluster
if p.Spec.Parameters == nil || p.Spec.Parameters.Scope == nil {
p.Spec.Parameters = &networking.IngressClassParametersReference{
Scope: ptr.To(networking.IngressClassParametersReferenceScopeCluster),
}
}
},
func(obj *networking.IPAddress, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
// Pick the IP family at random; generateRandomIP emits 4 bytes for IPv4
// and 16 bytes for IPv6.
is6 := c.Bool()
ip := generateRandomIP(is6, c)
obj.Name = ip
},
func(obj *networking.ServiceCIDR, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
is6 := c.Bool()
primary := generateRandomCIDR(is6, c)
obj.Spec.CIDRs = []string{primary}
// Sometimes append a second CIDR of the opposite family (dual-stack).
if c.Bool() {
obj.Spec.CIDRs = append(obj.Spec.CIDRs, generateRandomCIDR(!is6, c))
}
},
}
}
func generateRandomIP(is6 bool, c randfill.Continue) string {
n := 4
if is6 {
n = 16
}
bytes := make([]byte, n)
for i := 0; i < n; i++ {
bytes[i] = uint8(c.Rand.Intn(256)) // 256, not 255, so 0xFF bytes are reachable
}
ip, ok := netip.AddrFromSlice(bytes)
if ok {
return ip.String()
}
// this should not happen
panic(fmt.Sprintf("invalid IP %v", bytes))
}
func generateRandomCIDR(is6 bool, c randfill.Continue) string {
ip, err := netip.ParseAddr(generateRandomIP(is6, c))
if err != nil {
// generateRandomIP already panics if it produces an invalid IP
panic(err)
}
n := 32
if is6 {
n = 128
}
bits := c.Rand.Intn(n)
prefix := netip.PrefixFrom(ip, bits)
return prefix.Masked().String()
}
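// Illustrative sketch, not part of the file above: wiring Funcs into a
// randfill.Filler by hand. In-tree these funcs are consumed through the
// apitesting fuzzer harness for round-trip tests; the direct wiring and the
// zero-value CodecFactory below are assumptions made to keep the example
// self-contained (Funcs ignores its codecs argument in this package).
package main

import (
	"fmt"

	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/apis/networking"
	netfuzzer "k8s.io/kubernetes/pkg/apis/networking/fuzzer"
	"sigs.k8s.io/randfill"
)

func main() {
	f := randfill.New().Funcs(netfuzzer.Funcs(runtimeserializer.CodecFactory{})...)
	np := &networking.NetworkPolicy{}
	f.Fill(np)
	// The custom func above guarantees at least one policy type.
	fmt.Println(np.Spec.PolicyTypes)
}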
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the networking API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/networking"
v1 "k8s.io/kubernetes/pkg/apis/networking/v1"
"k8s.io/kubernetes/pkg/apis/networking/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(networking.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networking
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "networking.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder collects the functions that add this group's types to a scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&NetworkPolicy{},
&NetworkPolicyList{},
&Ingress{},
&IngressList{},
&IngressClass{},
&IngressClassList{},
&IPAddress{},
&IPAddressList{},
&ServiceCIDR{},
&ServiceCIDRList{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_NetworkPolicyPort(obj *networkingv1.NetworkPolicyPort) {
// Default any undefined Protocol fields to TCP.
if obj.Protocol == nil {
proto := v1.ProtocolTCP
obj.Protocol = &proto
}
}
func SetDefaults_NetworkPolicy(obj *networkingv1.NetworkPolicy) {
if len(obj.Spec.PolicyTypes) == 0 {
// Any policy that does not specify policyTypes implies at least "Ingress".
obj.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}
if len(obj.Spec.Egress) != 0 {
obj.Spec.PolicyTypes = append(obj.Spec.PolicyTypes, networkingv1.PolicyTypeEgress)
}
}
}
func SetDefaults_IngressClass(obj *networkingv1.IngressClass) {
if obj.Spec.Parameters != nil && obj.Spec.Parameters.Scope == nil {
obj.Spec.Parameters.Scope = ptr.To(networkingv1.IngressClassParametersReferenceScopeCluster)
}
}
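// Illustrative sketch, not part of the file above: the egress-aware
// defaulting in SetDefaults_NetworkPolicy. A policy with egress rules but no
// explicit policyTypes is defaulted to both Ingress and Egress.
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	v1 "k8s.io/kubernetes/pkg/apis/networking/v1"
)

func main() {
	obj := &networkingv1.NetworkPolicy{
		Spec: networkingv1.NetworkPolicySpec{
			Egress: []networkingv1.NetworkPolicyEgressRule{{}},
		},
	}
	v1.SetDefaults_NetworkPolicy(obj)
	fmt.Println(obj.Spec.PolicyTypes) // [Ingress Egress]
}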
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "networking.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &networkingv1.SchemeBuilder
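// AddToScheme applies all the stored functions to the scheme.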
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
networking "k8s.io/kubernetes/pkg/apis/networking"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*networkingv1.HTTPIngressPath)(nil), (*networking.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HTTPIngressPath_To_networking_HTTPIngressPath(a.(*networkingv1.HTTPIngressPath), b.(*networking.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressPath)(nil), (*networkingv1.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressPath_To_v1_HTTPIngressPath(a.(*networking.HTTPIngressPath), b.(*networkingv1.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.HTTPIngressRuleValue)(nil), (*networking.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(a.(*networkingv1.HTTPIngressRuleValue), b.(*networking.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressRuleValue)(nil), (*networkingv1.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressRuleValue_To_v1_HTTPIngressRuleValue(a.(*networking.HTTPIngressRuleValue), b.(*networkingv1.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IPAddress)(nil), (*networking.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IPAddress_To_networking_IPAddress(a.(*networkingv1.IPAddress), b.(*networking.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddress)(nil), (*networkingv1.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddress_To_v1_IPAddress(a.(*networking.IPAddress), b.(*networkingv1.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IPAddressList)(nil), (*networking.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IPAddressList_To_networking_IPAddressList(a.(*networkingv1.IPAddressList), b.(*networking.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressList)(nil), (*networkingv1.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressList_To_v1_IPAddressList(a.(*networking.IPAddressList), b.(*networkingv1.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IPAddressSpec)(nil), (*networking.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IPAddressSpec_To_networking_IPAddressSpec(a.(*networkingv1.IPAddressSpec), b.(*networking.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressSpec)(nil), (*networkingv1.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressSpec_To_v1_IPAddressSpec(a.(*networking.IPAddressSpec), b.(*networkingv1.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IPBlock)(nil), (*networking.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IPBlock_To_networking_IPBlock(a.(*networkingv1.IPBlock), b.(*networking.IPBlock), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPBlock)(nil), (*networkingv1.IPBlock)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPBlock_To_v1_IPBlock(a.(*networking.IPBlock), b.(*networkingv1.IPBlock), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.Ingress)(nil), (*networking.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Ingress_To_networking_Ingress(a.(*networkingv1.Ingress), b.(*networking.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.Ingress)(nil), (*networkingv1.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_Ingress_To_v1_Ingress(a.(*networking.Ingress), b.(*networkingv1.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressBackend)(nil), (*networking.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressBackend_To_networking_IngressBackend(a.(*networkingv1.IngressBackend), b.(*networking.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressBackend)(nil), (*networkingv1.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressBackend_To_v1_IngressBackend(a.(*networking.IngressBackend), b.(*networkingv1.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressClass)(nil), (*networking.IngressClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressClass_To_networking_IngressClass(a.(*networkingv1.IngressClass), b.(*networking.IngressClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClass)(nil), (*networkingv1.IngressClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClass_To_v1_IngressClass(a.(*networking.IngressClass), b.(*networkingv1.IngressClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressClassList)(nil), (*networking.IngressClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressClassList_To_networking_IngressClassList(a.(*networkingv1.IngressClassList), b.(*networking.IngressClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassList)(nil), (*networkingv1.IngressClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassList_To_v1_IngressClassList(a.(*networking.IngressClassList), b.(*networkingv1.IngressClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressClassParametersReference)(nil), (*networking.IngressClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressClassParametersReference_To_networking_IngressClassParametersReference(a.(*networkingv1.IngressClassParametersReference), b.(*networking.IngressClassParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassParametersReference)(nil), (*networkingv1.IngressClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassParametersReference_To_v1_IngressClassParametersReference(a.(*networking.IngressClassParametersReference), b.(*networkingv1.IngressClassParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressClassSpec)(nil), (*networking.IngressClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressClassSpec_To_networking_IngressClassSpec(a.(*networkingv1.IngressClassSpec), b.(*networking.IngressClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassSpec)(nil), (*networkingv1.IngressClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassSpec_To_v1_IngressClassSpec(a.(*networking.IngressClassSpec), b.(*networkingv1.IngressClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressList)(nil), (*networking.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressList_To_networking_IngressList(a.(*networkingv1.IngressList), b.(*networking.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressList)(nil), (*networkingv1.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressList_To_v1_IngressList(a.(*networking.IngressList), b.(*networkingv1.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressLoadBalancerIngress)(nil), (*networking.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(a.(*networkingv1.IngressLoadBalancerIngress), b.(*networking.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerIngress)(nil), (*networkingv1.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerIngress_To_v1_IngressLoadBalancerIngress(a.(*networking.IngressLoadBalancerIngress), b.(*networkingv1.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressLoadBalancerStatus)(nil), (*networking.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(a.(*networkingv1.IngressLoadBalancerStatus), b.(*networking.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerStatus)(nil), (*networkingv1.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus(a.(*networking.IngressLoadBalancerStatus), b.(*networkingv1.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressPortStatus)(nil), (*networking.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressPortStatus_To_networking_IngressPortStatus(a.(*networkingv1.IngressPortStatus), b.(*networking.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressPortStatus)(nil), (*networkingv1.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressPortStatus_To_v1_IngressPortStatus(a.(*networking.IngressPortStatus), b.(*networkingv1.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressRule)(nil), (*networking.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressRule_To_networking_IngressRule(a.(*networkingv1.IngressRule), b.(*networking.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRule)(nil), (*networkingv1.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRule_To_v1_IngressRule(a.(*networking.IngressRule), b.(*networkingv1.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressRuleValue)(nil), (*networking.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressRuleValue_To_networking_IngressRuleValue(a.(*networkingv1.IngressRuleValue), b.(*networking.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRuleValue)(nil), (*networkingv1.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRuleValue_To_v1_IngressRuleValue(a.(*networking.IngressRuleValue), b.(*networkingv1.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressServiceBackend)(nil), (*networking.IngressServiceBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressServiceBackend_To_networking_IngressServiceBackend(a.(*networkingv1.IngressServiceBackend), b.(*networking.IngressServiceBackend), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressServiceBackend)(nil), (*networkingv1.IngressServiceBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressServiceBackend_To_v1_IngressServiceBackend(a.(*networking.IngressServiceBackend), b.(*networkingv1.IngressServiceBackend), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressSpec)(nil), (*networking.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressSpec_To_networking_IngressSpec(a.(*networkingv1.IngressSpec), b.(*networking.IngressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressSpec)(nil), (*networkingv1.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressSpec_To_v1_IngressSpec(a.(*networking.IngressSpec), b.(*networkingv1.IngressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressStatus)(nil), (*networking.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressStatus_To_networking_IngressStatus(a.(*networkingv1.IngressStatus), b.(*networking.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressStatus)(nil), (*networkingv1.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressStatus_To_v1_IngressStatus(a.(*networking.IngressStatus), b.(*networkingv1.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.IngressTLS)(nil), (*networking.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_IngressTLS_To_networking_IngressTLS(a.(*networkingv1.IngressTLS), b.(*networking.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressTLS)(nil), (*networkingv1.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressTLS_To_v1_IngressTLS(a.(*networking.IngressTLS), b.(*networkingv1.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicy)(nil), (*networking.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicy_To_networking_NetworkPolicy(a.(*networkingv1.NetworkPolicy), b.(*networking.NetworkPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicy)(nil), (*networkingv1.NetworkPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicy_To_v1_NetworkPolicy(a.(*networking.NetworkPolicy), b.(*networkingv1.NetworkPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicyEgressRule)(nil), (*networking.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(a.(*networkingv1.NetworkPolicyEgressRule), b.(*networking.NetworkPolicyEgressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyEgressRule)(nil), (*networkingv1.NetworkPolicyEgressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(a.(*networking.NetworkPolicyEgressRule), b.(*networkingv1.NetworkPolicyEgressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicyIngressRule)(nil), (*networking.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(a.(*networkingv1.NetworkPolicyIngressRule), b.(*networking.NetworkPolicyIngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyIngressRule)(nil), (*networkingv1.NetworkPolicyIngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(a.(*networking.NetworkPolicyIngressRule), b.(*networkingv1.NetworkPolicyIngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicyList)(nil), (*networking.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(a.(*networkingv1.NetworkPolicyList), b.(*networking.NetworkPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyList)(nil), (*networkingv1.NetworkPolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(a.(*networking.NetworkPolicyList), b.(*networkingv1.NetworkPolicyList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicyPeer)(nil), (*networking.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(a.(*networkingv1.NetworkPolicyPeer), b.(*networking.NetworkPolicyPeer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPeer)(nil), (*networkingv1.NetworkPolicyPeer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(a.(*networking.NetworkPolicyPeer), b.(*networkingv1.NetworkPolicyPeer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicyPort)(nil), (*networking.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(a.(*networkingv1.NetworkPolicyPort), b.(*networking.NetworkPolicyPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicyPort)(nil), (*networkingv1.NetworkPolicyPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(a.(*networking.NetworkPolicyPort), b.(*networkingv1.NetworkPolicyPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.NetworkPolicySpec)(nil), (*networking.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(a.(*networkingv1.NetworkPolicySpec), b.(*networking.NetworkPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.NetworkPolicySpec)(nil), (*networkingv1.NetworkPolicySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(a.(*networking.NetworkPolicySpec), b.(*networkingv1.NetworkPolicySpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ParentReference)(nil), (*networking.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ParentReference_To_networking_ParentReference(a.(*networkingv1.ParentReference), b.(*networking.ParentReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ParentReference)(nil), (*networkingv1.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ParentReference_To_v1_ParentReference(a.(*networking.ParentReference), b.(*networkingv1.ParentReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ServiceBackendPort)(nil), (*networking.ServiceBackendPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceBackendPort_To_networking_ServiceBackendPort(a.(*networkingv1.ServiceBackendPort), b.(*networking.ServiceBackendPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceBackendPort)(nil), (*networkingv1.ServiceBackendPort)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceBackendPort_To_v1_ServiceBackendPort(a.(*networking.ServiceBackendPort), b.(*networkingv1.ServiceBackendPort), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ServiceCIDR)(nil), (*networking.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceCIDR_To_networking_ServiceCIDR(a.(*networkingv1.ServiceCIDR), b.(*networking.ServiceCIDR), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDR)(nil), (*networkingv1.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDR_To_v1_ServiceCIDR(a.(*networking.ServiceCIDR), b.(*networkingv1.ServiceCIDR), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ServiceCIDRList)(nil), (*networking.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceCIDRList_To_networking_ServiceCIDRList(a.(*networkingv1.ServiceCIDRList), b.(*networking.ServiceCIDRList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRList)(nil), (*networkingv1.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRList_To_v1_ServiceCIDRList(a.(*networking.ServiceCIDRList), b.(*networkingv1.ServiceCIDRList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ServiceCIDRSpec)(nil), (*networking.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(a.(*networkingv1.ServiceCIDRSpec), b.(*networking.ServiceCIDRSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRSpec)(nil), (*networkingv1.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec(a.(*networking.ServiceCIDRSpec), b.(*networkingv1.ServiceCIDRSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1.ServiceCIDRStatus)(nil), (*networking.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(a.(*networkingv1.ServiceCIDRStatus), b.(*networking.ServiceCIDRStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRStatus)(nil), (*networkingv1.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus(a.(*networking.ServiceCIDRStatus), b.(*networkingv1.ServiceCIDRStatus), scope)
}); err != nil {
return err
}
return nil
}
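
// The following is a usage sketch, not part of the generated output: it shows
// how a caller might use the public RegisterConversions entry point to build
// an arbitrary scheme. The helper name newNetworkingScheme is illustrative
// only; in-tree, localSchemeBuilder normally arranges for RegisterConversions
// to run at package init time.
func newNetworkingScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// Register the generated v1 <-> internal networking conversions above.
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}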
func autoConvert_v1_HTTPIngressPath_To_networking_HTTPIngressPath(in *networkingv1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*networking.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_v1_IngressBackend_To_networking_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_v1_HTTPIngressPath_To_networking_HTTPIngressPath is an autogenerated conversion function.
func Convert_v1_HTTPIngressPath_To_networking_HTTPIngressPath(in *networkingv1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_v1_HTTPIngressPath_To_networking_HTTPIngressPath(in, out, s)
}
func autoConvert_networking_HTTPIngressPath_To_v1_HTTPIngressPath(in *networking.HTTPIngressPath, out *networkingv1.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*networkingv1.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_networking_IngressBackend_To_v1_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_networking_HTTPIngressPath_To_v1_HTTPIngressPath is an autogenerated conversion function.
func Convert_networking_HTTPIngressPath_To_v1_HTTPIngressPath(in *networking.HTTPIngressPath, out *networkingv1.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressPath_To_v1_HTTPIngressPath(in, out, s)
}
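
// Note on the generated pattern (editorial comment, not generator output):
// each conversion comes as a pair. The unexported autoConvert_* function
// holds the field-by-field logic that conversion-gen could derive
// automatically, while the exported Convert_* wrapper is the stable entry
// point registered with the scheme above. When a type needs manual
// conversion logic, the convention is to hand-write the Convert_* function
// and have it delegate to autoConvert_* for the trivially mapped fields.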
func autoConvert_v1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *networkingv1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
out.Paths = *(*[]networking.HTTPIngressPath)(unsafe.Pointer(&in.Paths))
return nil
}
// Convert_v1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_v1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *networkingv1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_v1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in, out, s)
}
func autoConvert_networking_HTTPIngressRuleValue_To_v1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *networkingv1.HTTPIngressRuleValue, s conversion.Scope) error {
out.Paths = *(*[]networkingv1.HTTPIngressPath)(unsafe.Pointer(&in.Paths))
return nil
}
// Convert_networking_HTTPIngressRuleValue_To_v1_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_networking_HTTPIngressRuleValue_To_v1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *networkingv1.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressRuleValue_To_v1_HTTPIngressRuleValue(in, out, s)
}
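
// Note on the unsafe.Pointer casts (editorial comment): when conversion-gen
// can prove that the source and destination types share an identical memory
// layout, it emits a zero-copy reinterpretation such as the slice cast above
// instead of allocating and converting element by element. The cast
// *(*[]networking.HTTPIngressPath)(unsafe.Pointer(&in.Paths)) reuses the
// existing slice header, so the converted object aliases the input's backing
// array rather than deep-copying it.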
func autoConvert_v1_IPAddress_To_networking_IPAddress(in *networkingv1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_IPAddressSpec_To_networking_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_IPAddress_To_networking_IPAddress is an autogenerated conversion function.
func Convert_v1_IPAddress_To_networking_IPAddress(in *networkingv1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
return autoConvert_v1_IPAddress_To_networking_IPAddress(in, out, s)
}
func autoConvert_networking_IPAddress_To_v1_IPAddress(in *networking.IPAddress, out *networkingv1.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IPAddressSpec_To_v1_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_IPAddress_To_v1_IPAddress is an autogenerated conversion function.
func Convert_networking_IPAddress_To_v1_IPAddress(in *networking.IPAddress, out *networkingv1.IPAddress, s conversion.Scope) error {
return autoConvert_networking_IPAddress_To_v1_IPAddress(in, out, s)
}
func autoConvert_v1_IPAddressList_To_networking_IPAddressList(in *networkingv1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_IPAddressList_To_networking_IPAddressList is an autogenerated conversion function.
func Convert_v1_IPAddressList_To_networking_IPAddressList(in *networkingv1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
return autoConvert_v1_IPAddressList_To_networking_IPAddressList(in, out, s)
}
func autoConvert_networking_IPAddressList_To_v1_IPAddressList(in *networking.IPAddressList, out *networkingv1.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_IPAddressList_To_v1_IPAddressList is an autogenerated conversion function.
func Convert_networking_IPAddressList_To_v1_IPAddressList(in *networking.IPAddressList, out *networkingv1.IPAddressList, s conversion.Scope) error {
return autoConvert_networking_IPAddressList_To_v1_IPAddressList(in, out, s)
}
func autoConvert_v1_IPAddressSpec_To_networking_IPAddressSpec(in *networkingv1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*networking.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}
// Convert_v1_IPAddressSpec_To_networking_IPAddressSpec is an autogenerated conversion function.
func Convert_v1_IPAddressSpec_To_networking_IPAddressSpec(in *networkingv1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
return autoConvert_v1_IPAddressSpec_To_networking_IPAddressSpec(in, out, s)
}
func autoConvert_networking_IPAddressSpec_To_v1_IPAddressSpec(in *networking.IPAddressSpec, out *networkingv1.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*networkingv1.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}
// Convert_networking_IPAddressSpec_To_v1_IPAddressSpec is an autogenerated conversion function.
func Convert_networking_IPAddressSpec_To_v1_IPAddressSpec(in *networking.IPAddressSpec, out *networkingv1.IPAddressSpec, s conversion.Scope) error {
return autoConvert_networking_IPAddressSpec_To_v1_IPAddressSpec(in, out, s)
}
func autoConvert_v1_IPBlock_To_networking_IPBlock(in *networkingv1.IPBlock, out *networking.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = *(*[]string)(unsafe.Pointer(&in.Except))
return nil
}
// Convert_v1_IPBlock_To_networking_IPBlock is an autogenerated conversion function.
func Convert_v1_IPBlock_To_networking_IPBlock(in *networkingv1.IPBlock, out *networking.IPBlock, s conversion.Scope) error {
return autoConvert_v1_IPBlock_To_networking_IPBlock(in, out, s)
}
func autoConvert_networking_IPBlock_To_v1_IPBlock(in *networking.IPBlock, out *networkingv1.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = *(*[]string)(unsafe.Pointer(&in.Except))
return nil
}
// Convert_networking_IPBlock_To_v1_IPBlock is an autogenerated conversion function.
func Convert_networking_IPBlock_To_v1_IPBlock(in *networking.IPBlock, out *networkingv1.IPBlock, s conversion.Scope) error {
return autoConvert_networking_IPBlock_To_v1_IPBlock(in, out, s)
}
func autoConvert_v1_Ingress_To_networking_Ingress(in *networkingv1.Ingress, out *networking.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_IngressSpec_To_networking_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_IngressStatus_To_networking_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_Ingress_To_networking_Ingress is an autogenerated conversion function.
func Convert_v1_Ingress_To_networking_Ingress(in *networkingv1.Ingress, out *networking.Ingress, s conversion.Scope) error {
return autoConvert_v1_Ingress_To_networking_Ingress(in, out, s)
}
func autoConvert_networking_Ingress_To_v1_Ingress(in *networking.Ingress, out *networkingv1.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IngressSpec_To_v1_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_networking_IngressStatus_To_v1_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_networking_Ingress_To_v1_Ingress is an autogenerated conversion function.
func Convert_networking_Ingress_To_v1_Ingress(in *networking.Ingress, out *networkingv1.Ingress, s conversion.Scope) error {
return autoConvert_networking_Ingress_To_v1_Ingress(in, out, s)
}
func autoConvert_v1_IngressBackend_To_networking_IngressBackend(in *networkingv1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
out.Service = (*networking.IngressServiceBackend)(unsafe.Pointer(in.Service))
out.Resource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
// Convert_v1_IngressBackend_To_networking_IngressBackend is an autogenerated conversion function.
func Convert_v1_IngressBackend_To_networking_IngressBackend(in *networkingv1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
return autoConvert_v1_IngressBackend_To_networking_IngressBackend(in, out, s)
}
func autoConvert_networking_IngressBackend_To_v1_IngressBackend(in *networking.IngressBackend, out *networkingv1.IngressBackend, s conversion.Scope) error {
out.Service = (*networkingv1.IngressServiceBackend)(unsafe.Pointer(in.Service))
out.Resource = (*corev1.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
// Convert_networking_IngressBackend_To_v1_IngressBackend is an autogenerated conversion function.
func Convert_networking_IngressBackend_To_v1_IngressBackend(in *networking.IngressBackend, out *networkingv1.IngressBackend, s conversion.Scope) error {
return autoConvert_networking_IngressBackend_To_v1_IngressBackend(in, out, s)
}
func autoConvert_v1_IngressClass_To_networking_IngressClass(in *networkingv1.IngressClass, out *networking.IngressClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_IngressClassSpec_To_networking_IngressClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_IngressClass_To_networking_IngressClass is an autogenerated conversion function.
func Convert_v1_IngressClass_To_networking_IngressClass(in *networkingv1.IngressClass, out *networking.IngressClass, s conversion.Scope) error {
return autoConvert_v1_IngressClass_To_networking_IngressClass(in, out, s)
}
func autoConvert_networking_IngressClass_To_v1_IngressClass(in *networking.IngressClass, out *networkingv1.IngressClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IngressClassSpec_To_v1_IngressClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressClass_To_v1_IngressClass is an autogenerated conversion function.
func Convert_networking_IngressClass_To_v1_IngressClass(in *networking.IngressClass, out *networkingv1.IngressClass, s conversion.Scope) error {
return autoConvert_networking_IngressClass_To_v1_IngressClass(in, out, s)
}
func autoConvert_v1_IngressClassList_To_networking_IngressClassList(in *networkingv1.IngressClassList, out *networking.IngressClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.IngressClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_IngressClassList_To_networking_IngressClassList is an autogenerated conversion function.
func Convert_v1_IngressClassList_To_networking_IngressClassList(in *networkingv1.IngressClassList, out *networking.IngressClassList, s conversion.Scope) error {
return autoConvert_v1_IngressClassList_To_networking_IngressClassList(in, out, s)
}
func autoConvert_networking_IngressClassList_To_v1_IngressClassList(in *networking.IngressClassList, out *networkingv1.IngressClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1.IngressClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_IngressClassList_To_v1_IngressClassList is an autogenerated conversion function.
func Convert_networking_IngressClassList_To_v1_IngressClassList(in *networking.IngressClassList, out *networkingv1.IngressClassList, s conversion.Scope) error {
return autoConvert_networking_IngressClassList_To_v1_IngressClassList(in, out, s)
}
func autoConvert_v1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in *networkingv1.IngressClassParametersReference, out *networking.IngressClassParametersReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Scope = (*string)(unsafe.Pointer(in.Scope))
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_v1_IngressClassParametersReference_To_networking_IngressClassParametersReference is an autogenerated conversion function.
func Convert_v1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in *networkingv1.IngressClassParametersReference, out *networking.IngressClassParametersReference, s conversion.Scope) error {
return autoConvert_v1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in, out, s)
}
func autoConvert_networking_IngressClassParametersReference_To_v1_IngressClassParametersReference(in *networking.IngressClassParametersReference, out *networkingv1.IngressClassParametersReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Scope = (*string)(unsafe.Pointer(in.Scope))
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_networking_IngressClassParametersReference_To_v1_IngressClassParametersReference is an autogenerated conversion function.
func Convert_networking_IngressClassParametersReference_To_v1_IngressClassParametersReference(in *networking.IngressClassParametersReference, out *networkingv1.IngressClassParametersReference, s conversion.Scope) error {
return autoConvert_networking_IngressClassParametersReference_To_v1_IngressClassParametersReference(in, out, s)
}
func autoConvert_v1_IngressClassSpec_To_networking_IngressClassSpec(in *networkingv1.IngressClassSpec, out *networking.IngressClassSpec, s conversion.Scope) error {
out.Controller = in.Controller
out.Parameters = (*networking.IngressClassParametersReference)(unsafe.Pointer(in.Parameters))
return nil
}
// Convert_v1_IngressClassSpec_To_networking_IngressClassSpec is an autogenerated conversion function.
func Convert_v1_IngressClassSpec_To_networking_IngressClassSpec(in *networkingv1.IngressClassSpec, out *networking.IngressClassSpec, s conversion.Scope) error {
return autoConvert_v1_IngressClassSpec_To_networking_IngressClassSpec(in, out, s)
}
func autoConvert_networking_IngressClassSpec_To_v1_IngressClassSpec(in *networking.IngressClassSpec, out *networkingv1.IngressClassSpec, s conversion.Scope) error {
out.Controller = in.Controller
out.Parameters = (*networkingv1.IngressClassParametersReference)(unsafe.Pointer(in.Parameters))
return nil
}
// Convert_networking_IngressClassSpec_To_v1_IngressClassSpec is an autogenerated conversion function.
func Convert_networking_IngressClassSpec_To_v1_IngressClassSpec(in *networking.IngressClassSpec, out *networkingv1.IngressClassSpec, s conversion.Scope) error {
return autoConvert_networking_IngressClassSpec_To_v1_IngressClassSpec(in, out, s)
}
func autoConvert_v1_IngressList_To_networking_IngressList(in *networkingv1.IngressList, out *networking.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.Ingress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_IngressList_To_networking_IngressList is an autogenerated conversion function.
func Convert_v1_IngressList_To_networking_IngressList(in *networkingv1.IngressList, out *networking.IngressList, s conversion.Scope) error {
return autoConvert_v1_IngressList_To_networking_IngressList(in, out, s)
}
func autoConvert_networking_IngressList_To_v1_IngressList(in *networking.IngressList, out *networkingv1.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1.Ingress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_IngressList_To_v1_IngressList is an autogenerated conversion function.
func Convert_networking_IngressList_To_v1_IngressList(in *networking.IngressList, out *networkingv1.IngressList, s conversion.Scope) error {
return autoConvert_networking_IngressList_To_v1_IngressList(in, out, s)
}
func autoConvert_v1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *networkingv1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]networking.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_v1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *networkingv1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_v1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerIngress_To_v1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *networkingv1.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]networkingv1.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_networking_IngressLoadBalancerIngress_To_v1_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerIngress_To_v1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *networkingv1.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerIngress_To_v1_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *networkingv1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]networking.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *networkingv1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *networkingv1.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]networkingv1.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *networkingv1.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_v1_IngressPortStatus_To_networking_IngressPortStatus(in *networkingv1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = core.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_v1_IngressPortStatus_To_networking_IngressPortStatus is an autogenerated conversion function.
func Convert_v1_IngressPortStatus_To_networking_IngressPortStatus(in *networkingv1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
return autoConvert_v1_IngressPortStatus_To_networking_IngressPortStatus(in, out, s)
}
func autoConvert_networking_IngressPortStatus_To_v1_IngressPortStatus(in *networking.IngressPortStatus, out *networkingv1.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = corev1.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_networking_IngressPortStatus_To_v1_IngressPortStatus is an autogenerated conversion function.
func Convert_networking_IngressPortStatus_To_v1_IngressPortStatus(in *networking.IngressPortStatus, out *networkingv1.IngressPortStatus, s conversion.Scope) error {
return autoConvert_networking_IngressPortStatus_To_v1_IngressPortStatus(in, out, s)
}
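
// Note (editorial comment): Protocol above is converted with a plain Go
// conversion, core.Protocol(in.Protocol), rather than an unsafe cast,
// because both sides are simple string-kind types held by value, so Go
// permits the direct conversion. Pointer and slice fields, by contrast,
// need the unsafe.Pointer reinterpretation because Go forbids direct
// conversion between pointers to distinct named types.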
func autoConvert_v1_IngressRule_To_networking_IngressRule(in *networkingv1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_v1_IngressRuleValue_To_networking_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_v1_IngressRule_To_networking_IngressRule is an autogenerated conversion function.
func Convert_v1_IngressRule_To_networking_IngressRule(in *networkingv1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
return autoConvert_v1_IngressRule_To_networking_IngressRule(in, out, s)
}
func autoConvert_networking_IngressRule_To_v1_IngressRule(in *networking.IngressRule, out *networkingv1.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_networking_IngressRuleValue_To_v1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressRule_To_v1_IngressRule is an autogenerated conversion function.
func Convert_networking_IngressRule_To_v1_IngressRule(in *networking.IngressRule, out *networkingv1.IngressRule, s conversion.Scope) error {
return autoConvert_networking_IngressRule_To_v1_IngressRule(in, out, s)
}
func autoConvert_v1_IngressRuleValue_To_networking_IngressRuleValue(in *networkingv1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
out.HTTP = (*networking.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP))
return nil
}
// Convert_v1_IngressRuleValue_To_networking_IngressRuleValue is an autogenerated conversion function.
func Convert_v1_IngressRuleValue_To_networking_IngressRuleValue(in *networkingv1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
return autoConvert_v1_IngressRuleValue_To_networking_IngressRuleValue(in, out, s)
}
func autoConvert_networking_IngressRuleValue_To_v1_IngressRuleValue(in *networking.IngressRuleValue, out *networkingv1.IngressRuleValue, s conversion.Scope) error {
out.HTTP = (*networkingv1.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP))
return nil
}
// Convert_networking_IngressRuleValue_To_v1_IngressRuleValue is an autogenerated conversion function.
func Convert_networking_IngressRuleValue_To_v1_IngressRuleValue(in *networking.IngressRuleValue, out *networkingv1.IngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_IngressRuleValue_To_v1_IngressRuleValue(in, out, s)
}
func autoConvert_v1_IngressServiceBackend_To_networking_IngressServiceBackend(in *networkingv1.IngressServiceBackend, out *networking.IngressServiceBackend, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_ServiceBackendPort_To_networking_ServiceBackendPort(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_v1_IngressServiceBackend_To_networking_IngressServiceBackend is an autogenerated conversion function.
func Convert_v1_IngressServiceBackend_To_networking_IngressServiceBackend(in *networkingv1.IngressServiceBackend, out *networking.IngressServiceBackend, s conversion.Scope) error {
return autoConvert_v1_IngressServiceBackend_To_networking_IngressServiceBackend(in, out, s)
}
func autoConvert_networking_IngressServiceBackend_To_v1_IngressServiceBackend(in *networking.IngressServiceBackend, out *networkingv1.IngressServiceBackend, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_networking_ServiceBackendPort_To_v1_ServiceBackendPort(&in.Port, &out.Port, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressServiceBackend_To_v1_IngressServiceBackend is an autogenerated conversion function.
func Convert_networking_IngressServiceBackend_To_v1_IngressServiceBackend(in *networking.IngressServiceBackend, out *networkingv1.IngressServiceBackend, s conversion.Scope) error {
return autoConvert_networking_IngressServiceBackend_To_v1_IngressServiceBackend(in, out, s)
}
func autoConvert_v1_IngressSpec_To_networking_IngressSpec(in *networkingv1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
out.DefaultBackend = (*networking.IngressBackend)(unsafe.Pointer(in.DefaultBackend))
out.TLS = *(*[]networking.IngressTLS)(unsafe.Pointer(&in.TLS))
out.Rules = *(*[]networking.IngressRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_IngressSpec_To_networking_IngressSpec is an autogenerated conversion function.
func Convert_v1_IngressSpec_To_networking_IngressSpec(in *networkingv1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
return autoConvert_v1_IngressSpec_To_networking_IngressSpec(in, out, s)
}
func autoConvert_networking_IngressSpec_To_v1_IngressSpec(in *networking.IngressSpec, out *networkingv1.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
out.DefaultBackend = (*networkingv1.IngressBackend)(unsafe.Pointer(in.DefaultBackend))
out.TLS = *(*[]networkingv1.IngressTLS)(unsafe.Pointer(&in.TLS))
out.Rules = *(*[]networkingv1.IngressRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_networking_IngressSpec_To_v1_IngressSpec is an autogenerated conversion function.
func Convert_networking_IngressSpec_To_v1_IngressSpec(in *networking.IngressSpec, out *networkingv1.IngressSpec, s conversion.Scope) error {
return autoConvert_networking_IngressSpec_To_v1_IngressSpec(in, out, s)
}
func autoConvert_v1_IngressStatus_To_networking_IngressStatus(in *networkingv1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
if err := Convert_v1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_v1_IngressStatus_To_networking_IngressStatus is an autogenerated conversion function.
func Convert_v1_IngressStatus_To_networking_IngressStatus(in *networkingv1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
return autoConvert_v1_IngressStatus_To_networking_IngressStatus(in, out, s)
}
func autoConvert_networking_IngressStatus_To_v1_IngressStatus(in *networking.IngressStatus, out *networkingv1.IngressStatus, s conversion.Scope) error {
if err := Convert_networking_IngressLoadBalancerStatus_To_v1_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressStatus_To_v1_IngressStatus is an autogenerated conversion function.
func Convert_networking_IngressStatus_To_v1_IngressStatus(in *networking.IngressStatus, out *networkingv1.IngressStatus, s conversion.Scope) error {
return autoConvert_networking_IngressStatus_To_v1_IngressStatus(in, out, s)
}
func autoConvert_v1_IngressTLS_To_networking_IngressTLS(in *networkingv1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_v1_IngressTLS_To_networking_IngressTLS is an autogenerated conversion function.
func Convert_v1_IngressTLS_To_networking_IngressTLS(in *networkingv1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
return autoConvert_v1_IngressTLS_To_networking_IngressTLS(in, out, s)
}
func autoConvert_networking_IngressTLS_To_v1_IngressTLS(in *networking.IngressTLS, out *networkingv1.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_networking_IngressTLS_To_v1_IngressTLS is an autogenerated conversion function.
func Convert_networking_IngressTLS_To_v1_IngressTLS(in *networking.IngressTLS, out *networkingv1.IngressTLS, s conversion.Scope) error {
return autoConvert_networking_IngressTLS_To_v1_IngressTLS(in, out, s)
}
func autoConvert_v1_NetworkPolicy_To_networking_NetworkPolicy(in *networkingv1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_NetworkPolicy_To_networking_NetworkPolicy is an autogenerated conversion function.
func Convert_v1_NetworkPolicy_To_networking_NetworkPolicy(in *networkingv1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicy_To_networking_NetworkPolicy(in, out, s)
}
func autoConvert_networking_NetworkPolicy_To_v1_NetworkPolicy(in *networking.NetworkPolicy, out *networkingv1.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_NetworkPolicy_To_v1_NetworkPolicy is an autogenerated conversion function.
func Convert_networking_NetworkPolicy_To_v1_NetworkPolicy(in *networking.NetworkPolicy, out *networkingv1.NetworkPolicy, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicy_To_v1_NetworkPolicy(in, out, s)
}
func autoConvert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *networkingv1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
out.To = *(*[]networking.NetworkPolicyPeer)(unsafe.Pointer(&in.To))
return nil
}
// Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule is an autogenerated conversion function.
func Convert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *networkingv1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in, out, s)
}
func autoConvert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *networkingv1.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = *(*[]networkingv1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
out.To = *(*[]networkingv1.NetworkPolicyPeer)(unsafe.Pointer(&in.To))
return nil
}
// Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule is an autogenerated conversion function.
func Convert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *networkingv1.NetworkPolicyEgressRule, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyEgressRule_To_v1_NetworkPolicyEgressRule(in, out, s)
}
func autoConvert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *networkingv1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = *(*[]networking.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
out.From = *(*[]networking.NetworkPolicyPeer)(unsafe.Pointer(&in.From))
return nil
}
// Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule is an autogenerated conversion function.
func Convert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *networkingv1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in, out, s)
}
func autoConvert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *networkingv1.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = *(*[]networkingv1.NetworkPolicyPort)(unsafe.Pointer(&in.Ports))
out.From = *(*[]networkingv1.NetworkPolicyPeer)(unsafe.Pointer(&in.From))
return nil
}
// Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule is an autogenerated conversion function.
func Convert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *networkingv1.NetworkPolicyIngressRule, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyIngressRule_To_v1_NetworkPolicyIngressRule(in, out, s)
}
func autoConvert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in *networkingv1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.NetworkPolicy)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList is an autogenerated conversion function.
func Convert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in *networkingv1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicyList_To_networking_NetworkPolicyList(in, out, s)
}
func autoConvert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in *networking.NetworkPolicyList, out *networkingv1.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1.NetworkPolicy)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList is an autogenerated conversion function.
func Convert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in *networking.NetworkPolicyList, out *networkingv1.NetworkPolicyList, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyList_To_v1_NetworkPolicyList(in, out, s)
}
func autoConvert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *networkingv1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error {
out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.IPBlock = (*networking.IPBlock)(unsafe.Pointer(in.IPBlock))
return nil
}
// Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer is an autogenerated conversion function.
func Convert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *networkingv1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in, out, s)
}
func autoConvert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *networkingv1.NetworkPolicyPeer, s conversion.Scope) error {
out.PodSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.PodSelector))
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.IPBlock = (*networkingv1.IPBlock)(unsafe.Pointer(in.IPBlock))
return nil
}
// Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer is an autogenerated conversion function.
func Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *networkingv1.NetworkPolicyPeer, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in, out, s)
}
func autoConvert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *networkingv1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error {
out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port))
out.EndPort = (*int32)(unsafe.Pointer(in.EndPort))
return nil
}
// Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort is an autogenerated conversion function.
func Convert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *networkingv1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in, out, s)
}
func autoConvert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *networkingv1.NetworkPolicyPort, s conversion.Scope) error {
out.Protocol = (*corev1.Protocol)(unsafe.Pointer(in.Protocol))
out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port))
out.EndPort = (*int32)(unsafe.Pointer(in.EndPort))
return nil
}
// Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort is an autogenerated conversion function.
func Convert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *networkingv1.NetworkPolicyPort, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicyPort_To_v1_NetworkPolicyPort(in, out, s)
}
func autoConvert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *networkingv1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error {
out.PodSelector = in.PodSelector
out.Ingress = *(*[]networking.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress))
out.Egress = *(*[]networking.NetworkPolicyEgressRule)(unsafe.Pointer(&in.Egress))
out.PolicyTypes = *(*[]networking.PolicyType)(unsafe.Pointer(&in.PolicyTypes))
return nil
}
// Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec is an autogenerated conversion function.
func Convert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *networkingv1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error {
return autoConvert_v1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in, out, s)
}
func autoConvert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *networkingv1.NetworkPolicySpec, s conversion.Scope) error {
out.PodSelector = in.PodSelector
out.Ingress = *(*[]networkingv1.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress))
out.Egress = *(*[]networkingv1.NetworkPolicyEgressRule)(unsafe.Pointer(&in.Egress))
out.PolicyTypes = *(*[]networkingv1.PolicyType)(unsafe.Pointer(&in.PolicyTypes))
return nil
}
// Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec is an autogenerated conversion function.
func Convert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *networkingv1.NetworkPolicySpec, s conversion.Scope) error {
return autoConvert_networking_NetworkPolicySpec_To_v1_NetworkPolicySpec(in, out, s)
}
func autoConvert_v1_ParentReference_To_networking_ParentReference(in *networkingv1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1_ParentReference_To_networking_ParentReference is an autogenerated conversion function.
func Convert_v1_ParentReference_To_networking_ParentReference(in *networkingv1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
return autoConvert_v1_ParentReference_To_networking_ParentReference(in, out, s)
}
func autoConvert_networking_ParentReference_To_v1_ParentReference(in *networking.ParentReference, out *networkingv1.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_networking_ParentReference_To_v1_ParentReference is an autogenerated conversion function.
func Convert_networking_ParentReference_To_v1_ParentReference(in *networking.ParentReference, out *networkingv1.ParentReference, s conversion.Scope) error {
return autoConvert_networking_ParentReference_To_v1_ParentReference(in, out, s)
}
func autoConvert_v1_ServiceBackendPort_To_networking_ServiceBackendPort(in *networkingv1.ServiceBackendPort, out *networking.ServiceBackendPort, s conversion.Scope) error {
out.Name = in.Name
out.Number = in.Number
return nil
}
// Convert_v1_ServiceBackendPort_To_networking_ServiceBackendPort is an autogenerated conversion function.
func Convert_v1_ServiceBackendPort_To_networking_ServiceBackendPort(in *networkingv1.ServiceBackendPort, out *networking.ServiceBackendPort, s conversion.Scope) error {
return autoConvert_v1_ServiceBackendPort_To_networking_ServiceBackendPort(in, out, s)
}
func autoConvert_networking_ServiceBackendPort_To_v1_ServiceBackendPort(in *networking.ServiceBackendPort, out *networkingv1.ServiceBackendPort, s conversion.Scope) error {
out.Name = in.Name
out.Number = in.Number
return nil
}
// Convert_networking_ServiceBackendPort_To_v1_ServiceBackendPort is an autogenerated conversion function.
func Convert_networking_ServiceBackendPort_To_v1_ServiceBackendPort(in *networking.ServiceBackendPort, out *networkingv1.ServiceBackendPort, s conversion.Scope) error {
return autoConvert_networking_ServiceBackendPort_To_v1_ServiceBackendPort(in, out, s)
}
func autoConvert_v1_ServiceCIDR_To_networking_ServiceCIDR(in *networkingv1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ServiceCIDR_To_networking_ServiceCIDR is an autogenerated conversion function.
func Convert_v1_ServiceCIDR_To_networking_ServiceCIDR(in *networkingv1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
return autoConvert_v1_ServiceCIDR_To_networking_ServiceCIDR(in, out, s)
}
func autoConvert_networking_ServiceCIDR_To_v1_ServiceCIDR(in *networking.ServiceCIDR, out *networkingv1.ServiceCIDR, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_networking_ServiceCIDR_To_v1_ServiceCIDR is an autogenerated conversion function.
func Convert_networking_ServiceCIDR_To_v1_ServiceCIDR(in *networking.ServiceCIDR, out *networkingv1.ServiceCIDR, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDR_To_v1_ServiceCIDR(in, out, s)
}
func autoConvert_v1_ServiceCIDRList_To_networking_ServiceCIDRList(in *networkingv1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.ServiceCIDR)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ServiceCIDRList_To_networking_ServiceCIDRList is an autogenerated conversion function.
func Convert_v1_ServiceCIDRList_To_networking_ServiceCIDRList(in *networkingv1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
return autoConvert_v1_ServiceCIDRList_To_networking_ServiceCIDRList(in, out, s)
}
func autoConvert_networking_ServiceCIDRList_To_v1_ServiceCIDRList(in *networking.ServiceCIDRList, out *networkingv1.ServiceCIDRList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1.ServiceCIDR)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_ServiceCIDRList_To_v1_ServiceCIDRList is an autogenerated conversion function.
func Convert_networking_ServiceCIDRList_To_v1_ServiceCIDRList(in *networking.ServiceCIDRList, out *networkingv1.ServiceCIDRList, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRList_To_v1_ServiceCIDRList(in, out, s)
}
func autoConvert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *networkingv1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
return nil
}
// Convert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec is an autogenerated conversion function.
func Convert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *networkingv1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
return autoConvert_v1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in, out, s)
}
func autoConvert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *networkingv1.ServiceCIDRSpec, s conversion.Scope) error {
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
return nil
}
// Convert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec is an autogenerated conversion function.
func Convert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *networkingv1.ServiceCIDRSpec, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRSpec_To_v1_ServiceCIDRSpec(in, out, s)
}
func autoConvert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *networkingv1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus is an autogenerated conversion function.
func Convert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *networkingv1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
return autoConvert_v1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in, out, s)
}
func autoConvert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *networkingv1.ServiceCIDRStatus, s conversion.Scope) error {
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus is an autogenerated conversion function.
func Convert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *networkingv1.ServiceCIDRStatus, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRStatus_To_v1_ServiceCIDRStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
networkingv1 "k8s.io/api/networking/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering; they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&networkingv1.IngressClass{}, func(obj interface{}) { SetObjectDefaults_IngressClass(obj.(*networkingv1.IngressClass)) })
scheme.AddTypeDefaultingFunc(&networkingv1.IngressClassList{}, func(obj interface{}) { SetObjectDefaults_IngressClassList(obj.(*networkingv1.IngressClassList)) })
scheme.AddTypeDefaultingFunc(&networkingv1.NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*networkingv1.NetworkPolicy)) })
scheme.AddTypeDefaultingFunc(&networkingv1.NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*networkingv1.NetworkPolicyList)) })
return nil
}
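// Usage sketch for RegisterDefaults, assuming a freshly constructed scheme;
// once registered, defaulting is driven through the scheme rather than by
// calling the SetObjectDefaults_* functions directly:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	np := &networkingv1.NetworkPolicy{}
//	scheme.Default(np) // dispatches to SetObjectDefaults_NetworkPolicy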
func SetObjectDefaults_IngressClass(in *networkingv1.IngressClass) {
SetDefaults_IngressClass(in)
}
func SetObjectDefaults_IngressClassList(in *networkingv1.IngressClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_IngressClass(a)
}
}
func SetObjectDefaults_NetworkPolicy(in *networkingv1.NetworkPolicy) {
SetDefaults_NetworkPolicy(in)
for i := range in.Spec.Ingress {
a := &in.Spec.Ingress[i]
for j := range a.Ports {
b := &a.Ports[j]
SetDefaults_NetworkPolicyPort(b)
}
}
for i := range in.Spec.Egress {
a := &in.Spec.Egress[i]
for j := range a.Ports {
b := &a.Ports[j]
SetDefaults_NetworkPolicyPort(b)
}
}
}
func SetObjectDefaults_NetworkPolicyList(in *networkingv1.NetworkPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_NetworkPolicy(a)
}
}
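// Because the generated defaulters are covering, a single call at the object
// root defaults the whole tree: SetObjectDefaults_NetworkPolicy walks every
// ingress and egress rule and defaults each port (upstream, this sets a nil
// Protocol to TCP). Sketch with an illustrative object:
//
//	np := &networkingv1.NetworkPolicy{
//		Spec: networkingv1.NetworkPolicySpec{
//			Ingress: []networkingv1.NetworkPolicyIngressRule{
//				{Ports: []networkingv1.NetworkPolicyPort{{}}},
//			},
//		},
//	}
//	SetObjectDefaults_NetworkPolicy(np)
//	// np.Spec.Ingress[0].Ports[0].Protocol is now non-nil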
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1beta1 "k8s.io/api/networking/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/intstr"
networking "k8s.io/kubernetes/pkg/apis/networking"
)
func Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in *v1beta1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
if err := autoConvert_v1beta1_IngressBackend_To_networking_IngressBackend(in, out, s); err != nil {
return err
}
if len(in.ServiceName) > 0 || in.ServicePort.IntVal != 0 || in.ServicePort.StrVal != "" || in.ServicePort.Type == intstr.String {
out.Service = &networking.IngressServiceBackend{}
out.Service.Name = in.ServiceName
out.Service.Port.Name = in.ServicePort.StrVal
out.Service.Port.Number = in.ServicePort.IntVal
}
return nil
}
func Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking.IngressBackend, out *v1beta1.IngressBackend, s conversion.Scope) error {
if err := autoConvert_networking_IngressBackend_To_v1beta1_IngressBackend(in, out, s); err != nil {
return err
}
if in.Service != nil {
out.ServiceName = in.Service.Name
if len(in.Service.Port.Name) > 0 {
out.ServicePort = intstr.FromString(in.Service.Port.Name)
} else {
out.ServicePort = intstr.FromInt32(in.Service.Port.Number)
}
}
return nil
}
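// These two hand-written converters bridge the flat v1beta1 ServiceName and
// ServicePort fields and the structured internal Service backend: a string
// port maps to Service.Port.Name, an integer port to Service.Port.Number, and
// the reverse conversion picks whichever side of the pair is set. A sketch
// with illustrative values (the scope argument is unused here):
//
//	in := &v1beta1.IngressBackend{
//		ServiceName: "web",
//		ServicePort: intstr.FromInt32(8080),
//	}
//	out := &networking.IngressBackend{}
//	_ = Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in, out, nil)
//	// out.Service.Name == "web" && out.Service.Port.Number == 8080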
func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *v1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in, out, s); err != nil {
return err
}
if in.Backend != nil {
out.DefaultBackend = &networking.IngressBackend{}
if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in.Backend, out.DefaultBackend, s); err != nil {
return err
}
}
return nil
}
func Convert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *v1beta1.IngressSpec, s conversion.Scope) error {
if err := autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in, out, s); err != nil {
return err
}
if in.DefaultBackend != nil {
out.Backend = &v1beta1.IngressBackend{}
if err := Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in.DefaultBackend, out.Backend, s); err != nil {
return err
}
}
return nil
}
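// The spec converters apply the same idea one level up: the v1beta1 Backend
// field becomes the internal DefaultBackend (and back), with the field-level
// work delegated to the IngressBackend converters above. Sketch:
//
//	spec := &v1beta1.IngressSpec{
//		Backend: &v1beta1.IngressBackend{ServiceName: "web"},
//	}
//	out := &networking.IngressSpec{}
//	_ = Convert_v1beta1_IngressSpec_To_networking_IngressSpec(spec, out, nil)
//	// out.DefaultBackend.Service.Name == "web"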
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_HTTPIngressPath(obj *networkingv1beta1.HTTPIngressPath) {
var defaultPathType = networkingv1beta1.PathTypeImplementationSpecific
if obj.PathType == nil {
obj.PathType = &defaultPathType
}
}
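// A path with no PathType is defaulted to ImplementationSpecific, which leaves
// interpretation of the path match up to the controller:
//
//	p := &networkingv1beta1.HTTPIngressPath{Path: "/"}
//	SetDefaults_HTTPIngressPath(p)
//	// *p.PathType == networkingv1beta1.PathTypeImplementationSpecific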
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "networking.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
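// Resource qualifies a bare resource name with this package's group, e.g.:
//
//	gr := Resource("ingresses")
//	// gr == schema.GroupResource{Group: "networking.k8s.io", Resource: "ingresses"}
//	// gr.String() == "ingresses.networking.k8s.io"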
var (
localSchemeBuilder = &networkingv1beta1.SchemeBuilder
// AddToScheme adds the types of this group into the given scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
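// Callers typically wire everything up through AddToScheme, which runs both
// the manually registered defaulting functions above and the generated
// registrations contributed elsewhere in this package. A minimal sketch,
// assuming k8s.io/apimachinery/pkg/runtime is imported:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}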
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
networking "k8s.io/kubernetes/pkg/apis/networking"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.HTTPIngressPath)(nil), (*networking.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(a.(*networkingv1beta1.HTTPIngressPath), b.(*networking.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressPath)(nil), (*networkingv1beta1.HTTPIngressPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(a.(*networking.HTTPIngressPath), b.(*networkingv1beta1.HTTPIngressPath), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.HTTPIngressRuleValue)(nil), (*networking.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(a.(*networkingv1beta1.HTTPIngressRuleValue), b.(*networking.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.HTTPIngressRuleValue)(nil), (*networkingv1beta1.HTTPIngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(a.(*networking.HTTPIngressRuleValue), b.(*networkingv1beta1.HTTPIngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IPAddress)(nil), (*networking.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IPAddress_To_networking_IPAddress(a.(*networkingv1beta1.IPAddress), b.(*networking.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddress)(nil), (*networkingv1beta1.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddress_To_v1beta1_IPAddress(a.(*networking.IPAddress), b.(*networkingv1beta1.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IPAddressList)(nil), (*networking.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IPAddressList_To_networking_IPAddressList(a.(*networkingv1beta1.IPAddressList), b.(*networking.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressList)(nil), (*networkingv1beta1.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressList_To_v1beta1_IPAddressList(a.(*networking.IPAddressList), b.(*networkingv1beta1.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IPAddressSpec)(nil), (*networking.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec(a.(*networkingv1beta1.IPAddressSpec), b.(*networking.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressSpec)(nil), (*networkingv1beta1.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec(a.(*networking.IPAddressSpec), b.(*networkingv1beta1.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.Ingress)(nil), (*networking.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Ingress_To_networking_Ingress(a.(*networkingv1beta1.Ingress), b.(*networking.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.Ingress)(nil), (*networkingv1beta1.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_Ingress_To_v1beta1_Ingress(a.(*networking.Ingress), b.(*networkingv1beta1.Ingress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressClass)(nil), (*networking.IngressClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressClass_To_networking_IngressClass(a.(*networkingv1beta1.IngressClass), b.(*networking.IngressClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClass)(nil), (*networkingv1beta1.IngressClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClass_To_v1beta1_IngressClass(a.(*networking.IngressClass), b.(*networkingv1beta1.IngressClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressClassList)(nil), (*networking.IngressClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressClassList_To_networking_IngressClassList(a.(*networkingv1beta1.IngressClassList), b.(*networking.IngressClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassList)(nil), (*networkingv1beta1.IngressClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassList_To_v1beta1_IngressClassList(a.(*networking.IngressClassList), b.(*networkingv1beta1.IngressClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressClassParametersReference)(nil), (*networking.IngressClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressClassParametersReference_To_networking_IngressClassParametersReference(a.(*networkingv1beta1.IngressClassParametersReference), b.(*networking.IngressClassParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassParametersReference)(nil), (*networkingv1beta1.IngressClassParametersReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassParametersReference_To_v1beta1_IngressClassParametersReference(a.(*networking.IngressClassParametersReference), b.(*networkingv1beta1.IngressClassParametersReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressClassSpec)(nil), (*networking.IngressClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec(a.(*networkingv1beta1.IngressClassSpec), b.(*networking.IngressClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressClassSpec)(nil), (*networkingv1beta1.IngressClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec(a.(*networking.IngressClassSpec), b.(*networkingv1beta1.IngressClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressList)(nil), (*networking.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressList_To_networking_IngressList(a.(*networkingv1beta1.IngressList), b.(*networking.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressList)(nil), (*networkingv1beta1.IngressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressList_To_v1beta1_IngressList(a.(*networking.IngressList), b.(*networkingv1beta1.IngressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressLoadBalancerIngress)(nil), (*networking.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(a.(*networkingv1beta1.IngressLoadBalancerIngress), b.(*networking.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerIngress)(nil), (*networkingv1beta1.IngressLoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(a.(*networking.IngressLoadBalancerIngress), b.(*networkingv1beta1.IngressLoadBalancerIngress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressLoadBalancerStatus)(nil), (*networking.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(a.(*networkingv1beta1.IngressLoadBalancerStatus), b.(*networking.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressLoadBalancerStatus)(nil), (*networkingv1beta1.IngressLoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(a.(*networking.IngressLoadBalancerStatus), b.(*networkingv1beta1.IngressLoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressPortStatus)(nil), (*networking.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(a.(*networkingv1beta1.IngressPortStatus), b.(*networking.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressPortStatus)(nil), (*networkingv1beta1.IngressPortStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(a.(*networking.IngressPortStatus), b.(*networkingv1beta1.IngressPortStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressRule)(nil), (*networking.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressRule_To_networking_IngressRule(a.(*networkingv1beta1.IngressRule), b.(*networking.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRule)(nil), (*networkingv1beta1.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRule_To_v1beta1_IngressRule(a.(*networking.IngressRule), b.(*networkingv1beta1.IngressRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressRuleValue)(nil), (*networking.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(a.(*networkingv1beta1.IngressRuleValue), b.(*networking.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressRuleValue)(nil), (*networkingv1beta1.IngressRuleValue)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(a.(*networking.IngressRuleValue), b.(*networkingv1beta1.IngressRuleValue), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressStatus)(nil), (*networking.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressStatus_To_networking_IngressStatus(a.(*networkingv1beta1.IngressStatus), b.(*networking.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressStatus)(nil), (*networkingv1beta1.IngressStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressStatus_To_v1beta1_IngressStatus(a.(*networking.IngressStatus), b.(*networkingv1beta1.IngressStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.IngressTLS)(nil), (*networking.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressTLS_To_networking_IngressTLS(a.(*networkingv1beta1.IngressTLS), b.(*networking.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IngressTLS)(nil), (*networkingv1beta1.IngressTLS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressTLS_To_v1beta1_IngressTLS(a.(*networking.IngressTLS), b.(*networkingv1beta1.IngressTLS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.ParentReference)(nil), (*networking.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ParentReference_To_networking_ParentReference(a.(*networkingv1beta1.ParentReference), b.(*networking.ParentReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ParentReference)(nil), (*networkingv1beta1.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ParentReference_To_v1beta1_ParentReference(a.(*networking.ParentReference), b.(*networkingv1beta1.ParentReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.ServiceCIDR)(nil), (*networking.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceCIDR_To_networking_ServiceCIDR(a.(*networkingv1beta1.ServiceCIDR), b.(*networking.ServiceCIDR), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDR)(nil), (*networkingv1beta1.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDR_To_v1beta1_ServiceCIDR(a.(*networking.ServiceCIDR), b.(*networkingv1beta1.ServiceCIDR), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.ServiceCIDRList)(nil), (*networking.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceCIDRList_To_networking_ServiceCIDRList(a.(*networkingv1beta1.ServiceCIDRList), b.(*networking.ServiceCIDRList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRList)(nil), (*networkingv1beta1.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRList_To_v1beta1_ServiceCIDRList(a.(*networking.ServiceCIDRList), b.(*networkingv1beta1.ServiceCIDRList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.ServiceCIDRSpec)(nil), (*networking.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(a.(*networkingv1beta1.ServiceCIDRSpec), b.(*networking.ServiceCIDRSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRSpec)(nil), (*networkingv1beta1.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec(a.(*networking.ServiceCIDRSpec), b.(*networkingv1beta1.ServiceCIDRSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networkingv1beta1.ServiceCIDRStatus)(nil), (*networking.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(a.(*networkingv1beta1.ServiceCIDRStatus), b.(*networking.ServiceCIDRStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRStatus)(nil), (*networkingv1beta1.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus(a.(*networking.ServiceCIDRStatus), b.(*networkingv1beta1.ServiceCIDRStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.IngressBackend)(nil), (*networkingv1beta1.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressBackend_To_v1beta1_IngressBackend(a.(*networking.IngressBackend), b.(*networkingv1beta1.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networking.IngressSpec)(nil), (*networkingv1beta1.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IngressSpec_To_v1beta1_IngressSpec(a.(*networking.IngressSpec), b.(*networkingv1beta1.IngressSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networkingv1beta1.IngressBackend)(nil), (*networking.IngressBackend)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressBackend_To_networking_IngressBackend(a.(*networkingv1beta1.IngressBackend), b.(*networking.IngressBackend), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*networkingv1beta1.IngressSpec)(nil), (*networking.IngressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_IngressSpec_To_networking_IngressSpec(a.(*networkingv1beta1.IngressSpec), b.(*networking.IngressSpec), scope)
}); err != nil {
return err
}
return nil
}
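// Usage sketch for RegisterConversions: once the functions are registered,
// callers convert through the scheme instead of invoking the converters
// directly (the empty objects are illustrative):
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	in := &networkingv1beta1.Ingress{}
//	out := &networking.Ingress{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		panic(err)
//	}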
func autoConvert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in *networkingv1beta1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*networking.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath is an autogenerated conversion function.
func Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in *networkingv1beta1.HTTPIngressPath, out *networking.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(in, out, s)
}
func autoConvert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *networking.HTTPIngressPath, out *networkingv1beta1.HTTPIngressPath, s conversion.Scope) error {
out.Path = in.Path
out.PathType = (*networkingv1beta1.PathType)(unsafe.Pointer(in.PathType))
if err := Convert_networking_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
return err
}
return nil
}
// Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath is an autogenerated conversion function.
func Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *networking.HTTPIngressPath, out *networkingv1beta1.HTTPIngressPath, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s)
}
func autoConvert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *networkingv1beta1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]networking.HTTPIngressPath, len(*in))
for i := range *in {
if err := Convert_v1beta1_HTTPIngressPath_To_networking_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Paths = nil
}
return nil
}
// Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in *networkingv1beta1.HTTPIngressRuleValue, out *networking.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(in, out, s)
}
func autoConvert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *networkingv1beta1.HTTPIngressRuleValue, s conversion.Scope) error {
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]networkingv1beta1.HTTPIngressPath, len(*in))
for i := range *in {
if err := Convert_networking_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Paths = nil
}
return nil
}
// Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue is an autogenerated conversion function.
func Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *networking.HTTPIngressRuleValue, out *networkingv1beta1.HTTPIngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s)
}
func autoConvert_v1beta1_IPAddress_To_networking_IPAddress(in *networkingv1beta1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IPAddress_To_networking_IPAddress is an autogenerated conversion function.
func Convert_v1beta1_IPAddress_To_networking_IPAddress(in *networkingv1beta1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
return autoConvert_v1beta1_IPAddress_To_networking_IPAddress(in, out, s)
}
func autoConvert_networking_IPAddress_To_v1beta1_IPAddress(in *networking.IPAddress, out *networkingv1beta1.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_IPAddress_To_v1beta1_IPAddress is an autogenerated conversion function.
func Convert_networking_IPAddress_To_v1beta1_IPAddress(in *networking.IPAddress, out *networkingv1beta1.IPAddress, s conversion.Scope) error {
return autoConvert_networking_IPAddress_To_v1beta1_IPAddress(in, out, s)
}
func autoConvert_v1beta1_IPAddressList_To_networking_IPAddressList(in *networkingv1beta1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_IPAddressList_To_networking_IPAddressList is an autogenerated conversion function.
func Convert_v1beta1_IPAddressList_To_networking_IPAddressList(in *networkingv1beta1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
return autoConvert_v1beta1_IPAddressList_To_networking_IPAddressList(in, out, s)
}
func autoConvert_networking_IPAddressList_To_v1beta1_IPAddressList(in *networking.IPAddressList, out *networkingv1beta1.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1beta1.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_IPAddressList_To_v1beta1_IPAddressList is an autogenerated conversion function.
func Convert_networking_IPAddressList_To_v1beta1_IPAddressList(in *networking.IPAddressList, out *networkingv1beta1.IPAddressList, s conversion.Scope) error {
return autoConvert_networking_IPAddressList_To_v1beta1_IPAddressList(in, out, s)
}
func autoConvert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec(in *networkingv1beta1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*networking.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}
// Convert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec is an autogenerated conversion function.
func Convert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec(in *networkingv1beta1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
return autoConvert_v1beta1_IPAddressSpec_To_networking_IPAddressSpec(in, out, s)
}
func autoConvert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec(in *networking.IPAddressSpec, out *networkingv1beta1.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*networkingv1beta1.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}
// Convert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec is an autogenerated conversion function.
func Convert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec(in *networking.IPAddressSpec, out *networkingv1beta1.IPAddressSpec, s conversion.Scope) error {
return autoConvert_networking_IPAddressSpec_To_v1beta1_IPAddressSpec(in, out, s)
}
func autoConvert_v1beta1_Ingress_To_networking_Ingress(in *networkingv1beta1.Ingress, out *networking.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_IngressSpec_To_networking_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_IngressStatus_To_networking_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_Ingress_To_networking_Ingress is an autogenerated conversion function.
func Convert_v1beta1_Ingress_To_networking_Ingress(in *networkingv1beta1.Ingress, out *networking.Ingress, s conversion.Scope) error {
return autoConvert_v1beta1_Ingress_To_networking_Ingress(in, out, s)
}
func autoConvert_networking_Ingress_To_v1beta1_Ingress(in *networking.Ingress, out *networkingv1beta1.Ingress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_networking_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_networking_Ingress_To_v1beta1_Ingress is an autogenerated conversion function.
func Convert_networking_Ingress_To_v1beta1_Ingress(in *networking.Ingress, out *networkingv1beta1.Ingress, s conversion.Scope) error {
return autoConvert_networking_Ingress_To_v1beta1_Ingress(in, out, s)
}
func autoConvert_v1beta1_IngressBackend_To_networking_IngressBackend(in *networkingv1beta1.IngressBackend, out *networking.IngressBackend, s conversion.Scope) error {
// WARNING: in.ServiceName requires manual conversion: does not exist in peer-type
// WARNING: in.ServicePort requires manual conversion: does not exist in peer-type
out.Resource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
func autoConvert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking.IngressBackend, out *networkingv1beta1.IngressBackend, s conversion.Scope) error {
// WARNING: in.Service requires manual conversion: does not exist in peer-type
out.Resource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.Resource))
return nil
}
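// The fields flagged by the WARNING comments above are exactly the ones the
// hand-written Convert_v1beta1_IngressBackend_To_networking_IngressBackend and
// Convert_networking_IngressBackend_To_v1beta1_IngressBackend functions earlier
// in this package restore; because those wrappers exist, conversion-gen emits
// only the autoConvert halves here and no public wrappers.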
func autoConvert_v1beta1_IngressClass_To_networking_IngressClass(in *networkingv1beta1.IngressClass, out *networking.IngressClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IngressClass_To_networking_IngressClass is an autogenerated conversion function.
func Convert_v1beta1_IngressClass_To_networking_IngressClass(in *networkingv1beta1.IngressClass, out *networking.IngressClass, s conversion.Scope) error {
return autoConvert_v1beta1_IngressClass_To_networking_IngressClass(in, out, s)
}
func autoConvert_networking_IngressClass_To_v1beta1_IngressClass(in *networking.IngressClass, out *networkingv1beta1.IngressClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressClass_To_v1beta1_IngressClass is an autogenerated conversion function.
func Convert_networking_IngressClass_To_v1beta1_IngressClass(in *networking.IngressClass, out *networkingv1beta1.IngressClass, s conversion.Scope) error {
return autoConvert_networking_IngressClass_To_v1beta1_IngressClass(in, out, s)
}
func autoConvert_v1beta1_IngressClassList_To_networking_IngressClassList(in *networkingv1beta1.IngressClassList, out *networking.IngressClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.IngressClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_IngressClassList_To_networking_IngressClassList is an autogenerated conversion function.
func Convert_v1beta1_IngressClassList_To_networking_IngressClassList(in *networkingv1beta1.IngressClassList, out *networking.IngressClassList, s conversion.Scope) error {
return autoConvert_v1beta1_IngressClassList_To_networking_IngressClassList(in, out, s)
}
func autoConvert_networking_IngressClassList_To_v1beta1_IngressClassList(in *networking.IngressClassList, out *networkingv1beta1.IngressClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1beta1.IngressClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_IngressClassList_To_v1beta1_IngressClassList is an autogenerated conversion function.
func Convert_networking_IngressClassList_To_v1beta1_IngressClassList(in *networking.IngressClassList, out *networkingv1beta1.IngressClassList, s conversion.Scope) error {
return autoConvert_networking_IngressClassList_To_v1beta1_IngressClassList(in, out, s)
}
func autoConvert_v1beta1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in *networkingv1beta1.IngressClassParametersReference, out *networking.IngressClassParametersReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Scope = (*string)(unsafe.Pointer(in.Scope))
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_v1beta1_IngressClassParametersReference_To_networking_IngressClassParametersReference is an autogenerated conversion function.
func Convert_v1beta1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in *networkingv1beta1.IngressClassParametersReference, out *networking.IngressClassParametersReference, s conversion.Scope) error {
return autoConvert_v1beta1_IngressClassParametersReference_To_networking_IngressClassParametersReference(in, out, s)
}
func autoConvert_networking_IngressClassParametersReference_To_v1beta1_IngressClassParametersReference(in *networking.IngressClassParametersReference, out *networkingv1beta1.IngressClassParametersReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Scope = (*string)(unsafe.Pointer(in.Scope))
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_networking_IngressClassParametersReference_To_v1beta1_IngressClassParametersReference is an autogenerated conversion function.
func Convert_networking_IngressClassParametersReference_To_v1beta1_IngressClassParametersReference(in *networking.IngressClassParametersReference, out *networkingv1beta1.IngressClassParametersReference, s conversion.Scope) error {
return autoConvert_networking_IngressClassParametersReference_To_v1beta1_IngressClassParametersReference(in, out, s)
}
func autoConvert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec(in *networkingv1beta1.IngressClassSpec, out *networking.IngressClassSpec, s conversion.Scope) error {
out.Controller = in.Controller
out.Parameters = (*networking.IngressClassParametersReference)(unsafe.Pointer(in.Parameters))
return nil
}
// Convert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec is an autogenerated conversion function.
func Convert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec(in *networkingv1beta1.IngressClassSpec, out *networking.IngressClassSpec, s conversion.Scope) error {
return autoConvert_v1beta1_IngressClassSpec_To_networking_IngressClassSpec(in, out, s)
}
func autoConvert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec(in *networking.IngressClassSpec, out *networkingv1beta1.IngressClassSpec, s conversion.Scope) error {
out.Controller = in.Controller
out.Parameters = (*networkingv1beta1.IngressClassParametersReference)(unsafe.Pointer(in.Parameters))
return nil
}
// Convert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec is an autogenerated conversion function.
func Convert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec(in *networking.IngressClassSpec, out *networkingv1beta1.IngressClassSpec, s conversion.Scope) error {
return autoConvert_networking_IngressClassSpec_To_v1beta1_IngressClassSpec(in, out, s)
}
func autoConvert_v1beta1_IngressList_To_networking_IngressList(in *networkingv1beta1.IngressList, out *networking.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]networking.Ingress, len(*in))
for i := range *in {
if err := Convert_v1beta1_Ingress_To_networking_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_IngressList_To_networking_IngressList is an autogenerated conversion function.
func Convert_v1beta1_IngressList_To_networking_IngressList(in *networkingv1beta1.IngressList, out *networking.IngressList, s conversion.Scope) error {
return autoConvert_v1beta1_IngressList_To_networking_IngressList(in, out, s)
}
func autoConvert_networking_IngressList_To_v1beta1_IngressList(in *networking.IngressList, out *networkingv1beta1.IngressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]networkingv1beta1.Ingress, len(*in))
for i := range *in {
if err := Convert_networking_Ingress_To_v1beta1_Ingress(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_networking_IngressList_To_v1beta1_IngressList is an autogenerated conversion function.
func Convert_networking_IngressList_To_v1beta1_IngressList(in *networking.IngressList, out *networkingv1beta1.IngressList, s conversion.Scope) error {
return autoConvert_networking_IngressList_To_v1beta1_IngressList(in, out, s)
}
func autoConvert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *networkingv1beta1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]networking.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in *networkingv1beta1.IngressLoadBalancerIngress, out *networking.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_v1beta1_IngressLoadBalancerIngress_To_networking_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *networkingv1beta1.IngressLoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.Ports = *(*[]networkingv1beta1.IngressPortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in *networking.IngressLoadBalancerIngress, out *networkingv1beta1.IngressLoadBalancerIngress, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerIngress_To_v1beta1_IngressLoadBalancerIngress(in, out, s)
}
func autoConvert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *networkingv1beta1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]networking.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in *networkingv1beta1.IngressLoadBalancerStatus, out *networking.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *networkingv1beta1.IngressLoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]networkingv1beta1.IngressLoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus is an autogenerated conversion function.
func Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in *networking.IngressLoadBalancerStatus, out *networkingv1beta1.IngressLoadBalancerStatus, s conversion.Scope) error {
return autoConvert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(in, out, s)
}
func autoConvert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in *networkingv1beta1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = core.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in *networkingv1beta1.IngressPortStatus, out *networking.IngressPortStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressPortStatus_To_networking_IngressPortStatus(in, out, s)
}
func autoConvert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in *networking.IngressPortStatus, out *networkingv1beta1.IngressPortStatus, s conversion.Scope) error {
out.Port = in.Port
out.Protocol = v1.Protocol(in.Protocol)
out.Error = (*string)(unsafe.Pointer(in.Error))
return nil
}
// Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus is an autogenerated conversion function.
func Convert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in *networking.IngressPortStatus, out *networkingv1beta1.IngressPortStatus, s conversion.Scope) error {
return autoConvert_networking_IngressPortStatus_To_v1beta1_IngressPortStatus(in, out, s)
}
func autoConvert_v1beta1_IngressRule_To_networking_IngressRule(in *networkingv1beta1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IngressRule_To_networking_IngressRule is an autogenerated conversion function.
func Convert_v1beta1_IngressRule_To_networking_IngressRule(in *networkingv1beta1.IngressRule, out *networking.IngressRule, s conversion.Scope) error {
return autoConvert_v1beta1_IngressRule_To_networking_IngressRule(in, out, s)
}
func autoConvert_networking_IngressRule_To_v1beta1_IngressRule(in *networking.IngressRule, out *networkingv1beta1.IngressRule, s conversion.Scope) error {
out.Host = in.Host
if err := Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressRule_To_v1beta1_IngressRule is an autogenerated conversion function.
func Convert_networking_IngressRule_To_v1beta1_IngressRule(in *networking.IngressRule, out *networkingv1beta1.IngressRule, s conversion.Scope) error {
return autoConvert_networking_IngressRule_To_v1beta1_IngressRule(in, out, s)
}
func autoConvert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in *networkingv1beta1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(networking.HTTPIngressRuleValue)
if err := Convert_v1beta1_HTTPIngressRuleValue_To_networking_HTTPIngressRuleValue(*in, *out, s); err != nil {
return err
}
} else {
out.HTTP = nil
}
return nil
}
// Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue is an autogenerated conversion function.
func Convert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in *networkingv1beta1.IngressRuleValue, out *networking.IngressRuleValue, s conversion.Scope) error {
return autoConvert_v1beta1_IngressRuleValue_To_networking_IngressRuleValue(in, out, s)
}
func autoConvert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in *networking.IngressRuleValue, out *networkingv1beta1.IngressRuleValue, s conversion.Scope) error {
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(networkingv1beta1.HTTPIngressRuleValue)
if err := Convert_networking_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(*in, *out, s); err != nil {
return err
}
} else {
out.HTTP = nil
}
return nil
}
// Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue is an autogenerated conversion function.
func Convert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in *networking.IngressRuleValue, out *networkingv1beta1.IngressRuleValue, s conversion.Scope) error {
return autoConvert_networking_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s)
}
func autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in *networkingv1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
// WARNING: in.Backend requires manual conversion: does not exist in peer-type
out.TLS = *(*[]networking.IngressTLS)(unsafe.Pointer(&in.TLS))
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]networking.IngressRule, len(*in))
for i := range *in {
if err := Convert_v1beta1_IngressRule_To_networking_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
return nil
}
func autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *networkingv1beta1.IngressSpec, s conversion.Scope) error {
out.IngressClassName = (*string)(unsafe.Pointer(in.IngressClassName))
// WARNING: in.DefaultBackend requires manual conversion: does not exist in peer-type
out.TLS = *(*[]networkingv1beta1.IngressTLS)(unsafe.Pointer(&in.TLS))
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]networkingv1beta1.IngressRule, len(*in))
for i := range *in {
if err := Convert_networking_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Rules = nil
}
return nil
}
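// No public Convert_* wrappers are emitted for IngressSpec because of the
// WARNING lines above: v1beta1 names the field Backend while the internal type
// names it DefaultBackend, so conversion-gen leaves the public functions to
// hand-written code elsewhere in the package. A minimal sketch of what the
// v1beta1-to-internal wrapper plausibly looks like (illustrative only; the
// real one lives in the package's non-generated conversion file):
func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *networkingv1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error {
	// Let the generated function convert every field it could match.
	if err := autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in, out, s); err != nil {
		return err
	}
	// Bridge the renamed field by hand: Backend (v1beta1) -> DefaultBackend (internal).
	if in.Backend != nil {
		out.DefaultBackend = &networking.IngressBackend{}
		if err := Convert_v1beta1_IngressBackend_To_networking_IngressBackend(in.Backend, out.DefaultBackend, s); err != nil {
			return err
		}
	}
	return nil
}
// The internal-to-v1beta1 wrapper mirrors this, copying DefaultBackend back
// into Backend.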
func autoConvert_v1beta1_IngressStatus_To_networking_IngressStatus(in *networkingv1beta1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
if err := Convert_v1beta1_IngressLoadBalancerStatus_To_networking_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_IngressStatus_To_networking_IngressStatus is an autogenerated conversion function.
func Convert_v1beta1_IngressStatus_To_networking_IngressStatus(in *networkingv1beta1.IngressStatus, out *networking.IngressStatus, s conversion.Scope) error {
return autoConvert_v1beta1_IngressStatus_To_networking_IngressStatus(in, out, s)
}
func autoConvert_networking_IngressStatus_To_v1beta1_IngressStatus(in *networking.IngressStatus, out *networkingv1beta1.IngressStatus, s conversion.Scope) error {
if err := Convert_networking_IngressLoadBalancerStatus_To_v1beta1_IngressLoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
// Convert_networking_IngressStatus_To_v1beta1_IngressStatus is an autogenerated conversion function.
func Convert_networking_IngressStatus_To_v1beta1_IngressStatus(in *networking.IngressStatus, out *networkingv1beta1.IngressStatus, s conversion.Scope) error {
return autoConvert_networking_IngressStatus_To_v1beta1_IngressStatus(in, out, s)
}
func autoConvert_v1beta1_IngressTLS_To_networking_IngressTLS(in *networkingv1beta1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_v1beta1_IngressTLS_To_networking_IngressTLS is an autogenerated conversion function.
func Convert_v1beta1_IngressTLS_To_networking_IngressTLS(in *networkingv1beta1.IngressTLS, out *networking.IngressTLS, s conversion.Scope) error {
return autoConvert_v1beta1_IngressTLS_To_networking_IngressTLS(in, out, s)
}
func autoConvert_networking_IngressTLS_To_v1beta1_IngressTLS(in *networking.IngressTLS, out *networkingv1beta1.IngressTLS, s conversion.Scope) error {
out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts))
out.SecretName = in.SecretName
return nil
}
// Convert_networking_IngressTLS_To_v1beta1_IngressTLS is an autogenerated conversion function.
func Convert_networking_IngressTLS_To_v1beta1_IngressTLS(in *networking.IngressTLS, out *networkingv1beta1.IngressTLS, s conversion.Scope) error {
return autoConvert_networking_IngressTLS_To_v1beta1_IngressTLS(in, out, s)
}
func autoConvert_v1beta1_ParentReference_To_networking_ParentReference(in *networkingv1beta1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1beta1_ParentReference_To_networking_ParentReference is an autogenerated conversion function.
func Convert_v1beta1_ParentReference_To_networking_ParentReference(in *networkingv1beta1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
return autoConvert_v1beta1_ParentReference_To_networking_ParentReference(in, out, s)
}
func autoConvert_networking_ParentReference_To_v1beta1_ParentReference(in *networking.ParentReference, out *networkingv1beta1.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_networking_ParentReference_To_v1beta1_ParentReference is an autogenerated conversion function.
func Convert_networking_ParentReference_To_v1beta1_ParentReference(in *networking.ParentReference, out *networkingv1beta1.ParentReference, s conversion.Scope) error {
return autoConvert_networking_ParentReference_To_v1beta1_ParentReference(in, out, s)
}
func autoConvert_v1beta1_ServiceCIDR_To_networking_ServiceCIDR(in *networkingv1beta1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ServiceCIDR_To_networking_ServiceCIDR is an autogenerated conversion function.
func Convert_v1beta1_ServiceCIDR_To_networking_ServiceCIDR(in *networkingv1beta1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceCIDR_To_networking_ServiceCIDR(in, out, s)
}
func autoConvert_networking_ServiceCIDR_To_v1beta1_ServiceCIDR(in *networking.ServiceCIDR, out *networkingv1beta1.ServiceCIDR, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_networking_ServiceCIDR_To_v1beta1_ServiceCIDR is an autogenerated conversion function.
func Convert_networking_ServiceCIDR_To_v1beta1_ServiceCIDR(in *networking.ServiceCIDR, out *networkingv1beta1.ServiceCIDR, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDR_To_v1beta1_ServiceCIDR(in, out, s)
}
func autoConvert_v1beta1_ServiceCIDRList_To_networking_ServiceCIDRList(in *networkingv1beta1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.ServiceCIDR)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_ServiceCIDRList_To_networking_ServiceCIDRList is an autogenerated conversion function.
func Convert_v1beta1_ServiceCIDRList_To_networking_ServiceCIDRList(in *networkingv1beta1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceCIDRList_To_networking_ServiceCIDRList(in, out, s)
}
func autoConvert_networking_ServiceCIDRList_To_v1beta1_ServiceCIDRList(in *networking.ServiceCIDRList, out *networkingv1beta1.ServiceCIDRList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networkingv1beta1.ServiceCIDR)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_networking_ServiceCIDRList_To_v1beta1_ServiceCIDRList is an autogenerated conversion function.
func Convert_networking_ServiceCIDRList_To_v1beta1_ServiceCIDRList(in *networking.ServiceCIDRList, out *networkingv1beta1.ServiceCIDRList, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRList_To_v1beta1_ServiceCIDRList(in, out, s)
}
func autoConvert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *networkingv1beta1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
return nil
}
// Convert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec is an autogenerated conversion function.
func Convert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *networkingv1beta1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in, out, s)
}
func autoConvert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *networkingv1beta1.ServiceCIDRSpec, s conversion.Scope) error {
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
return nil
}
// Convert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec is an autogenerated conversion function.
func Convert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *networkingv1beta1.ServiceCIDRSpec, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRSpec_To_v1beta1_ServiceCIDRSpec(in, out, s)
}
func autoConvert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *networkingv1beta1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus is an autogenerated conversion function.
func Convert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *networkingv1beta1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
return autoConvert_v1beta1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in, out, s)
}
func autoConvert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *networkingv1beta1.ServiceCIDRStatus, s conversion.Scope) error {
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus is an autogenerated conversion function.
func Convert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *networkingv1beta1.ServiceCIDRStatus, s conversion.Scope) error {
return autoConvert_networking_ServiceCIDRStatus_To_v1beta1_ServiceCIDRStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
networkingv1beta1 "k8s.io/api/networking/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&networkingv1beta1.Ingress{}, func(obj interface{}) { SetObjectDefaults_Ingress(obj.(*networkingv1beta1.Ingress)) })
scheme.AddTypeDefaultingFunc(&networkingv1beta1.IngressList{}, func(obj interface{}) { SetObjectDefaults_IngressList(obj.(*networkingv1beta1.IngressList)) })
return nil
}
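// Usage sketch (hypothetical caller, not part of this generated file): once
// the defaulters are registered, scheme.Default applies them recursively.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	ing := &networkingv1beta1.Ingress{}
//	scheme.Default(ing) // walks Spec.Rules and defaults each HTTP path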
func SetObjectDefaults_Ingress(in *networkingv1beta1.Ingress) {
for i := range in.Spec.Rules {
a := &in.Spec.Rules[i]
if a.IngressRuleValue.HTTP != nil {
for j := range a.IngressRuleValue.HTTP.Paths {
b := &a.IngressRuleValue.HTTP.Paths[j]
SetDefaults_HTTPIngressPath(b)
}
}
}
}
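// SetDefaults_HTTPIngressPath, called above, is hand-written in this package's
// defaults file rather than generated here. As an assumption about its shape
// (a sketch, not a verbatim copy), it defaults a nil PathType:
//
//	func SetDefaults_HTTPIngressPath(obj *networkingv1beta1.HTTPIngressPath) {
//		if obj.PathType == nil {
//			t := networkingv1beta1.PathTypeImplementationSpecific // assumed default
//			obj.PathType = &t
//		}
//	}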
func SetObjectDefaults_IngressList(in *networkingv1beta1.IngressList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Ingress(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package networking
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
*out = *in
if in.PathType != nil {
in, out := &in.PathType, &out.PathType
*out = new(PathType)
**out = **in
}
in.Backend.DeepCopyInto(&out.Backend)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
if in == nil {
return nil
}
out := new(HTTPIngressPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
*out = *in
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]HTTPIngressPath, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
if in == nil {
return nil
}
out := new(HTTPIngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
func (in *IPAddress) DeepCopy() *IPAddress {
if in == nil {
return nil
}
out := new(IPAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
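// Pattern note: deepcopy-gen emits up to three methods per type. DeepCopyInto
// performs the actual recursive copy, DeepCopy allocates a fresh value and
// delegates to DeepCopyInto, and DeepCopyObject, generated only for top-level
// API objects such as IPAddress, wraps DeepCopy to satisfy runtime.Object.
// Usage sketch (hypothetical caller):
//
//	orig := &IPAddress{}
//	clone := orig.DeepCopy()          // mutating clone leaves orig intact
//	var obj runtime.Object = orig.DeepCopyObject()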
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IPAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
func (in *IPAddressList) DeepCopy() *IPAddressList {
if in == nil {
return nil
}
out := new(IPAddressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
*out = *in
if in.ParentRef != nil {
in, out := &in.ParentRef, &out.ParentRef
*out = new(ParentReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
if in == nil {
return nil
}
out := new(IPAddressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
if in.Except != nil {
in, out := &in.Except, &out.Except
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
func (in *IPBlock) DeepCopy() *IPBlock {
if in == nil {
return nil
}
out := new(IPBlock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
func (in *Ingress) DeepCopy() *Ingress {
if in == nil {
return nil
}
out := new(Ingress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(IngressServiceBackend)
**out = **in
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(core.TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
func (in *IngressBackend) DeepCopy() *IngressBackend {
if in == nil {
return nil
}
out := new(IngressBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClass) DeepCopyInto(out *IngressClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass.
func (in *IngressClass) DeepCopy() *IngressClass {
if in == nil {
return nil
}
out := new(IngressClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassList) DeepCopyInto(out *IngressClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IngressClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList.
func (in *IngressClassList) DeepCopy() *IngressClassList {
if in == nil {
return nil
}
out := new(IngressClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassParametersReference) DeepCopyInto(out *IngressClassParametersReference) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
if in.Scope != nil {
in, out := &in.Scope, &out.Scope
*out = new(string)
**out = **in
}
if in.Namespace != nil {
in, out := &in.Namespace, &out.Namespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassParametersReference.
func (in *IngressClassParametersReference) DeepCopy() *IngressClassParametersReference {
if in == nil {
return nil
}
out := new(IngressClassParametersReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) {
*out = *in
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = new(IngressClassParametersReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec.
func (in *IngressClassSpec) DeepCopy() *IngressClassSpec {
if in == nil {
return nil
}
out := new(IngressClassSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
func (in *IngressList) DeepCopy() *IngressList {
if in == nil {
return nil
}
out := new(IngressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]IngressPortStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress.
func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress {
if in == nil {
return nil
}
out := new(IngressLoadBalancerIngress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]IngressLoadBalancerIngress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus.
func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus {
if in == nil {
return nil
}
out := new(IngressLoadBalancerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) {
*out = *in
if in.Error != nil {
in, out := &in.Error, &out.Error
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus.
func (in *IngressPortStatus) DeepCopy() *IngressPortStatus {
if in == nil {
return nil
}
out := new(IngressPortStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in
in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
func (in *IngressRule) DeepCopy() *IngressRule {
if in == nil {
return nil
}
out := new(IngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
*out = *in
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(HTTPIngressRuleValue)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
if in == nil {
return nil
}
out := new(IngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressServiceBackend) DeepCopyInto(out *IngressServiceBackend) {
*out = *in
out.Port = in.Port
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressServiceBackend.
func (in *IngressServiceBackend) DeepCopy() *IngressServiceBackend {
if in == nil {
return nil
}
out := new(IngressServiceBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
*out = *in
if in.IngressClassName != nil {
in, out := &in.IngressClassName, &out.IngressClassName
*out = new(string)
**out = **in
}
if in.DefaultBackend != nil {
in, out := &in.DefaultBackend, &out.DefaultBackend
*out = new(IngressBackend)
(*in).DeepCopyInto(*out)
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = make([]IngressTLS, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]IngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
func (in *IngressSpec) DeepCopy() *IngressSpec {
if in == nil {
return nil
}
out := new(IngressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
*out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
func (in *IngressStatus) DeepCopy() *IngressStatus {
if in == nil {
return nil
}
out := new(IngressStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
func (in *IngressTLS) DeepCopy() *IngressTLS {
if in == nil {
return nil
}
out := new(IngressTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
if in == nil {
return nil
}
out := new(NetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.To != nil {
in, out := &in.To, &out.To
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyEgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]NetworkPolicyPort, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.From != nil {
in, out := &in.From, &out.From
*out = make([]NetworkPolicyPeer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
if in == nil {
return nil
}
out := new(NetworkPolicyIngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
if in == nil {
return nil
}
out := new(NetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
*out = *in
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.IPBlock != nil {
in, out := &in.IPBlock, &out.IPBlock
*out = new(IPBlock)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
if in == nil {
return nil
}
out := new(NetworkPolicyPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
*out = *in
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(core.Protocol)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(intstr.IntOrString)
**out = **in
}
if in.EndPort != nil {
in, out := &in.EndPort, &out.EndPort
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
if in == nil {
return nil
}
out := new(NetworkPolicyPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
*out = *in
in.PodSelector.DeepCopyInto(&out.PodSelector)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]NetworkPolicyIngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = make([]NetworkPolicyEgressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PolicyTypes != nil {
in, out := &in.PolicyTypes, &out.PolicyTypes
*out = make([]PolicyType, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
if in == nil {
return nil
}
out := new(NetworkPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
func (in *ParentReference) DeepCopy() *ParentReference {
if in == nil {
return nil
}
out := new(ParentReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBackendPort.
func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
if in == nil {
return nil
}
out := new(ServiceBackendPort)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
if in == nil {
return nil
}
out := new(ServiceCIDR)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ServiceCIDR, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
if in == nil {
return nil
}
out := new(ServiceCIDRList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
*out = *in
if in.CIDRs != nil {
in, out := &in.CIDRs, &out.CIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
if in == nil {
return nil
}
out := new(ServiceCIDRSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
if in == nil {
return nil
}
out := new(ServiceCIDRStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install adds the node API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/node"
v1 "k8s.io/kubernetes/pkg/apis/node/v1"
"k8s.io/kubernetes/pkg/apis/node/v1alpha1"
"k8s.io/kubernetes/pkg/apis/node/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(node.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
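// Note: SetVersionPriority lists v1 before v1beta1, so v1 is the preferred
// external version whenever the scheme must choose one (for example when
// encoding). Usage sketch (hypothetical caller):
//
//	scheme := runtime.NewScheme()
//	Install(scheme)
//	// scheme now knows node.k8s.io types in v1, v1beta1, v1alpha1 and the
//	// internal version, preferring v1.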
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "node.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
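// For example, Kind("RuntimeClass") yields the GroupKind
// {Group: "node.k8s.io", Kind: "RuntimeClass"}, and Resource("runtimeclasses")
// yields the GroupResource {Group: "node.k8s.io", Resource: "runtimeclasses"}.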
var (
// SchemeBuilder for node API registration.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme for node API registration.
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&RuntimeClass{},
&RuntimeClassList{},
)
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
nodev1 "k8s.io/api/node/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName for node API
const GroupName = "node.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &nodev1.SchemeBuilder
// AddToScheme registers the node v1 API types with a scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
nodev1 "k8s.io/api/node/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
node "k8s.io/kubernetes/pkg/apis/node"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*nodev1.Overhead)(nil), (*node.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Overhead_To_node_Overhead(a.(*nodev1.Overhead), b.(*node.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Overhead)(nil), (*nodev1.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Overhead_To_v1_Overhead(a.(*node.Overhead), b.(*nodev1.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1.RuntimeClass)(nil), (*node.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RuntimeClass_To_node_RuntimeClass(a.(*nodev1.RuntimeClass), b.(*node.RuntimeClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.RuntimeClass)(nil), (*nodev1.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClass_To_v1_RuntimeClass(a.(*node.RuntimeClass), b.(*nodev1.RuntimeClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1.RuntimeClassList)(nil), (*node.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RuntimeClassList_To_node_RuntimeClassList(a.(*nodev1.RuntimeClassList), b.(*node.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.RuntimeClassList)(nil), (*nodev1.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClassList_To_v1_RuntimeClassList(a.(*node.RuntimeClassList), b.(*nodev1.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1.Scheduling)(nil), (*node.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Scheduling_To_node_Scheduling(a.(*nodev1.Scheduling), b.(*node.Scheduling), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Scheduling)(nil), (*nodev1.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Scheduling_To_v1_Scheduling(a.(*node.Scheduling), b.(*nodev1.Scheduling), scope)
}); err != nil {
return err
}
return nil
}
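// Usage sketch (hypothetical caller): with the conversions registered, the
// scheme can translate between the versioned and internal representations.
//
//	internal := &node.RuntimeClass{}
//	if err := scheme.Convert(&nodev1.RuntimeClass{Handler: "runc"}, internal, nil); err != nil {
//		panic(err)
//	}
//	// internal.Handler == "runc"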
func autoConvert_v1_Overhead_To_node_Overhead(in *nodev1.Overhead, out *node.Overhead, s conversion.Scope) error {
out.PodFixed = *(*core.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_v1_Overhead_To_node_Overhead is an autogenerated conversion function.
func Convert_v1_Overhead_To_node_Overhead(in *nodev1.Overhead, out *node.Overhead, s conversion.Scope) error {
return autoConvert_v1_Overhead_To_node_Overhead(in, out, s)
}
func autoConvert_node_Overhead_To_v1_Overhead(in *node.Overhead, out *nodev1.Overhead, s conversion.Scope) error {
out.PodFixed = *(*corev1.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_node_Overhead_To_v1_Overhead is an autogenerated conversion function.
func Convert_node_Overhead_To_v1_Overhead(in *node.Overhead, out *nodev1.Overhead, s conversion.Scope) error {
return autoConvert_node_Overhead_To_v1_Overhead(in, out, s)
}
func autoConvert_v1_RuntimeClass_To_node_RuntimeClass(in *nodev1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Handler = in.Handler
out.Overhead = (*node.Overhead)(unsafe.Pointer(in.Overhead))
out.Scheduling = (*node.Scheduling)(unsafe.Pointer(in.Scheduling))
return nil
}
// Convert_v1_RuntimeClass_To_node_RuntimeClass is an autogenerated conversion function.
func Convert_v1_RuntimeClass_To_node_RuntimeClass(in *nodev1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
return autoConvert_v1_RuntimeClass_To_node_RuntimeClass(in, out, s)
}
func autoConvert_node_RuntimeClass_To_v1_RuntimeClass(in *node.RuntimeClass, out *nodev1.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Handler = in.Handler
out.Overhead = (*nodev1.Overhead)(unsafe.Pointer(in.Overhead))
out.Scheduling = (*nodev1.Scheduling)(unsafe.Pointer(in.Scheduling))
return nil
}
// Convert_node_RuntimeClass_To_v1_RuntimeClass is an autogenerated conversion function.
func Convert_node_RuntimeClass_To_v1_RuntimeClass(in *node.RuntimeClass, out *nodev1.RuntimeClass, s conversion.Scope) error {
return autoConvert_node_RuntimeClass_To_v1_RuntimeClass(in, out, s)
}
func autoConvert_v1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]node.RuntimeClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_RuntimeClassList_To_node_RuntimeClassList is an autogenerated conversion function.
func Convert_v1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
return autoConvert_v1_RuntimeClassList_To_node_RuntimeClassList(in, out, s)
}
func autoConvert_node_RuntimeClassList_To_v1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]nodev1.RuntimeClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_node_RuntimeClassList_To_v1_RuntimeClassList is an autogenerated conversion function.
func Convert_node_RuntimeClassList_To_v1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1.RuntimeClassList, s conversion.Scope) error {
return autoConvert_node_RuntimeClassList_To_v1_RuntimeClassList(in, out, s)
}
func autoConvert_v1_Scheduling_To_node_Scheduling(in *nodev1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_v1_Scheduling_To_node_Scheduling is an autogenerated conversion function.
func Convert_v1_Scheduling_To_node_Scheduling(in *nodev1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
return autoConvert_v1_Scheduling_To_node_Scheduling(in, out, s)
}
func autoConvert_node_Scheduling_To_v1_Scheduling(in *node.Scheduling, out *nodev1.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]corev1.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_node_Scheduling_To_v1_Scheduling is an autogenerated conversion function.
func Convert_node_Scheduling_To_v1_Scheduling(in *node.Scheduling, out *nodev1.Scheduling, s conversion.Scope) error {
return autoConvert_node_Scheduling_To_v1_Scheduling(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1alpha1 "k8s.io/api/node/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
node "k8s.io/kubernetes/pkg/apis/node"
)
// Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass must override the automatic
// conversion because the Spec struct was flattened into the top-level type after v1alpha1.
func Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(in *v1alpha1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Handler = in.Spec.RuntimeHandler
if in.Spec.Overhead != nil {
out.Overhead = &node.Overhead{}
if err := Convert_v1alpha1_Overhead_To_node_Overhead(in.Spec.Overhead, out.Overhead, s); err != nil {
return err
}
}
if in.Spec.Scheduling != nil {
out.Scheduling = &node.Scheduling{}
if err := Convert_v1alpha1_Scheduling_To_node_Scheduling(in.Spec.Scheduling, out.Scheduling, s); err != nil {
return err
}
}
return nil
}
// Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass must override the automatic
// conversion because the Spec struct was flattened into the top-level type after v1alpha1.
func Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass(in *node.RuntimeClass, out *v1alpha1.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Spec.RuntimeHandler = in.Handler
if in.Overhead != nil {
out.Spec.Overhead = &v1alpha1.Overhead{}
if err := Convert_node_Overhead_To_v1alpha1_Overhead(in.Overhead, out.Spec.Overhead, s); err != nil {
return err
}
}
if in.Scheduling != nil {
out.Spec.Scheduling = &v1alpha1.Scheduling{}
if err := Convert_node_Scheduling_To_v1alpha1_Scheduling(in.Scheduling, out.Spec.Scheduling, s); err != nil {
return err
}
}
return nil
}
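// Shape being bridged (illustrative values only):
//
//	v1alpha1: RuntimeClass{Spec: RuntimeClassSpec{RuntimeHandler: "runc"}}
//	internal: RuntimeClass{Handler: "runc"}
//
// Because the internal type flattened the Spec struct, conversion-gen cannot
// match the fields automatically; accordingly these two functions are
// registered via AddConversionFunc rather than AddGeneratedConversionFunc in
// the generated register code below.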
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
nodev1alpha1 "k8s.io/api/node/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName for node API
const GroupName = "node.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &nodev1alpha1.SchemeBuilder
// AddToScheme registers the node v1alpha1 API types with a scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register()
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
node "k8s.io/kubernetes/pkg/apis/node"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*nodev1alpha1.Overhead)(nil), (*node.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Overhead_To_node_Overhead(a.(*nodev1alpha1.Overhead), b.(*node.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Overhead)(nil), (*nodev1alpha1.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Overhead_To_v1alpha1_Overhead(a.(*node.Overhead), b.(*nodev1alpha1.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1alpha1.RuntimeClassList)(nil), (*node.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList(a.(*nodev1alpha1.RuntimeClassList), b.(*node.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.RuntimeClassList)(nil), (*nodev1alpha1.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClassList_To_v1alpha1_RuntimeClassList(a.(*node.RuntimeClassList), b.(*nodev1alpha1.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1alpha1.Scheduling)(nil), (*node.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Scheduling_To_node_Scheduling(a.(*nodev1alpha1.Scheduling), b.(*node.Scheduling), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Scheduling)(nil), (*nodev1alpha1.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Scheduling_To_v1alpha1_Scheduling(a.(*node.Scheduling), b.(*nodev1alpha1.Scheduling), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*node.RuntimeClass)(nil), (*nodev1alpha1.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass(a.(*node.RuntimeClass), b.(*nodev1alpha1.RuntimeClass), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*nodev1alpha1.RuntimeClass)(nil), (*node.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(a.(*nodev1alpha1.RuntimeClass), b.(*node.RuntimeClass), scope)
}); err != nil {
return err
}
return nil
}
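// Illustrative sketch (hypothetical helper, not part of the generated file):
// because RegisterConversions is public, these conversions can be installed
// on a standalone Scheme rather than only via localSchemeBuilder.
func newNodeConversionScheme() (*runtime.Scheme, error) {
s := runtime.NewScheme()
if err := RegisterConversions(s); err != nil {
return nil, err
}
return s, nil
}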
func autoConvert_v1alpha1_Overhead_To_node_Overhead(in *nodev1alpha1.Overhead, out *node.Overhead, s conversion.Scope) error {
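// core.ResourceList and the versioned v1.ResourceList share an identical
// memory layout, so conversion-gen emits a zero-copy unsafe cast instead of
// copying the map entry by entry.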
out.PodFixed = *(*core.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_v1alpha1_Overhead_To_node_Overhead is an autogenerated conversion function.
func Convert_v1alpha1_Overhead_To_node_Overhead(in *nodev1alpha1.Overhead, out *node.Overhead, s conversion.Scope) error {
return autoConvert_v1alpha1_Overhead_To_node_Overhead(in, out, s)
}
func autoConvert_node_Overhead_To_v1alpha1_Overhead(in *node.Overhead, out *nodev1alpha1.Overhead, s conversion.Scope) error {
out.PodFixed = *(*v1.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_node_Overhead_To_v1alpha1_Overhead is an autogenerated conversion function.
func Convert_node_Overhead_To_v1alpha1_Overhead(in *node.Overhead, out *nodev1alpha1.Overhead, s conversion.Scope) error {
return autoConvert_node_Overhead_To_v1alpha1_Overhead(in, out, s)
}
func autoConvert_v1alpha1_RuntimeClass_To_node_RuntimeClass(in *nodev1alpha1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
// WARNING: in.Spec requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_node_RuntimeClass_To_v1alpha1_RuntimeClass(in *node.RuntimeClass, out *nodev1alpha1.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
// WARNING: in.Handler requires manual conversion: does not exist in peer-type
// WARNING: in.Overhead requires manual conversion: does not exist in peer-type
// WARNING: in.Scheduling requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1alpha1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]node.RuntimeClass, len(*in))
for i := range *in {
if err := Convert_v1alpha1_RuntimeClass_To_node_RuntimeClass(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList is an autogenerated conversion function.
func Convert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1alpha1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
return autoConvert_v1alpha1_RuntimeClassList_To_node_RuntimeClassList(in, out, s)
}
func autoConvert_node_RuntimeClassList_To_v1alpha1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1alpha1.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]nodev1alpha1.RuntimeClass, len(*in))
for i := range *in {
if err := Convert_node_RuntimeClass_To_v1alpha1_RuntimeClass(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_node_RuntimeClassList_To_v1alpha1_RuntimeClassList is an autogenerated conversion function.
func Convert_node_RuntimeClassList_To_v1alpha1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1alpha1.RuntimeClassList, s conversion.Scope) error {
return autoConvert_node_RuntimeClassList_To_v1alpha1_RuntimeClassList(in, out, s)
}
func autoConvert_v1alpha1_Scheduling_To_node_Scheduling(in *nodev1alpha1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_v1alpha1_Scheduling_To_node_Scheduling is an autogenerated conversion function.
func Convert_v1alpha1_Scheduling_To_node_Scheduling(in *nodev1alpha1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
return autoConvert_v1alpha1_Scheduling_To_node_Scheduling(in, out, s)
}
func autoConvert_node_Scheduling_To_v1alpha1_Scheduling(in *node.Scheduling, out *nodev1alpha1.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_node_Scheduling_To_v1alpha1_Scheduling is an autogenerated conversion function.
func Convert_node_Scheduling_To_v1alpha1_Scheduling(in *node.Scheduling, out *nodev1alpha1.Scheduling, s conversion.Scope) error {
return autoConvert_node_Scheduling_To_v1alpha1_Scheduling(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
nodev1beta1 "k8s.io/api/node/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used for the node API
const GroupName = "node.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &nodev1beta1.SchemeBuilder
// AddToScheme registers the node API types with the given scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
nodev1beta1 "k8s.io/api/node/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
node "k8s.io/kubernetes/pkg/apis/node"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*nodev1beta1.Overhead)(nil), (*node.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Overhead_To_node_Overhead(a.(*nodev1beta1.Overhead), b.(*node.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Overhead)(nil), (*nodev1beta1.Overhead)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Overhead_To_v1beta1_Overhead(a.(*node.Overhead), b.(*nodev1beta1.Overhead), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1beta1.RuntimeClass)(nil), (*node.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RuntimeClass_To_node_RuntimeClass(a.(*nodev1beta1.RuntimeClass), b.(*node.RuntimeClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.RuntimeClass)(nil), (*nodev1beta1.RuntimeClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClass_To_v1beta1_RuntimeClass(a.(*node.RuntimeClass), b.(*nodev1beta1.RuntimeClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1beta1.RuntimeClassList)(nil), (*node.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RuntimeClassList_To_node_RuntimeClassList(a.(*nodev1beta1.RuntimeClassList), b.(*node.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.RuntimeClassList)(nil), (*nodev1beta1.RuntimeClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_RuntimeClassList_To_v1beta1_RuntimeClassList(a.(*node.RuntimeClassList), b.(*nodev1beta1.RuntimeClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*nodev1beta1.Scheduling)(nil), (*node.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Scheduling_To_node_Scheduling(a.(*nodev1beta1.Scheduling), b.(*node.Scheduling), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*node.Scheduling)(nil), (*nodev1beta1.Scheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_node_Scheduling_To_v1beta1_Scheduling(a.(*node.Scheduling), b.(*nodev1beta1.Scheduling), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_Overhead_To_node_Overhead(in *nodev1beta1.Overhead, out *node.Overhead, s conversion.Scope) error {
out.PodFixed = *(*core.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_v1beta1_Overhead_To_node_Overhead is an autogenerated conversion function.
func Convert_v1beta1_Overhead_To_node_Overhead(in *nodev1beta1.Overhead, out *node.Overhead, s conversion.Scope) error {
return autoConvert_v1beta1_Overhead_To_node_Overhead(in, out, s)
}
func autoConvert_node_Overhead_To_v1beta1_Overhead(in *node.Overhead, out *nodev1beta1.Overhead, s conversion.Scope) error {
out.PodFixed = *(*v1.ResourceList)(unsafe.Pointer(&in.PodFixed))
return nil
}
// Convert_node_Overhead_To_v1beta1_Overhead is an autogenerated conversion function.
func Convert_node_Overhead_To_v1beta1_Overhead(in *node.Overhead, out *nodev1beta1.Overhead, s conversion.Scope) error {
return autoConvert_node_Overhead_To_v1beta1_Overhead(in, out, s)
}
func autoConvert_v1beta1_RuntimeClass_To_node_RuntimeClass(in *nodev1beta1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Handler = in.Handler
out.Overhead = (*node.Overhead)(unsafe.Pointer(in.Overhead))
out.Scheduling = (*node.Scheduling)(unsafe.Pointer(in.Scheduling))
return nil
}
// Convert_v1beta1_RuntimeClass_To_node_RuntimeClass is an autogenerated conversion function.
func Convert_v1beta1_RuntimeClass_To_node_RuntimeClass(in *nodev1beta1.RuntimeClass, out *node.RuntimeClass, s conversion.Scope) error {
return autoConvert_v1beta1_RuntimeClass_To_node_RuntimeClass(in, out, s)
}
func autoConvert_node_RuntimeClass_To_v1beta1_RuntimeClass(in *node.RuntimeClass, out *nodev1beta1.RuntimeClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Handler = in.Handler
out.Overhead = (*nodev1beta1.Overhead)(unsafe.Pointer(in.Overhead))
out.Scheduling = (*nodev1beta1.Scheduling)(unsafe.Pointer(in.Scheduling))
return nil
}
// Convert_node_RuntimeClass_To_v1beta1_RuntimeClass is an autogenerated conversion function.
func Convert_node_RuntimeClass_To_v1beta1_RuntimeClass(in *node.RuntimeClass, out *nodev1beta1.RuntimeClass, s conversion.Scope) error {
return autoConvert_node_RuntimeClass_To_v1beta1_RuntimeClass(in, out, s)
}
func autoConvert_v1beta1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1beta1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]node.RuntimeClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_RuntimeClassList_To_node_RuntimeClassList is an autogenerated conversion function.
func Convert_v1beta1_RuntimeClassList_To_node_RuntimeClassList(in *nodev1beta1.RuntimeClassList, out *node.RuntimeClassList, s conversion.Scope) error {
return autoConvert_v1beta1_RuntimeClassList_To_node_RuntimeClassList(in, out, s)
}
func autoConvert_node_RuntimeClassList_To_v1beta1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1beta1.RuntimeClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]nodev1beta1.RuntimeClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_node_RuntimeClassList_To_v1beta1_RuntimeClassList is an autogenerated conversion function.
func Convert_node_RuntimeClassList_To_v1beta1_RuntimeClassList(in *node.RuntimeClassList, out *nodev1beta1.RuntimeClassList, s conversion.Scope) error {
return autoConvert_node_RuntimeClassList_To_v1beta1_RuntimeClassList(in, out, s)
}
func autoConvert_v1beta1_Scheduling_To_node_Scheduling(in *nodev1beta1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_v1beta1_Scheduling_To_node_Scheduling is an autogenerated conversion function.
func Convert_v1beta1_Scheduling_To_node_Scheduling(in *nodev1beta1.Scheduling, out *node.Scheduling, s conversion.Scope) error {
return autoConvert_v1beta1_Scheduling_To_node_Scheduling(in, out, s)
}
func autoConvert_node_Scheduling_To_v1beta1_Scheduling(in *node.Scheduling, out *nodev1beta1.Scheduling, s conversion.Scope) error {
out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
return nil
}
// Convert_node_Scheduling_To_v1beta1_Scheduling is an autogenerated conversion function.
func Convert_node_Scheduling_To_v1beta1_Scheduling(in *node.Scheduling, out *nodev1beta1.Scheduling, s conversion.Scope) error {
return autoConvert_node_Scheduling_To_v1beta1_Scheduling(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package node
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Overhead) DeepCopyInto(out *Overhead) {
*out = *in
if in.PodFixed != nil {
in, out := &in.PodFixed, &out.PodFixed
*out = make(core.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead.
func (in *Overhead) DeepCopy() *Overhead {
if in == nil {
return nil
}
out := new(Overhead)
in.DeepCopyInto(out)
return out
}
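// Illustrative sketch (hypothetical helper, not part of the generated file):
// DeepCopy rebuilds PodFixed entry by entry, so the copy can be mutated
// without the change leaking back into the shared original.
func cloneOverheadForMutation(in *Overhead) *Overhead {
out := in.DeepCopy()
if out != nil && out.PodFixed == nil {
// Hypothetical normalization so callers always get a writable map.
out.PodFixed = core.ResourceList{}
}
return out
}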
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Overhead != nil {
in, out := &in.Overhead, &out.Overhead
*out = new(Overhead)
(*in).DeepCopyInto(*out)
}
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = new(Scheduling)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass.
func (in *RuntimeClass) DeepCopy() *RuntimeClass {
if in == nil {
return nil
}
out := new(RuntimeClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RuntimeClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RuntimeClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList.
func (in *RuntimeClassList) DeepCopy() *RuntimeClassList {
if in == nil {
return nil
}
out := new(RuntimeClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RuntimeClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scheduling) DeepCopyInto(out *Scheduling) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]core.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling.
func (in *Scheduling) DeepCopy() *Scheduling {
if in == nil {
return nil
}
out := new(Scheduling)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/policy"
)
// Funcs returns the fuzzer functions for the policy api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(s *policy.PodDisruptionBudgetStatus, c randfill.Continue) {
c.FillNoCustom(s) // fuzz self without calling this function again
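// Constrain the fuzzed value to 0 or 1 after the generic fill.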
s.DisruptionsAllowed = int32(c.Rand.Intn(2))
},
}
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
PDBV1beta1Label = "pdb.kubernetes.io/deprecated-v1beta1-empty-selector-match"
)
var (
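// NonV1beta1MatchAllSelector is the canonical internal/v1 "match all pods"
// selector: non-nil but empty.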
NonV1beta1MatchAllSelector = &metav1.LabelSelector{}
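// NonV1beta1MatchNoneSelector matches no pods: it requires the deprecated
// v1beta1 label, which is never set on real pods.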
NonV1beta1MatchNoneSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists}},
}
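// V1beta1MatchNoneSelector is the empty selector, which v1beta1 historically
// treated as matching no pods.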
V1beta1MatchNoneSelector = &metav1.LabelSelector{}
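// V1beta1MatchAllSelector expresses "match all pods" under v1beta1 semantics
// via a DoesNotExist requirement on the deprecated label.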
V1beta1MatchAllSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpDoesNotExist}},
}
)
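// StripPDBV1beta1Label removes any match expression keyed on PDBV1beta1Label,
// ensuring the deprecated marker never mixes with user-specified selector
// terms.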
func StripPDBV1beta1Label(selector *metav1.LabelSelector) {
if selector == nil {
return
}
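// Filter in place: reslicing to length zero reuses the existing backing
// array, so dropping the deprecated key costs no extra allocation.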
trimmedMatchExpressions := selector.MatchExpressions[:0]
for _, exp := range selector.MatchExpressions {
if exp.Key != PDBV1beta1Label {
trimmedMatchExpressions = append(trimmedMatchExpressions, exp)
}
}
selector.MatchExpressions = trimmedMatchExpressions
}
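// Illustrative sketch (hypothetical helper, not part of the original file):
// stripping the deprecated key leaves user-specified requirements untouched.
func exampleStripSelector() *metav1.LabelSelector {
sel := &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "web"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists},
{Key: "tier", Operator: metav1.LabelSelectorOpExists},
},
}
StripPDBV1beta1Label(sel)
// sel.MatchExpressions now contains only the "tier" requirement.
return sel
}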
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the policy API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/policy/v1"
"k8s.io/kubernetes/pkg/apis/policy/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(policy.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
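// Prefer v1 over v1beta1 when choosing a version to serialize to.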
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "policy"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&PodDisruptionBudget{},
&PodDisruptionBudgetList{},
&Eviction{},
)
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/api/policy/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/policy"
)
func Convert_v1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *v1.PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
if err := autoConvert_v1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s); err != nil {
return err
}
switch {
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.NonV1beta1MatchNoneSelector):
// no-op, preserve
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.NonV1beta1MatchAllSelector):
// no-op, preserve
default:
// otherwise, make sure the label intended to be used in a match-all or match-none selector
// never gets combined with user-specified fields
policy.StripPDBV1beta1Label(out.Spec.Selector)
}
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "policy"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &policyv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
policy "k8s.io/kubernetes/pkg/apis/policy"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*policyv1.Eviction)(nil), (*policy.Eviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Eviction_To_policy_Eviction(a.(*policyv1.Eviction), b.(*policy.Eviction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.Eviction)(nil), (*policyv1.Eviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_Eviction_To_v1_Eviction(a.(*policy.Eviction), b.(*policyv1.Eviction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudget)(nil), (*policyv1.PodDisruptionBudget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget(a.(*policy.PodDisruptionBudget), b.(*policyv1.PodDisruptionBudget), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1.PodDisruptionBudgetList)(nil), (*policy.PodDisruptionBudgetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(a.(*policyv1.PodDisruptionBudgetList), b.(*policy.PodDisruptionBudgetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetList)(nil), (*policyv1.PodDisruptionBudgetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetList_To_v1_PodDisruptionBudgetList(a.(*policy.PodDisruptionBudgetList), b.(*policyv1.PodDisruptionBudgetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1.PodDisruptionBudgetSpec)(nil), (*policy.PodDisruptionBudgetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(a.(*policyv1.PodDisruptionBudgetSpec), b.(*policy.PodDisruptionBudgetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetSpec)(nil), (*policyv1.PodDisruptionBudgetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec(a.(*policy.PodDisruptionBudgetSpec), b.(*policyv1.PodDisruptionBudgetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1.PodDisruptionBudgetStatus)(nil), (*policy.PodDisruptionBudgetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(a.(*policyv1.PodDisruptionBudgetStatus), b.(*policy.PodDisruptionBudgetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetStatus)(nil), (*policyv1.PodDisruptionBudgetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus(a.(*policy.PodDisruptionBudgetStatus), b.(*policyv1.PodDisruptionBudgetStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*policyv1.PodDisruptionBudget)(nil), (*policy.PodDisruptionBudget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodDisruptionBudget_To_policy_PodDisruptionBudget(a.(*policyv1.PodDisruptionBudget), b.(*policy.PodDisruptionBudget), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_Eviction_To_policy_Eviction(in *policyv1.Eviction, out *policy.Eviction, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DeleteOptions = (*metav1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions))
return nil
}
// Convert_v1_Eviction_To_policy_Eviction is an autogenerated conversion function.
func Convert_v1_Eviction_To_policy_Eviction(in *policyv1.Eviction, out *policy.Eviction, s conversion.Scope) error {
return autoConvert_v1_Eviction_To_policy_Eviction(in, out, s)
}
func autoConvert_policy_Eviction_To_v1_Eviction(in *policy.Eviction, out *policyv1.Eviction, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DeleteOptions = (*metav1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions))
return nil
}
// Convert_policy_Eviction_To_v1_Eviction is an autogenerated conversion function.
func Convert_policy_Eviction_To_v1_Eviction(in *policy.Eviction, out *policyv1.Eviction, s conversion.Scope) error {
return autoConvert_policy_Eviction_To_v1_Eviction(in, out, s)
}
func autoConvert_v1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *policyv1.PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *policyv1.PodDisruptionBudget, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *policyv1.PodDisruptionBudget, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget(in, out, s)
}
func autoConvert_v1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *policyv1.PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]policy.PodDisruptionBudget, len(*in))
for i := range *in {
if err := Convert_v1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList is an autogenerated conversion function.
func Convert_v1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *policyv1.PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
return autoConvert_v1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetList_To_v1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *policyv1.PodDisruptionBudgetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]policyv1.PodDisruptionBudget, len(*in))
for i := range *in {
if err := Convert_policy_PodDisruptionBudget_To_v1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_policy_PodDisruptionBudgetList_To_v1_PodDisruptionBudgetList is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetList_To_v1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *policyv1.PodDisruptionBudgetList, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetList_To_v1_PodDisruptionBudgetList(in, out, s)
}
func autoConvert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *policyv1.PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
out.MinAvailable = (*intstr.IntOrString)(unsafe.Pointer(in.MinAvailable))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
out.UnhealthyPodEvictionPolicy = (*policy.UnhealthyPodEvictionPolicyType)(unsafe.Pointer(in.UnhealthyPodEvictionPolicy))
return nil
}
// Convert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec is an autogenerated conversion function.
func Convert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *policyv1.PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
return autoConvert_v1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *policyv1.PodDisruptionBudgetSpec, s conversion.Scope) error {
out.MinAvailable = (*intstr.IntOrString)(unsafe.Pointer(in.MinAvailable))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
out.UnhealthyPodEvictionPolicy = (*policyv1.UnhealthyPodEvictionPolicyType)(unsafe.Pointer(in.UnhealthyPodEvictionPolicy))
return nil
}
// Convert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *policyv1.PodDisruptionBudgetSpec, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetSpec_To_v1_PodDisruptionBudgetSpec(in, out, s)
}
func autoConvert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *policyv1.PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]metav1.Time)(unsafe.Pointer(&in.DisruptedPods))
out.DisruptionsAllowed = in.DisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus is an autogenerated conversion function.
func Convert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *policyv1.PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
return autoConvert_v1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *policyv1.PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]metav1.Time)(unsafe.Pointer(&in.DisruptedPods))
out.DisruptionsAllowed = in.DisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *policyv1.PodDisruptionBudgetStatus, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetStatus_To_v1_PodDisruptionBudgetStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/policy"
)
func Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *v1beta1.PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
if err := autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s); err != nil {
return err
}
switch {
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.V1beta1MatchNoneSelector):
// If the v1beta1 version has a non-nil but empty selector, it should be
// selecting no pods, even when used with the internal or v1 api. We
// add a selector that is non-empty but will never match any pods.
out.Spec.Selector = policy.NonV1beta1MatchNoneSelector.DeepCopy()
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.V1beta1MatchAllSelector):
// If the v1beta1 version has our v1beta1-specific "match-all" selector,
// swap that out for a simpler empty "match-all" selector for v1
out.Spec.Selector = policy.NonV1beta1MatchAllSelector.DeepCopy()
default:
// otherwise, make sure the label intended to be used in a match-all or match-none selector
// never gets combined with user-specified fields
policy.StripPDBV1beta1Label(out.Spec.Selector)
}
return nil
}
func Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *v1beta1.PodDisruptionBudget, s conversion.Scope) error {
if err := autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in, out, s); err != nil {
return err
}
switch {
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.NonV1beta1MatchNoneSelector):
// If the internal version has our v1beta1-specific "match-none" selector,
// swap that out for a simpler empty "match-none" selector for v1beta1
out.Spec.Selector = policy.V1beta1MatchNoneSelector.DeepCopy()
case apiequality.Semantic.DeepEqual(in.Spec.Selector, policy.NonV1beta1MatchAllSelector):
// If the internal version has a non-nil but empty selector, we want it to
// select all pods. We make sure this happens even with the v1beta1 api by
// adding a non-empty selector that selects all pods.
out.Spec.Selector = policy.V1beta1MatchAllSelector.DeepCopy()
}
return nil
}
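// Illustrative sketch (hypothetical helper, not part of the original file):
// an empty v1beta1 selector must surface internally as the match-none
// sentinel rather than as a plain empty selector.
func emptySelectorBecomesMatchNone() (bool, error) {
in := &v1beta1.PodDisruptionBudget{}
in.Spec.Selector = policy.V1beta1MatchNoneSelector.DeepCopy()
out := &policy.PodDisruptionBudget{}
// A nil conversion.Scope is assumed safe: the generated conversions this
// calls never dereference the scope.
var scope conversion.Scope
if err := Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, scope); err != nil {
return false, err
}
return apiequality.Semantic.DeepEqual(out.Spec.Selector, policy.NonV1beta1MatchNoneSelector), nil
}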
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "policy"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &policyv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(RegisterDefaults)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
policyv1beta1 "k8s.io/api/policy/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
policy "k8s.io/kubernetes/pkg/apis/policy"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*policyv1beta1.Eviction)(nil), (*policy.Eviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Eviction_To_policy_Eviction(a.(*policyv1beta1.Eviction), b.(*policy.Eviction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.Eviction)(nil), (*policyv1beta1.Eviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_Eviction_To_v1beta1_Eviction(a.(*policy.Eviction), b.(*policyv1beta1.Eviction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1beta1.PodDisruptionBudgetList)(nil), (*policy.PodDisruptionBudgetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(a.(*policyv1beta1.PodDisruptionBudgetList), b.(*policy.PodDisruptionBudgetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetList)(nil), (*policyv1beta1.PodDisruptionBudgetList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(a.(*policy.PodDisruptionBudgetList), b.(*policyv1beta1.PodDisruptionBudgetList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1beta1.PodDisruptionBudgetSpec)(nil), (*policy.PodDisruptionBudgetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(a.(*policyv1beta1.PodDisruptionBudgetSpec), b.(*policy.PodDisruptionBudgetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetSpec)(nil), (*policyv1beta1.PodDisruptionBudgetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(a.(*policy.PodDisruptionBudgetSpec), b.(*policyv1beta1.PodDisruptionBudgetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policyv1beta1.PodDisruptionBudgetStatus)(nil), (*policy.PodDisruptionBudgetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(a.(*policyv1beta1.PodDisruptionBudgetStatus), b.(*policy.PodDisruptionBudgetStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*policy.PodDisruptionBudgetStatus)(nil), (*policyv1beta1.PodDisruptionBudgetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(a.(*policy.PodDisruptionBudgetStatus), b.(*policyv1beta1.PodDisruptionBudgetStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*policy.PodDisruptionBudget)(nil), (*policyv1beta1.PodDisruptionBudget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(a.(*policy.PodDisruptionBudget), b.(*policyv1beta1.PodDisruptionBudget), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*policyv1beta1.PodDisruptionBudget)(nil), (*policy.PodDisruptionBudget)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(a.(*policyv1beta1.PodDisruptionBudget), b.(*policy.PodDisruptionBudget), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_Eviction_To_policy_Eviction(in *policyv1beta1.Eviction, out *policy.Eviction, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions))
return nil
}
// Convert_v1beta1_Eviction_To_policy_Eviction is an autogenerated conversion function.
func Convert_v1beta1_Eviction_To_policy_Eviction(in *policyv1beta1.Eviction, out *policy.Eviction, s conversion.Scope) error {
return autoConvert_v1beta1_Eviction_To_policy_Eviction(in, out, s)
}
func autoConvert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *policyv1beta1.Eviction, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions))
return nil
}
// Convert_policy_Eviction_To_v1beta1_Eviction is an autogenerated conversion function.
func Convert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *policyv1beta1.Eviction, s conversion.Scope) error {
return autoConvert_policy_Eviction_To_v1beta1_Eviction(in, out, s)
}
func autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *policyv1beta1.PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *policyv1beta1.PodDisruptionBudget, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *policyv1beta1.PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]policy.PodDisruptionBudget, len(*in))
for i := range *in {
if err := Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList is an autogenerated conversion function.
func Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *policyv1beta1.PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error {
return autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *policyv1beta1.PodDisruptionBudgetList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]policyv1beta1.PodDisruptionBudget, len(*in))
for i := range *in {
if err := Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *policyv1beta1.PodDisruptionBudgetList, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in, out, s)
}
func autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *policyv1beta1.PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
out.MinAvailable = (*intstr.IntOrString)(unsafe.Pointer(in.MinAvailable))
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
out.UnhealthyPodEvictionPolicy = (*policy.UnhealthyPodEvictionPolicyType)(unsafe.Pointer(in.UnhealthyPodEvictionPolicy))
return nil
}
// Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec is an autogenerated conversion function.
func Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *policyv1beta1.PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error {
return autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *policyv1beta1.PodDisruptionBudgetSpec, s conversion.Scope) error {
out.MinAvailable = (*intstr.IntOrString)(unsafe.Pointer(in.MinAvailable))
out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable))
out.UnhealthyPodEvictionPolicy = (*policyv1beta1.UnhealthyPodEvictionPolicyType)(unsafe.Pointer(in.UnhealthyPodEvictionPolicy))
return nil
}
// Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *policyv1beta1.PodDisruptionBudgetSpec, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in, out, s)
}
func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *policyv1beta1.PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods))
out.DisruptionsAllowed = in.DisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus is an autogenerated conversion function.
func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *policyv1beta1.PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error {
return autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s)
}
func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *policyv1beta1.PodDisruptionBudgetStatus, s conversion.Scope) error {
out.ObservedGeneration = in.ObservedGeneration
out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods))
out.DisruptionsAllowed = in.DisruptionsAllowed
out.CurrentHealthy = in.CurrentHealthy
out.DesiredHealthy = in.DesiredHealthy
out.ExpectedPods = in.ExpectedPods
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus is an autogenerated conversion function.
func Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *policyv1beta1.PodDisruptionBudgetStatus, s conversion.Scope) error {
return autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"regexp"
policyapiv1beta1 "k8s.io/api/policy/v1beta1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/policy"
)
var supportedUnhealthyPodEvictionPolicies = sets.NewString(
string(policy.IfHealthyBudget),
string(policy.AlwaysAllow),
)
// PodDisruptionBudgetValidationOptions contains flags that control optional PodDisruptionBudget validation behavior.
type PodDisruptionBudgetValidationOptions struct {
AllowInvalidLabelValueInSelector bool
}
// ValidatePodDisruptionBudget validates a PodDisruptionBudget and returns an ErrorList
// with any errors.
func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget, opts PodDisruptionBudgetValidationOptions) field.ErrorList {
allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, opts, field.NewPath("spec"))
return allErrs
}
// ValidatePodDisruptionBudgetSpec validates a PodDisruptionBudgetSpec and returns an ErrorList
// with any errors.
func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, opts PodDisruptionBudgetValidationOptions, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if spec.MinAvailable != nil && spec.MaxUnavailable != nil {
allErrs = append(allErrs, field.Invalid(fldPath, spec, "minAvailable and maxUnavailable cannot both be set"))
}
if spec.MinAvailable != nil {
allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MinAvailable, fldPath.Child("minAvailable"))...)
allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MinAvailable, fldPath.Child("minAvailable"))...)
}
if spec.MaxUnavailable != nil {
allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
}
labelSelectorValidationOptions := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOptions, fldPath.Child("selector"))...)
if spec.UnhealthyPodEvictionPolicy != nil && !supportedUnhealthyPodEvictionPolicies.Has(string(*spec.UnhealthyPodEvictionPolicy)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("unhealthyPodEvictionPolicy"), *spec.UnhealthyPodEvictionPolicy, supportedUnhealthyPodEvictionPolicies.List()))
}
return allErrs
}
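// exampleValidatePDBSpec is an illustrative sketch, not part of the original
// source: it shows one way a caller might exercise the validation above. The
// policy value "Sometimes" is a made-up, unsupported policy, so the returned
// list contains a single NotSupported error at spec.unhealthyPodEvictionPolicy.
func exampleValidatePDBSpec() field.ErrorList {
	bad := policy.UnhealthyPodEvictionPolicyType("Sometimes")
	spec := policy.PodDisruptionBudgetSpec{UnhealthyPodEvictionPolicy: &bad}
	opts := PodDisruptionBudgetValidationOptions{AllowInvalidLabelValueInSelector: false}
	return ValidatePodDisruptionBudgetSpec(spec, opts, field.NewPath("spec"))
}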
// ValidatePodDisruptionBudgetStatusUpdate validates a PodDisruptionBudgetStatus and returns an ErrorList
// with any errors.
func ValidatePodDisruptionBudgetStatusUpdate(status, oldStatus policy.PodDisruptionBudgetStatus, fldPath *field.Path, apiVersion schema.GroupVersion) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateConditions(status.Conditions, fldPath.Child("conditions"))...)
// Don't run other validations for v1beta1 since we don't want to introduce
// new validations retroactively.
if apiVersion == policyapiv1beta1.SchemeGroupVersion {
return allErrs
}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DisruptionsAllowed), fldPath.Child("disruptionsAllowed"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentHealthy), fldPath.Child("currentHealthy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredHealthy), fldPath.Child("desiredHealthy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ExpectedPods), fldPath.Child("expectedPods"))...)
return allErrs
}
const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]"
// SysctlContainSlashPatternFmt is a regex for matching valid sysctl patterns that may contain slashes.
const SysctlContainSlashPatternFmt string = "(" + apivalidation.SysctlSegmentFmt + "[\\./])*" + sysctlPatternSegmentFmt
var sysctlContainSlashPatternRegexp = regexp.MustCompile("^" + SysctlContainSlashPatternFmt + "$")
// IsValidSysctlPattern checks if name is a valid sysctl pattern.
// i.e. matches sysctlContainSlashPatternRegexp.
// More info:
//
// https://man7.org/linux/man-pages/man8/sysctl.8.html
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html
func IsValidSysctlPattern(name string) bool {
if len(name) > apivalidation.SysctlMaxLength {
return false
}
return sysctlContainSlashPatternRegexp.MatchString(name)
}
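// exampleSysctlPatterns is an illustrative sketch, not part of the original
// source, of IsValidSysctlPattern behavior: only the final segment may carry
// a '*' wildcard, and '.' and '/' both work as separators.
func exampleSysctlPatterns() {
	_ = IsValidSysctlPattern("net.ipv4.conf.*")     // true: trailing wildcard segment
	_ = IsValidSysctlPattern("net/ipv4/ip_forward") // true: slash-separated form
	_ = IsValidSysctlPattern("net..ip_forward")     // false: empty segment
}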
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package policy
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Eviction) DeepCopyInto(out *Eviction) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.DeleteOptions != nil {
in, out := &in.DeleteOptions, &out.DeleteOptions
*out = new(v1.DeleteOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction.
func (in *Eviction) DeepCopy() *Eviction {
if in == nil {
return nil
}
out := new(Eviction)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Eviction) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget.
func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget {
if in == nil {
return nil
}
out := new(PodDisruptionBudget)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodDisruptionBudget, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList.
func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
*out = *in
if in.MinAvailable != nil {
in, out := &in.MinAvailable, &out.MinAvailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnhealthyPodEvictionPolicy != nil {
in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy
*out = new(UnhealthyPodEvictionPolicyType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec.
func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) {
*out = *in
if in.DisruptedPods != nil {
in, out := &in.DisruptedPods, &out.DisruptedPods
*out = make(map[string]v1.Time, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus.
func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetStatus)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/rbac"
)
// Funcs returns the fuzzer functions for the rbac api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(r *rbac.RoleRef, c randfill.Continue) {
c.FillNoCustom(r) // fuzz self without calling this function again
// match defaulter
if len(r.APIGroup) == 0 {
r.APIGroup = rbac.GroupName
}
},
func(r *rbac.Subject, c randfill.Continue) {
switch c.Int31n(3) {
case 0:
r.Kind = rbac.ServiceAccountKind
r.APIGroup = ""
c.FillNoCustom(&r.Name)
c.FillNoCustom(&r.Namespace)
case 1:
r.Kind = rbac.UserKind
r.APIGroup = rbac.GroupName
c.FillNoCustom(&r.Name)
// user "*" won't round trip because we convert it to the system:authenticated group. try again.
for r.Name == "*" {
c.FillNoCustom(&r.Name)
}
case 2:
r.Kind = rbac.GroupKind
r.APIGroup = rbac.GroupName
c.FillNoCustom(&r.Name)
}
},
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// ResourceMatches returns true if the requested resource (including any subresource) matches one of the rule's Resources.
func ResourceMatches(rule *PolicyRule, combinedRequestedResource, requestedSubresource string) bool {
for _, ruleResource := range rule.Resources {
// if everything is allowed, we match
if ruleResource == ResourceAll {
return true
}
// if we have an exact match, we match
if ruleResource == combinedRequestedResource {
return true
}
// We can also match a */subresource.
// if there isn't a subresource, then continue
if len(requestedSubresource) == 0 {
continue
}
// a rule of the form */subresource matches any resource with the requested subresource
if len(ruleResource) == len(requestedSubresource)+2 &&
strings.HasPrefix(ruleResource, "*/") &&
strings.HasSuffix(ruleResource, requestedSubresource) {
return true
}
}
return false
}
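// exampleResourceMatches is an illustrative sketch, not part of the original
// source, of the wildcard semantics above: a "*/status" rule matches the
// status subresource of any resource but never a bare resource request.
func exampleResourceMatches() {
	rule := &PolicyRule{Resources: []string{"*/status"}}
	_ = ResourceMatches(rule, "pods/status", "status") // true: */subresource form
	_ = ResourceMatches(rule, "pods", "")              // false: no subresource requested
}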
// SubjectsStrings returns users, groups, service accounts, and other (unknown) subjects, for display purposes.
func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) {
users := []string{}
groups := []string{}
sas := []string{}
others := []string{}
for _, subject := range subjects {
switch subject.Kind {
case ServiceAccountKind:
sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name))
case UserKind:
users = append(users, subject.Name)
case GroupKind:
groups = append(groups, subject.Name)
default:
others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name))
}
}
return users, groups, sas, others
}
func (r PolicyRule) String() string {
return "PolicyRule" + r.CompactString()
}
// CompactString exposes a compact string representation for use in escalation error messages
func (r PolicyRule) CompactString() string {
formatStringParts := []string{}
formatArgs := []interface{}{}
if len(r.APIGroups) > 0 {
formatStringParts = append(formatStringParts, "APIGroups:%q")
formatArgs = append(formatArgs, r.APIGroups)
}
if len(r.Resources) > 0 {
formatStringParts = append(formatStringParts, "Resources:%q")
formatArgs = append(formatArgs, r.Resources)
}
if len(r.NonResourceURLs) > 0 {
formatStringParts = append(formatStringParts, "NonResourceURLs:%q")
formatArgs = append(formatArgs, r.NonResourceURLs)
}
if len(r.ResourceNames) > 0 {
formatStringParts = append(formatStringParts, "ResourceNames:%q")
formatArgs = append(formatArgs, r.ResourceNames)
}
if len(r.Verbs) > 0 {
formatStringParts = append(formatStringParts, "Verbs:%q")
formatArgs = append(formatArgs, r.Verbs)
}
formatString := "{" + strings.Join(formatStringParts, ", ") + "}"
return fmt.Sprintf(formatString, formatArgs...)
}
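// exampleCompactString is an illustrative sketch, not part of the original
// source: only populated fields appear in the compact form, so the rule below
// renders as {APIGroups:[""], Resources:["pods"], Verbs:["get" "list"]}.
func exampleCompactString() string {
	r := PolicyRule{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get", "list"}}
	return r.CompactString()
}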
// PolicyRuleBuilder lets us attach methods. A no-no for API types.
// We use it to construct rules in code. It's more compact than trying to write them
// out in a literal and allows us to perform some basic checking during construction
// +k8s:deepcopy-gen=false
type PolicyRuleBuilder struct {
PolicyRule PolicyRule
}
// NewRule returns a new PolicyRuleBuilder seeded with the given verbs.
func NewRule(verbs ...string) *PolicyRuleBuilder {
return &PolicyRuleBuilder{
PolicyRule: PolicyRule{Verbs: sets.NewString(verbs...).List()},
}
}
// Groups combines the PolicyRule.APIGroups and input groups.
func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
r.PolicyRule.APIGroups = combine(r.PolicyRule.APIGroups, groups)
return r
}
// Resources combines the PolicyRule.Resources and input resources.
func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
r.PolicyRule.Resources = combine(r.PolicyRule.Resources, resources)
return r
}
// Names combines the PolicyRule.ResourceNames and input names.
func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
r.PolicyRule.ResourceNames = combine(r.PolicyRule.ResourceNames, names)
return r
}
// URLs combines the PolicyRule.NonResourceURLs and input urls.
func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder {
r.PolicyRule.NonResourceURLs = combine(r.PolicyRule.NonResourceURLs, urls)
return r
}
// RuleOrDie calls Rule and panics if there is an error.
func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule {
ret, err := r.Rule()
if err != nil {
panic(err)
}
return ret
}
func combine(s1, s2 []string) []string {
s := sets.NewString(s1...)
s.Insert(s2...)
return s.List()
}
// Rule validates the built rule and returns it, or an error if the rule is malformed.
func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) {
if len(r.PolicyRule.Verbs) == 0 {
return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
}
switch {
case len(r.PolicyRule.NonResourceURLs) > 0:
if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
}
case len(r.PolicyRule.Resources) > 0:
// a resource rule cannot reach here with nonResourceURLs set; the previous case would have matched
if len(r.PolicyRule.APIGroups) == 0 {
// this is a common bug
return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
}
// if resource names are set, then the verb must not be list, watch, create, or deletecollection
// since verbs are largely opaque, we don't want to accidentally prevent things like "impersonate", so
// we blacklist common mistakes rather than whitelist acceptable options.
if len(r.PolicyRule.ResourceNames) != 0 {
illegalVerbs := []string{}
for _, verb := range r.PolicyRule.Verbs {
switch verb {
case "list", "watch", "create", "deletecollection":
illegalVerbs = append(illegalVerbs, verb)
}
}
if len(illegalVerbs) > 0 {
return PolicyRule{}, fmt.Errorf("verbs %v do not have names available: %#v", illegalVerbs, r.PolicyRule)
}
}
default:
return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
}
return r.PolicyRule, nil
}
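// exampleNewRule is an illustrative sketch, not part of the original source,
// of the builder in use: a read-only rule on pods in the core ("") API group.
// Rule() succeeds because verbs, apiGroups, and resources are all present.
func exampleNewRule() (PolicyRule, error) {
	return NewRule("get", "list", "watch").Groups("").Resources("pods").Rule()
}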
// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
// We use it to construct bindings in code. It's more compact than trying to write them
// out in a literal.
// +k8s:deepcopy-gen=false
type ClusterRoleBindingBuilder struct {
ClusterRoleBinding ClusterRoleBinding
}
// NewClusterBinding creates a ClusterRoleBinding builder that can be used
// to define the subjects of a cluster role binding. At least one of
// the `Groups`, `Users` or `SAs` methods must be called before
// calling the `Binding*` methods.
func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder {
return &ClusterRoleBindingBuilder{
ClusterRoleBinding: ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName},
RoleRef: RoleRef{
APIGroup: GroupName,
Kind: "ClusterRole",
Name: clusterRoleName,
},
},
}
}
// Groups adds the specified groups as the subjects of the ClusterRoleBinding.
func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder {
for _, group := range groups {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group})
}
return r
}
// Users adds the specified users as the subjects of the ClusterRoleBinding.
func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder {
for _, user := range users {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user})
}
return r
}
// SAs adds the specified service accounts as the subjects of the ClusterRoleBinding.
func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
// BindingOrDie calls the binding method and panics if there is an error.
func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
// Binding builds and returns the ClusterRoleBinding API object from the builder
// object.
func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) {
if len(r.ClusterRoleBinding.Subjects) == 0 {
return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding)
}
return r.ClusterRoleBinding, nil
}
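// exampleClusterBinding is an illustrative sketch, not part of the original
// source: it binds the "system:masters" group to a "cluster-admin"
// ClusterRole. BindingOrDie is safe here because a subject was added.
func exampleClusterBinding() ClusterRoleBinding {
	return NewClusterBinding("cluster-admin").Groups("system:masters").BindingOrDie()
}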
// RoleBindingBuilder lets us attach methods. It is similar to
// ClusterRoleBindingBuilder above.
// +k8s:deepcopy-gen=false
type RoleBindingBuilder struct {
RoleBinding RoleBinding
}
// NewRoleBinding creates a RoleBinding builder that can be used
// to define the subjects of a role binding. At least one of
// the `Groups`, `Users` or `SAs` methods must be called before
// calling the `Binding*` methods.
func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder {
return &RoleBindingBuilder{
RoleBinding: RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
},
RoleRef: RoleRef{
APIGroup: GroupName,
Kind: "Role",
Name: roleName,
},
},
}
}
// NewRoleBindingForClusterRole creates a RoleBinding builder that can be used
// to define the subjects of a cluster role binding. At least one of
// the `Groups`, `Users` or `SAs` methods must be called before
// calling the `Binding*` methods.
func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder {
return &RoleBindingBuilder{
RoleBinding: RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
},
RoleRef: RoleRef{
APIGroup: GroupName,
Kind: "ClusterRole",
Name: roleName,
},
},
}
}
// Groups adds the specified groups as the subjects of the RoleBinding.
func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder {
for _, group := range groups {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group})
}
return r
}
// Users adds the specified users as the subjects of the RoleBinding.
func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder {
for _, user := range users {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user})
}
return r
}
// SAs adds the specified service accounts as the subjects of the
// RoleBinding.
func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
// BindingOrDie calls the binding method and panics if there is an error.
func (r *RoleBindingBuilder) BindingOrDie() RoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
// Binding builds and returns the RoleBinding API object from the builder
// object.
func (r *RoleBindingBuilder) Binding() (RoleBinding, error) {
if len(r.RoleBinding.Subjects) == 0 {
return RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding)
}
return r.RoleBinding, nil
}
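// exampleRoleBinding is an illustrative sketch, not part of the original
// source; the role, namespace, and service account names are made up. It
// grants the namespaced Role "viewer" to service account "demo/reader".
func exampleRoleBinding() RoleBinding {
	return NewRoleBinding("viewer", "demo").SAs("demo", "reader").BindingOrDie()
}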
// SortableRuleSlice is the slice of PolicyRule.
type SortableRuleSlice []PolicyRule
func (s SortableRuleSlice) Len() int { return len(s) }
func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortableRuleSlice) Less(i, j int) bool {
return strings.Compare(s[i].String(), s[j].String()) < 0
}
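// Illustrative usage sketch, not part of the original source. SortableRuleSlice
// is meant to be handed to the standard library's sort package (not imported
// by this file), which orders rules by their String() form:
//
//	sort.Sort(SortableRuleSlice(rules))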
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the rbac API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
"k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(rbac.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
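// exampleInstall is an illustrative sketch, not part of the original source:
// the same Install helper can register the rbac group into a freshly built
// scheme instead of the process-wide legacyscheme.Scheme used by init.
func exampleInstall() *runtime.Scheme {
	scheme := runtime.NewScheme()
	Install(scheme)
	return scheme
}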
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the name of this API group.
const GroupName = "rbac.authorization.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// SchemeBuilder collects the functions that register this group's types with a scheme; AddToScheme applies them.
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Role{},
&RoleBinding{},
&RoleBindingList{},
&RoleList{},
&ClusterRole{},
&ClusterRoleBinding{},
&ClusterRoleBindingList{},
&ClusterRoleList{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_ClusterRoleBinding defaults RoleRef.APIGroup to the rbac group name when unset.
func SetDefaults_ClusterRoleBinding(obj *rbacv1.ClusterRoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
// SetDefaults_RoleBinding defaults RoleRef.APIGroup to the rbac group name when unset.
func SetDefaults_RoleBinding(obj *rbacv1.RoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
// SetDefaults_Subject fills in APIGroup based on the subject's Kind when unset; service accounts stay in the core ("") group.
func SetDefaults_Subject(obj *rbacv1.Subject) {
if len(obj.APIGroup) == 0 {
switch obj.Kind {
case rbacv1.ServiceAccountKind:
obj.APIGroup = ""
case rbacv1.UserKind:
obj.APIGroup = GroupName
case rbacv1.GroupKind:
obj.APIGroup = GroupName
}
}
}
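// exampleDefaultSubject is an illustrative sketch, not part of the original
// source; the subject name "alice" is made up. An empty APIGroup is filled
// in from the Kind, so a User subject lands in the rbac group.
func exampleDefaultSubject() rbacv1.Subject {
	s := rbacv1.Subject{Kind: rbacv1.UserKind, Name: "alice"}
	SetDefaults_Subject(&s) // s.APIGroup is now "rbac.authorization.k8s.io"
	return s
}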
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"strings"
rbacv1 "k8s.io/api/rbac/v1"
)
// VerbMatches returns true if the requested verb matches one of the rule's verbs, honoring the VerbAll wildcard.
func VerbMatches(rule *rbacv1.PolicyRule, requestedVerb string) bool {
for _, ruleVerb := range rule.Verbs {
if ruleVerb == rbacv1.VerbAll {
return true
}
if ruleVerb == requestedVerb {
return true
}
}
return false
}
// APIGroupMatches returns true if the requested API group matches one of the rule's groups, honoring the APIGroupAll wildcard.
func APIGroupMatches(rule *rbacv1.PolicyRule, requestedGroup string) bool {
for _, ruleGroup := range rule.APIGroups {
if ruleGroup == rbacv1.APIGroupAll {
return true
}
if ruleGroup == requestedGroup {
return true
}
}
return false
}
// ResourceMatches returns true if the requested resource (including any subresource) matches one of the rule's Resources, honoring the ResourceAll and */subresource wildcards.
func ResourceMatches(rule *rbacv1.PolicyRule, combinedRequestedResource, requestedSubresource string) bool {
for _, ruleResource := range rule.Resources {
// if everything is allowed, we match
if ruleResource == rbacv1.ResourceAll {
return true
}
// if we have an exact match, we match
if ruleResource == combinedRequestedResource {
return true
}
// We can also match a */subresource.
// if there isn't a subresource, then continue
if len(requestedSubresource) == 0 {
continue
}
// a rule of the form */subresource matches any resource with the requested subresource
if len(ruleResource) == len(requestedSubresource)+2 &&
strings.HasPrefix(ruleResource, "*/") &&
strings.HasSuffix(ruleResource, requestedSubresource) {
return true
}
}
return false
}
// ResourceNameMatches returns true if the rule restricts no resource names or the requested name is listed.
func ResourceNameMatches(rule *rbacv1.PolicyRule, requestedName string) bool {
if len(rule.ResourceNames) == 0 {
return true
}
for _, ruleName := range rule.ResourceNames {
if ruleName == requestedName {
return true
}
}
return false
}
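// exampleRuleAllows is an illustrative sketch, not part of the original
// source; the request attributes ("get" on pods/status named "my-pod" in the
// core group) are made up. It composes the helpers the way an authorizer
// might to decide whether a single rule admits a resource request.
func exampleRuleAllows(rule *rbacv1.PolicyRule) bool {
	return VerbMatches(rule, "get") &&
		APIGroupMatches(rule, "") &&
		ResourceMatches(rule, "pods/status", "status") &&
		ResourceNameMatches(rule, "my-pod")
}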
// NonResourceURLMatches returns true if the requested URL matches one of the rule's URLs, honoring the NonResourceAll wildcard and trailing-* prefix patterns.
func NonResourceURLMatches(rule *rbacv1.PolicyRule, requestedURL string) bool {
for _, ruleURL := range rule.NonResourceURLs {
if ruleURL == rbacv1.NonResourceAll {
return true
}
if ruleURL == requestedURL {
return true
}
if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) {
return true
}
}
return false
}
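// exampleNonResourceURLs is an illustrative sketch, not part of the original
// source, of the trailing-wildcard semantics: "/healthz/*" matches any path
// under /healthz/ but nothing else.
func exampleNonResourceURLs() {
	rule := &rbacv1.PolicyRule{NonResourceURLs: []string{"/healthz/*"}}
	_ = NonResourceURLMatches(rule, "/healthz/etcd") // true: prefix match via "*"
	_ = NonResourceURLMatches(rule, "/metrics")      // false
}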
// CompactString exposes a compact string representation for use in escalation error messages
func CompactString(r rbacv1.PolicyRule) string {
formatStringParts := []string{}
formatArgs := []interface{}{}
if len(r.APIGroups) > 0 {
formatStringParts = append(formatStringParts, "APIGroups:%q")
formatArgs = append(formatArgs, r.APIGroups)
}
if len(r.Resources) > 0 {
formatStringParts = append(formatStringParts, "Resources:%q")
formatArgs = append(formatArgs, r.Resources)
}
if len(r.NonResourceURLs) > 0 {
formatStringParts = append(formatStringParts, "NonResourceURLs:%q")
formatArgs = append(formatArgs, r.NonResourceURLs)
}
if len(r.ResourceNames) > 0 {
formatStringParts = append(formatStringParts, "ResourceNames:%q")
formatArgs = append(formatArgs, r.ResourceNames)
}
if len(r.Verbs) > 0 {
formatStringParts = append(formatStringParts, "Verbs:%q")
formatArgs = append(formatArgs, r.Verbs)
}
formatString := "{" + strings.Join(formatStringParts, ", ") + "}"
return fmt.Sprintf(formatString, formatArgs...)
}
// SortableRuleSlice is a slice of rbacv1.PolicyRule that sorts by the rules' String() form.
type SortableRuleSlice []rbacv1.PolicyRule
func (s SortableRuleSlice) Len() int { return len(s) }
func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortableRuleSlice) Less(i, j int) bool {
return strings.Compare(s[i].String(), s[j].String()) < 0
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
rbacv1 "k8s.io/api/rbac/v1"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen=false
// PolicyRuleBuilder lets us attach methods. A no-no for API types.
// We use it to construct rules in code. It's more compact than trying to write them
// out in a literal and allows us to perform some basic checking during construction
type PolicyRuleBuilder struct {
PolicyRule rbacv1.PolicyRule `protobuf:"bytes,1,opt,name=policyRule"`
}
// NewRule returns a new PolicyRuleBuilder seeded with the given verbs.
func NewRule(verbs ...string) *PolicyRuleBuilder {
return &PolicyRuleBuilder{
PolicyRule: rbacv1.PolicyRule{Verbs: verbs},
}
}
func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...)
return r
}
func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...)
return r
}
func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...)
return r
}
func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder {
r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...)
return r
}
func (r *PolicyRuleBuilder) RuleOrDie() rbacv1.PolicyRule {
ret, err := r.Rule()
if err != nil {
panic(err)
}
return ret
}
func (r *PolicyRuleBuilder) Rule() (rbacv1.PolicyRule, error) {
if len(r.PolicyRule.Verbs) == 0 {
return rbacv1.PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
}
switch {
case len(r.PolicyRule.NonResourceURLs) > 0:
if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
return rbacv1.PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
}
case len(r.PolicyRule.Resources) > 0:
if len(r.PolicyRule.NonResourceURLs) != 0 {
return rbacv1.PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule)
}
if len(r.PolicyRule.APIGroups) == 0 {
// this is a common bug
return rbacv1.PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
}
default:
return rbacv1.PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
}
sort.Strings(r.PolicyRule.Resources)
sort.Strings(r.PolicyRule.ResourceNames)
sort.Strings(r.PolicyRule.APIGroups)
sort.Strings(r.PolicyRule.NonResourceURLs)
sort.Strings(r.PolicyRule.Verbs)
return r.PolicyRule, nil
}
// +k8s:deepcopy-gen=false
// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
// We use it to construct bindings in code. It's more compact than trying to write them
// out in a literal.
type ClusterRoleBindingBuilder struct {
ClusterRoleBinding rbacv1.ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"`
}
func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder {
return &ClusterRoleBindingBuilder{
ClusterRoleBinding: rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName},
RoleRef: rbacv1.RoleRef{
APIGroup: GroupName,
Kind: "ClusterRole",
Name: clusterRoleName,
},
},
}
}
func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder {
for _, group := range groups {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: group})
}
return r
}
func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder {
for _, user := range users {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: user})
}
return r
}
func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
func (r *ClusterRoleBindingBuilder) BindingOrDie() rbacv1.ClusterRoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
func (r *ClusterRoleBindingBuilder) Binding() (rbacv1.ClusterRoleBinding, error) {
if len(r.ClusterRoleBinding.Subjects) == 0 {
return rbacv1.ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding)
}
return r.ClusterRoleBinding, nil
}
// +k8s:deepcopy-gen=false
// RoleBindingBuilder lets us attach methods. It is similar to
// ClusterRoleBindingBuilder above.
type RoleBindingBuilder struct {
RoleBinding rbacv1.RoleBinding
}
// NewRoleBinding creates a RoleBinding builder that can be used
// to define the subjects of a role binding. At least one of
// the `Groups`, `Users` or `SAs` methods must be called before
// calling the `Binding*` methods.
func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder {
return &RoleBindingBuilder{
RoleBinding: rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
},
RoleRef: rbacv1.RoleRef{
APIGroup: GroupName,
Kind: "Role",
Name: roleName,
},
},
}
}
// Groups adds the specified groups as the subjects of the RoleBinding.
func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder {
for _, group := range groups {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, APIGroup: GroupName, Name: group})
}
return r
}
// Users adds the specified users as the subjects of the RoleBinding.
func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder {
for _, user := range users {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: GroupName, Name: user})
}
return r
}
// SAs adds the specified service accounts as the subjects of the
// RoleBinding.
func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
// BindingOrDie calls the binding method and panics if there is an error.
func (r *RoleBindingBuilder) BindingOrDie() rbacv1.RoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
// Binding builds and returns the RoleBinding API object from the builder
// object.
func (r *RoleBindingBuilder) Binding() (rbacv1.RoleBinding, error) {
if len(r.RoleBinding.Subjects) == 0 {
return rbacv1.RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding)
}
return r.RoleBinding, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the name of this API group.
const GroupName = "rbac.authorization.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &rbacv1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*rbacv1.AggregationRule)(nil), (*rbac.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AggregationRule_To_rbac_AggregationRule(a.(*rbacv1.AggregationRule), b.(*rbac.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.AggregationRule)(nil), (*rbacv1.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_AggregationRule_To_v1_AggregationRule(a.(*rbac.AggregationRule), b.(*rbacv1.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.ClusterRole)(nil), (*rbac.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterRole_To_rbac_ClusterRole(a.(*rbacv1.ClusterRole), b.(*rbac.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRole)(nil), (*rbacv1.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRole_To_v1_ClusterRole(a.(*rbac.ClusterRole), b.(*rbacv1.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.ClusterRoleBinding)(nil), (*rbac.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(a.(*rbacv1.ClusterRoleBinding), b.(*rbac.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBinding)(nil), (*rbacv1.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBinding_To_v1_ClusterRoleBinding(a.(*rbac.ClusterRoleBinding), b.(*rbacv1.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.ClusterRoleBindingList)(nil), (*rbac.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(a.(*rbacv1.ClusterRoleBindingList), b.(*rbac.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBindingList)(nil), (*rbacv1.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList(a.(*rbac.ClusterRoleBindingList), b.(*rbacv1.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.ClusterRoleList)(nil), (*rbac.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterRoleList_To_rbac_ClusterRoleList(a.(*rbacv1.ClusterRoleList), b.(*rbac.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleList)(nil), (*rbacv1.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleList_To_v1_ClusterRoleList(a.(*rbac.ClusterRoleList), b.(*rbacv1.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.PolicyRule)(nil), (*rbac.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PolicyRule_To_rbac_PolicyRule(a.(*rbacv1.PolicyRule), b.(*rbac.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.PolicyRule)(nil), (*rbacv1.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_PolicyRule_To_v1_PolicyRule(a.(*rbac.PolicyRule), b.(*rbacv1.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.Role)(nil), (*rbac.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Role_To_rbac_Role(a.(*rbacv1.Role), b.(*rbac.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.Role)(nil), (*rbacv1.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Role_To_v1_Role(a.(*rbac.Role), b.(*rbacv1.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.RoleBinding)(nil), (*rbac.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RoleBinding_To_rbac_RoleBinding(a.(*rbacv1.RoleBinding), b.(*rbac.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBinding)(nil), (*rbacv1.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBinding_To_v1_RoleBinding(a.(*rbac.RoleBinding), b.(*rbacv1.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.RoleBindingList)(nil), (*rbac.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RoleBindingList_To_rbac_RoleBindingList(a.(*rbacv1.RoleBindingList), b.(*rbac.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBindingList)(nil), (*rbacv1.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBindingList_To_v1_RoleBindingList(a.(*rbac.RoleBindingList), b.(*rbacv1.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.RoleList)(nil), (*rbac.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RoleList_To_rbac_RoleList(a.(*rbacv1.RoleList), b.(*rbac.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleList)(nil), (*rbacv1.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleList_To_v1_RoleList(a.(*rbac.RoleList), b.(*rbacv1.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.RoleRef)(nil), (*rbac.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RoleRef_To_rbac_RoleRef(a.(*rbacv1.RoleRef), b.(*rbac.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleRef)(nil), (*rbacv1.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleRef_To_v1_RoleRef(a.(*rbac.RoleRef), b.(*rbacv1.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1.Subject)(nil), (*rbac.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Subject_To_rbac_Subject(a.(*rbacv1.Subject), b.(*rbac.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.Subject)(nil), (*rbacv1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Subject_To_v1_Subject(a.(*rbac.Subject), b.(*rbacv1.Subject), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_AggregationRule_To_rbac_AggregationRule(in *rbacv1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]metav1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_v1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function.
func Convert_v1_AggregationRule_To_rbac_AggregationRule(in *rbacv1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
return autoConvert_v1_AggregationRule_To_rbac_AggregationRule(in, out, s)
}
func autoConvert_rbac_AggregationRule_To_v1_AggregationRule(in *rbac.AggregationRule, out *rbacv1.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]metav1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_rbac_AggregationRule_To_v1_AggregationRule is an autogenerated conversion function.
func Convert_rbac_AggregationRule_To_v1_AggregationRule(in *rbac.AggregationRule, out *rbacv1.AggregationRule, s conversion.Scope) error {
return autoConvert_rbac_AggregationRule_To_v1_AggregationRule(in, out, s)
}
func autoConvert_v1_ClusterRole_To_rbac_ClusterRole(in *rbacv1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_v1_ClusterRole_To_rbac_ClusterRole is an autogenerated conversion function.
func Convert_v1_ClusterRole_To_rbac_ClusterRole(in *rbacv1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
return autoConvert_v1_ClusterRole_To_rbac_ClusterRole(in, out, s)
}
func autoConvert_rbac_ClusterRole_To_v1_ClusterRole(in *rbac.ClusterRole, out *rbacv1.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbacv1.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_rbac_ClusterRole_To_v1_ClusterRole is an autogenerated conversion function.
func Convert_rbac_ClusterRole_To_v1_ClusterRole(in *rbac.ClusterRole, out *rbacv1.ClusterRole, s conversion.Scope) error {
return autoConvert_rbac_ClusterRole_To_v1_ClusterRole(in, out, s)
}
func autoConvert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding is an autogenerated conversion function.
func Convert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s)
}
func autoConvert_rbac_ClusterRoleBinding_To_v1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbacv1.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_ClusterRoleBinding_To_v1_ClusterRoleBinding is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBinding_To_v1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBinding_To_v1_ClusterRoleBinding(in, out, s)
}
func autoConvert_v1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_v1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_v1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s)
}
func autoConvert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1.ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList(in, out, s)
}
func autoConvert_v1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ClusterRoleList_To_rbac_ClusterRoleList is an autogenerated conversion function.
func Convert_v1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
return autoConvert_v1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s)
}
func autoConvert_rbac_ClusterRoleList_To_v1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_ClusterRoleList_To_v1_ClusterRoleList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleList_To_v1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1.ClusterRoleList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleList_To_v1_ClusterRoleList(in, out, s)
}
func autoConvert_v1_PolicyRule_To_rbac_PolicyRule(in *rbacv1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1_PolicyRule_To_rbac_PolicyRule is an autogenerated conversion function.
func Convert_v1_PolicyRule_To_rbac_PolicyRule(in *rbacv1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
return autoConvert_v1_PolicyRule_To_rbac_PolicyRule(in, out, s)
}
func autoConvert_rbac_PolicyRule_To_v1_PolicyRule(in *rbac.PolicyRule, out *rbacv1.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_rbac_PolicyRule_To_v1_PolicyRule is an autogenerated conversion function.
func Convert_rbac_PolicyRule_To_v1_PolicyRule(in *rbac.PolicyRule, out *rbacv1.PolicyRule, s conversion.Scope) error {
return autoConvert_rbac_PolicyRule_To_v1_PolicyRule(in, out, s)
}
func autoConvert_v1_Role_To_rbac_Role(in *rbacv1.Role, out *rbac.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_Role_To_rbac_Role is an autogenerated conversion function.
func Convert_v1_Role_To_rbac_Role(in *rbacv1.Role, out *rbac.Role, s conversion.Scope) error {
return autoConvert_v1_Role_To_rbac_Role(in, out, s)
}
func autoConvert_rbac_Role_To_v1_Role(in *rbac.Role, out *rbacv1.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_rbac_Role_To_v1_Role is an autogenerated conversion function.
func Convert_rbac_Role_To_v1_Role(in *rbac.Role, out *rbacv1.Role, s conversion.Scope) error {
return autoConvert_rbac_Role_To_v1_Role(in, out, s)
}
func autoConvert_v1_RoleBinding_To_rbac_RoleBinding(in *rbacv1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1_RoleBinding_To_rbac_RoleBinding is an autogenerated conversion function.
func Convert_v1_RoleBinding_To_rbac_RoleBinding(in *rbacv1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
return autoConvert_v1_RoleBinding_To_rbac_RoleBinding(in, out, s)
}
func autoConvert_rbac_RoleBinding_To_v1_RoleBinding(in *rbac.RoleBinding, out *rbacv1.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbacv1.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_RoleBinding_To_v1_RoleBinding is an autogenerated conversion function.
func Convert_rbac_RoleBinding_To_v1_RoleBinding(in *rbac.RoleBinding, out *rbacv1.RoleBinding, s conversion.Scope) error {
return autoConvert_rbac_RoleBinding_To_v1_RoleBinding(in, out, s)
}
func autoConvert_v1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_RoleBindingList_To_rbac_RoleBindingList is an autogenerated conversion function.
func Convert_v1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
return autoConvert_v1_RoleBindingList_To_rbac_RoleBindingList(in, out, s)
}
func autoConvert_rbac_RoleBindingList_To_v1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1.RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_RoleBindingList_To_v1_RoleBindingList is an autogenerated conversion function.
func Convert_rbac_RoleBindingList_To_v1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1.RoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_RoleBindingList_To_v1_RoleBindingList(in, out, s)
}
func autoConvert_v1_RoleList_To_rbac_RoleList(in *rbacv1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_RoleList_To_rbac_RoleList is an autogenerated conversion function.
func Convert_v1_RoleList_To_rbac_RoleList(in *rbacv1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
return autoConvert_v1_RoleList_To_rbac_RoleList(in, out, s)
}
func autoConvert_rbac_RoleList_To_v1_RoleList(in *rbac.RoleList, out *rbacv1.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_RoleList_To_v1_RoleList is an autogenerated conversion function.
func Convert_rbac_RoleList_To_v1_RoleList(in *rbac.RoleList, out *rbacv1.RoleList, s conversion.Scope) error {
return autoConvert_rbac_RoleList_To_v1_RoleList(in, out, s)
}
func autoConvert_v1_RoleRef_To_rbac_RoleRef(in *rbacv1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_v1_RoleRef_To_rbac_RoleRef is an autogenerated conversion function.
func Convert_v1_RoleRef_To_rbac_RoleRef(in *rbacv1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
return autoConvert_v1_RoleRef_To_rbac_RoleRef(in, out, s)
}
func autoConvert_rbac_RoleRef_To_v1_RoleRef(in *rbac.RoleRef, out *rbacv1.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_rbac_RoleRef_To_v1_RoleRef is an autogenerated conversion function.
func Convert_rbac_RoleRef_To_v1_RoleRef(in *rbac.RoleRef, out *rbacv1.RoleRef, s conversion.Scope) error {
return autoConvert_rbac_RoleRef_To_v1_RoleRef(in, out, s)
}
func autoConvert_v1_Subject_To_rbac_Subject(in *rbacv1.Subject, out *rbac.Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIGroup = in.APIGroup
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_v1_Subject_To_rbac_Subject is an autogenerated conversion function.
func Convert_v1_Subject_To_rbac_Subject(in *rbacv1.Subject, out *rbac.Subject, s conversion.Scope) error {
return autoConvert_v1_Subject_To_rbac_Subject(in, out, s)
}
func autoConvert_rbac_Subject_To_v1_Subject(in *rbac.Subject, out *rbacv1.Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIGroup = in.APIGroup
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_rbac_Subject_To_v1_Subject is an autogenerated conversion function.
func Convert_rbac_Subject_To_v1_Subject(in *rbac.Subject, out *rbacv1.Subject, s conversion.Scope) error {
return autoConvert_rbac_Subject_To_v1_Subject(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in SortableRuleSlice) DeepCopyInto(out *SortableRuleSlice) {
{
in := &in
*out = make(SortableRuleSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortableRuleSlice.
func (in SortableRuleSlice) DeepCopy() SortableRuleSlice {
if in == nil {
return nil
}
out := new(SortableRuleSlice)
in.DeepCopyInto(out)
return *out
}
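// Illustrative sketch, not part of the generated file: DeepCopy yields a
// slice whose nested fields are independent of the receiver. This assumes
// SortableRuleSlice is declared elsewhere in this package as a slice of the
// rbac v1 PolicyRule type; the function name below is hypothetical.
func exampleSortableRuleSliceDeepCopy() {
	original := SortableRuleSlice{{Verbs: []string{"get"}}}
	copied := original.DeepCopy()
	copied[0].Verbs[0] = "list" // mutating the copy leaves original unchanged
}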
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
rbacv1 "k8s.io/api/rbac/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&rbacv1.ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*rbacv1.ClusterRoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1.ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*rbacv1.ClusterRoleBindingList)) })
scheme.AddTypeDefaultingFunc(&rbacv1.RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*rbacv1.RoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1.RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*rbacv1.RoleBindingList)) })
return nil
}
func SetObjectDefaults_ClusterRoleBinding(in *rbacv1.ClusterRoleBinding) {
SetDefaults_ClusterRoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_ClusterRoleBindingList(in *rbacv1.ClusterRoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ClusterRoleBinding(a)
}
}
func SetObjectDefaults_RoleBinding(in *rbacv1.RoleBinding) {
SetDefaults_RoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_RoleBindingList(in *rbacv1.RoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_RoleBinding(a)
}
}
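// Illustrative sketch, not part of the generated file: once RegisterDefaults
// has run, scheme.Default applies every registered defaulter to an object,
// e.g. filling in RoleRef.APIGroup on a RoleBinding. The function name below
// is hypothetical.
func exampleApplyDefaults() {
	scheme := runtime.NewScheme()
	if err := RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	rb := &rbacv1.RoleBinding{}
	scheme.Default(rb)
	// rb.RoleRef.APIGroup is now "rbac.authorization.k8s.io"
}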
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
api "k8s.io/kubernetes/pkg/apis/rbac"
)
// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated,
// but we don't want a client library (which must include types) depending on a server library
const allAuthenticated = "system:authenticated"
func Convert_v1alpha1_Subject_To_rbac_Subject(in *rbacv1alpha1.Subject, out *api.Subject, s conversion.Scope) error {
if err := autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s); err != nil {
return err
}
// specifically set the APIGroup for the three subjects recognized in v1alpha1
switch {
case in.Kind == rbacv1alpha1.ServiceAccountKind:
out.APIGroup = ""
case in.Kind == rbacv1alpha1.UserKind:
out.APIGroup = GroupName
case in.Kind == rbacv1alpha1.GroupKind:
out.APIGroup = GroupName
default:
// For unrecognized kinds, use the group portion of the APIVersion if we can get it
if gv, err := schema.ParseGroupVersion(in.APIVersion); err == nil {
out.APIGroup = gv.Group
}
}
// A user named "*" in v1alpha1 will only match all authenticated users.
// This is only for compatibility with old RBAC bindings.
// Special treatment for "*" should not be included in v1beta1.
if out.Kind == rbacv1alpha1.UserKind && out.APIGroup == GroupName && out.Name == "*" {
out.Kind = rbacv1alpha1.GroupKind
out.Name = allAuthenticated
}
return nil
}
func Convert_rbac_Subject_To_v1alpha1_Subject(in *api.Subject, out *rbacv1alpha1.Subject, s conversion.Scope) error {
if err := autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s); err != nil {
return err
}
switch {
case in.Kind == rbacv1alpha1.ServiceAccountKind && in.APIGroup == "":
// Make service accounts v1
out.APIVersion = "v1"
case in.Kind == rbacv1alpha1.UserKind && in.APIGroup == GroupName:
// users in the rbac API group get v1alpha1
out.APIVersion = SchemeGroupVersion.String()
case in.Kind == rbacv1alpha1.GroupKind && in.APIGroup == GroupName:
// groups in the rbac API group get v1alpha1
out.APIVersion = SchemeGroupVersion.String()
default:
// otherwise, they get an unspecified version of a group
out.APIVersion = schema.GroupVersion{Group: in.APIGroup}.String()
}
return nil
}
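// Illustrative sketch (hypothetical function name): converting a legacy
// v1alpha1 binding whose subject is the wildcard user shows the rewrite to
// the system:authenticated group described above. A nil scope is passed
// because the generated autoConvert for Subject does not use it.
func exampleWildcardSubjectConversion() {
	in := &rbacv1alpha1.Subject{Kind: rbacv1alpha1.UserKind, Name: "*"}
	out := &api.Subject{}
	if err := Convert_v1alpha1_Subject_To_rbac_Subject(in, out, nil); err != nil {
		panic(err)
	}
	// out.Kind is now GroupKind and out.Name is "system:authenticated"
}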
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ClusterRoleBinding(obj *rbacv1alpha1.ClusterRoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_RoleBinding(obj *rbacv1alpha1.RoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_Subject(obj *rbacv1alpha1.Subject) {
if len(obj.APIVersion) == 0 {
switch obj.Kind {
case rbacv1alpha1.ServiceAccountKind:
obj.APIVersion = "v1"
case rbacv1alpha1.UserKind:
obj.APIVersion = SchemeGroupVersion.String()
case rbacv1alpha1.GroupKind:
obj.APIVersion = SchemeGroupVersion.String()
}
}
}
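// Illustrative sketch (hypothetical function name): defaulting fills in the
// APIVersion expected for each well-known subject kind.
func exampleSubjectDefaulting() {
	sa := &rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind}
	SetDefaults_Subject(sa) // sa.APIVersion is now "v1"

	user := &rbacv1alpha1.Subject{Kind: rbacv1alpha1.UserKind}
	SetDefaults_Subject(user) // user.APIVersion is now "rbac.authorization.k8s.io/v1alpha1"
}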
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
)
// PolicyRuleBuilder lets us attach methods. A no-no for API types.
// We use it to construct rules in code. It's more compact than trying to write them
// out in a literal and allows us to perform some basic checking during construction
type PolicyRuleBuilder struct {
PolicyRule rbacv1alpha1.PolicyRule `protobuf:"bytes,1,opt,name=policyRule"`
}
func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...)
return r
}
func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...)
return r
}
func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...)
return r
}
func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder {
r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...)
return r
}
func (r *PolicyRuleBuilder) RuleOrDie() rbacv1alpha1.PolicyRule {
ret, err := r.Rule()
if err != nil {
panic(err)
}
return ret
}
func (r *PolicyRuleBuilder) Rule() (rbacv1alpha1.PolicyRule, error) {
if len(r.PolicyRule.Verbs) == 0 {
return rbacv1alpha1.PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
}
switch {
case len(r.PolicyRule.NonResourceURLs) > 0:
if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
return rbacv1alpha1.PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
}
case len(r.PolicyRule.Resources) > 0:
if len(r.PolicyRule.NonResourceURLs) != 0 {
return rbacv1alpha1.PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule)
}
if len(r.PolicyRule.APIGroups) == 0 {
// this is a common bug
return rbacv1alpha1.PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
}
default:
return rbacv1alpha1.PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
}
return r.PolicyRule, nil
}
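// Illustrative sketch (hypothetical function name): since no constructor is
// shown in this file, the builder is seeded with verbs via a struct literal
// and then extended with the chained methods above. The empty string names
// the core API group.
func examplePolicyRule() {
	rule, err := (&PolicyRuleBuilder{
		PolicyRule: rbacv1alpha1.PolicyRule{Verbs: []string{"get", "list"}},
	}).Groups("").Resources("pods").Rule()
	if err != nil {
		panic(err)
	}
	_ = rule // Verbs:[get list] APIGroups:[""] Resources:[pods]
}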
// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
// We use it to construct bindings in code. It's more compact than trying to write them
// out in a literal.
type ClusterRoleBindingBuilder struct {
ClusterRoleBinding rbacv1alpha1.ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"`
}
func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder {
for _, group := range groups {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1alpha1.Subject{Kind: rbacv1alpha1.GroupKind, Name: group})
}
return r
}
func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder {
for _, user := range users {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1alpha1.Subject{Kind: rbacv1alpha1.UserKind, Name: user})
}
return r
}
func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
func (r *ClusterRoleBindingBuilder) BindingOrDie() rbacv1alpha1.ClusterRoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
func (r *ClusterRoleBindingBuilder) Binding() (rbacv1alpha1.ClusterRoleBinding, error) {
if len(r.ClusterRoleBinding.Subjects) == 0 {
return rbacv1alpha1.ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding)
}
return r.ClusterRoleBinding, nil
}
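// Illustrative sketch (hypothetical function name): Binding requires at
// least one subject; the RoleRef is supplied up front via a struct literal
// since no constructor is shown in this file.
func exampleClusterRoleBinding() {
	binding, err := (&ClusterRoleBindingBuilder{
		ClusterRoleBinding: rbacv1alpha1.ClusterRoleBinding{
			RoleRef: rbacv1alpha1.RoleRef{APIGroup: GroupName, Kind: "ClusterRole", Name: "view"},
		},
	}).Groups("system:authenticated").Binding()
	if err != nil {
		panic(err)
	}
	_ = binding
}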
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const GroupName = "rbac.authorization.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &rbacv1alpha1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
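// Illustrative sketch (hypothetical function name, assumes an additional
// import of k8s.io/apimachinery/pkg/runtime): a caller builds a scheme with
// AddToScheme, which runs the manually registered functions above along with
// the registrations made by the generated files.
func exampleBuildScheme() {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		panic(err)
	}
	_ = Resource("clusterroles") // rbac.authorization.k8s.io, clusterroles
}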
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.AggregationRule)(nil), (*rbac.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule(a.(*rbacv1alpha1.AggregationRule), b.(*rbac.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.AggregationRule)(nil), (*rbacv1alpha1.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule(a.(*rbac.AggregationRule), b.(*rbacv1alpha1.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.ClusterRole)(nil), (*rbac.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(a.(*rbacv1alpha1.ClusterRole), b.(*rbac.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRole)(nil), (*rbacv1alpha1.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(a.(*rbac.ClusterRole), b.(*rbacv1alpha1.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.ClusterRoleBinding)(nil), (*rbac.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(a.(*rbacv1alpha1.ClusterRoleBinding), b.(*rbac.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBinding)(nil), (*rbacv1alpha1.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(a.(*rbac.ClusterRoleBinding), b.(*rbacv1alpha1.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.ClusterRoleBindingList)(nil), (*rbac.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(a.(*rbacv1alpha1.ClusterRoleBindingList), b.(*rbac.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBindingList)(nil), (*rbacv1alpha1.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(a.(*rbac.ClusterRoleBindingList), b.(*rbacv1alpha1.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.ClusterRoleList)(nil), (*rbac.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(a.(*rbacv1alpha1.ClusterRoleList), b.(*rbac.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleList)(nil), (*rbacv1alpha1.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(a.(*rbac.ClusterRoleList), b.(*rbacv1alpha1.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.PolicyRule)(nil), (*rbac.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(a.(*rbacv1alpha1.PolicyRule), b.(*rbac.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.PolicyRule)(nil), (*rbacv1alpha1.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(a.(*rbac.PolicyRule), b.(*rbacv1alpha1.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.Role)(nil), (*rbac.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Role_To_rbac_Role(a.(*rbacv1alpha1.Role), b.(*rbac.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.Role)(nil), (*rbacv1alpha1.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Role_To_v1alpha1_Role(a.(*rbac.Role), b.(*rbacv1alpha1.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.RoleBinding)(nil), (*rbac.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(a.(*rbacv1alpha1.RoleBinding), b.(*rbac.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBinding)(nil), (*rbacv1alpha1.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(a.(*rbac.RoleBinding), b.(*rbacv1alpha1.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.RoleBindingList)(nil), (*rbac.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(a.(*rbacv1alpha1.RoleBindingList), b.(*rbac.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBindingList)(nil), (*rbacv1alpha1.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(a.(*rbac.RoleBindingList), b.(*rbacv1alpha1.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.RoleList)(nil), (*rbac.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RoleList_To_rbac_RoleList(a.(*rbacv1alpha1.RoleList), b.(*rbac.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleList)(nil), (*rbacv1alpha1.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleList_To_v1alpha1_RoleList(a.(*rbac.RoleList), b.(*rbacv1alpha1.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1alpha1.RoleRef)(nil), (*rbac.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RoleRef_To_rbac_RoleRef(a.(*rbacv1alpha1.RoleRef), b.(*rbac.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleRef)(nil), (*rbacv1alpha1.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleRef_To_v1alpha1_RoleRef(a.(*rbac.RoleRef), b.(*rbacv1alpha1.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*rbac.Subject)(nil), (*rbacv1alpha1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Subject_To_v1alpha1_Subject(a.(*rbac.Subject), b.(*rbacv1alpha1.Subject), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*rbacv1alpha1.Subject)(nil), (*rbac.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Subject_To_rbac_Subject(a.(*rbacv1alpha1.Subject), b.(*rbac.Subject), scope)
}); err != nil {
return err
}
return nil
}
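// Illustrative sketch, not part of the generated file: with the conversions
// registered, runtime.Scheme.Convert can translate between the versioned and
// internal types. The function name below is hypothetical.
func exampleConvertRole() {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		panic(err)
	}
	in := &rbacv1alpha1.Role{Rules: []rbacv1alpha1.PolicyRule{{Verbs: []string{"get"}}}}
	out := &rbac.Role{}
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	// out.Rules[0].Verbs is now []string{"get"}
}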
func autoConvert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in *rbacv1alpha1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function.
func Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in *rbacv1alpha1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
return autoConvert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in, out, s)
}
func autoConvert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in *rbac.AggregationRule, out *rbacv1alpha1.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule is an autogenerated conversion function.
func Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in *rbac.AggregationRule, out *rbacv1alpha1.AggregationRule, s conversion.Scope) error {
return autoConvert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in, out, s)
}
func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *rbacv1alpha1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole is an autogenerated conversion function.
func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *rbacv1alpha1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s)
}
func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *rbacv1alpha1.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1alpha1.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbacv1alpha1.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole is an autogenerated conversion function.
func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *rbacv1alpha1.ClusterRole, s conversion.Scope) error {
return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s)
}
func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1alpha1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbac.Subject, len(*in))
for i := range *in {
if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subjects = nil
}
if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding is an autogenerated conversion function.
func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1alpha1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s)
}
func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1alpha1.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbacv1alpha1.Subject, len(*in))
for i := range *in {
if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subjects = nil
}
if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1alpha1.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s)
}
func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1alpha1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]rbac.ClusterRoleBinding, len(*in))
for i := range *in {
if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1alpha1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s)
}
func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1alpha1.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]rbacv1alpha1.ClusterRoleBinding, len(*in))
for i := range *in {
if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1alpha1.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s)
}
func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1alpha1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList is an autogenerated conversion function.
func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1alpha1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s)
}
func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1alpha1.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1alpha1.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1alpha1.ClusterRoleList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s)
}
func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *rbacv1alpha1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule is an autogenerated conversion function.
func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *rbacv1alpha1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s)
}
func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *rbacv1alpha1.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule is an autogenerated conversion function.
func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *rbacv1alpha1.PolicyRule, s conversion.Scope) error {
return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s)
}
func autoConvert_v1alpha1_Role_To_rbac_Role(in *rbacv1alpha1.Role, out *rbac.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1alpha1_Role_To_rbac_Role is an autogenerated conversion function.
func Convert_v1alpha1_Role_To_rbac_Role(in *rbacv1alpha1.Role, out *rbac.Role, s conversion.Scope) error {
return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s)
}
func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *rbacv1alpha1.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1alpha1.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_rbac_Role_To_v1alpha1_Role is an autogenerated conversion function.
func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *rbacv1alpha1.Role, s conversion.Scope) error {
return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s)
}
func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *rbacv1alpha1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbac.Subject, len(*in))
for i := range *in {
if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subjects = nil
}
if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding is an autogenerated conversion function.
func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *rbacv1alpha1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s)
}
func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *rbacv1alpha1.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbacv1alpha1.Subject, len(*in))
for i := range *in {
if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subjects = nil
}
if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding is an autogenerated conversion function.
func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *rbacv1alpha1.RoleBinding, s conversion.Scope) error {
return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s)
}
func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1alpha1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]rbac.RoleBinding, len(*in))
for i := range *in {
if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList is an autogenerated conversion function.
func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1alpha1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s)
}
func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1alpha1.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]rbacv1alpha1.RoleBinding, len(*in))
for i := range *in {
if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList is an autogenerated conversion function.
func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1alpha1.RoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s)
}
func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *rbacv1alpha1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_RoleList_To_rbac_RoleList is an autogenerated conversion function.
func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *rbacv1alpha1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s)
}
func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *rbacv1alpha1.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1alpha1.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_RoleList_To_v1alpha1_RoleList is an autogenerated conversion function.
func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *rbacv1alpha1.RoleList, s conversion.Scope) error {
return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s)
}
func autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in *rbacv1alpha1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_v1alpha1_RoleRef_To_rbac_RoleRef is an autogenerated conversion function.
func Convert_v1alpha1_RoleRef_To_rbac_RoleRef(in *rbacv1alpha1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
return autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in, out, s)
}
func autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *rbacv1alpha1.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_rbac_RoleRef_To_v1alpha1_RoleRef is an autogenerated conversion function.
func Convert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *rbacv1alpha1.RoleRef, s conversion.Scope) error {
return autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in, out, s)
}
func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *rbacv1alpha1.Subject, out *rbac.Subject, s conversion.Scope) error {
out.Kind = in.Kind
// INFO: in.APIVersion opted out of conversion generation
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *rbacv1alpha1.Subject, s conversion.Scope) error {
out.Kind = in.Kind
// WARNING: in.APIGroup requires manual conversion: does not exist in peer-type
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&rbacv1alpha1.ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*rbacv1alpha1.ClusterRoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1alpha1.ClusterRoleBindingList{}, func(obj interface{}) {
SetObjectDefaults_ClusterRoleBindingList(obj.(*rbacv1alpha1.ClusterRoleBindingList))
})
scheme.AddTypeDefaultingFunc(&rbacv1alpha1.RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*rbacv1alpha1.RoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1alpha1.RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*rbacv1alpha1.RoleBindingList)) })
return nil
}
func SetObjectDefaults_ClusterRoleBinding(in *rbacv1alpha1.ClusterRoleBinding) {
SetDefaults_ClusterRoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_ClusterRoleBindingList(in *rbacv1alpha1.ClusterRoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ClusterRoleBinding(a)
}
}
func SetObjectDefaults_RoleBinding(in *rbacv1alpha1.RoleBinding) {
SetDefaults_RoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_RoleBindingList(in *rbacv1alpha1.RoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_RoleBinding(a)
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ClusterRoleBinding(obj *rbacv1beta1.ClusterRoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_RoleBinding(obj *rbacv1beta1.RoleBinding) {
if len(obj.RoleRef.APIGroup) == 0 {
obj.RoleRef.APIGroup = GroupName
}
}
func SetDefaults_Subject(obj *rbacv1beta1.Subject) {
if len(obj.APIGroup) == 0 {
switch obj.Kind {
case rbacv1beta1.ServiceAccountKind:
obj.APIGroup = ""
case rbacv1beta1.UserKind:
obj.APIGroup = GroupName
case rbacv1beta1.GroupKind:
obj.APIGroup = GroupName
}
}
}
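// Illustrative sketch (hypothetical function name): unlike v1alpha1, which
// defaults the APIVersion field, v1beta1 defaults the APIGroup on a subject.
func exampleSubjectDefaulting() {
	user := &rbacv1beta1.Subject{Kind: rbacv1beta1.UserKind}
	SetDefaults_Subject(user) // user.APIGroup is now "rbac.authorization.k8s.io"

	sa := &rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind}
	SetDefaults_Subject(sa) // sa.APIGroup stays "" (the core group)
}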
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
)
// PolicyRuleBuilder lets us attach methods. A no-no for API types.
// We use it to construct rules in code. It's more compact than trying to write them
// out in a literal and allows us to perform some basic checking during construction
type PolicyRuleBuilder struct {
PolicyRule rbacv1beta1.PolicyRule `protobuf:"bytes,1,opt,name=policyRule"`
}
func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...)
return r
}
func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...)
return r
}
func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...)
return r
}
func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder {
r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...)
return r
}
func (r *PolicyRuleBuilder) RuleOrDie() rbacv1beta1.PolicyRule {
ret, err := r.Rule()
if err != nil {
panic(err)
}
return ret
}
func (r *PolicyRuleBuilder) Rule() (rbacv1beta1.PolicyRule, error) {
if len(r.PolicyRule.Verbs) == 0 {
return rbacv1beta1.PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
}
switch {
case len(r.PolicyRule.NonResourceURLs) > 0:
if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
return rbacv1beta1.PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
}
case len(r.PolicyRule.Resources) > 0:
if len(r.PolicyRule.NonResourceURLs) != 0 {
return rbacv1beta1.PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule)
}
if len(r.PolicyRule.APIGroups) == 0 {
// this is a common bug
return rbacv1beta1.PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
}
default:
return rbacv1beta1.PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
}
return r.PolicyRule, nil
}
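// Illustrative sketch (hypothetical function name): a non-resource rule uses
// URLs instead of Groups/Resources; Rule rejects any mix of the two styles.
func exampleNonResourceRule() {
	rule, err := (&PolicyRuleBuilder{
		PolicyRule: rbacv1beta1.PolicyRule{Verbs: []string{"get"}},
	}).URLs("/healthz").Rule()
	if err != nil {
		panic(err)
	}
	_ = rule // Verbs:[get] NonResourceURLs:[/healthz]
}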
// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
// We use it to construct bindings in code. It's more compact than trying to write them
// out in a literal.
type ClusterRoleBindingBuilder struct {
ClusterRoleBinding rbacv1beta1.ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"`
}
func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder {
for _, group := range groups {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1beta1.Subject{Kind: rbacv1beta1.GroupKind, Name: group})
}
return r
}
func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder {
for _, user := range users {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1beta1.Subject{Kind: rbacv1beta1.UserKind, Name: user})
}
return r
}
func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder {
for _, saName := range serviceAccountNames {
r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: namespace, Name: saName})
}
return r
}
func (r *ClusterRoleBindingBuilder) BindingOrDie() rbacv1beta1.ClusterRoleBinding {
ret, err := r.Binding()
if err != nil {
panic(err)
}
return ret
}
func (r *ClusterRoleBindingBuilder) Binding() (rbacv1beta1.ClusterRoleBinding, error) {
if len(r.ClusterRoleBinding.Subjects) == 0 {
return rbacv1beta1.ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding)
}
return r.ClusterRoleBinding, nil
}
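// Illustrative sketch (hypothetical function name): SAs expands each service
// account name into a namespaced subject on the binding.
func exampleServiceAccountBinding() {
	binding, err := (&ClusterRoleBindingBuilder{
		ClusterRoleBinding: rbacv1beta1.ClusterRoleBinding{
			RoleRef: rbacv1beta1.RoleRef{APIGroup: GroupName, Kind: "ClusterRole", Name: "edit"},
		},
	}).SAs("kube-system", "deployment-controller").Binding()
	if err != nil {
		panic(err)
	}
	_ = binding
}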
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const GroupName = "rbac.authorization.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &rbacv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.AggregationRule)(nil), (*rbac.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AggregationRule_To_rbac_AggregationRule(a.(*rbacv1beta1.AggregationRule), b.(*rbac.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.AggregationRule)(nil), (*rbacv1beta1.AggregationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_AggregationRule_To_v1beta1_AggregationRule(a.(*rbac.AggregationRule), b.(*rbacv1beta1.AggregationRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.ClusterRole)(nil), (*rbac.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(a.(*rbacv1beta1.ClusterRole), b.(*rbac.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRole)(nil), (*rbacv1beta1.ClusterRole)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(a.(*rbac.ClusterRole), b.(*rbacv1beta1.ClusterRole), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.ClusterRoleBinding)(nil), (*rbac.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(a.(*rbacv1beta1.ClusterRoleBinding), b.(*rbac.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBinding)(nil), (*rbacv1beta1.ClusterRoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(a.(*rbac.ClusterRoleBinding), b.(*rbacv1beta1.ClusterRoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.ClusterRoleBindingList)(nil), (*rbac.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(a.(*rbacv1beta1.ClusterRoleBindingList), b.(*rbac.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleBindingList)(nil), (*rbacv1beta1.ClusterRoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(a.(*rbac.ClusterRoleBindingList), b.(*rbacv1beta1.ClusterRoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.ClusterRoleList)(nil), (*rbac.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(a.(*rbacv1beta1.ClusterRoleList), b.(*rbac.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.ClusterRoleList)(nil), (*rbacv1beta1.ClusterRoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(a.(*rbac.ClusterRoleList), b.(*rbacv1beta1.ClusterRoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.PolicyRule)(nil), (*rbac.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(a.(*rbacv1beta1.PolicyRule), b.(*rbac.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.PolicyRule)(nil), (*rbacv1beta1.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(a.(*rbac.PolicyRule), b.(*rbacv1beta1.PolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.Role)(nil), (*rbac.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Role_To_rbac_Role(a.(*rbacv1beta1.Role), b.(*rbac.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.Role)(nil), (*rbacv1beta1.Role)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Role_To_v1beta1_Role(a.(*rbac.Role), b.(*rbacv1beta1.Role), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.RoleBinding)(nil), (*rbac.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(a.(*rbacv1beta1.RoleBinding), b.(*rbac.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBinding)(nil), (*rbacv1beta1.RoleBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(a.(*rbac.RoleBinding), b.(*rbacv1beta1.RoleBinding), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.RoleBindingList)(nil), (*rbac.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(a.(*rbacv1beta1.RoleBindingList), b.(*rbac.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleBindingList)(nil), (*rbacv1beta1.RoleBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(a.(*rbac.RoleBindingList), b.(*rbacv1beta1.RoleBindingList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.RoleList)(nil), (*rbac.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RoleList_To_rbac_RoleList(a.(*rbacv1beta1.RoleList), b.(*rbac.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleList)(nil), (*rbacv1beta1.RoleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleList_To_v1beta1_RoleList(a.(*rbac.RoleList), b.(*rbacv1beta1.RoleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.RoleRef)(nil), (*rbac.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_RoleRef_To_rbac_RoleRef(a.(*rbacv1beta1.RoleRef), b.(*rbac.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.RoleRef)(nil), (*rbacv1beta1.RoleRef)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_RoleRef_To_v1beta1_RoleRef(a.(*rbac.RoleRef), b.(*rbacv1beta1.RoleRef), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbacv1beta1.Subject)(nil), (*rbac.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Subject_To_rbac_Subject(a.(*rbacv1beta1.Subject), b.(*rbac.Subject), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*rbac.Subject)(nil), (*rbacv1beta1.Subject)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_rbac_Subject_To_v1beta1_Subject(a.(*rbac.Subject), b.(*rbacv1beta1.Subject), scope)
}); err != nil {
return err
}
return nil
}
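// newRBACConversionScheme is an illustrative sketch, not part of the
// generated file: RegisterConversions can seed a standalone scheme, after
// which scheme.Convert can translate between the internal rbac types and
// v1beta1 without the rest of the API machinery.
func newRBACConversionScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := RegisterConversions(s); err != nil {
		return nil, err
	}
	return s, nil
}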
func autoConvert_v1beta1_AggregationRule_To_rbac_AggregationRule(in *rbacv1beta1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_v1beta1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function.
func Convert_v1beta1_AggregationRule_To_rbac_AggregationRule(in *rbacv1beta1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error {
return autoConvert_v1beta1_AggregationRule_To_rbac_AggregationRule(in, out, s)
}
func autoConvert_rbac_AggregationRule_To_v1beta1_AggregationRule(in *rbac.AggregationRule, out *rbacv1beta1.AggregationRule, s conversion.Scope) error {
out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors))
return nil
}
// Convert_rbac_AggregationRule_To_v1beta1_AggregationRule is an autogenerated conversion function.
func Convert_rbac_AggregationRule_To_v1beta1_AggregationRule(in *rbac.AggregationRule, out *rbacv1beta1.AggregationRule, s conversion.Scope) error {
return autoConvert_rbac_AggregationRule_To_v1beta1_AggregationRule(in, out, s)
}
func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *rbacv1beta1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_v1beta1_ClusterRole_To_rbac_ClusterRole is an autogenerated conversion function.
func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *rbacv1beta1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s)
}
func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *rbacv1beta1.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1beta1.PolicyRule)(unsafe.Pointer(&in.Rules))
out.AggregationRule = (*rbacv1beta1.AggregationRule)(unsafe.Pointer(in.AggregationRule))
return nil
}
// Convert_rbac_ClusterRole_To_v1beta1_ClusterRole is an autogenerated conversion function.
func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *rbacv1beta1.ClusterRole, s conversion.Scope) error {
return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1beta1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding is an autogenerated conversion function.
func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *rbacv1beta1.ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s)
}
func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1beta1.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbacv1beta1.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *rbacv1beta1.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1beta1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *rbacv1beta1.ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s)
}
func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1beta1.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1beta1.ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *rbacv1beta1.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1beta1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList is an autogenerated conversion function.
func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *rbacv1beta1.ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s)
}
func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1beta1.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1beta1.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList is an autogenerated conversion function.
func Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *rbacv1beta1.ClusterRoleList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s)
}
func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *rbacv1beta1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_v1beta1_PolicyRule_To_rbac_PolicyRule is an autogenerated conversion function.
func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *rbacv1beta1.PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s)
}
func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *rbacv1beta1.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
// Convert_rbac_PolicyRule_To_v1beta1_PolicyRule is an autogenerated conversion function.
func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *rbacv1beta1.PolicyRule, s conversion.Scope) error {
return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s)
}
func autoConvert_v1beta1_Role_To_rbac_Role(in *rbacv1beta1.Role, out *rbac.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1beta1_Role_To_rbac_Role is an autogenerated conversion function.
func Convert_v1beta1_Role_To_rbac_Role(in *rbacv1beta1.Role, out *rbac.Role, s conversion.Scope) error {
return autoConvert_v1beta1_Role_To_rbac_Role(in, out, s)
}
func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *rbacv1beta1.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbacv1beta1.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_rbac_Role_To_v1beta1_Role is an autogenerated conversion function.
func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *rbacv1beta1.Role, s conversion.Scope) error {
return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s)
}
func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *rbacv1beta1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_RoleBinding_To_rbac_RoleBinding is an autogenerated conversion function.
func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *rbacv1beta1.RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, out, s)
}
func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *rbacv1beta1.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbacv1beta1.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
// Convert_rbac_RoleBinding_To_v1beta1_RoleBinding is an autogenerated conversion function.
func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *rbacv1beta1.RoleBinding, s conversion.Scope) error {
return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s)
}
func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1beta1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList is an autogenerated conversion function.
func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *rbacv1beta1.RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s)
}
func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1beta1.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1beta1.RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList is an autogenerated conversion function.
func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *rbacv1beta1.RoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s)
}
func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *rbacv1beta1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_RoleList_To_rbac_RoleList is an autogenerated conversion function.
func Convert_v1beta1_RoleList_To_rbac_RoleList(in *rbacv1beta1.RoleList, out *rbac.RoleList, s conversion.Scope) error {
return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s)
}
func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *rbacv1beta1.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbacv1beta1.Role)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_rbac_RoleList_To_v1beta1_RoleList is an autogenerated conversion function.
func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *rbacv1beta1.RoleList, s conversion.Scope) error {
return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, out, s)
}
func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *rbacv1beta1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_v1beta1_RoleRef_To_rbac_RoleRef is an autogenerated conversion function.
func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *rbacv1beta1.RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s)
}
func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *rbacv1beta1.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
// Convert_rbac_RoleRef_To_v1beta1_RoleRef is an autogenerated conversion function.
func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *rbacv1beta1.RoleRef, s conversion.Scope) error {
return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s)
}
func autoConvert_v1beta1_Subject_To_rbac_Subject(in *rbacv1beta1.Subject, out *rbac.Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIGroup = in.APIGroup
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_v1beta1_Subject_To_rbac_Subject is an autogenerated conversion function.
func Convert_v1beta1_Subject_To_rbac_Subject(in *rbacv1beta1.Subject, out *rbac.Subject, s conversion.Scope) error {
return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s)
}
func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *rbacv1beta1.Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIGroup = in.APIGroup
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
// Convert_rbac_Subject_To_v1beta1_Subject is an autogenerated conversion function.
func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *rbacv1beta1.Subject, s conversion.Scope) error {
return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&rbacv1beta1.ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*rbacv1beta1.ClusterRoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1beta1.ClusterRoleBindingList{}, func(obj interface{}) {
SetObjectDefaults_ClusterRoleBindingList(obj.(*rbacv1beta1.ClusterRoleBindingList))
})
scheme.AddTypeDefaultingFunc(&rbacv1beta1.RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*rbacv1beta1.RoleBinding)) })
scheme.AddTypeDefaultingFunc(&rbacv1beta1.RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*rbacv1beta1.RoleBindingList)) })
return nil
}
func SetObjectDefaults_ClusterRoleBinding(in *rbacv1beta1.ClusterRoleBinding) {
SetDefaults_ClusterRoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_ClusterRoleBindingList(in *rbacv1beta1.ClusterRoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ClusterRoleBinding(a)
}
}
func SetObjectDefaults_RoleBinding(in *rbacv1beta1.RoleBinding) {
SetDefaults_RoleBinding(in)
for i := range in.Subjects {
a := &in.Subjects[i]
SetDefaults_Subject(a)
}
}
func SetObjectDefaults_RoleBindingList(in *rbacv1beta1.RoleBindingList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_RoleBinding(a)
}
}
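// exampleDefaultRoleBinding is an illustrative sketch, not part of the
// generated file: the covering defaulter walks the whole object, so one call
// defaults the binding and every subject in place.
func exampleDefaultRoleBinding(rb *rbacv1beta1.RoleBinding) {
	SetObjectDefaults_RoleBinding(rb)
	// rb and each rb.Subjects[i] now carry whatever defaults the hand-written
	// SetDefaults_RoleBinding and SetDefaults_Subject funcs apply.
}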
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"k8s.io/apimachinery/pkg/api/validation/path"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/rbac"
)
// ValidateRBACName is exported to allow types outside of the RBAC API group to reuse this validation logic
// Minimal validation of names for roles and bindings. Identical to the validation for OpenShift. See:
// * https://github.com/kubernetes/kubernetes/blob/60db507b279ce45bd16ea3db49bf181f2aeb3c3d/pkg/api/validation/name.go
// * https://github.com/openshift/origin/blob/388478c40e751c4295dcb9a44dd69e5ac65d0e3b/pkg/api/helpers.go
func ValidateRBACName(name string, prefix bool) []string {
return path.IsValidPathSegmentName(name)
}
func ValidateRole(role *rbac.Role) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validation.ValidateObjectMeta(&role.ObjectMeta, true, ValidateRBACName, field.NewPath("metadata"))...)
for i, rule := range role.Rules {
if err := ValidatePolicyRule(rule, true, field.NewPath("rules").Index(i)); err != nil {
allErrs = append(allErrs, err...)
}
}
if len(allErrs) != 0 {
return allErrs
}
return nil
}
func ValidateRoleUpdate(role *rbac.Role, oldRole *rbac.Role) field.ErrorList {
allErrs := ValidateRole(role)
allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&role.ObjectMeta, &oldRole.ObjectMeta, field.NewPath("metadata"))...)
return allErrs
}
type ClusterRoleValidationOptions struct {
AllowInvalidLabelValueInSelector bool
}
func ValidateClusterRole(role *rbac.ClusterRole, opts ClusterRoleValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validation.ValidateObjectMeta(&role.ObjectMeta, false, ValidateRBACName, field.NewPath("metadata"))...)
for i, rule := range role.Rules {
if err := ValidatePolicyRule(rule, false, field.NewPath("rules").Index(i)); err != nil {
allErrs = append(allErrs, err...)
}
}
labelSelectorValidationOptions := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector}
if role.AggregationRule != nil {
if len(role.AggregationRule.ClusterRoleSelectors) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("aggregationRule", "clusterRoleSelectors"), "at least one clusterRoleSelector required if aggregationRule is non-nil"))
}
for i, selector := range role.AggregationRule.ClusterRoleSelectors {
fieldPath := field.NewPath("aggregationRule", "clusterRoleSelectors").Index(i)
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&selector, labelSelectorValidationOptions, fieldPath)...)
selector, err := metav1.LabelSelectorAsSelector(&selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldPath, selector, "invalid label selector."))
}
}
}
if len(allErrs) != 0 {
return allErrs
}
return nil
}
func ValidateClusterRoleUpdate(role *rbac.ClusterRole, oldRole *rbac.ClusterRole, opts ClusterRoleValidationOptions) field.ErrorList {
allErrs := ValidateClusterRole(role, opts)
allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&role.ObjectMeta, &oldRole.ObjectMeta, field.NewPath("metadata"))...)
return allErrs
}
// ValidatePolicyRule is exported to allow types outside of the RBAC API group to embed a rbac.PolicyRule and reuse this validation logic
func ValidatePolicyRule(rule rbac.PolicyRule, isNamespaced bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(rule.Verbs) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("verbs"), "verbs must contain at least one value"))
}
if len(rule.NonResourceURLs) > 0 {
if isNamespaced {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nonResourceURLs"), rule.NonResourceURLs, "namespaced rules cannot apply to non-resource URLs"))
}
if len(rule.APIGroups) > 0 || len(rule.Resources) > 0 || len(rule.ResourceNames) > 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nonResourceURLs"), rule.NonResourceURLs, "rules cannot apply to both regular resources and non-resource URLs"))
}
return allErrs
}
if len(rule.APIGroups) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("apiGroups"), "resource rules must supply at least one api group"))
}
if len(rule.Resources) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("resources"), "resource rules must supply at least one resource"))
}
return allErrs
}
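// examplePolicyRuleValidation is an illustrative sketch, not part of the
// original file: a minimal valid resource rule. Dropping Verbs, APIGroups, or
// Resources would add a field.Required error to the returned list.
func examplePolicyRuleValidation() field.ErrorList {
	rule := rbac.PolicyRule{
		Verbs:     []string{"get", "list"},
		APIGroups: []string{""}, // "" is the core API group
		Resources: []string{"pods"},
	}
	return ValidatePolicyRule(rule, true, field.NewPath("rules").Index(0)) // empty list
}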
func ValidateRoleBinding(roleBinding *rbac.RoleBinding) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validation.ValidateObjectMeta(&roleBinding.ObjectMeta, true, ValidateRBACName, field.NewPath("metadata"))...)
// TODO allow multiple API groups. For now, restrict to one, but I can envision other experimental roles in other groups taking
// advantage of the binding infrastructure
if roleBinding.RoleRef.APIGroup != rbac.GroupName {
allErrs = append(allErrs, field.NotSupported(field.NewPath("roleRef", "apiGroup"), roleBinding.RoleRef.APIGroup, []string{rbac.GroupName}))
}
switch roleBinding.RoleRef.Kind {
case "Role", "ClusterRole":
default:
allErrs = append(allErrs, field.NotSupported(field.NewPath("roleRef", "kind"), roleBinding.RoleRef.Kind, []string{"Role", "ClusterRole"}))
}
if len(roleBinding.RoleRef.Name) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("roleRef", "name"), ""))
} else {
for _, msg := range ValidateRBACName(roleBinding.RoleRef.Name, false) {
allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef", "name"), roleBinding.RoleRef.Name, msg))
}
}
subjectsPath := field.NewPath("subjects")
for i, subject := range roleBinding.Subjects {
allErrs = append(allErrs, ValidateRoleBindingSubject(subject, true, subjectsPath.Index(i))...)
}
return allErrs
}
func ValidateRoleBindingUpdate(roleBinding *rbac.RoleBinding, oldRoleBinding *rbac.RoleBinding) field.ErrorList {
allErrs := ValidateRoleBinding(roleBinding)
allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&roleBinding.ObjectMeta, &oldRoleBinding.ObjectMeta, field.NewPath("metadata"))...)
if oldRoleBinding.RoleRef != roleBinding.RoleRef {
allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef"), roleBinding.RoleRef, "cannot change roleRef"))
}
return allErrs
}
func ValidateClusterRoleBinding(roleBinding *rbac.ClusterRoleBinding) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validation.ValidateObjectMeta(&roleBinding.ObjectMeta, false, ValidateRBACName, field.NewPath("metadata"))...)
// TODO allow multiple API groups. For now, restrict to one, but I can envision other experimental roles in other groups taking
// advantage of the binding infrastructure
if roleBinding.RoleRef.APIGroup != rbac.GroupName {
allErrs = append(allErrs, field.NotSupported(field.NewPath("roleRef", "apiGroup"), roleBinding.RoleRef.APIGroup, []string{rbac.GroupName}))
}
switch roleBinding.RoleRef.Kind {
case "ClusterRole":
default:
allErrs = append(allErrs, field.NotSupported(field.NewPath("roleRef", "kind"), roleBinding.RoleRef.Kind, []string{"ClusterRole"}))
}
if len(roleBinding.RoleRef.Name) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("roleRef", "name"), ""))
} else {
for _, msg := range ValidateRBACName(roleBinding.RoleRef.Name, false) {
allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef", "name"), roleBinding.RoleRef.Name, msg))
}
}
subjectsPath := field.NewPath("subjects")
for i, subject := range roleBinding.Subjects {
allErrs = append(allErrs, ValidateRoleBindingSubject(subject, false, subjectsPath.Index(i))...)
}
return allErrs
}
func ValidateClusterRoleBindingUpdate(roleBinding *rbac.ClusterRoleBinding, oldRoleBinding *rbac.ClusterRoleBinding) field.ErrorList {
allErrs := ValidateClusterRoleBinding(roleBinding)
allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&roleBinding.ObjectMeta, &oldRoleBinding.ObjectMeta, field.NewPath("metadata"))...)
if oldRoleBinding.RoleRef != roleBinding.RoleRef {
allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef"), roleBinding.RoleRef, "cannot change roleRef"))
}
return allErrs
}
// ValidateRoleBindingSubject is exported to allow types outside of the RBAC API group to embed a rbac.Subject and reuse this validation logic
func ValidateRoleBindingSubject(subject rbac.Subject, isNamespaced bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(subject.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
switch subject.Kind {
case rbac.ServiceAccountKind:
if len(subject.Name) > 0 {
for _, msg := range validation.ValidateServiceAccountName(subject.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), subject.Name, msg))
}
}
if len(subject.APIGroup) > 0 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("apiGroup"), subject.APIGroup, []string{""}))
}
if !isNamespaced && len(subject.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
}
case rbac.UserKind:
// TODO(ericchiang): What other restrictions on user name are there?
if subject.APIGroup != rbac.GroupName {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("apiGroup"), subject.APIGroup, []string{rbac.GroupName}))
}
case rbac.GroupKind:
// TODO(ericchiang): What other restrictions on group name are there?
if subject.APIGroup != rbac.GroupName {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("apiGroup"), subject.APIGroup, []string{rbac.GroupName}))
}
default:
allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), subject.Kind, []string{rbac.ServiceAccountKind, rbac.UserKind, rbac.GroupKind}))
}
return allErrs
}
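// exampleSubjectValidation is an illustrative sketch, not part of the
// original file: in a cluster-scoped binding (isNamespaced=false) a service
// account subject must carry its own namespace, so this returns a
// field.Required error for "namespace".
func exampleSubjectValidation() field.ErrorList {
	subject := rbac.Subject{Kind: rbac.ServiceAccountKind, Name: "builder"}
	return ValidateRoleBindingSubject(subject, false, field.NewPath("subjects").Index(0))
}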
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package rbac
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AggregationRule) DeepCopyInto(out *AggregationRule) {
*out = *in
if in.ClusterRoleSelectors != nil {
in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors
*out = make([]v1.LabelSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule.
func (in *AggregationRule) DeepCopy() *AggregationRule {
if in == nil {
return nil
}
out := new(AggregationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AggregationRule != nil {
in, out := &in.AggregationRule, &out.AggregationRule
*out = new(AggregationRule)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
func (in *ClusterRole) DeepCopy() *ClusterRole {
if in == nil {
return nil
}
out := new(ClusterRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRole) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
if in == nil {
return nil
}
out := new(ClusterRoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
if in == nil {
return nil
}
out := new(ClusterRoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterRole, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
if in == nil {
return nil
}
out := new(ClusterRoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
*out = *in
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.APIGroups != nil {
in, out := &in.APIGroups, &out.APIGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.NonResourceURLs != nil {
in, out := &in.NonResourceURLs, &out.NonResourceURLs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
func (in *PolicyRule) DeepCopy() *PolicyRule {
if in == nil {
return nil
}
out := new(PolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Role) DeepCopyInto(out *Role) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]PolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
func (in *Role) DeepCopy() *Role {
if in == nil {
return nil
}
out := new(Role)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Role) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]Subject, len(*in))
copy(*out, *in)
}
out.RoleRef = in.RoleRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
func (in *RoleBinding) DeepCopy() *RoleBinding {
if in == nil {
return nil
}
out := new(RoleBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]RoleBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
func (in *RoleBindingList) DeepCopy() *RoleBindingList {
if in == nil {
return nil
}
out := new(RoleBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleList) DeepCopyInto(out *RoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Role, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
func (in *RoleList) DeepCopy() *RoleList {
if in == nil {
return nil
}
out := new(RoleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RoleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoleRef) DeepCopyInto(out *RoleRef) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef.
func (in *RoleRef) DeepCopy() *RoleRef {
if in == nil {
return nil
}
out := new(RoleRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in SortableRuleSlice) DeepCopyInto(out *SortableRuleSlice) {
{
in := &in
*out = make(SortableRuleSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortableRuleSlice.
func (in SortableRuleSlice) DeepCopy() SortableRuleSlice {
if in == nil {
return nil
}
out := new(SortableRuleSlice)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subject) DeepCopyInto(out *Subject) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject.
func (in *Subject) DeepCopy() *Subject {
if in == nil {
return nil
}
out := new(Subject)
in.DeepCopyInto(out)
return out
}
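// exampleDeepCopyIsolation is an illustrative sketch, not part of the
// generated file: DeepCopy produces a fully independent object, so mutating
// the copy leaves the original intact.
func exampleDeepCopyIsolation() bool {
	orig := &PolicyRule{Verbs: []string{"get"}}
	cp := orig.DeepCopy()
	cp.Verbs[0] = "list"
	return orig.Verbs[0] == "get" // true
}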
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/resource"
"sigs.k8s.io/randfill"
)
// Funcs contains the fuzzer functions for the resource group.
//
// Leaving fields empty that are then filled in by defaulting
// leads to errors during roundtrip tests.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(r *resource.ExactDeviceRequest, c randfill.Continue) {
c.FillNoCustom(r) // fuzz self without calling this function again
if r.AllocationMode == "" {
r.AllocationMode = []resource.DeviceAllocationMode{
resource.DeviceAllocationModeAll,
resource.DeviceAllocationModeExactCount,
}[c.Int31n(2)]
}
},
func(r *resource.DeviceSubRequest, c randfill.Continue) {
c.FillNoCustom(r) // fuzz self without calling this function again
if r.AllocationMode == "" {
r.AllocationMode = []resource.DeviceAllocationMode{
resource.DeviceAllocationModeAll,
resource.DeviceAllocationModeExactCount,
}[c.Int31n(2)]
}
},
func(r *resource.DeviceAllocationConfiguration, c randfill.Continue) {
c.FillNoCustom(r)
if r.Source == "" {
r.Source = []resource.AllocationConfigSource{
resource.AllocationConfigSourceClass,
resource.AllocationConfigSourceClaim,
}[c.Int31n(2)]
}
},
func(r *resource.DeviceToleration, c randfill.Continue) {
c.FillNoCustom(r)
if r.Operator == "" {
r.Operator = []resource.DeviceTolerationOperator{
resource.DeviceTolerationOpEqual,
resource.DeviceTolerationOpExists,
}[c.Int31n(2)]
}
},
func(r *resource.DeviceTaint, c randfill.Continue) {
c.FillNoCustom(r)
if r.TimeAdded == nil {
// Current time is more or less random.
// Truncate to seconds because sub-second resolution
// does not survive round-tripping.
r.TimeAdded = &metav1.Time{Time: time.Now().Truncate(time.Second)}
}
},
func(r *resource.OpaqueDeviceConfiguration, c randfill.Continue) {
c.FillNoCustom(r)
// Match the fuzzer default content for runtime.Object.
//
// This is necessary because randomly generated content
// might be valid JSON which changes during re-encoding.
r.Parameters = runtime.RawExtension{Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`)}
},
func(r *resource.AllocatedDeviceStatus, c randfill.Continue) {
c.FillNoCustom(r)
// Match the fuzzer default content for runtime.Object.
//
// This is necessary because randomly generated content
// might be valid JSON which changes during re-encoding.
r.Data = &runtime.RawExtension{Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`)}
},
func(r *resource.ResourceSliceSpec, c randfill.Continue) {
c.FillNoCustom(r)
// Setting AllNodes to false is not allowed. It must be
// either true or nil.
if r.AllNodes != nil && !*r.AllNodes {
r.AllNodes = nil
}
if r.NodeName != nil && *r.NodeName == "" {
r.NodeName = nil
}
},
}
}
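// Illustrative sketch, assuming the FuzzerFor helper in
// k8s.io/apimachinery/pkg/api/apitesting/fuzzer: Funcs plugs into the
// roundtrip machinery so that every fuzzed object already satisfies the
// invariants above.
//
//	filler := fuzzer.FuzzerFor(Funcs, rand.NewSource(seed), codecs)
//	obj := &resource.ExactDeviceRequest{}
//	filler.Fill(obj) // AllocationMode is now one of the two valid values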
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the resource API, making it available as an
// option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
v1 "k8s.io/kubernetes/pkg/apis/resource/v1"
"k8s.io/kubernetes/pkg/apis/resource/v1alpha3"
"k8s.io/kubernetes/pkg/apis/resource/v1beta1"
"k8s.io/kubernetes/pkg/apis/resource/v1beta2"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(resource.AddToScheme(scheme))
utilruntime.Must(v1alpha3.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1beta2.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
// TODO (https://github.com/kubernetes/kubernetes/issues/133131): put v1 first in 1.35
utilruntime.Must(scheme.SetVersionPriority(v1beta2.SchemeGroupVersion, v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha3.SchemeGroupVersion))
}
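// newResourceScheme is an illustrative sketch, not part of the original file:
// installing into a private scheme instead of the process-wide legacyscheme.
func newResourceScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	Install(scheme)
	// scheme now decodes resource.k8s.io objects in all served versions,
	// preferring v1beta2 per the priority order set above.
	return scheme
}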
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder object to register various known types
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme represents a func that can be used to apply all the registered
// funcs in a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
scheme.AddKnownTypes(SchemeGroupVersion,
&DeviceClass{},
&DeviceClassList{},
&DeviceTaintRule{},
&DeviceTaintRuleList{},
&ResourceClaim{},
&ResourceClaimList{},
&ResourceClaimTemplate{},
&ResourceClaimTemplateList{},
&ResourceSlice{},
&ResourceSliceList{},
)
return nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ResourceSlice"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", resourceapi.ResourceSliceSelectorNodeName, resourceapi.ResourceSliceSelectorDriver:
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("ResourceSlice"), label)
}
}); err != nil {
return err
}
return nil
}
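// Illustrative sketch, not part of the original file: the whitelist above is
// what lets field selectors such as
//
//	kubectl get resourceslices --field-selector spec.nodeName=worker-1
//
// be translated, assuming ResourceSliceSelectorNodeName resolves to
// "spec.nodeName"; any other label hits the error branch.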
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
resourceapi "k8s.io/api/resource/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_ExactDeviceRequest(obj *resourceapi.ExactDeviceRequest) {
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
func SetDefaults_DeviceSubRequest(obj *resourceapi.DeviceSubRequest) {
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
func SetDefaults_DeviceTaint(obj *resourceapi.DeviceTaint) {
if obj.TimeAdded == nil {
obj.TimeAdded = &metav1.Time{Time: time.Now().Truncate(time.Second)}
}
}
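// exampleExactDeviceRequestDefaults is an illustrative sketch, not part of
// the original file: defaulting an empty request fills in both the mode and
// the count.
func exampleExactDeviceRequestDefaults() resourceapi.ExactDeviceRequest {
	req := resourceapi.ExactDeviceRequest{}
	SetDefaults_ExactDeviceRequest(&req)
	// req.AllocationMode == resourceapi.DeviceAllocationModeExactCount
	// req.Count == 1
	return req
}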
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
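// For illustration, Resource qualifies a plain resource name with the group:
//
// gr := Resource("resourceclaims")
// // gr == schema.GroupResource{Group: "resource.k8s.io", Resource: "resourceclaims"}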
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
resourcev1 "k8s.io/api/resource/v1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
resource "k8s.io/kubernetes/pkg/apis/resource"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*resourcev1.AllocatedDeviceStatus)(nil), (*resource.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(a.(*resourcev1.AllocatedDeviceStatus), b.(*resource.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocatedDeviceStatus)(nil), (*resourcev1.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocatedDeviceStatus_To_v1_AllocatedDeviceStatus(a.(*resource.AllocatedDeviceStatus), b.(*resourcev1.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_AllocationResult_To_resource_AllocationResult(a.(*resourcev1.AllocationResult), b.(*resource.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*resourcev1.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocationResult_To_v1_AllocationResult(a.(*resource.AllocationResult), b.(*resourcev1.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.CELDeviceSelector)(nil), (*resource.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CELDeviceSelector_To_resource_CELDeviceSelector(a.(*resourcev1.CELDeviceSelector), b.(*resource.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CELDeviceSelector)(nil), (*resourcev1.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CELDeviceSelector_To_v1_CELDeviceSelector(a.(*resource.CELDeviceSelector), b.(*resourcev1.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.CapacityRequestPolicy)(nil), (*resource.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(a.(*resourcev1.CapacityRequestPolicy), b.(*resource.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicy)(nil), (*resourcev1.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicy_To_v1_CapacityRequestPolicy(a.(*resource.CapacityRequestPolicy), b.(*resourcev1.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.CapacityRequestPolicyRange)(nil), (*resource.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(a.(*resourcev1.CapacityRequestPolicyRange), b.(*resource.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicyRange)(nil), (*resourcev1.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicyRange_To_v1_CapacityRequestPolicyRange(a.(*resource.CapacityRequestPolicyRange), b.(*resourcev1.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.CapacityRequirements)(nil), (*resource.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CapacityRequirements_To_resource_CapacityRequirements(a.(*resourcev1.CapacityRequirements), b.(*resource.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequirements)(nil), (*resourcev1.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequirements_To_v1_CapacityRequirements(a.(*resource.CapacityRequirements), b.(*resourcev1.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.Counter)(nil), (*resource.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Counter_To_resource_Counter(a.(*resourcev1.Counter), b.(*resource.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.Counter)(nil), (*resourcev1.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Counter_To_v1_Counter(a.(*resource.Counter), b.(*resourcev1.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.CounterSet)(nil), (*resource.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CounterSet_To_resource_CounterSet(a.(*resourcev1.CounterSet), b.(*resource.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CounterSet)(nil), (*resourcev1.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CounterSet_To_v1_CounterSet(a.(*resource.CounterSet), b.(*resourcev1.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.Device)(nil), (*resource.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Device_To_resource_Device(a.(*resourcev1.Device), b.(*resource.Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.Device)(nil), (*resourcev1.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Device_To_v1_Device(a.(*resource.Device), b.(*resourcev1.Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceAllocationConfiguration)(nil), (*resource.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(a.(*resourcev1.DeviceAllocationConfiguration), b.(*resource.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationConfiguration)(nil), (*resourcev1.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationConfiguration_To_v1_DeviceAllocationConfiguration(a.(*resource.DeviceAllocationConfiguration), b.(*resourcev1.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceAllocationResult)(nil), (*resource.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult(a.(*resourcev1.DeviceAllocationResult), b.(*resource.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationResult)(nil), (*resourcev1.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult(a.(*resource.DeviceAllocationResult), b.(*resourcev1.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceAttribute)(nil), (*resource.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceAttribute_To_resource_DeviceAttribute(a.(*resourcev1.DeviceAttribute), b.(*resource.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAttribute)(nil), (*resourcev1.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAttribute_To_v1_DeviceAttribute(a.(*resource.DeviceAttribute), b.(*resourcev1.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceCapacity)(nil), (*resource.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceCapacity_To_resource_DeviceCapacity(a.(*resourcev1.DeviceCapacity), b.(*resource.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCapacity)(nil), (*resourcev1.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCapacity_To_v1_DeviceCapacity(a.(*resource.DeviceCapacity), b.(*resourcev1.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClaim)(nil), (*resource.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClaim_To_resource_DeviceClaim(a.(*resourcev1.DeviceClaim), b.(*resource.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaim)(nil), (*resourcev1.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaim_To_v1_DeviceClaim(a.(*resource.DeviceClaim), b.(*resourcev1.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClaimConfiguration)(nil), (*resource.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(a.(*resourcev1.DeviceClaimConfiguration), b.(*resource.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaimConfiguration)(nil), (*resourcev1.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaimConfiguration_To_v1_DeviceClaimConfiguration(a.(*resource.DeviceClaimConfiguration), b.(*resourcev1.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClass)(nil), (*resource.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClass_To_resource_DeviceClass(a.(*resourcev1.DeviceClass), b.(*resource.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClass)(nil), (*resourcev1.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClass_To_v1_DeviceClass(a.(*resource.DeviceClass), b.(*resourcev1.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClassConfiguration)(nil), (*resource.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(a.(*resourcev1.DeviceClassConfiguration), b.(*resource.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassConfiguration)(nil), (*resourcev1.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassConfiguration_To_v1_DeviceClassConfiguration(a.(*resource.DeviceClassConfiguration), b.(*resourcev1.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClassList)(nil), (*resource.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClassList_To_resource_DeviceClassList(a.(*resourcev1.DeviceClassList), b.(*resource.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassList)(nil), (*resourcev1.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassList_To_v1_DeviceClassList(a.(*resource.DeviceClassList), b.(*resourcev1.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceClassSpec)(nil), (*resource.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceClassSpec_To_resource_DeviceClassSpec(a.(*resourcev1.DeviceClassSpec), b.(*resource.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassSpec)(nil), (*resourcev1.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassSpec_To_v1_DeviceClassSpec(a.(*resource.DeviceClassSpec), b.(*resourcev1.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceConfiguration)(nil), (*resource.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(a.(*resourcev1.DeviceConfiguration), b.(*resource.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConfiguration)(nil), (*resourcev1.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(a.(*resource.DeviceConfiguration), b.(*resourcev1.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceConstraint)(nil), (*resource.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceConstraint_To_resource_DeviceConstraint(a.(*resourcev1.DeviceConstraint), b.(*resource.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConstraint)(nil), (*resourcev1.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConstraint_To_v1_DeviceConstraint(a.(*resource.DeviceConstraint), b.(*resourcev1.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceCounterConsumption)(nil), (*resource.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(a.(*resourcev1.DeviceCounterConsumption), b.(*resource.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCounterConsumption)(nil), (*resourcev1.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCounterConsumption_To_v1_DeviceCounterConsumption(a.(*resource.DeviceCounterConsumption), b.(*resourcev1.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceRequest)(nil), (*resource.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceRequest_To_resource_DeviceRequest(a.(*resourcev1.DeviceRequest), b.(*resource.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceRequest)(nil), (*resourcev1.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequest_To_v1_DeviceRequest(a.(*resource.DeviceRequest), b.(*resourcev1.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceRequestAllocationResult)(nil), (*resource.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(a.(*resourcev1.DeviceRequestAllocationResult), b.(*resource.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceRequestAllocationResult)(nil), (*resourcev1.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequestAllocationResult_To_v1_DeviceRequestAllocationResult(a.(*resource.DeviceRequestAllocationResult), b.(*resourcev1.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceSelector)(nil), (*resource.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceSelector_To_resource_DeviceSelector(a.(*resourcev1.DeviceSelector), b.(*resource.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSelector)(nil), (*resourcev1.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSelector_To_v1_DeviceSelector(a.(*resource.DeviceSelector), b.(*resourcev1.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceSubRequest)(nil), (*resource.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceSubRequest_To_resource_DeviceSubRequest(a.(*resourcev1.DeviceSubRequest), b.(*resource.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSubRequest)(nil), (*resourcev1.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSubRequest_To_v1_DeviceSubRequest(a.(*resource.DeviceSubRequest), b.(*resourcev1.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceTaint)(nil), (*resource.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceTaint_To_resource_DeviceTaint(a.(*resourcev1.DeviceTaint), b.(*resource.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaint)(nil), (*resourcev1.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaint_To_v1_DeviceTaint(a.(*resource.DeviceTaint), b.(*resourcev1.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.DeviceToleration)(nil), (*resource.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DeviceToleration_To_resource_DeviceToleration(a.(*resourcev1.DeviceToleration), b.(*resource.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceToleration)(nil), (*resourcev1.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceToleration_To_v1_DeviceToleration(a.(*resource.DeviceToleration), b.(*resourcev1.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ExactDeviceRequest)(nil), (*resource.ExactDeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExactDeviceRequest_To_resource_ExactDeviceRequest(a.(*resourcev1.ExactDeviceRequest), b.(*resource.ExactDeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ExactDeviceRequest)(nil), (*resourcev1.ExactDeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ExactDeviceRequest_To_v1_ExactDeviceRequest(a.(*resource.ExactDeviceRequest), b.(*resourcev1.ExactDeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.NetworkDeviceData)(nil), (*resource.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NetworkDeviceData_To_resource_NetworkDeviceData(a.(*resourcev1.NetworkDeviceData), b.(*resource.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.NetworkDeviceData)(nil), (*resourcev1.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_NetworkDeviceData_To_v1_NetworkDeviceData(a.(*resource.NetworkDeviceData), b.(*resourcev1.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.OpaqueDeviceConfiguration)(nil), (*resource.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(a.(*resourcev1.OpaqueDeviceConfiguration), b.(*resource.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.OpaqueDeviceConfiguration)(nil), (*resourcev1.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_OpaqueDeviceConfiguration_To_v1_OpaqueDeviceConfiguration(a.(*resource.OpaqueDeviceConfiguration), b.(*resourcev1.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaim_To_resource_ResourceClaim(a.(*resourcev1.ResourceClaim), b.(*resource.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*resourcev1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaim_To_v1_ResourceClaim(a.(*resource.ResourceClaim), b.(*resourcev1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*resourcev1.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*resourcev1.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimConsumerReference_To_v1_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*resourcev1.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimList_To_resource_ResourceClaimList(a.(*resourcev1.ResourceClaimList), b.(*resource.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*resourcev1.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimList_To_v1_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*resourcev1.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*resourcev1.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*resourcev1.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*resourcev1.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*resourcev1.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*resourcev1.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*resourcev1.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*resourcev1.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*resourcev1.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplate_To_v1_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*resourcev1.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*resourcev1.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*resourcev1.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateList_To_v1_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*resourcev1.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*resourcev1.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*resourcev1.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*resourcev1.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourcePool)(nil), (*resource.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourcePool_To_resource_ResourcePool(a.(*resourcev1.ResourcePool), b.(*resource.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourcePool)(nil), (*resourcev1.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourcePool_To_v1_ResourcePool(a.(*resource.ResourcePool), b.(*resourcev1.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceSlice)(nil), (*resource.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceSlice_To_resource_ResourceSlice(a.(*resourcev1.ResourceSlice), b.(*resource.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSlice)(nil), (*resourcev1.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSlice_To_v1_ResourceSlice(a.(*resource.ResourceSlice), b.(*resourcev1.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceSliceList)(nil), (*resource.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceSliceList_To_resource_ResourceSliceList(a.(*resourcev1.ResourceSliceList), b.(*resource.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceList)(nil), (*resourcev1.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceList_To_v1_ResourceSliceList(a.(*resource.ResourceSliceList), b.(*resourcev1.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1.ResourceSliceSpec)(nil), (*resource.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec(a.(*resourcev1.ResourceSliceSpec), b.(*resource.ResourceSliceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceSpec)(nil), (*resourcev1.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec(a.(*resource.ResourceSliceSpec), b.(*resourcev1.ResourceSliceSpec), scope)
}); err != nil {
return err
}
return nil
}
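// Illustrative sketch (assuming a *runtime.Scheme prepared with
// RegisterConversions and a v1 object named claim): conversions are normally
// driven through scheme.Convert rather than by calling the generated
// functions directly.
//
// var internal resource.ResourceClaim
// if err := scheme.Convert(&claim, &internal, nil); err != nil {
// // handle conversion error
// }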
func autoConvert_v1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resource.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_v1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_v1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_v1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in, out, s)
}
func autoConvert_resource_AllocatedDeviceStatus_To_v1_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]metav1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resourcev1.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_resource_AllocatedDeviceStatus_To_v1_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_resource_AllocatedDeviceStatus_To_v1_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_resource_AllocatedDeviceStatus_To_v1_AllocatedDeviceStatus(in, out, s)
}
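// Note on the pattern above: where the internal and v1 struct layouts are
// identical in memory, conversion-gen emits unsafe.Pointer casts (as for
// Conditions, Data, and NetworkData) instead of field-by-field copies; fields
// whose layouts differ go through generated Convert_* helpers instead.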
func autoConvert_v1_AllocationResult_To_resource_AllocationResult(in *resourcev1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
if err := Convert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*metav1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_v1_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function.
func Convert_v1_AllocationResult_To_resource_AllocationResult(in *resourcev1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
return autoConvert_v1_AllocationResult_To_resource_AllocationResult(in, out, s)
}
func autoConvert_resource_AllocationResult_To_v1_AllocationResult(in *resource.AllocationResult, out *resourcev1.AllocationResult, s conversion.Scope) error {
if err := Convert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*metav1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_resource_AllocationResult_To_v1_AllocationResult is an autogenerated conversion function.
func Convert_resource_AllocationResult_To_v1_AllocationResult(in *resource.AllocationResult, out *resourcev1.AllocationResult, s conversion.Scope) error {
return autoConvert_resource_AllocationResult_To_v1_AllocationResult(in, out, s)
}
func autoConvert_v1_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1_CELDeviceSelector_To_resource_CELDeviceSelector is an autogenerated conversion function.
func Convert_v1_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_v1_CELDeviceSelector_To_resource_CELDeviceSelector(in, out, s)
}
func autoConvert_resource_CELDeviceSelector_To_v1_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_resource_CELDeviceSelector_To_v1_CELDeviceSelector is an autogenerated conversion function.
func Convert_resource_CELDeviceSelector_To_v1_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_resource_CELDeviceSelector_To_v1_CELDeviceSelector(in, out, s)
}
func autoConvert_v1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resource.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_v1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_v1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_v1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicy_To_v1_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resourcev1.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_resource_CapacityRequestPolicy_To_v1_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicy_To_v1_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicy_To_v1_CapacityRequestPolicy(in, out, s)
}
func autoConvert_v1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_v1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_v1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_v1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicyRange_To_v1_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_resource_CapacityRequestPolicyRange_To_v1_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicyRange_To_v1_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicyRange_To_v1_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_v1_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_v1_CapacityRequirements_To_resource_CapacityRequirements is an autogenerated conversion function.
func Convert_v1_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
return autoConvert_v1_CapacityRequirements_To_resource_CapacityRequirements(in, out, s)
}
func autoConvert_resource_CapacityRequirements_To_v1_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resourcev1.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_resource_CapacityRequirements_To_v1_CapacityRequirements is an autogenerated conversion function.
func Convert_resource_CapacityRequirements_To_v1_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1.CapacityRequirements, s conversion.Scope) error {
return autoConvert_resource_CapacityRequirements_To_v1_CapacityRequirements(in, out, s)
}
func autoConvert_v1_Counter_To_resource_Counter(in *resourcev1.Counter, out *resource.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_v1_Counter_To_resource_Counter is an autogenerated conversion function.
func Convert_v1_Counter_To_resource_Counter(in *resourcev1.Counter, out *resource.Counter, s conversion.Scope) error {
return autoConvert_v1_Counter_To_resource_Counter(in, out, s)
}
func autoConvert_resource_Counter_To_v1_Counter(in *resource.Counter, out *resourcev1.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_resource_Counter_To_v1_Counter is an autogenerated conversion function.
func Convert_resource_Counter_To_v1_Counter(in *resource.Counter, out *resourcev1.Counter, s conversion.Scope) error {
return autoConvert_resource_Counter_To_v1_Counter(in, out, s)
}
func autoConvert_v1_CounterSet_To_resource_CounterSet(in *resourcev1.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1_CounterSet_To_resource_CounterSet is an autogenerated conversion function.
func Convert_v1_CounterSet_To_resource_CounterSet(in *resourcev1.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
return autoConvert_v1_CounterSet_To_resource_CounterSet(in, out, s)
}
func autoConvert_resource_CounterSet_To_v1_CounterSet(in *resource.CounterSet, out *resourcev1.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resourcev1.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_CounterSet_To_v1_CounterSet is an autogenerated conversion function.
func Convert_resource_CounterSet_To_v1_CounterSet(in *resource.CounterSet, out *resourcev1.CounterSet, s conversion.Scope) error {
return autoConvert_resource_CounterSet_To_v1_CounterSet(in, out, s)
}
func autoConvert_v1_Device_To_resource_Device(in *resourcev1.Device, out *resource.Device, s conversion.Scope) error {
out.Name = in.Name
out.Attributes = *(*map[resource.QualifiedName]resource.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[resource.QualifiedName]resource.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
out.ConsumesCounters = *(*[]resource.DeviceCounterConsumption)(unsafe.Pointer(&in.ConsumesCounters))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Taints = *(*[]resource.DeviceTaint)(unsafe.Pointer(&in.Taints))
out.BindsToNode = (*bool)(unsafe.Pointer(in.BindsToNode))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.AllowMultipleAllocations = (*bool)(unsafe.Pointer(in.AllowMultipleAllocations))
return nil
}
// Convert_v1_Device_To_resource_Device is an autogenerated conversion function.
func Convert_v1_Device_To_resource_Device(in *resourcev1.Device, out *resource.Device, s conversion.Scope) error {
return autoConvert_v1_Device_To_resource_Device(in, out, s)
}
func autoConvert_resource_Device_To_v1_Device(in *resource.Device, out *resourcev1.Device, s conversion.Scope) error {
out.Name = in.Name
out.Attributes = *(*map[resourcev1.QualifiedName]resourcev1.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[resourcev1.QualifiedName]resourcev1.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
out.ConsumesCounters = *(*[]resourcev1.DeviceCounterConsumption)(unsafe.Pointer(&in.ConsumesCounters))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Taints = *(*[]resourcev1.DeviceTaint)(unsafe.Pointer(&in.Taints))
out.BindsToNode = (*bool)(unsafe.Pointer(in.BindsToNode))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.AllowMultipleAllocations = (*bool)(unsafe.Pointer(in.AllowMultipleAllocations))
return nil
}
// Convert_resource_Device_To_v1_Device is an autogenerated conversion function.
func Convert_resource_Device_To_v1_Device(in *resource.Device, out *resourcev1.Device, s conversion.Scope) error {
return autoConvert_resource_Device_To_v1_Device(in, out, s)
}
func autoConvert_v1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resource.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_v1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_v1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in, out, s)
}
func autoConvert_resource_DeviceAllocationConfiguration_To_v1_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resourcev1.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceAllocationConfiguration_To_v1_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceAllocationConfiguration_To_v1_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationConfiguration_To_v1_DeviceAllocationConfiguration(in, out, s)
}
func autoConvert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resource.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resource.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult is an autogenerated conversion function.
func Convert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_v1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resourcev1.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resourcev1.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationResult_To_v1_DeviceAllocationResult(in, out, s)
}
func autoConvert_v1_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_v1_DeviceAttribute_To_resource_DeviceAttribute is an autogenerated conversion function.
func Convert_v1_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
return autoConvert_v1_DeviceAttribute_To_resource_DeviceAttribute(in, out, s)
}
func autoConvert_resource_DeviceAttribute_To_v1_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_resource_DeviceAttribute_To_v1_DeviceAttribute is an autogenerated conversion function.
func Convert_resource_DeviceAttribute_To_v1_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1.DeviceAttribute, s conversion.Scope) error {
return autoConvert_resource_DeviceAttribute_To_v1_DeviceAttribute(in, out, s)
}
func autoConvert_v1_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resource.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_v1_DeviceCapacity_To_resource_DeviceCapacity is an autogenerated conversion function.
func Convert_v1_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
return autoConvert_v1_DeviceCapacity_To_resource_DeviceCapacity(in, out, s)
}
func autoConvert_resource_DeviceCapacity_To_v1_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resourcev1.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_resource_DeviceCapacity_To_v1_DeviceCapacity is an autogenerated conversion function.
func Convert_resource_DeviceCapacity_To_v1_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1.DeviceCapacity, s conversion.Scope) error {
return autoConvert_resource_DeviceCapacity_To_v1_DeviceCapacity(in, out, s)
}
func autoConvert_v1_DeviceClaim_To_resource_DeviceClaim(in *resourcev1.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
out.Requests = *(*[]resource.DeviceRequest)(unsafe.Pointer(&in.Requests))
out.Constraints = *(*[]resource.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resource.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1_DeviceClaim_To_resource_DeviceClaim is an autogenerated conversion function.
func Convert_v1_DeviceClaim_To_resource_DeviceClaim(in *resourcev1.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
return autoConvert_v1_DeviceClaim_To_resource_DeviceClaim(in, out, s)
}
func autoConvert_resource_DeviceClaim_To_v1_DeviceClaim(in *resource.DeviceClaim, out *resourcev1.DeviceClaim, s conversion.Scope) error {
out.Requests = *(*[]resourcev1.DeviceRequest)(unsafe.Pointer(&in.Requests))
out.Constraints = *(*[]resourcev1.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resourcev1.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceClaim_To_v1_DeviceClaim is an autogenerated conversion function.
func Convert_resource_DeviceClaim_To_v1_DeviceClaim(in *resource.DeviceClaim, out *resourcev1.DeviceClaim, s conversion.Scope) error {
return autoConvert_resource_DeviceClaim_To_v1_DeviceClaim(in, out, s)
}
func autoConvert_v1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_v1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_v1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClaimConfiguration_To_v1_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClaimConfiguration_To_v1_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClaimConfiguration_To_v1_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClaimConfiguration_To_v1_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_v1_DeviceClass_To_resource_DeviceClass(in *resourcev1.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_DeviceClassSpec_To_resource_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_DeviceClass_To_resource_DeviceClass is an autogenerated conversion function.
func Convert_v1_DeviceClass_To_resource_DeviceClass(in *resourcev1.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
return autoConvert_v1_DeviceClass_To_resource_DeviceClass(in, out, s)
}
func autoConvert_resource_DeviceClass_To_v1_DeviceClass(in *resource.DeviceClass, out *resourcev1.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_DeviceClassSpec_To_v1_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClass_To_v1_DeviceClass is an autogenerated conversion function.
func Convert_resource_DeviceClass_To_v1_DeviceClass(in *resource.DeviceClass, out *resourcev1.DeviceClass, s conversion.Scope) error {
return autoConvert_resource_DeviceClass_To_v1_DeviceClass(in, out, s)
}
func autoConvert_v1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_v1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_v1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClassConfiguration_To_v1_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClassConfiguration_To_v1_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClassConfiguration_To_v1_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClassConfiguration_To_v1_DeviceClassConfiguration(in, out, s)
}
func autoConvert_v1_DeviceClassList_To_resource_DeviceClassList(in *resourcev1.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_DeviceClassList_To_resource_DeviceClassList is an autogenerated conversion function.
func Convert_v1_DeviceClassList_To_resource_DeviceClassList(in *resourcev1.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
return autoConvert_v1_DeviceClassList_To_resource_DeviceClassList(in, out, s)
}
func autoConvert_resource_DeviceClassList_To_v1_DeviceClassList(in *resource.DeviceClassList, out *resourcev1.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_DeviceClassList_To_v1_DeviceClassList is an autogenerated conversion function.
func Convert_resource_DeviceClassList_To_v1_DeviceClassList(in *resource.DeviceClassList, out *resourcev1.DeviceClassList, s conversion.Scope) error {
return autoConvert_resource_DeviceClassList_To_v1_DeviceClassList(in, out, s)
}
func autoConvert_v1_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resource.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_v1_DeviceClassSpec_To_resource_DeviceClassSpec is an autogenerated conversion function.
func Convert_v1_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_v1_DeviceClassSpec_To_resource_DeviceClassSpec(in, out, s)
}
func autoConvert_resource_DeviceClassSpec_To_v1_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resourcev1.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resourcev1.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_resource_DeviceClassSpec_To_v1_DeviceClassSpec is an autogenerated conversion function.
func Convert_resource_DeviceClassSpec_To_v1_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_resource_DeviceClassSpec_To_v1_DeviceClassSpec(in, out, s)
}
func autoConvert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resource.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration is an autogenerated conversion function.
func Convert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1_DeviceConfiguration_To_resource_DeviceConfiguration(in, out, s)
}
func autoConvert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resourcev1.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceConfiguration_To_v1_DeviceConfiguration(in, out, s)
}
func autoConvert_v1_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_v1_DeviceConstraint_To_resource_DeviceConstraint is an autogenerated conversion function.
func Convert_v1_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
return autoConvert_v1_DeviceConstraint_To_resource_DeviceConstraint(in, out, s)
}
func autoConvert_resource_DeviceConstraint_To_v1_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resourcev1.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resourcev1.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_resource_DeviceConstraint_To_v1_DeviceConstraint is an autogenerated conversion function.
func Convert_resource_DeviceConstraint_To_v1_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1.DeviceConstraint, s conversion.Scope) error {
return autoConvert_resource_DeviceConstraint_To_v1_DeviceConstraint(in, out, s)
}
func autoConvert_v1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_v1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_v1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in, out, s)
}
func autoConvert_resource_DeviceCounterConsumption_To_v1_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resourcev1.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_DeviceCounterConsumption_To_v1_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_resource_DeviceCounterConsumption_To_v1_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_resource_DeviceCounterConsumption_To_v1_DeviceCounterConsumption(in, out, s)
}
func autoConvert_v1_DeviceRequest_To_resource_DeviceRequest(in *resourcev1.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
out.Exactly = (*resource.ExactDeviceRequest)(unsafe.Pointer(in.Exactly))
out.FirstAvailable = *(*[]resource.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
return nil
}
// Convert_v1_DeviceRequest_To_resource_DeviceRequest is an autogenerated conversion function.
func Convert_v1_DeviceRequest_To_resource_DeviceRequest(in *resourcev1.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
return autoConvert_v1_DeviceRequest_To_resource_DeviceRequest(in, out, s)
}
func autoConvert_resource_DeviceRequest_To_v1_DeviceRequest(in *resource.DeviceRequest, out *resourcev1.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
out.Exactly = (*resourcev1.ExactDeviceRequest)(unsafe.Pointer(in.Exactly))
out.FirstAvailable = *(*[]resourcev1.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
return nil
}
// Convert_resource_DeviceRequest_To_v1_DeviceRequest is an autogenerated conversion function.
func Convert_resource_DeviceRequest_To_v1_DeviceRequest(in *resource.DeviceRequest, out *resourcev1.DeviceRequest, s conversion.Scope) error {
return autoConvert_resource_DeviceRequest_To_v1_DeviceRequest(in, out, s)
}
func autoConvert_v1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_v1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_v1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_v1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceRequestAllocationResult_To_v1_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resourcev1.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resourcev1.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_resource_DeviceRequestAllocationResult_To_v1_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceRequestAllocationResult_To_v1_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceRequestAllocationResult_To_v1_DeviceRequestAllocationResult(in, out, s)
}
func autoConvert_v1_DeviceSelector_To_resource_DeviceSelector(in *resourcev1.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resource.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_v1_DeviceSelector_To_resource_DeviceSelector is an autogenerated conversion function.
func Convert_v1_DeviceSelector_To_resource_DeviceSelector(in *resourcev1.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
return autoConvert_v1_DeviceSelector_To_resource_DeviceSelector(in, out, s)
}
func autoConvert_resource_DeviceSelector_To_v1_DeviceSelector(in *resource.DeviceSelector, out *resourcev1.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resourcev1.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_resource_DeviceSelector_To_v1_DeviceSelector is an autogenerated conversion function.
func Convert_resource_DeviceSelector_To_v1_DeviceSelector(in *resource.DeviceSelector, out *resourcev1.DeviceSelector, s conversion.Scope) error {
return autoConvert_resource_DeviceSelector_To_v1_DeviceSelector(in, out, s)
}
func autoConvert_v1_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resource.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1_DeviceSubRequest_To_resource_DeviceSubRequest is an autogenerated conversion function.
func Convert_v1_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_v1_DeviceSubRequest_To_resource_DeviceSubRequest(in, out, s)
}
func autoConvert_resource_DeviceSubRequest_To_v1_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resourcev1.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resourcev1.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resourcev1.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resourcev1.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_resource_DeviceSubRequest_To_v1_DeviceSubRequest is an autogenerated conversion function.
func Convert_resource_DeviceSubRequest_To_v1_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_resource_DeviceSubRequest_To_v1_DeviceSubRequest(in, out, s)
}
func autoConvert_v1_DeviceTaint_To_resource_DeviceTaint(in *resourcev1.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_v1_DeviceTaint_To_resource_DeviceTaint is an autogenerated conversion function.
func Convert_v1_DeviceTaint_To_resource_DeviceTaint(in *resourcev1.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
return autoConvert_v1_DeviceTaint_To_resource_DeviceTaint(in, out, s)
}
func autoConvert_resource_DeviceTaint_To_v1_DeviceTaint(in *resource.DeviceTaint, out *resourcev1.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resourcev1.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_resource_DeviceTaint_To_v1_DeviceTaint is an autogenerated conversion function.
func Convert_resource_DeviceTaint_To_v1_DeviceTaint(in *resource.DeviceTaint, out *resourcev1.DeviceTaint, s conversion.Scope) error {
return autoConvert_resource_DeviceTaint_To_v1_DeviceTaint(in, out, s)
}
func autoConvert_v1_DeviceToleration_To_resource_DeviceToleration(in *resourcev1.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resource.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_v1_DeviceToleration_To_resource_DeviceToleration is an autogenerated conversion function.
func Convert_v1_DeviceToleration_To_resource_DeviceToleration(in *resourcev1.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
return autoConvert_v1_DeviceToleration_To_resource_DeviceToleration(in, out, s)
}
func autoConvert_resource_DeviceToleration_To_v1_DeviceToleration(in *resource.DeviceToleration, out *resourcev1.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resourcev1.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resourcev1.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_resource_DeviceToleration_To_v1_DeviceToleration is an autogenerated conversion function.
func Convert_resource_DeviceToleration_To_v1_DeviceToleration(in *resource.DeviceToleration, out *resourcev1.DeviceToleration, s conversion.Scope) error {
return autoConvert_resource_DeviceToleration_To_v1_DeviceToleration(in, out, s)
}
func autoConvert_v1_ExactDeviceRequest_To_resource_ExactDeviceRequest(in *resourcev1.ExactDeviceRequest, out *resource.ExactDeviceRequest, s conversion.Scope) error {
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resource.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1_ExactDeviceRequest_To_resource_ExactDeviceRequest is an autogenerated conversion function.
func Convert_v1_ExactDeviceRequest_To_resource_ExactDeviceRequest(in *resourcev1.ExactDeviceRequest, out *resource.ExactDeviceRequest, s conversion.Scope) error {
return autoConvert_v1_ExactDeviceRequest_To_resource_ExactDeviceRequest(in, out, s)
}
func autoConvert_resource_ExactDeviceRequest_To_v1_ExactDeviceRequest(in *resource.ExactDeviceRequest, out *resourcev1.ExactDeviceRequest, s conversion.Scope) error {
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resourcev1.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resourcev1.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resourcev1.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resourcev1.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_resource_ExactDeviceRequest_To_v1_ExactDeviceRequest is an autogenerated conversion function.
func Convert_resource_ExactDeviceRequest_To_v1_ExactDeviceRequest(in *resource.ExactDeviceRequest, out *resourcev1.ExactDeviceRequest, s conversion.Scope) error {
return autoConvert_resource_ExactDeviceRequest_To_v1_ExactDeviceRequest(in, out, s)
}
func autoConvert_v1_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_v1_NetworkDeviceData_To_resource_NetworkDeviceData is an autogenerated conversion function.
func Convert_v1_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_v1_NetworkDeviceData_To_resource_NetworkDeviceData(in, out, s)
}
func autoConvert_resource_NetworkDeviceData_To_v1_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_resource_NetworkDeviceData_To_v1_NetworkDeviceData is an autogenerated conversion function.
func Convert_resource_NetworkDeviceData_To_v1_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_resource_NetworkDeviceData_To_v1_NetworkDeviceData(in, out, s)
}
func autoConvert_v1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_v1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_v1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_resource_OpaqueDeviceConfiguration_To_v1_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_resource_OpaqueDeviceConfiguration_To_v1_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_resource_OpaqueDeviceConfiguration_To_v1_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_OpaqueDeviceConfiguration_To_v1_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_v1_ResourceClaim_To_resource_ResourceClaim(in *resourcev1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function.
func Convert_v1_ResourceClaim_To_resource_ResourceClaim(in *resourcev1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1_ResourceClaim_To_resource_ResourceClaim(in, out, s)
}
func autoConvert_resource_ResourceClaim_To_v1_ResourceClaim(in *resource.ResourceClaim, out *resourcev1.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function.
func Convert_resource_ResourceClaim_To_v1_ResourceClaim(in *resource.ResourceClaim, out *resourcev1.ResourceClaim, s conversion.Scope) error {
return autoConvert_resource_ResourceClaim_To_v1_ResourceClaim(in, out, s)
}
func autoConvert_v1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_v1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_v1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_resource_ResourceClaimConsumerReference_To_v1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_resource_ResourceClaimConsumerReference_To_v1_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_resource_ResourceClaimConsumerReference_To_v1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimConsumerReference_To_v1_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_v1_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function.
func Convert_v1_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimList_To_resource_ResourceClaimList(in, out, s)
}
func autoConvert_resource_ResourceClaimList_To_v1_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimList_To_v1_ResourceClaimList is an autogenerated conversion function.
func Convert_resource_ResourceClaimList_To_v1_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1.ResourceClaimList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimList_To_v1_ResourceClaimList(in, out, s)
}
func autoConvert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_v1_DeviceClaim_To_resource_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function.
func Convert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_resource_DeviceClaim_To_v1_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(in, out, s)
}
func autoConvert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resource.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function.
func Convert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s)
}
func autoConvert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resourcev1.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resourcev1.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resourcev1.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus is an autogenerated conversion function.
func Convert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimStatus_To_v1_ResourceClaimStatus(in, out, s)
}
func autoConvert_v1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_v1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplate_To_v1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplate_To_v1_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplate_To_v1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplate_To_v1_ResourceClaimTemplate(in, out, s)
}
func autoConvert_v1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_v1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateList_To_v1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimTemplateList_To_v1_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateList_To_v1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateList_To_v1_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_v1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateSpec_To_v1_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_v1_ResourcePool_To_resource_ResourcePool(in *resourcev1.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_v1_ResourcePool_To_resource_ResourcePool is an autogenerated conversion function.
func Convert_v1_ResourcePool_To_resource_ResourcePool(in *resourcev1.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
return autoConvert_v1_ResourcePool_To_resource_ResourcePool(in, out, s)
}
func autoConvert_resource_ResourcePool_To_v1_ResourcePool(in *resource.ResourcePool, out *resourcev1.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_resource_ResourcePool_To_v1_ResourcePool is an autogenerated conversion function.
func Convert_resource_ResourcePool_To_v1_ResourcePool(in *resource.ResourcePool, out *resourcev1.ResourcePool, s conversion.Scope) error {
return autoConvert_resource_ResourcePool_To_v1_ResourcePool(in, out, s)
}
func autoConvert_v1_ResourceSlice_To_resource_ResourceSlice(in *resourcev1.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_ResourceSlice_To_resource_ResourceSlice is an autogenerated conversion function.
func Convert_v1_ResourceSlice_To_resource_ResourceSlice(in *resourcev1.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
return autoConvert_v1_ResourceSlice_To_resource_ResourceSlice(in, out, s)
}
func autoConvert_resource_ResourceSlice_To_v1_ResourceSlice(in *resource.ResourceSlice, out *resourcev1.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceSlice_To_v1_ResourceSlice is an autogenerated conversion function.
func Convert_resource_ResourceSlice_To_v1_ResourceSlice(in *resource.ResourceSlice, out *resourcev1.ResourceSlice, s conversion.Scope) error {
return autoConvert_resource_ResourceSlice_To_v1_ResourceSlice(in, out, s)
}
func autoConvert_v1_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_ResourceSliceList_To_resource_ResourceSliceList is an autogenerated conversion function.
func Convert_v1_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
return autoConvert_v1_ResourceSliceList_To_resource_ResourceSliceList(in, out, s)
}
func autoConvert_resource_ResourceSliceList_To_v1_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceSliceList_To_v1_ResourceSliceList is an autogenerated conversion function.
func Convert_resource_ResourceSliceList_To_v1_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1.ResourceSliceList, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceList_To_v1_ResourceSliceList(in, out, s)
}
func autoConvert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_v1_ResourcePool_To_resource_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Devices = *(*[]resource.Device)(unsafe.Pointer(&in.Devices))
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resource.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
// Convert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec is an autogenerated conversion function.
func Convert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_v1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in, out, s)
}
func autoConvert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_resource_ResourcePool_To_v1_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Devices = *(*[]resourcev1.Device)(unsafe.Pointer(&in.Devices))
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resourcev1.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
// Convert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec is an autogenerated conversion function.
func Convert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceSpec_To_v1_ResourceSliceSpec(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
resourcev1 "k8s.io/api/resource/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*resourcev1.ResourceClaim)) })
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*resourcev1.ResourceClaimList)) })
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceClaimTemplate{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplate(obj.(*resourcev1.ResourceClaimTemplate))
})
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceClaimTemplateList{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplateList(obj.(*resourcev1.ResourceClaimTemplateList))
})
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceSlice{}, func(obj interface{}) { SetObjectDefaults_ResourceSlice(obj.(*resourcev1.ResourceSlice)) })
scheme.AddTypeDefaultingFunc(&resourcev1.ResourceSliceList{}, func(obj interface{}) { SetObjectDefaults_ResourceSliceList(obj.(*resourcev1.ResourceSliceList)) })
return nil
}
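// An illustrative sketch (not part of the generated file): once the
// defaulters are registered, scheme.Default applies them recursively.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	claim := &resourcev1.ResourceClaim{}
//	scheme.Default(claim) // e.g. empty toleration operators become "Equal"

// SetObjectDefaults_ResourceClaim walks every device request (both the
// exact request and any first-available sub-requests) as well as every
// allocated device result, filling in unset toleration operators and
// other field defaults.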
func SetObjectDefaults_ResourceClaim(in *resourcev1.ResourceClaim) {
for i := range in.Spec.Devices.Requests {
a := &in.Spec.Devices.Requests[i]
if a.Exactly != nil {
SetDefaults_ExactDeviceRequest(a.Exactly)
for j := range a.Exactly.Tolerations {
b := &a.Exactly.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
}
if in.Status.Allocation != nil {
for i := range in.Status.Allocation.Devices.Results {
a := &in.Status.Allocation.Devices.Results[i]
for j := range a.Tolerations {
b := &a.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
}
}
func SetObjectDefaults_ResourceClaimList(in *resourcev1.ResourceClaimList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaim(a)
}
}
func SetObjectDefaults_ResourceClaimTemplate(in *resourcev1.ResourceClaimTemplate) {
for i := range in.Spec.Spec.Devices.Requests {
a := &in.Spec.Spec.Devices.Requests[i]
if a.Exactly != nil {
SetDefaults_ExactDeviceRequest(a.Exactly)
for j := range a.Exactly.Tolerations {
b := &a.Exactly.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
}
}
func SetObjectDefaults_ResourceClaimTemplateList(in *resourcev1.ResourceClaimTemplateList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaimTemplate(a)
}
}
func SetObjectDefaults_ResourceSlice(in *resourcev1.ResourceSlice) {
for i := range in.Spec.Devices {
a := &in.Spec.Devices[i]
for j := range a.Taints {
b := &a.Taints[j]
SetDefaults_DeviceTaint(b)
}
}
}
func SetObjectDefaults_ResourceSliceList(in *resourcev1.ResourceSliceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceSlice(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"k8s.io/apimachinery/pkg/runtime"
)
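// addConversionFuncs is the hook for manually written conversion
// functions; v1alpha3 currently registers none.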
func addConversionFuncs(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"time"
resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
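// SetDefaults_DeviceTaint stamps TimeAdded with the current time,
// truncated to whole seconds: metav1.Time serializes via RFC 3339 at
// second granularity, so sub-second precision would not survive a
// round trip through the API server.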
func SetDefaults_DeviceTaint(obj *resourceapi.DeviceTaint) {
if obj.TimeAdded == nil {
obj.TimeAdded = &metav1.Time{Time: time.Now().Truncate(time.Second)}
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1alpha3.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
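// For example, Resource("devicetaintrules") yields the GroupResource
// {Group: "resource.k8s.io", Resource: "devicetaintrules"}.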
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha3
import (
unsafe "unsafe"
resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
resource "k8s.io/kubernetes/pkg/apis/resource"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.CELDeviceSelector)(nil), (*resource.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_CELDeviceSelector_To_resource_CELDeviceSelector(a.(*resourcev1alpha3.CELDeviceSelector), b.(*resource.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CELDeviceSelector)(nil), (*resourcev1alpha3.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CELDeviceSelector_To_v1alpha3_CELDeviceSelector(a.(*resource.CELDeviceSelector), b.(*resourcev1alpha3.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceSelector)(nil), (*resource.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceSelector_To_resource_DeviceSelector(a.(*resourcev1alpha3.DeviceSelector), b.(*resource.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSelector)(nil), (*resourcev1alpha3.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSelector_To_v1alpha3_DeviceSelector(a.(*resource.DeviceSelector), b.(*resourcev1alpha3.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceTaint)(nil), (*resource.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceTaint_To_resource_DeviceTaint(a.(*resourcev1alpha3.DeviceTaint), b.(*resource.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaint)(nil), (*resourcev1alpha3.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaint_To_v1alpha3_DeviceTaint(a.(*resource.DeviceTaint), b.(*resourcev1alpha3.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceTaintRule)(nil), (*resource.DeviceTaintRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceTaintRule_To_resource_DeviceTaintRule(a.(*resourcev1alpha3.DeviceTaintRule), b.(*resource.DeviceTaintRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaintRule)(nil), (*resourcev1alpha3.DeviceTaintRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaintRule_To_v1alpha3_DeviceTaintRule(a.(*resource.DeviceTaintRule), b.(*resourcev1alpha3.DeviceTaintRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceTaintRuleList)(nil), (*resource.DeviceTaintRuleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceTaintRuleList_To_resource_DeviceTaintRuleList(a.(*resourcev1alpha3.DeviceTaintRuleList), b.(*resource.DeviceTaintRuleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaintRuleList)(nil), (*resourcev1alpha3.DeviceTaintRuleList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaintRuleList_To_v1alpha3_DeviceTaintRuleList(a.(*resource.DeviceTaintRuleList), b.(*resourcev1alpha3.DeviceTaintRuleList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceTaintRuleSpec)(nil), (*resource.DeviceTaintRuleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec(a.(*resourcev1alpha3.DeviceTaintRuleSpec), b.(*resource.DeviceTaintRuleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaintRuleSpec)(nil), (*resourcev1alpha3.DeviceTaintRuleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec(a.(*resource.DeviceTaintRuleSpec), b.(*resourcev1alpha3.DeviceTaintRuleSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.DeviceTaintSelector)(nil), (*resource.DeviceTaintSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceTaintSelector_To_resource_DeviceTaintSelector(a.(*resourcev1alpha3.DeviceTaintSelector), b.(*resource.DeviceTaintSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaintSelector)(nil), (*resourcev1alpha3.DeviceTaintSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaintSelector_To_v1alpha3_DeviceTaintSelector(a.(*resource.DeviceTaintSelector), b.(*resourcev1alpha3.DeviceTaintSelector), scope)
}); err != nil {
return err
}
return nil
}
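// An illustrative sketch (not part of the generated file): with the
// conversions registered, a scheme can convert between the versioned and
// internal representations. The taint key is a hypothetical example value.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	in := &resourcev1alpha3.DeviceTaint{Key: "example.com/unhealthy"}
//	out := &resource.DeviceTaint{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		panic(err)
//	}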
func autoConvert_v1alpha3_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1alpha3.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1alpha3_CELDeviceSelector_To_resource_CELDeviceSelector is an autogenerated conversion function.
func Convert_v1alpha3_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1alpha3.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_v1alpha3_CELDeviceSelector_To_resource_CELDeviceSelector(in, out, s)
}
func autoConvert_resource_CELDeviceSelector_To_v1alpha3_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1alpha3.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_resource_CELDeviceSelector_To_v1alpha3_CELDeviceSelector is an autogenerated conversion function.
func Convert_resource_CELDeviceSelector_To_v1alpha3_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1alpha3.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_resource_CELDeviceSelector_To_v1alpha3_CELDeviceSelector(in, out, s)
}
func autoConvert_v1alpha3_DeviceSelector_To_resource_DeviceSelector(in *resourcev1alpha3.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resource.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_v1alpha3_DeviceSelector_To_resource_DeviceSelector is an autogenerated conversion function.
func Convert_v1alpha3_DeviceSelector_To_resource_DeviceSelector(in *resourcev1alpha3.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceSelector_To_resource_DeviceSelector(in, out, s)
}
func autoConvert_resource_DeviceSelector_To_v1alpha3_DeviceSelector(in *resource.DeviceSelector, out *resourcev1alpha3.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resourcev1alpha3.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_resource_DeviceSelector_To_v1alpha3_DeviceSelector is an autogenerated conversion function.
func Convert_resource_DeviceSelector_To_v1alpha3_DeviceSelector(in *resource.DeviceSelector, out *resourcev1alpha3.DeviceSelector, s conversion.Scope) error {
return autoConvert_resource_DeviceSelector_To_v1alpha3_DeviceSelector(in, out, s)
}
func autoConvert_v1alpha3_DeviceTaint_To_resource_DeviceTaint(in *resourcev1alpha3.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_v1alpha3_DeviceTaint_To_resource_DeviceTaint is an autogenerated conversion function.
func Convert_v1alpha3_DeviceTaint_To_resource_DeviceTaint(in *resourcev1alpha3.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceTaint_To_resource_DeviceTaint(in, out, s)
}
func autoConvert_resource_DeviceTaint_To_v1alpha3_DeviceTaint(in *resource.DeviceTaint, out *resourcev1alpha3.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resourcev1alpha3.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_resource_DeviceTaint_To_v1alpha3_DeviceTaint is an autogenerated conversion function.
func Convert_resource_DeviceTaint_To_v1alpha3_DeviceTaint(in *resource.DeviceTaint, out *resourcev1alpha3.DeviceTaint, s conversion.Scope) error {
return autoConvert_resource_DeviceTaint_To_v1alpha3_DeviceTaint(in, out, s)
}
func autoConvert_v1alpha3_DeviceTaintRule_To_resource_DeviceTaintRule(in *resourcev1alpha3.DeviceTaintRule, out *resource.DeviceTaintRule, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha3_DeviceTaintRule_To_resource_DeviceTaintRule is an autogenerated conversion function.
func Convert_v1alpha3_DeviceTaintRule_To_resource_DeviceTaintRule(in *resourcev1alpha3.DeviceTaintRule, out *resource.DeviceTaintRule, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceTaintRule_To_resource_DeviceTaintRule(in, out, s)
}
func autoConvert_resource_DeviceTaintRule_To_v1alpha3_DeviceTaintRule(in *resource.DeviceTaintRule, out *resourcev1alpha3.DeviceTaintRule, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceTaintRule_To_v1alpha3_DeviceTaintRule is an autogenerated conversion function.
func Convert_resource_DeviceTaintRule_To_v1alpha3_DeviceTaintRule(in *resource.DeviceTaintRule, out *resourcev1alpha3.DeviceTaintRule, s conversion.Scope) error {
return autoConvert_resource_DeviceTaintRule_To_v1alpha3_DeviceTaintRule(in, out, s)
}
func autoConvert_v1alpha3_DeviceTaintRuleList_To_resource_DeviceTaintRuleList(in *resourcev1alpha3.DeviceTaintRuleList, out *resource.DeviceTaintRuleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.DeviceTaintRule)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha3_DeviceTaintRuleList_To_resource_DeviceTaintRuleList is an autogenerated conversion function.
func Convert_v1alpha3_DeviceTaintRuleList_To_resource_DeviceTaintRuleList(in *resourcev1alpha3.DeviceTaintRuleList, out *resource.DeviceTaintRuleList, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceTaintRuleList_To_resource_DeviceTaintRuleList(in, out, s)
}
func autoConvert_resource_DeviceTaintRuleList_To_v1alpha3_DeviceTaintRuleList(in *resource.DeviceTaintRuleList, out *resourcev1alpha3.DeviceTaintRuleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1alpha3.DeviceTaintRule)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_DeviceTaintRuleList_To_v1alpha3_DeviceTaintRuleList is an autogenerated conversion function.
func Convert_resource_DeviceTaintRuleList_To_v1alpha3_DeviceTaintRuleList(in *resource.DeviceTaintRuleList, out *resourcev1alpha3.DeviceTaintRuleList, s conversion.Scope) error {
return autoConvert_resource_DeviceTaintRuleList_To_v1alpha3_DeviceTaintRuleList(in, out, s)
}
func autoConvert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec(in *resourcev1alpha3.DeviceTaintRuleSpec, out *resource.DeviceTaintRuleSpec, s conversion.Scope) error {
out.DeviceSelector = (*resource.DeviceTaintSelector)(unsafe.Pointer(in.DeviceSelector))
if err := Convert_v1alpha3_DeviceTaint_To_resource_DeviceTaint(&in.Taint, &out.Taint, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec is an autogenerated conversion function.
func Convert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec(in *resourcev1alpha3.DeviceTaintRuleSpec, out *resource.DeviceTaintRuleSpec, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceTaintRuleSpec_To_resource_DeviceTaintRuleSpec(in, out, s)
}
func autoConvert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec(in *resource.DeviceTaintRuleSpec, out *resourcev1alpha3.DeviceTaintRuleSpec, s conversion.Scope) error {
out.DeviceSelector = (*resourcev1alpha3.DeviceTaintSelector)(unsafe.Pointer(in.DeviceSelector))
if err := Convert_resource_DeviceTaint_To_v1alpha3_DeviceTaint(&in.Taint, &out.Taint, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec is an autogenerated conversion function.
func Convert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec(in *resource.DeviceTaintRuleSpec, out *resourcev1alpha3.DeviceTaintRuleSpec, s conversion.Scope) error {
return autoConvert_resource_DeviceTaintRuleSpec_To_v1alpha3_DeviceTaintRuleSpec(in, out, s)
}
func autoConvert_v1alpha3_DeviceTaintSelector_To_resource_DeviceTaintSelector(in *resourcev1alpha3.DeviceTaintSelector, out *resource.DeviceTaintSelector, s conversion.Scope) error {
out.DeviceClassName = (*string)(unsafe.Pointer(in.DeviceClassName))
out.Driver = (*string)(unsafe.Pointer(in.Driver))
out.Pool = (*string)(unsafe.Pointer(in.Pool))
out.Device = (*string)(unsafe.Pointer(in.Device))
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_v1alpha3_DeviceTaintSelector_To_resource_DeviceTaintSelector is an autogenerated conversion function.
func Convert_v1alpha3_DeviceTaintSelector_To_resource_DeviceTaintSelector(in *resourcev1alpha3.DeviceTaintSelector, out *resource.DeviceTaintSelector, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceTaintSelector_To_resource_DeviceTaintSelector(in, out, s)
}
func autoConvert_resource_DeviceTaintSelector_To_v1alpha3_DeviceTaintSelector(in *resource.DeviceTaintSelector, out *resourcev1alpha3.DeviceTaintSelector, s conversion.Scope) error {
out.DeviceClassName = (*string)(unsafe.Pointer(in.DeviceClassName))
out.Driver = (*string)(unsafe.Pointer(in.Driver))
out.Pool = (*string)(unsafe.Pointer(in.Pool))
out.Device = (*string)(unsafe.Pointer(in.Device))
out.Selectors = *(*[]resourcev1alpha3.DeviceSelector)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_resource_DeviceTaintSelector_To_v1alpha3_DeviceTaintSelector is an autogenerated conversion function.
func Convert_resource_DeviceTaintSelector_To_v1alpha3_DeviceTaintSelector(in *resource.DeviceTaintSelector, out *resourcev1alpha3.DeviceTaintSelector, s conversion.Scope) error {
return autoConvert_resource_DeviceTaintSelector_To_v1alpha3_DeviceTaintSelector(in, out, s)
}
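// exampleUnsafeCast is a hand-written illustrative sketch, not part of the
// generated output above. It shows why the unsafe.Pointer casts in this file
// are sound: conversion-gen only emits such a cast when the source and target
// structs are memory-layout identical, so the cast is a zero-copy
// reinterpretation equivalent to copying every field by hand.
func exampleUnsafeCast(in *resourcev1alpha3.CELDeviceSelector) *resource.CELDeviceSelector {
	// Both types consist of a single Expression string field, so this is the
	// same as &resource.CELDeviceSelector{Expression: in.Expression} without
	// allocating and copying a new struct.
	return (*resource.CELDeviceSelector)(unsafe.Pointer(in))
}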
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha3
import (
resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&resourcev1alpha3.DeviceTaintRule{}, func(obj interface{}) { SetObjectDefaults_DeviceTaintRule(obj.(*resourcev1alpha3.DeviceTaintRule)) })
scheme.AddTypeDefaultingFunc(&resourcev1alpha3.DeviceTaintRuleList{}, func(obj interface{}) {
SetObjectDefaults_DeviceTaintRuleList(obj.(*resourcev1alpha3.DeviceTaintRuleList))
})
return nil
}
func SetObjectDefaults_DeviceTaintRule(in *resourcev1alpha3.DeviceTaintRule) {
SetDefaults_DeviceTaint(&in.Spec.Taint)
}
func SetObjectDefaults_DeviceTaintRuleList(in *resourcev1alpha3.DeviceTaintRuleList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_DeviceTaintRule(a)
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
resourcev1beta1 "k8s.io/api/resource/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/resource"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ResourceSlice"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", resourcev1beta1.ResourceSliceSelectorNodeName, resourcev1beta1.ResourceSliceSelectorDriver:
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("ResourceSlice"), label)
}
}); err != nil {
return err
}
return nil
}
func Convert_v1beta1_DeviceRequest_To_resource_DeviceRequest(in *resourcev1beta1.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
if err := autoConvert_v1beta1_DeviceRequest_To_resource_DeviceRequest(in, out, s); err != nil {
return err
}
// If any fields on the main request are set, we create an ExactDeviceRequest
// and set the Exactly field. The result might be invalid, but that will be
// caught in validation.
if hasAnyMainRequestFieldsSet(in) {
var exactDeviceRequest resource.ExactDeviceRequest
exactDeviceRequest.DeviceClassName = in.DeviceClassName
if in.Selectors != nil {
selectors := make([]resource.DeviceSelector, 0, len(in.Selectors))
for i := range in.Selectors {
var selector resource.DeviceSelector
err := Convert_v1beta1_DeviceSelector_To_resource_DeviceSelector(&in.Selectors[i], &selector, s)
if err != nil {
return err
}
selectors = append(selectors, selector)
}
exactDeviceRequest.Selectors = selectors
}
exactDeviceRequest.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
exactDeviceRequest.Count = in.Count
exactDeviceRequest.AdminAccess = in.AdminAccess
var tolerations []resource.DeviceToleration
for _, e := range in.Tolerations {
var toleration resource.DeviceToleration
if err := Convert_v1beta1_DeviceToleration_To_resource_DeviceToleration(&e, &toleration, s); err != nil {
return err
}
tolerations = append(tolerations, toleration)
}
exactDeviceRequest.Tolerations = tolerations
if in.Capacity != nil {
var capacity resource.CapacityRequirements
if err := Convert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements(in.Capacity, &capacity, s); err != nil {
return err
}
exactDeviceRequest.Capacity = &capacity
}
out.Exactly = &exactDeviceRequest
}
return nil
}
func hasAnyMainRequestFieldsSet(deviceRequest *resourcev1beta1.DeviceRequest) bool {
return deviceRequest.DeviceClassName != "" ||
deviceRequest.Selectors != nil ||
deviceRequest.AllocationMode != "" ||
deviceRequest.Count != 0 ||
deviceRequest.AdminAccess != nil ||
deviceRequest.Tolerations != nil ||
deviceRequest.Capacity != nil
}
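// exampleWrapDeviceRequest is a hand-written illustrative sketch, not part of
// this package's API: it demonstrates how the conversion above lifts the flat
// v1beta1 fields into the internal type's Exactly field. The request values
// are made up for illustration; a nil conversion.Scope suffices here because
// none of the populated fields need scoped conversion.
func exampleWrapDeviceRequest() (*resource.DeviceRequest, error) {
	in := &resourcev1beta1.DeviceRequest{
		Name:            "gpu",
		DeviceClassName: "example-class",
		AllocationMode:  resourcev1beta1.DeviceAllocationModeExactCount,
		Count:           2,
	}
	out := &resource.DeviceRequest{}
	if err := Convert_v1beta1_DeviceRequest_To_resource_DeviceRequest(in, out, nil); err != nil {
		return nil, err
	}
	// hasAnyMainRequestFieldsSet(in) is true, so out.Exactly is now non-nil
	// and carries DeviceClassName, AllocationMode and Count.
	return out, nil
}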
func Convert_resource_DeviceRequest_To_v1beta1_DeviceRequest(in *resource.DeviceRequest, out *resourcev1beta1.DeviceRequest, s conversion.Scope) error {
if err := autoConvert_resource_DeviceRequest_To_v1beta1_DeviceRequest(in, out, s); err != nil {
return err
}
if in.Exactly != nil {
out.DeviceClassName = in.Exactly.DeviceClassName
if in.Exactly.Selectors != nil {
selectors := make([]resourcev1beta1.DeviceSelector, 0, len(in.Exactly.Selectors))
for i := range in.Exactly.Selectors {
var selector resourcev1beta1.DeviceSelector
err := Convert_resource_DeviceSelector_To_v1beta1_DeviceSelector(&in.Exactly.Selectors[i], &selector, s)
if err != nil {
return err
}
selectors = append(selectors, selector)
}
out.Selectors = selectors
}
out.AllocationMode = resourcev1beta1.DeviceAllocationMode(in.Exactly.AllocationMode)
out.Count = in.Exactly.Count
out.AdminAccess = in.Exactly.AdminAccess
var tolerations []resourcev1beta1.DeviceToleration
for _, e := range in.Exactly.Tolerations {
var toleration resourcev1beta1.DeviceToleration
if err := Convert_resource_DeviceToleration_To_v1beta1_DeviceToleration(&e, &toleration, s); err != nil {
return err
}
tolerations = append(tolerations, toleration)
}
out.Tolerations = tolerations
if in.Exactly.Capacity != nil {
var capacity resourcev1beta1.CapacityRequirements
if err := Convert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements(in.Exactly.Capacity, &capacity, s); err != nil {
return err
}
out.Capacity = &capacity
}
}
return nil
}
func Convert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1beta1.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in, out, s); err != nil {
return err
}
if in.NodeName == "" {
out.NodeName = nil
} else {
out.NodeName = &in.NodeName
}
if !in.AllNodes {
out.AllNodes = nil
} else {
out.AllNodes = &in.AllNodes
}
return nil
}
func Convert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1beta1.ResourceSliceSpec, s conversion.Scope) error {
if err := autoConvert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in, out, s); err != nil {
return err
}
if in.NodeName == nil {
out.NodeName = ""
} else {
out.NodeName = *in.NodeName
}
if in.AllNodes == nil {
out.AllNodes = false
} else {
out.AllNodes = *in.AllNodes
}
return nil
}
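// exampleResourceSliceSpecRoundTrip is a hand-written illustrative sketch, not
// part of this package's API: it shows the normalization above, where an empty
// v1beta1 NodeName becomes a nil pointer internally (unset rather than
// zero-valued) and converts back to "" without loss. The driver name is made
// up; a nil conversion.Scope suffices because the Devices slice is empty.
func exampleResourceSliceSpecRoundTrip() error {
	in := resourcev1beta1.ResourceSliceSpec{Driver: "example.com/driver"} // NodeName "", AllNodes false
	var hub resource.ResourceSliceSpec
	if err := Convert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(&in, &hub, nil); err != nil {
		return err
	}
	// At this point hub.NodeName == nil and hub.AllNodes == nil.
	var back resourcev1beta1.ResourceSliceSpec
	return Convert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(&hub, &back, nil)
}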
func Convert_v1beta1_Device_To_resource_Device(in *resourcev1beta1.Device, out *resource.Device, s conversion.Scope) error {
if err := autoConvert_v1beta1_Device_To_resource_Device(in, out, s); err != nil {
return err
}
if in.Basic != nil {
basic := in.Basic
if len(basic.Attributes) > 0 {
attributes := make(map[resource.QualifiedName]resource.DeviceAttribute)
if err := convert_v1beta1_Attributes_To_resource_Attributes(basic.Attributes, attributes, s); err != nil {
return err
}
out.Attributes = attributes
}
if len(basic.Capacity) > 0 {
capacity := make(map[resource.QualifiedName]resource.DeviceCapacity)
if err := convert_v1beta1_Capacity_To_resource_Capacity(basic.Capacity, capacity, s); err != nil {
return err
}
out.Capacity = capacity
}
var consumesCounters []resource.DeviceCounterConsumption
for _, e := range basic.ConsumesCounters {
var deviceCounterConsumption resource.DeviceCounterConsumption
if err := Convert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(&e, &deviceCounterConsumption, s); err != nil {
return err
}
consumesCounters = append(consumesCounters, deviceCounterConsumption)
}
out.ConsumesCounters = consumesCounters
out.NodeName = basic.NodeName
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(basic.NodeSelector))
out.AllNodes = basic.AllNodes
var taints []resource.DeviceTaint
for _, e := range basic.Taints {
var taint resource.DeviceTaint
if err := Convert_v1beta1_DeviceTaint_To_resource_DeviceTaint(&e, &taint, s); err != nil {
return err
}
taints = append(taints, taint)
}
out.Taints = taints
out.BindsToNode = basic.BindsToNode
out.BindingConditions = basic.BindingConditions
out.BindingFailureConditions = basic.BindingFailureConditions
out.AllowMultipleAllocations = basic.AllowMultipleAllocations
}
return nil
}
func Convert_resource_Device_To_v1beta1_Device(in *resource.Device, out *resourcev1beta1.Device, s conversion.Scope) error {
if err := autoConvert_resource_Device_To_v1beta1_Device(in, out, s); err != nil {
return err
}
out.Basic = &resourcev1beta1.BasicDevice{}
if len(in.Attributes) > 0 {
attributes := make(map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceAttribute)
if err := convert_resource_Attributes_To_v1beta1_Attributes(in.Attributes, attributes, s); err != nil {
return err
}
out.Basic.Attributes = attributes
}
if len(in.Capacity) > 0 {
capacity := make(map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceCapacity)
if err := convert_resource_Capacity_To_v1beta1_Capacity(in.Capacity, capacity, s); err != nil {
return err
}
out.Basic.Capacity = capacity
}
var consumesCounters []resourcev1beta1.DeviceCounterConsumption
for _, e := range in.ConsumesCounters {
var deviceCounterConsumption resourcev1beta1.DeviceCounterConsumption
if err := Convert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption(&e, &deviceCounterConsumption, s); err != nil {
return err
}
consumesCounters = append(consumesCounters, deviceCounterConsumption)
}
out.Basic.ConsumesCounters = consumesCounters
out.Basic.NodeName = in.NodeName
out.Basic.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.Basic.AllNodes = in.AllNodes
var taints []resourcev1beta1.DeviceTaint
for _, e := range in.Taints {
var taint resourcev1beta1.DeviceTaint
if err := Convert_resource_DeviceTaint_To_v1beta1_DeviceTaint(&e, &taint, s); err != nil {
return err
}
taints = append(taints, taint)
}
out.Basic.Taints = taints
out.Basic.BindsToNode = in.BindsToNode
out.Basic.BindingConditions = in.BindingConditions
out.Basic.BindingFailureConditions = in.BindingFailureConditions
out.Basic.AllowMultipleAllocations = in.AllowMultipleAllocations
return nil
}
func convert_resource_Attributes_To_v1beta1_Attributes(in map[resource.QualifiedName]resource.DeviceAttribute, out map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceAttribute, s conversion.Scope) error {
for k, v := range in {
var a resourcev1beta1.DeviceAttribute
if err := Convert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute(&v, &a, s); err != nil {
return err
}
out[resourcev1beta1.QualifiedName(k)] = a
}
return nil
}
func convert_resource_Capacity_To_v1beta1_Capacity(in map[resource.QualifiedName]resource.DeviceCapacity, out map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceCapacity, s conversion.Scope) error {
for k, v := range in {
var c resourcev1beta1.DeviceCapacity
if err := Convert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity(&v, &c, s); err != nil {
return err
}
out[resourcev1beta1.QualifiedName(k)] = c
}
return nil
}
func convert_v1beta1_Attributes_To_resource_Attributes(in map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceAttribute, out map[resource.QualifiedName]resource.DeviceAttribute, s conversion.Scope) error {
for k, v := range in {
var a resource.DeviceAttribute
if err := Convert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute(&v, &a, s); err != nil {
return err
}
out[resource.QualifiedName(k)] = a
}
return nil
}
func convert_v1beta1_Capacity_To_resource_Capacity(in map[resourcev1beta1.QualifiedName]resourcev1beta1.DeviceCapacity, out map[resource.QualifiedName]resource.DeviceCapacity, s conversion.Scope) error {
for k, v := range in {
var c resource.DeviceCapacity
if err := Convert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity(&v, &c, s); err != nil {
return err
}
out[resource.QualifiedName(k)] = c
}
return nil
}
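// exampleSchemeConvert is a hand-written illustrative sketch, not part of this
// package's API: it shows how the conversion functions in this file are
// typically consumed. RegisterConversions (from the generated file in this
// package) installs them into a runtime.Scheme, and scheme.Convert then picks
// the right function by type. The device name is made up for illustration.
func exampleSchemeConvert() (*resource.Device, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	in := &resourcev1beta1.Device{
		Name:  "gpu-0",
		Basic: &resourcev1beta1.BasicDevice{},
	}
	out := &resource.Device{}
	// Routes to Convert_v1beta1_Device_To_resource_Device, which flattens
	// the Basic wrapper into the internal Device type.
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}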
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"time"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_DeviceRequest(obj *resourceapi.DeviceRequest) {
// If the deviceClassName is not set, then the request will have
// subrequests and the allocationMode and count fields should not
// be set.
if obj.DeviceClassName == "" {
return
}
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
func SetDefaults_DeviceSubRequest(obj *resourceapi.DeviceSubRequest) {
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
func SetDefaults_DeviceTaint(obj *resourceapi.DeviceTaint) {
if obj.TimeAdded == nil {
obj.TimeAdded = &metav1.Time{Time: time.Now().Truncate(time.Second)}
}
}
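// exampleDefaulting is a hand-written illustrative sketch, not part of this
// package's API: a request that names a device class but leaves the allocation
// fields empty is defaulted to asking for exactly one device. The class name
// is made up for illustration.
func exampleDefaulting() resourceapi.DeviceRequest {
	req := resourceapi.DeviceRequest{Name: "gpu", DeviceClassName: "example-class"}
	SetDefaults_DeviceRequest(&req)
	// Now req.AllocationMode == resourceapi.DeviceAllocationModeExactCount
	// and req.Count == 1.
	return req
}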
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
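// exampleGroupResource is a hand-written illustrative sketch, not part of this
// package's API: Resource qualifies an unqualified resource name with this
// package's group, which is handy when constructing API errors.
func exampleGroupResource() schema.GroupResource {
	// Yields schema.GroupResource{Group: "resource.k8s.io", Resource: "resourceclaims"}.
	return Resource("resourceclaims")
}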
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
resourcev1beta1 "k8s.io/api/resource/v1beta1"
apiresource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
resource "k8s.io/kubernetes/pkg/apis/resource"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.AllocatedDeviceStatus)(nil), (*resource.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(a.(*resourcev1beta1.AllocatedDeviceStatus), b.(*resource.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocatedDeviceStatus)(nil), (*resourcev1beta1.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocatedDeviceStatus_To_v1beta1_AllocatedDeviceStatus(a.(*resource.AllocatedDeviceStatus), b.(*resourcev1beta1.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AllocationResult_To_resource_AllocationResult(a.(*resourcev1beta1.AllocationResult), b.(*resource.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*resourcev1beta1.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocationResult_To_v1beta1_AllocationResult(a.(*resource.AllocationResult), b.(*resourcev1beta1.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.CELDeviceSelector)(nil), (*resource.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CELDeviceSelector_To_resource_CELDeviceSelector(a.(*resourcev1beta1.CELDeviceSelector), b.(*resource.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CELDeviceSelector)(nil), (*resourcev1beta1.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CELDeviceSelector_To_v1beta1_CELDeviceSelector(a.(*resource.CELDeviceSelector), b.(*resourcev1beta1.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.CapacityRequestPolicy)(nil), (*resource.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(a.(*resourcev1beta1.CapacityRequestPolicy), b.(*resource.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicy)(nil), (*resourcev1beta1.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicy_To_v1beta1_CapacityRequestPolicy(a.(*resource.CapacityRequestPolicy), b.(*resourcev1beta1.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.CapacityRequestPolicyRange)(nil), (*resource.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(a.(*resourcev1beta1.CapacityRequestPolicyRange), b.(*resource.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicyRange)(nil), (*resourcev1beta1.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicyRange_To_v1beta1_CapacityRequestPolicyRange(a.(*resource.CapacityRequestPolicyRange), b.(*resourcev1beta1.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.CapacityRequirements)(nil), (*resource.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements(a.(*resourcev1beta1.CapacityRequirements), b.(*resource.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequirements)(nil), (*resourcev1beta1.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements(a.(*resource.CapacityRequirements), b.(*resourcev1beta1.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.Counter)(nil), (*resource.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Counter_To_resource_Counter(a.(*resourcev1beta1.Counter), b.(*resource.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.Counter)(nil), (*resourcev1beta1.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Counter_To_v1beta1_Counter(a.(*resource.Counter), b.(*resourcev1beta1.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.CounterSet)(nil), (*resource.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CounterSet_To_resource_CounterSet(a.(*resourcev1beta1.CounterSet), b.(*resource.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CounterSet)(nil), (*resourcev1beta1.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CounterSet_To_v1beta1_CounterSet(a.(*resource.CounterSet), b.(*resourcev1beta1.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceAllocationConfiguration)(nil), (*resource.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(a.(*resourcev1beta1.DeviceAllocationConfiguration), b.(*resource.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationConfiguration)(nil), (*resourcev1beta1.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationConfiguration_To_v1beta1_DeviceAllocationConfiguration(a.(*resource.DeviceAllocationConfiguration), b.(*resourcev1beta1.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceAllocationResult)(nil), (*resource.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult(a.(*resourcev1beta1.DeviceAllocationResult), b.(*resource.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationResult)(nil), (*resourcev1beta1.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult(a.(*resource.DeviceAllocationResult), b.(*resourcev1beta1.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceAttribute)(nil), (*resource.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute(a.(*resourcev1beta1.DeviceAttribute), b.(*resource.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAttribute)(nil), (*resourcev1beta1.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute(a.(*resource.DeviceAttribute), b.(*resourcev1beta1.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceCapacity)(nil), (*resource.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity(a.(*resourcev1beta1.DeviceCapacity), b.(*resource.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCapacity)(nil), (*resourcev1beta1.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity(a.(*resource.DeviceCapacity), b.(*resourcev1beta1.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClaim)(nil), (*resource.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClaim_To_resource_DeviceClaim(a.(*resourcev1beta1.DeviceClaim), b.(*resource.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaim)(nil), (*resourcev1beta1.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaim_To_v1beta1_DeviceClaim(a.(*resource.DeviceClaim), b.(*resourcev1beta1.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClaimConfiguration)(nil), (*resource.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(a.(*resourcev1beta1.DeviceClaimConfiguration), b.(*resource.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaimConfiguration)(nil), (*resourcev1beta1.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaimConfiguration_To_v1beta1_DeviceClaimConfiguration(a.(*resource.DeviceClaimConfiguration), b.(*resourcev1beta1.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClass)(nil), (*resource.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClass_To_resource_DeviceClass(a.(*resourcev1beta1.DeviceClass), b.(*resource.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClass)(nil), (*resourcev1beta1.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClass_To_v1beta1_DeviceClass(a.(*resource.DeviceClass), b.(*resourcev1beta1.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClassConfiguration)(nil), (*resource.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(a.(*resourcev1beta1.DeviceClassConfiguration), b.(*resource.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassConfiguration)(nil), (*resourcev1beta1.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassConfiguration_To_v1beta1_DeviceClassConfiguration(a.(*resource.DeviceClassConfiguration), b.(*resourcev1beta1.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClassList)(nil), (*resource.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClassList_To_resource_DeviceClassList(a.(*resourcev1beta1.DeviceClassList), b.(*resource.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassList)(nil), (*resourcev1beta1.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassList_To_v1beta1_DeviceClassList(a.(*resource.DeviceClassList), b.(*resourcev1beta1.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceClassSpec)(nil), (*resource.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec(a.(*resourcev1beta1.DeviceClassSpec), b.(*resource.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassSpec)(nil), (*resourcev1beta1.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec(a.(*resource.DeviceClassSpec), b.(*resourcev1beta1.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceConfiguration)(nil), (*resource.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(a.(*resourcev1beta1.DeviceConfiguration), b.(*resource.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConfiguration)(nil), (*resourcev1beta1.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(a.(*resource.DeviceConfiguration), b.(*resourcev1beta1.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceConstraint)(nil), (*resource.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceConstraint_To_resource_DeviceConstraint(a.(*resourcev1beta1.DeviceConstraint), b.(*resource.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConstraint)(nil), (*resourcev1beta1.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConstraint_To_v1beta1_DeviceConstraint(a.(*resource.DeviceConstraint), b.(*resourcev1beta1.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceCounterConsumption)(nil), (*resource.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(a.(*resourcev1beta1.DeviceCounterConsumption), b.(*resource.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCounterConsumption)(nil), (*resourcev1beta1.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption(a.(*resource.DeviceCounterConsumption), b.(*resourcev1beta1.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceRequestAllocationResult)(nil), (*resource.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(a.(*resourcev1beta1.DeviceRequestAllocationResult), b.(*resource.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceRequestAllocationResult)(nil), (*resourcev1beta1.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequestAllocationResult_To_v1beta1_DeviceRequestAllocationResult(a.(*resource.DeviceRequestAllocationResult), b.(*resourcev1beta1.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceSelector)(nil), (*resource.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceSelector_To_resource_DeviceSelector(a.(*resourcev1beta1.DeviceSelector), b.(*resource.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSelector)(nil), (*resourcev1beta1.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSelector_To_v1beta1_DeviceSelector(a.(*resource.DeviceSelector), b.(*resourcev1beta1.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceSubRequest)(nil), (*resource.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceSubRequest_To_resource_DeviceSubRequest(a.(*resourcev1beta1.DeviceSubRequest), b.(*resource.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSubRequest)(nil), (*resourcev1beta1.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSubRequest_To_v1beta1_DeviceSubRequest(a.(*resource.DeviceSubRequest), b.(*resourcev1beta1.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceTaint)(nil), (*resource.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceTaint_To_resource_DeviceTaint(a.(*resourcev1beta1.DeviceTaint), b.(*resource.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaint)(nil), (*resourcev1beta1.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaint_To_v1beta1_DeviceTaint(a.(*resource.DeviceTaint), b.(*resourcev1beta1.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.DeviceToleration)(nil), (*resource.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceToleration_To_resource_DeviceToleration(a.(*resourcev1beta1.DeviceToleration), b.(*resource.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceToleration)(nil), (*resourcev1beta1.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceToleration_To_v1beta1_DeviceToleration(a.(*resource.DeviceToleration), b.(*resourcev1beta1.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.NetworkDeviceData)(nil), (*resource.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_NetworkDeviceData_To_resource_NetworkDeviceData(a.(*resourcev1beta1.NetworkDeviceData), b.(*resource.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.NetworkDeviceData)(nil), (*resourcev1beta1.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_NetworkDeviceData_To_v1beta1_NetworkDeviceData(a.(*resource.NetworkDeviceData), b.(*resourcev1beta1.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.OpaqueDeviceConfiguration)(nil), (*resource.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(a.(*resourcev1beta1.OpaqueDeviceConfiguration), b.(*resource.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.OpaqueDeviceConfiguration)(nil), (*resourcev1beta1.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_OpaqueDeviceConfiguration_To_v1beta1_OpaqueDeviceConfiguration(a.(*resource.OpaqueDeviceConfiguration), b.(*resourcev1beta1.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaim_To_resource_ResourceClaim(a.(*resourcev1beta1.ResourceClaim), b.(*resource.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*resourcev1beta1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaim_To_v1beta1_ResourceClaim(a.(*resource.ResourceClaim), b.(*resourcev1beta1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*resourcev1beta1.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*resourcev1beta1.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimConsumerReference_To_v1beta1_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*resourcev1beta1.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimList_To_resource_ResourceClaimList(a.(*resourcev1beta1.ResourceClaimList), b.(*resource.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*resourcev1beta1.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimList_To_v1beta1_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*resourcev1beta1.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*resourcev1beta1.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*resourcev1beta1.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*resourcev1beta1.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*resourcev1beta1.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*resourcev1beta1.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*resourcev1beta1.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*resourcev1beta1.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*resourcev1beta1.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*resourcev1beta1.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*resourcev1beta1.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*resourcev1beta1.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateList_To_v1beta1_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*resourcev1beta1.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*resourcev1beta1.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*resourcev1beta1.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*resourcev1beta1.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourcePool)(nil), (*resource.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourcePool_To_resource_ResourcePool(a.(*resourcev1beta1.ResourcePool), b.(*resource.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourcePool)(nil), (*resourcev1beta1.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourcePool_To_v1beta1_ResourcePool(a.(*resource.ResourcePool), b.(*resourcev1beta1.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceSlice)(nil), (*resource.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceSlice_To_resource_ResourceSlice(a.(*resourcev1beta1.ResourceSlice), b.(*resource.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSlice)(nil), (*resourcev1beta1.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSlice_To_v1beta1_ResourceSlice(a.(*resource.ResourceSlice), b.(*resourcev1beta1.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta1.ResourceSliceList)(nil), (*resource.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceSliceList_To_resource_ResourceSliceList(a.(*resourcev1beta1.ResourceSliceList), b.(*resource.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceList)(nil), (*resourcev1beta1.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceList_To_v1beta1_ResourceSliceList(a.(*resource.ResourceSliceList), b.(*resourcev1beta1.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resource.DeviceRequest)(nil), (*resourcev1beta1.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequest_To_v1beta1_DeviceRequest(a.(*resource.DeviceRequest), b.(*resourcev1beta1.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resource.Device)(nil), (*resourcev1beta1.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Device_To_v1beta1_Device(a.(*resource.Device), b.(*resourcev1beta1.Device), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resource.ResourceSliceSpec)(nil), (*resourcev1beta1.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(a.(*resource.ResourceSliceSpec), b.(*resourcev1beta1.ResourceSliceSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resourcev1beta1.DeviceRequest)(nil), (*resource.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceRequest_To_resource_DeviceRequest(a.(*resourcev1beta1.DeviceRequest), b.(*resource.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resourcev1beta1.Device)(nil), (*resource.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Device_To_resource_Device(a.(*resourcev1beta1.Device), b.(*resource.Device), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*resourcev1beta1.ResourceSliceSpec)(nil), (*resource.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(a.(*resourcev1beta1.ResourceSliceSpec), b.(*resource.ResourceSliceSpec), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1beta1.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resource.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_v1beta1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_v1beta1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1beta1.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_v1beta1_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in, out, s)
}
func autoConvert_resource_AllocatedDeviceStatus_To_v1beta1_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1beta1.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resourcev1beta1.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_resource_AllocatedDeviceStatus_To_v1beta1_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_resource_AllocatedDeviceStatus_To_v1beta1_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1beta1.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_resource_AllocatedDeviceStatus_To_v1beta1_AllocatedDeviceStatus(in, out, s)
}
func autoConvert_v1beta1_AllocationResult_To_resource_AllocationResult(in *resourcev1beta1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
if err := Convert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*v1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_v1beta1_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function.
func Convert_v1beta1_AllocationResult_To_resource_AllocationResult(in *resourcev1beta1.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
return autoConvert_v1beta1_AllocationResult_To_resource_AllocationResult(in, out, s)
}
func autoConvert_resource_AllocationResult_To_v1beta1_AllocationResult(in *resource.AllocationResult, out *resourcev1beta1.AllocationResult, s conversion.Scope) error {
if err := Convert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*v1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_resource_AllocationResult_To_v1beta1_AllocationResult is an autogenerated conversion function.
func Convert_resource_AllocationResult_To_v1beta1_AllocationResult(in *resource.AllocationResult, out *resourcev1beta1.AllocationResult, s conversion.Scope) error {
return autoConvert_resource_AllocationResult_To_v1beta1_AllocationResult(in, out, s)
}
func autoConvert_v1beta1_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1beta1.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1beta1_CELDeviceSelector_To_resource_CELDeviceSelector is an autogenerated conversion function.
func Convert_v1beta1_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1beta1.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_v1beta1_CELDeviceSelector_To_resource_CELDeviceSelector(in, out, s)
}
func autoConvert_resource_CELDeviceSelector_To_v1beta1_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1beta1.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_resource_CELDeviceSelector_To_v1beta1_CELDeviceSelector is an autogenerated conversion function.
func Convert_resource_CELDeviceSelector_To_v1beta1_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1beta1.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_resource_CELDeviceSelector_To_v1beta1_CELDeviceSelector(in, out, s)
}
func autoConvert_v1beta1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1beta1.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resource.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_v1beta1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_v1beta1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1beta1.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_v1beta1_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicy_To_v1beta1_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1beta1.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resourcev1beta1.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_resource_CapacityRequestPolicy_To_v1beta1_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicy_To_v1beta1_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1beta1.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicy_To_v1beta1_CapacityRequestPolicy(in, out, s)
}
func autoConvert_v1beta1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1beta1.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_v1beta1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_v1beta1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1beta1.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_v1beta1_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicyRange_To_v1beta1_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1beta1.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_resource_CapacityRequestPolicyRange_To_v1beta1_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicyRange_To_v1beta1_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1beta1.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicyRange_To_v1beta1_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1beta1.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements is an autogenerated conversion function.
func Convert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1beta1.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
return autoConvert_v1beta1_CapacityRequirements_To_resource_CapacityRequirements(in, out, s)
}
func autoConvert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1beta1.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resourcev1beta1.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements is an autogenerated conversion function.
func Convert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1beta1.CapacityRequirements, s conversion.Scope) error {
return autoConvert_resource_CapacityRequirements_To_v1beta1_CapacityRequirements(in, out, s)
}
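// Note (explanatory, not emitted by conversion-gen): the Requests maps above
// differ only in their declared key type (resource.QualifiedName versus
// resourcev1beta1.QualifiedName, both string kinds), so the whole map header
// is retyped with one unsafe.Pointer cast instead of being rebuilt. The cast
// is sound only while the key and value layouts stay identical across the
// two packages, and the resulting map shares storage with the input; callers
// must not assume a deep copy.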
func autoConvert_v1beta1_Counter_To_resource_Counter(in *resourcev1beta1.Counter, out *resource.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_v1beta1_Counter_To_resource_Counter is an autogenerated conversion function.
func Convert_v1beta1_Counter_To_resource_Counter(in *resourcev1beta1.Counter, out *resource.Counter, s conversion.Scope) error {
return autoConvert_v1beta1_Counter_To_resource_Counter(in, out, s)
}
func autoConvert_resource_Counter_To_v1beta1_Counter(in *resource.Counter, out *resourcev1beta1.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_resource_Counter_To_v1beta1_Counter is an autogenerated conversion function.
func Convert_resource_Counter_To_v1beta1_Counter(in *resource.Counter, out *resourcev1beta1.Counter, s conversion.Scope) error {
return autoConvert_resource_Counter_To_v1beta1_Counter(in, out, s)
}
func autoConvert_v1beta1_CounterSet_To_resource_CounterSet(in *resourcev1beta1.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1beta1_CounterSet_To_resource_CounterSet is an autogenerated conversion function.
func Convert_v1beta1_CounterSet_To_resource_CounterSet(in *resourcev1beta1.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
return autoConvert_v1beta1_CounterSet_To_resource_CounterSet(in, out, s)
}
func autoConvert_resource_CounterSet_To_v1beta1_CounterSet(in *resource.CounterSet, out *resourcev1beta1.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resourcev1beta1.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_CounterSet_To_v1beta1_CounterSet is an autogenerated conversion function.
func Convert_resource_CounterSet_To_v1beta1_CounterSet(in *resource.CounterSet, out *resourcev1beta1.CounterSet, s conversion.Scope) error {
return autoConvert_resource_CounterSet_To_v1beta1_CounterSet(in, out, s)
}
func autoConvert_v1beta1_Device_To_resource_Device(in *resourcev1beta1.Device, out *resource.Device, s conversion.Scope) error {
out.Name = in.Name
// WARNING: in.Basic requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_resource_Device_To_v1beta1_Device(in *resource.Device, out *resourcev1beta1.Device, s conversion.Scope) error {
out.Name = in.Name
// WARNING: in.Attributes requires manual conversion: does not exist in peer-type
// WARNING: in.Capacity requires manual conversion: does not exist in peer-type
// WARNING: in.ConsumesCounters requires manual conversion: does not exist in peer-type
// WARNING: in.NodeName requires manual conversion: does not exist in peer-type
// WARNING: in.NodeSelector requires manual conversion: does not exist in peer-type
// WARNING: in.AllNodes requires manual conversion: does not exist in peer-type
// WARNING: in.Taints requires manual conversion: does not exist in peer-type
// WARNING: in.BindsToNode requires manual conversion: does not exist in peer-type
// WARNING: in.BindingConditions requires manual conversion: does not exist in peer-type
// WARNING: in.BindingFailureConditions requires manual conversion: does not exist in peer-type
// WARNING: in.AllowMultipleAllocations requires manual conversion: does not exist in peer-type
return nil
}
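// Note (explanatory, not emitted by conversion-gen): the two autoConvert_*
// functions above are left without exported Convert_* wrappers on purpose:
// v1beta1 nests most device properties under Device.Basic, while the internal
// type flattens them onto Device itself, so the generator can only flag the
// mismatch with WARNING comments and defer to hand-written conversions
// elsewhere in the package. A hedged sketch of the shape such a wrapper
// typically takes (the Basic field handling is an assumption for
// illustration, not confirmed by this file):
//
//	func Convert_v1beta1_Device_To_resource_Device(in *resourcev1beta1.Device, out *resource.Device, s conversion.Scope) error {
//	    if err := autoConvert_v1beta1_Device_To_resource_Device(in, out, s); err != nil {
//	        return err
//	    }
//	    if in.Basic != nil {
//	        // hypothetical: copy in.Basic.Attributes, in.Basic.Capacity,
//	        // and the other nested fields onto the flattened fields of out.
//	    }
//	    return nil
//	}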
func autoConvert_v1beta1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1beta1.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resource.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_v1beta1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1beta1.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in, out, s)
}
func autoConvert_resource_DeviceAllocationConfiguration_To_v1beta1_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1beta1.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resourcev1beta1.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceAllocationConfiguration_To_v1beta1_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceAllocationConfiguration_To_v1beta1_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1beta1.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationConfiguration_To_v1beta1_DeviceAllocationConfiguration(in, out, s)
}
func autoConvert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1beta1.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resource.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resource.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult is an autogenerated conversion function.
func Convert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1beta1.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceAllocationResult_To_resource_DeviceAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1beta1.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resourcev1beta1.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resourcev1beta1.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1beta1.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationResult_To_v1beta1_DeviceAllocationResult(in, out, s)
}
func autoConvert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1beta1.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute is an autogenerated conversion function.
func Convert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1beta1.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceAttribute_To_resource_DeviceAttribute(in, out, s)
}
func autoConvert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1beta1.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute is an autogenerated conversion function.
func Convert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1beta1.DeviceAttribute, s conversion.Scope) error {
return autoConvert_resource_DeviceAttribute_To_v1beta1_DeviceAttribute(in, out, s)
}
func autoConvert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1beta1.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resource.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity is an autogenerated conversion function.
func Convert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1beta1.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceCapacity_To_resource_DeviceCapacity(in, out, s)
}
func autoConvert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1beta1.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resourcev1beta1.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity is an autogenerated conversion function.
func Convert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1beta1.DeviceCapacity, s conversion.Scope) error {
return autoConvert_resource_DeviceCapacity_To_v1beta1_DeviceCapacity(in, out, s)
}
func autoConvert_v1beta1_DeviceClaim_To_resource_DeviceClaim(in *resourcev1beta1.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]resource.DeviceRequest, len(*in))
for i := range *in {
if err := Convert_v1beta1_DeviceRequest_To_resource_DeviceRequest(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Requests = nil
}
out.Constraints = *(*[]resource.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resource.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1beta1_DeviceClaim_To_resource_DeviceClaim is an autogenerated conversion function.
func Convert_v1beta1_DeviceClaim_To_resource_DeviceClaim(in *resourcev1beta1.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClaim_To_resource_DeviceClaim(in, out, s)
}
func autoConvert_resource_DeviceClaim_To_v1beta1_DeviceClaim(in *resource.DeviceClaim, out *resourcev1beta1.DeviceClaim, s conversion.Scope) error {
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]resourcev1beta1.DeviceRequest, len(*in))
for i := range *in {
if err := Convert_resource_DeviceRequest_To_v1beta1_DeviceRequest(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Requests = nil
}
out.Constraints = *(*[]resourcev1beta1.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resourcev1beta1.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceClaim_To_v1beta1_DeviceClaim is an autogenerated conversion function.
func Convert_resource_DeviceClaim_To_v1beta1_DeviceClaim(in *resource.DeviceClaim, out *resourcev1beta1.DeviceClaim, s conversion.Scope) error {
return autoConvert_resource_DeviceClaim_To_v1beta1_DeviceClaim(in, out, s)
}
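// Note (explanatory, not emitted by conversion-gen): Requests is converted
// element by element above, unlike the Constraints and Config slices, because
// DeviceRequest itself requires a conversion function (see the WARNING
// comments on its autoConvert_* pair below); the generator only emits an
// unsafe slice retype when every reachable field is layout-identical in both
// packages.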
func autoConvert_v1beta1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1beta1.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_v1beta1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1beta1.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClaimConfiguration_To_v1beta1_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1beta1.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClaimConfiguration_To_v1beta1_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClaimConfiguration_To_v1beta1_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1beta1.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClaimConfiguration_To_v1beta1_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_v1beta1_DeviceClass_To_resource_DeviceClass(in *resourcev1beta1.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeviceClass_To_resource_DeviceClass is an autogenerated conversion function.
func Convert_v1beta1_DeviceClass_To_resource_DeviceClass(in *resourcev1beta1.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClass_To_resource_DeviceClass(in, out, s)
}
func autoConvert_resource_DeviceClass_To_v1beta1_DeviceClass(in *resource.DeviceClass, out *resourcev1beta1.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClass_To_v1beta1_DeviceClass is an autogenerated conversion function.
func Convert_resource_DeviceClass_To_v1beta1_DeviceClass(in *resource.DeviceClass, out *resourcev1beta1.DeviceClass, s conversion.Scope) error {
return autoConvert_resource_DeviceClass_To_v1beta1_DeviceClass(in, out, s)
}
func autoConvert_v1beta1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1beta1.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_v1beta1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1beta1.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClassConfiguration_To_v1beta1_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1beta1.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClassConfiguration_To_v1beta1_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClassConfiguration_To_v1beta1_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1beta1.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClassConfiguration_To_v1beta1_DeviceClassConfiguration(in, out, s)
}
func autoConvert_v1beta1_DeviceClassList_To_resource_DeviceClassList(in *resourcev1beta1.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_DeviceClassList_To_resource_DeviceClassList is an autogenerated conversion function.
func Convert_v1beta1_DeviceClassList_To_resource_DeviceClassList(in *resourcev1beta1.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClassList_To_resource_DeviceClassList(in, out, s)
}
func autoConvert_resource_DeviceClassList_To_v1beta1_DeviceClassList(in *resource.DeviceClassList, out *resourcev1beta1.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1beta1.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_DeviceClassList_To_v1beta1_DeviceClassList is an autogenerated conversion function.
func Convert_resource_DeviceClassList_To_v1beta1_DeviceClassList(in *resource.DeviceClassList, out *resourcev1beta1.DeviceClassList, s conversion.Scope) error {
return autoConvert_resource_DeviceClassList_To_v1beta1_DeviceClassList(in, out, s)
}
func autoConvert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1beta1.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resource.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec is an autogenerated conversion function.
func Convert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1beta1.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceClassSpec_To_resource_DeviceClassSpec(in, out, s)
}
func autoConvert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1beta1.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resourcev1beta1.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resourcev1beta1.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec is an autogenerated conversion function.
func Convert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1beta1.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_resource_DeviceClassSpec_To_v1beta1_DeviceClassSpec(in, out, s)
}
func autoConvert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1beta1.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resource.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration is an autogenerated conversion function.
func Convert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1beta1.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceConfiguration_To_resource_DeviceConfiguration(in, out, s)
}
func autoConvert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1beta1.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resourcev1beta1.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1beta1.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceConfiguration_To_v1beta1_DeviceConfiguration(in, out, s)
}
func autoConvert_v1beta1_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1beta1.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_v1beta1_DeviceConstraint_To_resource_DeviceConstraint is an autogenerated conversion function.
func Convert_v1beta1_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1beta1.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceConstraint_To_resource_DeviceConstraint(in, out, s)
}
func autoConvert_resource_DeviceConstraint_To_v1beta1_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1beta1.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resourcev1beta1.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resourcev1beta1.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_resource_DeviceConstraint_To_v1beta1_DeviceConstraint is an autogenerated conversion function.
func Convert_resource_DeviceConstraint_To_v1beta1_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1beta1.DeviceConstraint, s conversion.Scope) error {
return autoConvert_resource_DeviceConstraint_To_v1beta1_DeviceConstraint(in, out, s)
}
func autoConvert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1beta1.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1beta1.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in, out, s)
}
func autoConvert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1beta1.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resourcev1beta1.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1beta1.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_resource_DeviceCounterConsumption_To_v1beta1_DeviceCounterConsumption(in, out, s)
}
func autoConvert_v1beta1_DeviceRequest_To_resource_DeviceRequest(in *resourcev1beta1.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
// WARNING: in.DeviceClassName requires manual conversion: does not exist in peer-type
// WARNING: in.Selectors requires manual conversion: does not exist in peer-type
// WARNING: in.AllocationMode requires manual conversion: does not exist in peer-type
// WARNING: in.Count requires manual conversion: does not exist in peer-type
// WARNING: in.AdminAccess requires manual conversion: does not exist in peer-type
out.FirstAvailable = *(*[]resource.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
// WARNING: in.Tolerations requires manual conversion: does not exist in peer-type
// WARNING: in.Capacity requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_resource_DeviceRequest_To_v1beta1_DeviceRequest(in *resource.DeviceRequest, out *resourcev1beta1.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
// WARNING: in.Exactly requires manual conversion: does not exist in peer-type
out.FirstAvailable = *(*[]resourcev1beta1.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
return nil
}
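// Note (explanatory, not emitted by conversion-gen): the WARNING comments
// above reflect a structural split between the two versions. The internal
// DeviceRequest wraps the exact-match request in a single Exactly sub-struct,
// whereas v1beta1 inlines the corresponding fields (DeviceClassName,
// Selectors, AllocationMode, Count, AdminAccess, ...) directly on
// DeviceRequest. conversion-gen cannot map one struct onto a group of inlined
// fields, so the exported Convert_* wrappers for DeviceRequest are expected
// to be hand-written, moving each field between in.Exactly and the flattened
// v1beta1 layout.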
func autoConvert_v1beta1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1beta1.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_v1beta1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_v1beta1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1beta1.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceRequestAllocationResult_To_v1beta1_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1beta1.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resourcev1beta1.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resourcev1beta1.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_resource_DeviceRequestAllocationResult_To_v1beta1_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceRequestAllocationResult_To_v1beta1_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1beta1.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceRequestAllocationResult_To_v1beta1_DeviceRequestAllocationResult(in, out, s)
}
func autoConvert_v1beta1_DeviceSelector_To_resource_DeviceSelector(in *resourcev1beta1.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resource.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_v1beta1_DeviceSelector_To_resource_DeviceSelector is an autogenerated conversion function.
func Convert_v1beta1_DeviceSelector_To_resource_DeviceSelector(in *resourcev1beta1.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceSelector_To_resource_DeviceSelector(in, out, s)
}
func autoConvert_resource_DeviceSelector_To_v1beta1_DeviceSelector(in *resource.DeviceSelector, out *resourcev1beta1.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resourcev1beta1.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_resource_DeviceSelector_To_v1beta1_DeviceSelector is an autogenerated conversion function.
func Convert_resource_DeviceSelector_To_v1beta1_DeviceSelector(in *resource.DeviceSelector, out *resourcev1beta1.DeviceSelector, s conversion.Scope) error {
return autoConvert_resource_DeviceSelector_To_v1beta1_DeviceSelector(in, out, s)
}
func autoConvert_v1beta1_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1beta1.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resource.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1beta1_DeviceSubRequest_To_resource_DeviceSubRequest is an autogenerated conversion function.
func Convert_v1beta1_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1beta1.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceSubRequest_To_resource_DeviceSubRequest(in, out, s)
}
func autoConvert_resource_DeviceSubRequest_To_v1beta1_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1beta1.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resourcev1beta1.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resourcev1beta1.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resourcev1beta1.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resourcev1beta1.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_resource_DeviceSubRequest_To_v1beta1_DeviceSubRequest is an autogenerated conversion function.
func Convert_resource_DeviceSubRequest_To_v1beta1_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1beta1.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_resource_DeviceSubRequest_To_v1beta1_DeviceSubRequest(in, out, s)
}
func autoConvert_v1beta1_DeviceTaint_To_resource_DeviceTaint(in *resourcev1beta1.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_v1beta1_DeviceTaint_To_resource_DeviceTaint is an autogenerated conversion function.
func Convert_v1beta1_DeviceTaint_To_resource_DeviceTaint(in *resourcev1beta1.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceTaint_To_resource_DeviceTaint(in, out, s)
}
func autoConvert_resource_DeviceTaint_To_v1beta1_DeviceTaint(in *resource.DeviceTaint, out *resourcev1beta1.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resourcev1beta1.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_resource_DeviceTaint_To_v1beta1_DeviceTaint is an autogenerated conversion function.
func Convert_resource_DeviceTaint_To_v1beta1_DeviceTaint(in *resource.DeviceTaint, out *resourcev1beta1.DeviceTaint, s conversion.Scope) error {
return autoConvert_resource_DeviceTaint_To_v1beta1_DeviceTaint(in, out, s)
}
func autoConvert_v1beta1_DeviceToleration_To_resource_DeviceToleration(in *resourcev1beta1.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resource.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_v1beta1_DeviceToleration_To_resource_DeviceToleration is an autogenerated conversion function.
func Convert_v1beta1_DeviceToleration_To_resource_DeviceToleration(in *resourcev1beta1.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceToleration_To_resource_DeviceToleration(in, out, s)
}
func autoConvert_resource_DeviceToleration_To_v1beta1_DeviceToleration(in *resource.DeviceToleration, out *resourcev1beta1.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resourcev1beta1.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resourcev1beta1.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_resource_DeviceToleration_To_v1beta1_DeviceToleration is an autogenerated conversion function.
func Convert_resource_DeviceToleration_To_v1beta1_DeviceToleration(in *resource.DeviceToleration, out *resourcev1beta1.DeviceToleration, s conversion.Scope) error {
return autoConvert_resource_DeviceToleration_To_v1beta1_DeviceToleration(in, out, s)
}
func autoConvert_v1beta1_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1beta1.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_v1beta1_NetworkDeviceData_To_resource_NetworkDeviceData is an autogenerated conversion function.
func Convert_v1beta1_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1beta1.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_v1beta1_NetworkDeviceData_To_resource_NetworkDeviceData(in, out, s)
}
func autoConvert_resource_NetworkDeviceData_To_v1beta1_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1beta1.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_resource_NetworkDeviceData_To_v1beta1_NetworkDeviceData is an autogenerated conversion function.
func Convert_resource_NetworkDeviceData_To_v1beta1_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1beta1.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_resource_NetworkDeviceData_To_v1beta1_NetworkDeviceData(in, out, s)
}
func autoConvert_v1beta1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1beta1.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_v1beta1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_v1beta1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1beta1.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_resource_OpaqueDeviceConfiguration_To_v1beta1_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1beta1.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_resource_OpaqueDeviceConfiguration_To_v1beta1_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_resource_OpaqueDeviceConfiguration_To_v1beta1_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1beta1.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_OpaqueDeviceConfiguration_To_v1beta1_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_v1beta1_ResourceClaim_To_resource_ResourceClaim(in *resourcev1beta1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaim_To_resource_ResourceClaim(in *resourcev1beta1.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaim_To_resource_ResourceClaim(in, out, s)
}
func autoConvert_resource_ResourceClaim_To_v1beta1_ResourceClaim(in *resource.ResourceClaim, out *resourcev1beta1.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaim_To_v1beta1_ResourceClaim is an autogenerated conversion function.
func Convert_resource_ResourceClaim_To_v1beta1_ResourceClaim(in *resource.ResourceClaim, out *resourcev1beta1.ResourceClaim, s conversion.Scope) error {
return autoConvert_resource_ResourceClaim_To_v1beta1_ResourceClaim(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1beta1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_v1beta1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1beta1.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_resource_ResourceClaimConsumerReference_To_v1beta1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1beta1.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_resource_ResourceClaimConsumerReference_To_v1beta1_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_resource_ResourceClaimConsumerReference_To_v1beta1_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1beta1.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimConsumerReference_To_v1beta1_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1beta1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resource.ResourceClaim, len(*in))
for i := range *in {
if err := Convert_v1beta1_ResourceClaim_To_resource_ResourceClaim(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1beta1.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimList_To_resource_ResourceClaimList(in, out, s)
}
func autoConvert_resource_ResourceClaimList_To_v1beta1_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1beta1.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resourcev1beta1.ResourceClaim, len(*in))
for i := range *in {
if err := Convert_resource_ResourceClaim_To_v1beta1_ResourceClaim(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_resource_ResourceClaimList_To_v1beta1_ResourceClaimList is an autogenerated conversion function.
func Convert_resource_ResourceClaimList_To_v1beta1_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1beta1.ResourceClaimList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimList_To_v1beta1_ResourceClaimList(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1beta1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_v1beta1_DeviceClaim_To_resource_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1beta1.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1beta1.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_resource_DeviceClaim_To_v1beta1_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1beta1.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1beta1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resource.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1beta1.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s)
}
func autoConvert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1beta1.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resourcev1beta1.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resourcev1beta1.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resourcev1beta1.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus is an autogenerated conversion function.
func Convert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1beta1.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimStatus_To_v1beta1_ResourceClaimStatus(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1beta1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1beta1.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1beta1.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1beta1.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1beta1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resource.ResourceClaimTemplate, len(*in))
for i := range *in {
if err := Convert_v1beta1_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1beta1.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateList_To_v1beta1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1beta1.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resourcev1beta1.ResourceClaimTemplate, len(*in))
for i := range *in {
if err := Convert_resource_ResourceClaimTemplate_To_v1beta1_ResourceClaimTemplate(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_resource_ResourceClaimTemplateList_To_v1beta1_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateList_To_v1beta1_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1beta1.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateList_To_v1beta1_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1beta1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1beta1.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1beta1.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1beta1_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1beta1.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateSpec_To_v1beta1_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_v1beta1_ResourcePool_To_resource_ResourcePool(in *resourcev1beta1.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_v1beta1_ResourcePool_To_resource_ResourcePool is an autogenerated conversion function.
func Convert_v1beta1_ResourcePool_To_resource_ResourcePool(in *resourcev1beta1.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
return autoConvert_v1beta1_ResourcePool_To_resource_ResourcePool(in, out, s)
}
func autoConvert_resource_ResourcePool_To_v1beta1_ResourcePool(in *resource.ResourcePool, out *resourcev1beta1.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_resource_ResourcePool_To_v1beta1_ResourcePool is an autogenerated conversion function.
func Convert_resource_ResourcePool_To_v1beta1_ResourcePool(in *resource.ResourcePool, out *resourcev1beta1.ResourcePool, s conversion.Scope) error {
return autoConvert_resource_ResourcePool_To_v1beta1_ResourcePool(in, out, s)
}
func autoConvert_v1beta1_ResourceSlice_To_resource_ResourceSlice(in *resourcev1beta1.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_ResourceSlice_To_resource_ResourceSlice is an autogenerated conversion function.
func Convert_v1beta1_ResourceSlice_To_resource_ResourceSlice(in *resourcev1beta1.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceSlice_To_resource_ResourceSlice(in, out, s)
}
func autoConvert_resource_ResourceSlice_To_v1beta1_ResourceSlice(in *resource.ResourceSlice, out *resourcev1beta1.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceSlice_To_v1beta1_ResourceSlice is an autogenerated conversion function.
func Convert_resource_ResourceSlice_To_v1beta1_ResourceSlice(in *resource.ResourceSlice, out *resourcev1beta1.ResourceSlice, s conversion.Scope) error {
return autoConvert_resource_ResourceSlice_To_v1beta1_ResourceSlice(in, out, s)
}
func autoConvert_v1beta1_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1beta1.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resource.ResourceSlice, len(*in))
for i := range *in {
if err := Convert_v1beta1_ResourceSlice_To_resource_ResourceSlice(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_ResourceSliceList_To_resource_ResourceSliceList is an autogenerated conversion function.
func Convert_v1beta1_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1beta1.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceSliceList_To_resource_ResourceSliceList(in, out, s)
}
func autoConvert_resource_ResourceSliceList_To_v1beta1_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1beta1.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]resourcev1beta1.ResourceSlice, len(*in))
for i := range *in {
if err := Convert_resource_ResourceSlice_To_v1beta1_ResourceSlice(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_resource_ResourceSliceList_To_v1beta1_ResourceSliceList is an autogenerated conversion function.
func Convert_resource_ResourceSliceList_To_v1beta1_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1beta1.ResourceSliceList, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceList_To_v1beta1_ResourceSliceList(in, out, s)
}
func autoConvert_v1beta1_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1beta1.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_v1beta1_ResourcePool_To_resource_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
if err := v1.Convert_string_To_Pointer_string(&in.NodeName, &out.NodeName, s); err != nil {
return err
}
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
if err := v1.Convert_bool_To_Pointer_bool(&in.AllNodes, &out.AllNodes, s); err != nil {
return err
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]resource.Device, len(*in))
for i := range *in {
if err := Convert_v1beta1_Device_To_resource_Device(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Devices = nil
}
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resource.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
func autoConvert_resource_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1beta1.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_resource_ResourcePool_To_v1beta1_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
if err := v1.Convert_Pointer_string_To_string(&in.NodeName, &out.NodeName, s); err != nil {
return err
}
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
if err := v1.Convert_Pointer_bool_To_bool(&in.AllNodes, &out.AllNodes, s); err != nil {
return err
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]resourcev1beta1.Device, len(*in))
for i := range *in {
if err := Convert_resource_Device_To_v1beta1_Device(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Devices = nil
}
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resourcev1beta1.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
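// Note on the unsafe.Pointer casts above: conversion-gen emits casts such as
// *(*[]resourcev1beta1.CounterSet)(unsafe.Pointer(&in.SharedCounters)) only
// when it has verified that the source and destination types have identical
// memory layouts, so the slice header can be reinterpreted without copying.
// A minimal sketch of the pattern (illustrative only; A and B are
// hypothetical, field-for-field identical types):
//
//	type A struct{ Name string }
//	type B struct{ Name string }
//
//	func aliasSlice(in []A) []B {
//		// Reinterpret the slice header in place; valid only because
//		// A and B share the same memory layout.
//		return *(*[]B)(unsafe.Pointer(&in))
//	}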
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
resourcev1beta1 "k8s.io/api/resource/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*resourcev1beta1.ResourceClaim)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*resourcev1beta1.ResourceClaimList)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceClaimTemplate{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplate(obj.(*resourcev1beta1.ResourceClaimTemplate))
})
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceClaimTemplateList{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplateList(obj.(*resourcev1beta1.ResourceClaimTemplateList))
})
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceSlice{}, func(obj interface{}) { SetObjectDefaults_ResourceSlice(obj.(*resourcev1beta1.ResourceSlice)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta1.ResourceSliceList{}, func(obj interface{}) { SetObjectDefaults_ResourceSliceList(obj.(*resourcev1beta1.ResourceSliceList)) })
return nil
}
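// A minimal usage sketch (not part of the generated code): once
// RegisterDefaults has been called on a scheme, scheme.Default applies all
// registered defaulters to an object. Assumes a fresh runtime.Scheme.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	claim := &resourcev1beta1.ResourceClaim{}
//	scheme.Default(claim) // runs SetObjectDefaults_ResourceClaim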
func SetObjectDefaults_ResourceClaim(in *resourcev1beta1.ResourceClaim) {
for i := range in.Spec.Devices.Requests {
a := &in.Spec.Devices.Requests[i]
SetDefaults_DeviceRequest(a)
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
for j := range a.Tolerations {
b := &a.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
if in.Status.Allocation != nil {
for i := range in.Status.Allocation.Devices.Results {
a := &in.Status.Allocation.Devices.Results[i]
for j := range a.Tolerations {
b := &a.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
}
}
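// For illustration (hypothetical values): given a claim whose request carries
// a toleration with an empty operator, SetObjectDefaults_ResourceClaim fills
// in the default:
//
//	claim := &resourcev1beta1.ResourceClaim{}
//	claim.Spec.Devices.Requests = []resourcev1beta1.DeviceRequest{{
//		Tolerations: []resourcev1beta1.DeviceToleration{{}},
//	}}
//	SetObjectDefaults_ResourceClaim(claim)
//	// claim.Spec.Devices.Requests[0].Tolerations[0].Operator == "Equal"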
func SetObjectDefaults_ResourceClaimList(in *resourcev1beta1.ResourceClaimList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaim(a)
}
}
func SetObjectDefaults_ResourceClaimTemplate(in *resourcev1beta1.ResourceClaimTemplate) {
for i := range in.Spec.Spec.Devices.Requests {
a := &in.Spec.Spec.Devices.Requests[i]
SetDefaults_DeviceRequest(a)
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
for j := range a.Tolerations {
b := &a.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
}
func SetObjectDefaults_ResourceClaimTemplateList(in *resourcev1beta1.ResourceClaimTemplateList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaimTemplate(a)
}
}
func SetObjectDefaults_ResourceSlice(in *resourcev1beta1.ResourceSlice) {
for i := range in.Spec.Devices {
a := &in.Spec.Devices[i]
if a.Basic != nil {
for j := range a.Basic.Taints {
b := &a.Basic.Taints[j]
SetDefaults_DeviceTaint(b)
}
}
}
}
func SetObjectDefaults_ResourceSliceList(in *resourcev1beta1.ResourceSliceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceSlice(a)
}
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"fmt"
resourceapi "k8s.io/api/resource/v1beta2"
"k8s.io/apimachinery/pkg/runtime"
)
// addConversionFuncs registers the field label conversions that are supported
// when field selectors are used against ResourceSlice objects.
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ResourceSlice"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", resourceapi.ResourceSliceSelectorNodeName, resourceapi.ResourceSliceSelectorDriver:
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("ResourceSlice"), label)
}
}); err != nil {
return err
}
return nil
}
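// A usage sketch (illustrative only): the scheme invokes the function
// registered above when converting field selectors, e.g. via
// ConvertFieldLabel:
//
//	label, value, err := scheme.ConvertFieldLabel(
//		SchemeGroupVersion.WithKind("ResourceSlice"),
//		resourceapi.ResourceSliceSelectorNodeName, "node-1")
//	// label == resourceapi.ResourceSliceSelectorNodeName, value == "node-1"
//	// An unsupported label such as "spec.foo" returns an error instead.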
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
resourceapi "k8s.io/api/resource/v1beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// addDefaultingFuncs registers the generated defaulters with the scheme.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_ExactDeviceRequest defaults the allocation mode to ExactCount
// and, in that mode, defaults the device count to 1.
func SetDefaults_ExactDeviceRequest(obj *resourceapi.ExactDeviceRequest) {
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
// SetDefaults_DeviceSubRequest defaults the allocation mode to ExactCount
// and, in that mode, defaults the device count to 1.
func SetDefaults_DeviceSubRequest(obj *resourceapi.DeviceSubRequest) {
if obj.AllocationMode == "" {
obj.AllocationMode = resourceapi.DeviceAllocationModeExactCount
}
if obj.AllocationMode == resourceapi.DeviceAllocationModeExactCount && obj.Count == 0 {
obj.Count = 1
}
}
// SetDefaults_DeviceTaint sets TimeAdded to the current time, truncated to
// whole seconds, if it is unset.
func SetDefaults_DeviceTaint(obj *resourceapi.DeviceTaint) {
if obj.TimeAdded == nil {
obj.TimeAdded = &metav1.Time{Time: time.Now().Truncate(time.Second)}
}
}
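// Illustrative sketch (hypothetical values): the defaulters above only touch
// unset fields and are idempotent. For example:
//
//	req := &resourceapi.ExactDeviceRequest{}
//	SetDefaults_ExactDeviceRequest(req)
//	// req.AllocationMode == resourceapi.DeviceAllocationModeExactCount
//	// req.Count == 1
//
//	taint := &resourceapi.DeviceTaint{}
//	SetDefaults_DeviceTaint(taint)
//	// taint.TimeAdded is now set, truncated to whole seconds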
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/resource/v1beta2"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
localSchemeBuilder = &v1beta2.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
// TODO: remove these global variables
// GroupName is the group name used in this package
const GroupName = "resource.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
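// For example (illustrative only):
//
//	gr := Resource("resourceclaims")
//	// gr == schema.GroupResource{Group: "resource.k8s.io", Resource: "resourceclaims"}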
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta2
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
resourcev1beta2 "k8s.io/api/resource/v1beta2"
apiresource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
resource "k8s.io/kubernetes/pkg/apis/resource"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.AllocatedDeviceStatus)(nil), (*resource.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(a.(*resourcev1beta2.AllocatedDeviceStatus), b.(*resource.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocatedDeviceStatus)(nil), (*resourcev1beta2.AllocatedDeviceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocatedDeviceStatus_To_v1beta2_AllocatedDeviceStatus(a.(*resource.AllocatedDeviceStatus), b.(*resourcev1beta2.AllocatedDeviceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.AllocationResult)(nil), (*resource.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_AllocationResult_To_resource_AllocationResult(a.(*resourcev1beta2.AllocationResult), b.(*resource.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.AllocationResult)(nil), (*resourcev1beta2.AllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_AllocationResult_To_v1beta2_AllocationResult(a.(*resource.AllocationResult), b.(*resourcev1beta2.AllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.CELDeviceSelector)(nil), (*resource.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_CELDeviceSelector_To_resource_CELDeviceSelector(a.(*resourcev1beta2.CELDeviceSelector), b.(*resource.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CELDeviceSelector)(nil), (*resourcev1beta2.CELDeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CELDeviceSelector_To_v1beta2_CELDeviceSelector(a.(*resource.CELDeviceSelector), b.(*resourcev1beta2.CELDeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.CapacityRequestPolicy)(nil), (*resource.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(a.(*resourcev1beta2.CapacityRequestPolicy), b.(*resource.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicy)(nil), (*resourcev1beta2.CapacityRequestPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicy_To_v1beta2_CapacityRequestPolicy(a.(*resource.CapacityRequestPolicy), b.(*resourcev1beta2.CapacityRequestPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.CapacityRequestPolicyRange)(nil), (*resource.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(a.(*resourcev1beta2.CapacityRequestPolicyRange), b.(*resource.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequestPolicyRange)(nil), (*resourcev1beta2.CapacityRequestPolicyRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequestPolicyRange_To_v1beta2_CapacityRequestPolicyRange(a.(*resource.CapacityRequestPolicyRange), b.(*resourcev1beta2.CapacityRequestPolicyRange), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.CapacityRequirements)(nil), (*resource.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_CapacityRequirements_To_resource_CapacityRequirements(a.(*resourcev1beta2.CapacityRequirements), b.(*resource.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CapacityRequirements)(nil), (*resourcev1beta2.CapacityRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CapacityRequirements_To_v1beta2_CapacityRequirements(a.(*resource.CapacityRequirements), b.(*resourcev1beta2.CapacityRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.Counter)(nil), (*resource.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Counter_To_resource_Counter(a.(*resourcev1beta2.Counter), b.(*resource.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.Counter)(nil), (*resourcev1beta2.Counter)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Counter_To_v1beta2_Counter(a.(*resource.Counter), b.(*resourcev1beta2.Counter), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.CounterSet)(nil), (*resource.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_CounterSet_To_resource_CounterSet(a.(*resourcev1beta2.CounterSet), b.(*resource.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.CounterSet)(nil), (*resourcev1beta2.CounterSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_CounterSet_To_v1beta2_CounterSet(a.(*resource.CounterSet), b.(*resourcev1beta2.CounterSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.Device)(nil), (*resource.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Device_To_resource_Device(a.(*resourcev1beta2.Device), b.(*resource.Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.Device)(nil), (*resourcev1beta2.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_Device_To_v1beta2_Device(a.(*resource.Device), b.(*resourcev1beta2.Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceAllocationConfiguration)(nil), (*resource.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(a.(*resourcev1beta2.DeviceAllocationConfiguration), b.(*resource.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationConfiguration)(nil), (*resourcev1beta2.DeviceAllocationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationConfiguration_To_v1beta2_DeviceAllocationConfiguration(a.(*resource.DeviceAllocationConfiguration), b.(*resourcev1beta2.DeviceAllocationConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceAllocationResult)(nil), (*resource.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult(a.(*resourcev1beta2.DeviceAllocationResult), b.(*resource.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAllocationResult)(nil), (*resourcev1beta2.DeviceAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult(a.(*resource.DeviceAllocationResult), b.(*resourcev1beta2.DeviceAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceAttribute)(nil), (*resource.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceAttribute_To_resource_DeviceAttribute(a.(*resourcev1beta2.DeviceAttribute), b.(*resource.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceAttribute)(nil), (*resourcev1beta2.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceAttribute_To_v1beta2_DeviceAttribute(a.(*resource.DeviceAttribute), b.(*resourcev1beta2.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceCapacity)(nil), (*resource.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceCapacity_To_resource_DeviceCapacity(a.(*resourcev1beta2.DeviceCapacity), b.(*resource.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCapacity)(nil), (*resourcev1beta2.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCapacity_To_v1beta2_DeviceCapacity(a.(*resource.DeviceCapacity), b.(*resourcev1beta2.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClaim)(nil), (*resource.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClaim_To_resource_DeviceClaim(a.(*resourcev1beta2.DeviceClaim), b.(*resource.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaim)(nil), (*resourcev1beta2.DeviceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaim_To_v1beta2_DeviceClaim(a.(*resource.DeviceClaim), b.(*resourcev1beta2.DeviceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClaimConfiguration)(nil), (*resource.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(a.(*resourcev1beta2.DeviceClaimConfiguration), b.(*resource.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClaimConfiguration)(nil), (*resourcev1beta2.DeviceClaimConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClaimConfiguration_To_v1beta2_DeviceClaimConfiguration(a.(*resource.DeviceClaimConfiguration), b.(*resourcev1beta2.DeviceClaimConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClass)(nil), (*resource.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClass_To_resource_DeviceClass(a.(*resourcev1beta2.DeviceClass), b.(*resource.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClass)(nil), (*resourcev1beta2.DeviceClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClass_To_v1beta2_DeviceClass(a.(*resource.DeviceClass), b.(*resourcev1beta2.DeviceClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClassConfiguration)(nil), (*resource.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(a.(*resourcev1beta2.DeviceClassConfiguration), b.(*resource.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassConfiguration)(nil), (*resourcev1beta2.DeviceClassConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassConfiguration_To_v1beta2_DeviceClassConfiguration(a.(*resource.DeviceClassConfiguration), b.(*resourcev1beta2.DeviceClassConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClassList)(nil), (*resource.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClassList_To_resource_DeviceClassList(a.(*resourcev1beta2.DeviceClassList), b.(*resource.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassList)(nil), (*resourcev1beta2.DeviceClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassList_To_v1beta2_DeviceClassList(a.(*resource.DeviceClassList), b.(*resourcev1beta2.DeviceClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceClassSpec)(nil), (*resource.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec(a.(*resourcev1beta2.DeviceClassSpec), b.(*resource.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceClassSpec)(nil), (*resourcev1beta2.DeviceClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec(a.(*resource.DeviceClassSpec), b.(*resourcev1beta2.DeviceClassSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceConfiguration)(nil), (*resource.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(a.(*resourcev1beta2.DeviceConfiguration), b.(*resource.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConfiguration)(nil), (*resourcev1beta2.DeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(a.(*resource.DeviceConfiguration), b.(*resourcev1beta2.DeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceConstraint)(nil), (*resource.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceConstraint_To_resource_DeviceConstraint(a.(*resourcev1beta2.DeviceConstraint), b.(*resource.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceConstraint)(nil), (*resourcev1beta2.DeviceConstraint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceConstraint_To_v1beta2_DeviceConstraint(a.(*resource.DeviceConstraint), b.(*resourcev1beta2.DeviceConstraint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceCounterConsumption)(nil), (*resource.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(a.(*resourcev1beta2.DeviceCounterConsumption), b.(*resource.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceCounterConsumption)(nil), (*resourcev1beta2.DeviceCounterConsumption)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceCounterConsumption_To_v1beta2_DeviceCounterConsumption(a.(*resource.DeviceCounterConsumption), b.(*resourcev1beta2.DeviceCounterConsumption), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceRequest)(nil), (*resource.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceRequest_To_resource_DeviceRequest(a.(*resourcev1beta2.DeviceRequest), b.(*resource.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceRequest)(nil), (*resourcev1beta2.DeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequest_To_v1beta2_DeviceRequest(a.(*resource.DeviceRequest), b.(*resourcev1beta2.DeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceRequestAllocationResult)(nil), (*resource.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(a.(*resourcev1beta2.DeviceRequestAllocationResult), b.(*resource.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceRequestAllocationResult)(nil), (*resourcev1beta2.DeviceRequestAllocationResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceRequestAllocationResult_To_v1beta2_DeviceRequestAllocationResult(a.(*resource.DeviceRequestAllocationResult), b.(*resourcev1beta2.DeviceRequestAllocationResult), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceSelector)(nil), (*resource.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceSelector_To_resource_DeviceSelector(a.(*resourcev1beta2.DeviceSelector), b.(*resource.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSelector)(nil), (*resourcev1beta2.DeviceSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSelector_To_v1beta2_DeviceSelector(a.(*resource.DeviceSelector), b.(*resourcev1beta2.DeviceSelector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceSubRequest)(nil), (*resource.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceSubRequest_To_resource_DeviceSubRequest(a.(*resourcev1beta2.DeviceSubRequest), b.(*resource.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceSubRequest)(nil), (*resourcev1beta2.DeviceSubRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceSubRequest_To_v1beta2_DeviceSubRequest(a.(*resource.DeviceSubRequest), b.(*resourcev1beta2.DeviceSubRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceTaint)(nil), (*resource.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceTaint_To_resource_DeviceTaint(a.(*resourcev1beta2.DeviceTaint), b.(*resource.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceTaint)(nil), (*resourcev1beta2.DeviceTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceTaint_To_v1beta2_DeviceTaint(a.(*resource.DeviceTaint), b.(*resourcev1beta2.DeviceTaint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.DeviceToleration)(nil), (*resource.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DeviceToleration_To_resource_DeviceToleration(a.(*resourcev1beta2.DeviceToleration), b.(*resource.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.DeviceToleration)(nil), (*resourcev1beta2.DeviceToleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_DeviceToleration_To_v1beta2_DeviceToleration(a.(*resource.DeviceToleration), b.(*resourcev1beta2.DeviceToleration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ExactDeviceRequest)(nil), (*resource.ExactDeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ExactDeviceRequest_To_resource_ExactDeviceRequest(a.(*resourcev1beta2.ExactDeviceRequest), b.(*resource.ExactDeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ExactDeviceRequest)(nil), (*resourcev1beta2.ExactDeviceRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ExactDeviceRequest_To_v1beta2_ExactDeviceRequest(a.(*resource.ExactDeviceRequest), b.(*resourcev1beta2.ExactDeviceRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.NetworkDeviceData)(nil), (*resource.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_NetworkDeviceData_To_resource_NetworkDeviceData(a.(*resourcev1beta2.NetworkDeviceData), b.(*resource.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.NetworkDeviceData)(nil), (*resourcev1beta2.NetworkDeviceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_NetworkDeviceData_To_v1beta2_NetworkDeviceData(a.(*resource.NetworkDeviceData), b.(*resourcev1beta2.NetworkDeviceData), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.OpaqueDeviceConfiguration)(nil), (*resource.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(a.(*resourcev1beta2.OpaqueDeviceConfiguration), b.(*resource.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.OpaqueDeviceConfiguration)(nil), (*resourcev1beta2.OpaqueDeviceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_OpaqueDeviceConfiguration_To_v1beta2_OpaqueDeviceConfiguration(a.(*resource.OpaqueDeviceConfiguration), b.(*resourcev1beta2.OpaqueDeviceConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaim_To_resource_ResourceClaim(a.(*resourcev1beta2.ResourceClaim), b.(*resource.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaim)(nil), (*resourcev1beta2.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaim_To_v1beta2_ResourceClaim(a.(*resource.ResourceClaim), b.(*resourcev1beta2.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimConsumerReference)(nil), (*resource.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(a.(*resourcev1beta2.ResourceClaimConsumerReference), b.(*resource.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimConsumerReference)(nil), (*resourcev1beta2.ResourceClaimConsumerReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimConsumerReference_To_v1beta2_ResourceClaimConsumerReference(a.(*resource.ResourceClaimConsumerReference), b.(*resourcev1beta2.ResourceClaimConsumerReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimList)(nil), (*resource.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimList_To_resource_ResourceClaimList(a.(*resourcev1beta2.ResourceClaimList), b.(*resource.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimList)(nil), (*resourcev1beta2.ResourceClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimList_To_v1beta2_ResourceClaimList(a.(*resource.ResourceClaimList), b.(*resourcev1beta2.ResourceClaimList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*resourcev1beta2.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSpec)(nil), (*resourcev1beta2.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(a.(*resource.ResourceClaimSpec), b.(*resourcev1beta2.ResourceClaimSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimStatus)(nil), (*resource.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus(a.(*resourcev1beta2.ResourceClaimStatus), b.(*resource.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimStatus)(nil), (*resourcev1beta2.ResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus(a.(*resource.ResourceClaimStatus), b.(*resourcev1beta2.ResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimTemplate)(nil), (*resource.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(a.(*resourcev1beta2.ResourceClaimTemplate), b.(*resource.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplate)(nil), (*resourcev1beta2.ResourceClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplate_To_v1beta2_ResourceClaimTemplate(a.(*resource.ResourceClaimTemplate), b.(*resourcev1beta2.ResourceClaimTemplate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimTemplateList)(nil), (*resource.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(a.(*resourcev1beta2.ResourceClaimTemplateList), b.(*resource.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateList)(nil), (*resourcev1beta2.ResourceClaimTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateList_To_v1beta2_ResourceClaimTemplateList(a.(*resource.ResourceClaimTemplateList), b.(*resourcev1beta2.ResourceClaimTemplateList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceClaimTemplateSpec)(nil), (*resource.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(a.(*resourcev1beta2.ResourceClaimTemplateSpec), b.(*resource.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimTemplateSpec)(nil), (*resourcev1beta2.ResourceClaimTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec(a.(*resource.ResourceClaimTemplateSpec), b.(*resourcev1beta2.ResourceClaimTemplateSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourcePool)(nil), (*resource.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourcePool_To_resource_ResourcePool(a.(*resourcev1beta2.ResourcePool), b.(*resource.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourcePool)(nil), (*resourcev1beta2.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourcePool_To_v1beta2_ResourcePool(a.(*resource.ResourcePool), b.(*resourcev1beta2.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceSlice)(nil), (*resource.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceSlice_To_resource_ResourceSlice(a.(*resourcev1beta2.ResourceSlice), b.(*resource.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSlice)(nil), (*resourcev1beta2.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSlice_To_v1beta2_ResourceSlice(a.(*resource.ResourceSlice), b.(*resourcev1beta2.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceSliceList)(nil), (*resource.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceSliceList_To_resource_ResourceSliceList(a.(*resourcev1beta2.ResourceSliceList), b.(*resource.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceList)(nil), (*resourcev1beta2.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceList_To_v1beta2_ResourceSliceList(a.(*resource.ResourceSliceList), b.(*resourcev1beta2.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resourcev1beta2.ResourceSliceSpec)(nil), (*resource.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec(a.(*resourcev1beta2.ResourceSliceSpec), b.(*resource.ResourceSliceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceSpec)(nil), (*resourcev1beta2.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec(a.(*resource.ResourceSliceSpec), b.(*resourcev1beta2.ResourceSliceSpec), scope)
}); err != nil {
return err
}
return nil
}
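// A minimal usage sketch (not part of the generated code): once
// RegisterConversions has run, the scheme can convert between the versioned
// and internal types in either direction via scheme.Convert:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	in := &resourcev1beta2.ResourceClaim{}
//	out := &resource.ResourceClaim{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		panic(err)
//	}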
func autoConvert_v1beta2_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1beta2.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resource.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_v1beta2_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_v1beta2_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in *resourcev1beta2.AllocatedDeviceStatus, out *resource.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_v1beta2_AllocatedDeviceStatus_To_resource_AllocatedDeviceStatus(in, out, s)
}
func autoConvert_resource_AllocatedDeviceStatus_To_v1beta2_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1beta2.AllocatedDeviceStatus, s conversion.Scope) error {
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.ShareID = (*string)(unsafe.Pointer(in.ShareID))
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
out.Data = (*runtime.RawExtension)(unsafe.Pointer(in.Data))
out.NetworkData = (*resourcev1beta2.NetworkDeviceData)(unsafe.Pointer(in.NetworkData))
return nil
}
// Convert_resource_AllocatedDeviceStatus_To_v1beta2_AllocatedDeviceStatus is an autogenerated conversion function.
func Convert_resource_AllocatedDeviceStatus_To_v1beta2_AllocatedDeviceStatus(in *resource.AllocatedDeviceStatus, out *resourcev1beta2.AllocatedDeviceStatus, s conversion.Scope) error {
return autoConvert_resource_AllocatedDeviceStatus_To_v1beta2_AllocatedDeviceStatus(in, out, s)
}
func autoConvert_v1beta2_AllocationResult_To_resource_AllocationResult(in *resourcev1beta2.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
if err := Convert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*v1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_v1beta2_AllocationResult_To_resource_AllocationResult is an autogenerated conversion function.
func Convert_v1beta2_AllocationResult_To_resource_AllocationResult(in *resourcev1beta2.AllocationResult, out *resource.AllocationResult, s conversion.Scope) error {
return autoConvert_v1beta2_AllocationResult_To_resource_AllocationResult(in, out, s)
}
func autoConvert_resource_AllocationResult_To_v1beta2_AllocationResult(in *resource.AllocationResult, out *resourcev1beta2.AllocationResult, s conversion.Scope) error {
if err := Convert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult(&in.Devices, &out.Devices, s); err != nil {
return err
}
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllocationTimestamp = (*v1.Time)(unsafe.Pointer(in.AllocationTimestamp))
return nil
}
// Convert_resource_AllocationResult_To_v1beta2_AllocationResult is an autogenerated conversion function.
func Convert_resource_AllocationResult_To_v1beta2_AllocationResult(in *resource.AllocationResult, out *resourcev1beta2.AllocationResult, s conversion.Scope) error {
return autoConvert_resource_AllocationResult_To_v1beta2_AllocationResult(in, out, s)
}
func autoConvert_v1beta2_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1beta2.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_v1beta2_CELDeviceSelector_To_resource_CELDeviceSelector is an autogenerated conversion function.
func Convert_v1beta2_CELDeviceSelector_To_resource_CELDeviceSelector(in *resourcev1beta2.CELDeviceSelector, out *resource.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_v1beta2_CELDeviceSelector_To_resource_CELDeviceSelector(in, out, s)
}
func autoConvert_resource_CELDeviceSelector_To_v1beta2_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1beta2.CELDeviceSelector, s conversion.Scope) error {
out.Expression = in.Expression
return nil
}
// Convert_resource_CELDeviceSelector_To_v1beta2_CELDeviceSelector is an autogenerated conversion function.
func Convert_resource_CELDeviceSelector_To_v1beta2_CELDeviceSelector(in *resource.CELDeviceSelector, out *resourcev1beta2.CELDeviceSelector, s conversion.Scope) error {
return autoConvert_resource_CELDeviceSelector_To_v1beta2_CELDeviceSelector(in, out, s)
}
func autoConvert_v1beta2_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1beta2.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resource.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_v1beta2_CapacityRequestPolicy_To_resource_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_v1beta2_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in *resourcev1beta2.CapacityRequestPolicy, out *resource.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_v1beta2_CapacityRequestPolicy_To_resource_CapacityRequestPolicy(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicy_To_v1beta2_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1beta2.CapacityRequestPolicy, s conversion.Scope) error {
out.Default = (*apiresource.Quantity)(unsafe.Pointer(in.Default))
out.ValidValues = *(*[]apiresource.Quantity)(unsafe.Pointer(&in.ValidValues))
out.ValidRange = (*resourcev1beta2.CapacityRequestPolicyRange)(unsafe.Pointer(in.ValidRange))
return nil
}
// Convert_resource_CapacityRequestPolicy_To_v1beta2_CapacityRequestPolicy is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicy_To_v1beta2_CapacityRequestPolicy(in *resource.CapacityRequestPolicy, out *resourcev1beta2.CapacityRequestPolicy, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicy_To_v1beta2_CapacityRequestPolicy(in, out, s)
}
func autoConvert_v1beta2_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1beta2.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_v1beta2_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_v1beta2_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in *resourcev1beta2.CapacityRequestPolicyRange, out *resource.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_v1beta2_CapacityRequestPolicyRange_To_resource_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_resource_CapacityRequestPolicyRange_To_v1beta2_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1beta2.CapacityRequestPolicyRange, s conversion.Scope) error {
out.Min = (*apiresource.Quantity)(unsafe.Pointer(in.Min))
out.Max = (*apiresource.Quantity)(unsafe.Pointer(in.Max))
out.Step = (*apiresource.Quantity)(unsafe.Pointer(in.Step))
return nil
}
// Convert_resource_CapacityRequestPolicyRange_To_v1beta2_CapacityRequestPolicyRange is an autogenerated conversion function.
func Convert_resource_CapacityRequestPolicyRange_To_v1beta2_CapacityRequestPolicyRange(in *resource.CapacityRequestPolicyRange, out *resourcev1beta2.CapacityRequestPolicyRange, s conversion.Scope) error {
return autoConvert_resource_CapacityRequestPolicyRange_To_v1beta2_CapacityRequestPolicyRange(in, out, s)
}
func autoConvert_v1beta2_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1beta2.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_v1beta2_CapacityRequirements_To_resource_CapacityRequirements is an autogenerated conversion function.
func Convert_v1beta2_CapacityRequirements_To_resource_CapacityRequirements(in *resourcev1beta2.CapacityRequirements, out *resource.CapacityRequirements, s conversion.Scope) error {
return autoConvert_v1beta2_CapacityRequirements_To_resource_CapacityRequirements(in, out, s)
}
func autoConvert_resource_CapacityRequirements_To_v1beta2_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1beta2.CapacityRequirements, s conversion.Scope) error {
out.Requests = *(*map[resourcev1beta2.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_resource_CapacityRequirements_To_v1beta2_CapacityRequirements is an autogenerated conversion function.
func Convert_resource_CapacityRequirements_To_v1beta2_CapacityRequirements(in *resource.CapacityRequirements, out *resourcev1beta2.CapacityRequirements, s conversion.Scope) error {
return autoConvert_resource_CapacityRequirements_To_v1beta2_CapacityRequirements(in, out, s)
}
func autoConvert_v1beta2_Counter_To_resource_Counter(in *resourcev1beta2.Counter, out *resource.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_v1beta2_Counter_To_resource_Counter is an autogenerated conversion function.
func Convert_v1beta2_Counter_To_resource_Counter(in *resourcev1beta2.Counter, out *resource.Counter, s conversion.Scope) error {
return autoConvert_v1beta2_Counter_To_resource_Counter(in, out, s)
}
func autoConvert_resource_Counter_To_v1beta2_Counter(in *resource.Counter, out *resourcev1beta2.Counter, s conversion.Scope) error {
out.Value = in.Value
return nil
}
// Convert_resource_Counter_To_v1beta2_Counter is an autogenerated conversion function.
func Convert_resource_Counter_To_v1beta2_Counter(in *resource.Counter, out *resourcev1beta2.Counter, s conversion.Scope) error {
return autoConvert_resource_Counter_To_v1beta2_Counter(in, out, s)
}
func autoConvert_v1beta2_CounterSet_To_resource_CounterSet(in *resourcev1beta2.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1beta2_CounterSet_To_resource_CounterSet is an autogenerated conversion function.
func Convert_v1beta2_CounterSet_To_resource_CounterSet(in *resourcev1beta2.CounterSet, out *resource.CounterSet, s conversion.Scope) error {
return autoConvert_v1beta2_CounterSet_To_resource_CounterSet(in, out, s)
}
func autoConvert_resource_CounterSet_To_v1beta2_CounterSet(in *resource.CounterSet, out *resourcev1beta2.CounterSet, s conversion.Scope) error {
out.Name = in.Name
out.Counters = *(*map[string]resourcev1beta2.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_CounterSet_To_v1beta2_CounterSet is an autogenerated conversion function.
func Convert_resource_CounterSet_To_v1beta2_CounterSet(in *resource.CounterSet, out *resourcev1beta2.CounterSet, s conversion.Scope) error {
return autoConvert_resource_CounterSet_To_v1beta2_CounterSet(in, out, s)
}
func autoConvert_v1beta2_Device_To_resource_Device(in *resourcev1beta2.Device, out *resource.Device, s conversion.Scope) error {
out.Name = in.Name
out.Attributes = *(*map[resource.QualifiedName]resource.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[resource.QualifiedName]resource.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
out.ConsumesCounters = *(*[]resource.DeviceCounterConsumption)(unsafe.Pointer(&in.ConsumesCounters))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Taints = *(*[]resource.DeviceTaint)(unsafe.Pointer(&in.Taints))
out.BindsToNode = (*bool)(unsafe.Pointer(in.BindsToNode))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.AllowMultipleAllocations = (*bool)(unsafe.Pointer(in.AllowMultipleAllocations))
return nil
}
// Convert_v1beta2_Device_To_resource_Device is an autogenerated conversion function.
func Convert_v1beta2_Device_To_resource_Device(in *resourcev1beta2.Device, out *resource.Device, s conversion.Scope) error {
return autoConvert_v1beta2_Device_To_resource_Device(in, out, s)
}
func autoConvert_resource_Device_To_v1beta2_Device(in *resource.Device, out *resourcev1beta2.Device, s conversion.Scope) error {
out.Name = in.Name
out.Attributes = *(*map[resourcev1beta2.QualifiedName]resourcev1beta2.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[resourcev1beta2.QualifiedName]resourcev1beta2.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
out.ConsumesCounters = *(*[]resourcev1beta2.DeviceCounterConsumption)(unsafe.Pointer(&in.ConsumesCounters))
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Taints = *(*[]resourcev1beta2.DeviceTaint)(unsafe.Pointer(&in.Taints))
out.BindsToNode = (*bool)(unsafe.Pointer(in.BindsToNode))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.AllowMultipleAllocations = (*bool)(unsafe.Pointer(in.AllowMultipleAllocations))
return nil
}
// Convert_resource_Device_To_v1beta2_Device is an autogenerated conversion function.
func Convert_resource_Device_To_v1beta2_Device(in *resource.Device, out *resourcev1beta2.Device, s conversion.Scope) error {
return autoConvert_resource_Device_To_v1beta2_Device(in, out, s)
}
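// The NodeSelector assignments above cast between the internal
// core.NodeSelector and the versioned corev1.NodeSelector across package
// boundaries. The unsafe reinterpretation is only valid because the two
// structs are field-for-field identical in memory, which conversion-gen
// checks when generating this file.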
func autoConvert_v1beta2_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1beta2.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resource.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_v1beta2_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in *resourcev1beta2.DeviceAllocationConfiguration, out *resource.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceAllocationConfiguration_To_resource_DeviceAllocationConfiguration(in, out, s)
}
func autoConvert_resource_DeviceAllocationConfiguration_To_v1beta2_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1beta2.DeviceAllocationConfiguration, s conversion.Scope) error {
out.Source = resourcev1beta2.AllocationConfigSource(in.Source)
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceAllocationConfiguration_To_v1beta2_DeviceAllocationConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceAllocationConfiguration_To_v1beta2_DeviceAllocationConfiguration(in *resource.DeviceAllocationConfiguration, out *resourcev1beta2.DeviceAllocationConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationConfiguration_To_v1beta2_DeviceAllocationConfiguration(in, out, s)
}
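// For embedded struct fields such as DeviceConfiguration above, the
// generated code delegates to the typed Convert_ helpers instead of
// casting, so any customized conversion registered for the embedded type
// still takes effect.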
func autoConvert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1beta2.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resource.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resource.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult is an autogenerated conversion function.
func Convert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult(in *resourcev1beta2.DeviceAllocationResult, out *resource.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceAllocationResult_To_resource_DeviceAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1beta2.DeviceAllocationResult, s conversion.Scope) error {
out.Results = *(*[]resourcev1beta2.DeviceRequestAllocationResult)(unsafe.Pointer(&in.Results))
out.Config = *(*[]resourcev1beta2.DeviceAllocationConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult(in *resource.DeviceAllocationResult, out *resourcev1beta2.DeviceAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceAllocationResult_To_v1beta2_DeviceAllocationResult(in, out, s)
}
func autoConvert_v1beta2_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1beta2.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_v1beta2_DeviceAttribute_To_resource_DeviceAttribute is an autogenerated conversion function.
func Convert_v1beta2_DeviceAttribute_To_resource_DeviceAttribute(in *resourcev1beta2.DeviceAttribute, out *resource.DeviceAttribute, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceAttribute_To_resource_DeviceAttribute(in, out, s)
}
func autoConvert_resource_DeviceAttribute_To_v1beta2_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1beta2.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
out.VersionValue = (*string)(unsafe.Pointer(in.VersionValue))
return nil
}
// Convert_resource_DeviceAttribute_To_v1beta2_DeviceAttribute is an autogenerated conversion function.
func Convert_resource_DeviceAttribute_To_v1beta2_DeviceAttribute(in *resource.DeviceAttribute, out *resourcev1beta2.DeviceAttribute, s conversion.Scope) error {
return autoConvert_resource_DeviceAttribute_To_v1beta2_DeviceAttribute(in, out, s)
}
func autoConvert_v1beta2_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1beta2.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resource.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_v1beta2_DeviceCapacity_To_resource_DeviceCapacity is an autogenerated conversion function.
func Convert_v1beta2_DeviceCapacity_To_resource_DeviceCapacity(in *resourcev1beta2.DeviceCapacity, out *resource.DeviceCapacity, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceCapacity_To_resource_DeviceCapacity(in, out, s)
}
func autoConvert_resource_DeviceCapacity_To_v1beta2_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1beta2.DeviceCapacity, s conversion.Scope) error {
out.Value = in.Value
out.RequestPolicy = (*resourcev1beta2.CapacityRequestPolicy)(unsafe.Pointer(in.RequestPolicy))
return nil
}
// Convert_resource_DeviceCapacity_To_v1beta2_DeviceCapacity is an autogenerated conversion function.
func Convert_resource_DeviceCapacity_To_v1beta2_DeviceCapacity(in *resource.DeviceCapacity, out *resourcev1beta2.DeviceCapacity, s conversion.Scope) error {
return autoConvert_resource_DeviceCapacity_To_v1beta2_DeviceCapacity(in, out, s)
}
func autoConvert_v1beta2_DeviceClaim_To_resource_DeviceClaim(in *resourcev1beta2.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
out.Requests = *(*[]resource.DeviceRequest)(unsafe.Pointer(&in.Requests))
out.Constraints = *(*[]resource.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resource.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_v1beta2_DeviceClaim_To_resource_DeviceClaim is an autogenerated conversion function.
func Convert_v1beta2_DeviceClaim_To_resource_DeviceClaim(in *resourcev1beta2.DeviceClaim, out *resource.DeviceClaim, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClaim_To_resource_DeviceClaim(in, out, s)
}
func autoConvert_resource_DeviceClaim_To_v1beta2_DeviceClaim(in *resource.DeviceClaim, out *resourcev1beta2.DeviceClaim, s conversion.Scope) error {
out.Requests = *(*[]resourcev1beta2.DeviceRequest)(unsafe.Pointer(&in.Requests))
out.Constraints = *(*[]resourcev1beta2.DeviceConstraint)(unsafe.Pointer(&in.Constraints))
out.Config = *(*[]resourcev1beta2.DeviceClaimConfiguration)(unsafe.Pointer(&in.Config))
return nil
}
// Convert_resource_DeviceClaim_To_v1beta2_DeviceClaim is an autogenerated conversion function.
func Convert_resource_DeviceClaim_To_v1beta2_DeviceClaim(in *resource.DeviceClaim, out *resourcev1beta2.DeviceClaim, s conversion.Scope) error {
return autoConvert_resource_DeviceClaim_To_v1beta2_DeviceClaim(in, out, s)
}
func autoConvert_v1beta2_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1beta2.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_v1beta2_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in *resourcev1beta2.DeviceClaimConfiguration, out *resource.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClaimConfiguration_To_resource_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClaimConfiguration_To_v1beta2_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1beta2.DeviceClaimConfiguration, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
if err := Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClaimConfiguration_To_v1beta2_DeviceClaimConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClaimConfiguration_To_v1beta2_DeviceClaimConfiguration(in *resource.DeviceClaimConfiguration, out *resourcev1beta2.DeviceClaimConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClaimConfiguration_To_v1beta2_DeviceClaimConfiguration(in, out, s)
}
func autoConvert_v1beta2_DeviceClass_To_resource_DeviceClass(in *resourcev1beta2.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_DeviceClass_To_resource_DeviceClass is an autogenerated conversion function.
func Convert_v1beta2_DeviceClass_To_resource_DeviceClass(in *resourcev1beta2.DeviceClass, out *resource.DeviceClass, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClass_To_resource_DeviceClass(in, out, s)
}
func autoConvert_resource_DeviceClass_To_v1beta2_DeviceClass(in *resource.DeviceClass, out *resourcev1beta2.DeviceClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClass_To_v1beta2_DeviceClass is an autogenerated conversion function.
func Convert_resource_DeviceClass_To_v1beta2_DeviceClass(in *resource.DeviceClass, out *resourcev1beta2.DeviceClass, s conversion.Scope) error {
return autoConvert_resource_DeviceClass_To_v1beta2_DeviceClass(in, out, s)
}
func autoConvert_v1beta2_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1beta2.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_DeviceClassConfiguration_To_resource_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_v1beta2_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in *resourcev1beta2.DeviceClassConfiguration, out *resource.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClassConfiguration_To_resource_DeviceClassConfiguration(in, out, s)
}
func autoConvert_resource_DeviceClassConfiguration_To_v1beta2_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1beta2.DeviceClassConfiguration, s conversion.Scope) error {
if err := Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(&in.DeviceConfiguration, &out.DeviceConfiguration, s); err != nil {
return err
}
return nil
}
// Convert_resource_DeviceClassConfiguration_To_v1beta2_DeviceClassConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceClassConfiguration_To_v1beta2_DeviceClassConfiguration(in *resource.DeviceClassConfiguration, out *resourcev1beta2.DeviceClassConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceClassConfiguration_To_v1beta2_DeviceClassConfiguration(in, out, s)
}
func autoConvert_v1beta2_DeviceClassList_To_resource_DeviceClassList(in *resourcev1beta2.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_DeviceClassList_To_resource_DeviceClassList is an autogenerated conversion function.
func Convert_v1beta2_DeviceClassList_To_resource_DeviceClassList(in *resourcev1beta2.DeviceClassList, out *resource.DeviceClassList, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClassList_To_resource_DeviceClassList(in, out, s)
}
func autoConvert_resource_DeviceClassList_To_v1beta2_DeviceClassList(in *resource.DeviceClassList, out *resourcev1beta2.DeviceClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1beta2.DeviceClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_DeviceClassList_To_v1beta2_DeviceClassList is an autogenerated conversion function.
func Convert_resource_DeviceClassList_To_v1beta2_DeviceClassList(in *resource.DeviceClassList, out *resourcev1beta2.DeviceClassList, s conversion.Scope) error {
return autoConvert_resource_DeviceClassList_To_v1beta2_DeviceClassList(in, out, s)
}
func autoConvert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1beta2.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resource.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec is an autogenerated conversion function.
func Convert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1beta2.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceClassSpec_To_resource_DeviceClassSpec(in, out, s)
}
func autoConvert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1beta2.DeviceClassSpec, s conversion.Scope) error {
out.Selectors = *(*[]resourcev1beta2.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.Config = *(*[]resourcev1beta2.DeviceClassConfiguration)(unsafe.Pointer(&in.Config))
out.ExtendedResourceName = (*string)(unsafe.Pointer(in.ExtendedResourceName))
return nil
}
// Convert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec is an autogenerated conversion function.
func Convert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1beta2.DeviceClassSpec, s conversion.Scope) error {
return autoConvert_resource_DeviceClassSpec_To_v1beta2_DeviceClassSpec(in, out, s)
}
func autoConvert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1beta2.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resource.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration is an autogenerated conversion function.
func Convert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(in *resourcev1beta2.DeviceConfiguration, out *resource.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceConfiguration_To_resource_DeviceConfiguration(in, out, s)
}
func autoConvert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1beta2.DeviceConfiguration, s conversion.Scope) error {
out.Opaque = (*resourcev1beta2.OpaqueDeviceConfiguration)(unsafe.Pointer(in.Opaque))
return nil
}
// Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration is an autogenerated conversion function.
func Convert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(in *resource.DeviceConfiguration, out *resourcev1beta2.DeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_DeviceConfiguration_To_v1beta2_DeviceConfiguration(in, out, s)
}
func autoConvert_v1beta2_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1beta2.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resource.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_v1beta2_DeviceConstraint_To_resource_DeviceConstraint is an autogenerated conversion function.
func Convert_v1beta2_DeviceConstraint_To_resource_DeviceConstraint(in *resourcev1beta2.DeviceConstraint, out *resource.DeviceConstraint, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceConstraint_To_resource_DeviceConstraint(in, out, s)
}
func autoConvert_resource_DeviceConstraint_To_v1beta2_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1beta2.DeviceConstraint, s conversion.Scope) error {
out.Requests = *(*[]string)(unsafe.Pointer(&in.Requests))
out.MatchAttribute = (*resourcev1beta2.FullyQualifiedName)(unsafe.Pointer(in.MatchAttribute))
out.DistinctAttribute = (*resourcev1beta2.FullyQualifiedName)(unsafe.Pointer(in.DistinctAttribute))
return nil
}
// Convert_resource_DeviceConstraint_To_v1beta2_DeviceConstraint is an autogenerated conversion function.
func Convert_resource_DeviceConstraint_To_v1beta2_DeviceConstraint(in *resource.DeviceConstraint, out *resourcev1beta2.DeviceConstraint, s conversion.Scope) error {
return autoConvert_resource_DeviceConstraint_To_v1beta2_DeviceConstraint(in, out, s)
}
func autoConvert_v1beta2_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1beta2.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resource.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_v1beta2_DeviceCounterConsumption_To_resource_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_v1beta2_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in *resourcev1beta2.DeviceCounterConsumption, out *resource.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceCounterConsumption_To_resource_DeviceCounterConsumption(in, out, s)
}
func autoConvert_resource_DeviceCounterConsumption_To_v1beta2_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1beta2.DeviceCounterConsumption, s conversion.Scope) error {
out.CounterSet = in.CounterSet
out.Counters = *(*map[string]resourcev1beta2.Counter)(unsafe.Pointer(&in.Counters))
return nil
}
// Convert_resource_DeviceCounterConsumption_To_v1beta2_DeviceCounterConsumption is an autogenerated conversion function.
func Convert_resource_DeviceCounterConsumption_To_v1beta2_DeviceCounterConsumption(in *resource.DeviceCounterConsumption, out *resourcev1beta2.DeviceCounterConsumption, s conversion.Scope) error {
return autoConvert_resource_DeviceCounterConsumption_To_v1beta2_DeviceCounterConsumption(in, out, s)
}
func autoConvert_v1beta2_DeviceRequest_To_resource_DeviceRequest(in *resourcev1beta2.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
out.Exactly = (*resource.ExactDeviceRequest)(unsafe.Pointer(in.Exactly))
out.FirstAvailable = *(*[]resource.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
return nil
}
// Convert_v1beta2_DeviceRequest_To_resource_DeviceRequest is an autogenerated conversion function.
func Convert_v1beta2_DeviceRequest_To_resource_DeviceRequest(in *resourcev1beta2.DeviceRequest, out *resource.DeviceRequest, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceRequest_To_resource_DeviceRequest(in, out, s)
}
func autoConvert_resource_DeviceRequest_To_v1beta2_DeviceRequest(in *resource.DeviceRequest, out *resourcev1beta2.DeviceRequest, s conversion.Scope) error {
out.Name = in.Name
out.Exactly = (*resourcev1beta2.ExactDeviceRequest)(unsafe.Pointer(in.Exactly))
out.FirstAvailable = *(*[]resourcev1beta2.DeviceSubRequest)(unsafe.Pointer(&in.FirstAvailable))
return nil
}
// Convert_resource_DeviceRequest_To_v1beta2_DeviceRequest is an autogenerated conversion function.
func Convert_resource_DeviceRequest_To_v1beta2_DeviceRequest(in *resource.DeviceRequest, out *resourcev1beta2.DeviceRequest, s conversion.Scope) error {
return autoConvert_resource_DeviceRequest_To_v1beta2_DeviceRequest(in, out, s)
}
func autoConvert_v1beta2_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1beta2.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resource.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_v1beta2_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_v1beta2_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in *resourcev1beta2.DeviceRequestAllocationResult, out *resource.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceRequestAllocationResult_To_resource_DeviceRequestAllocationResult(in, out, s)
}
func autoConvert_resource_DeviceRequestAllocationResult_To_v1beta2_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1beta2.DeviceRequestAllocationResult, s conversion.Scope) error {
out.Request = in.Request
out.Driver = in.Driver
out.Pool = in.Pool
out.Device = in.Device
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resourcev1beta2.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.BindingConditions = *(*[]string)(unsafe.Pointer(&in.BindingConditions))
out.BindingFailureConditions = *(*[]string)(unsafe.Pointer(&in.BindingFailureConditions))
out.ShareID = (*types.UID)(unsafe.Pointer(in.ShareID))
out.ConsumedCapacity = *(*map[resourcev1beta2.QualifiedName]apiresource.Quantity)(unsafe.Pointer(&in.ConsumedCapacity))
return nil
}
// Convert_resource_DeviceRequestAllocationResult_To_v1beta2_DeviceRequestAllocationResult is an autogenerated conversion function.
func Convert_resource_DeviceRequestAllocationResult_To_v1beta2_DeviceRequestAllocationResult(in *resource.DeviceRequestAllocationResult, out *resourcev1beta2.DeviceRequestAllocationResult, s conversion.Scope) error {
return autoConvert_resource_DeviceRequestAllocationResult_To_v1beta2_DeviceRequestAllocationResult(in, out, s)
}
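// The ShareID assignments above cast between *string and *types.UID via
// unsafe.Pointer. This is safe because types.UID is declared as
// "type UID string" in k8s.io/apimachinery/pkg/types, so both pointer types
// share the same underlying representation.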
func autoConvert_v1beta2_DeviceSelector_To_resource_DeviceSelector(in *resourcev1beta2.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resource.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_v1beta2_DeviceSelector_To_resource_DeviceSelector is an autogenerated conversion function.
func Convert_v1beta2_DeviceSelector_To_resource_DeviceSelector(in *resourcev1beta2.DeviceSelector, out *resource.DeviceSelector, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceSelector_To_resource_DeviceSelector(in, out, s)
}
func autoConvert_resource_DeviceSelector_To_v1beta2_DeviceSelector(in *resource.DeviceSelector, out *resourcev1beta2.DeviceSelector, s conversion.Scope) error {
out.CEL = (*resourcev1beta2.CELDeviceSelector)(unsafe.Pointer(in.CEL))
return nil
}
// Convert_resource_DeviceSelector_To_v1beta2_DeviceSelector is an autogenerated conversion function.
func Convert_resource_DeviceSelector_To_v1beta2_DeviceSelector(in *resource.DeviceSelector, out *resourcev1beta2.DeviceSelector, s conversion.Scope) error {
return autoConvert_resource_DeviceSelector_To_v1beta2_DeviceSelector(in, out, s)
}
func autoConvert_v1beta2_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1beta2.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resource.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1beta2_DeviceSubRequest_To_resource_DeviceSubRequest is an autogenerated conversion function.
func Convert_v1beta2_DeviceSubRequest_To_resource_DeviceSubRequest(in *resourcev1beta2.DeviceSubRequest, out *resource.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceSubRequest_To_resource_DeviceSubRequest(in, out, s)
}
func autoConvert_resource_DeviceSubRequest_To_v1beta2_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1beta2.DeviceSubRequest, s conversion.Scope) error {
out.Name = in.Name
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resourcev1beta2.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resourcev1beta2.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.Tolerations = *(*[]resourcev1beta2.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resourcev1beta2.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_resource_DeviceSubRequest_To_v1beta2_DeviceSubRequest is an autogenerated conversion function.
func Convert_resource_DeviceSubRequest_To_v1beta2_DeviceSubRequest(in *resource.DeviceSubRequest, out *resourcev1beta2.DeviceSubRequest, s conversion.Scope) error {
return autoConvert_resource_DeviceSubRequest_To_v1beta2_DeviceSubRequest(in, out, s)
}
func autoConvert_v1beta2_DeviceTaint_To_resource_DeviceTaint(in *resourcev1beta2.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_v1beta2_DeviceTaint_To_resource_DeviceTaint is an autogenerated conversion function.
func Convert_v1beta2_DeviceTaint_To_resource_DeviceTaint(in *resourcev1beta2.DeviceTaint, out *resource.DeviceTaint, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceTaint_To_resource_DeviceTaint(in, out, s)
}
func autoConvert_resource_DeviceTaint_To_v1beta2_DeviceTaint(in *resource.DeviceTaint, out *resourcev1beta2.DeviceTaint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = resourcev1beta2.DeviceTaintEffect(in.Effect)
out.TimeAdded = (*v1.Time)(unsafe.Pointer(in.TimeAdded))
return nil
}
// Convert_resource_DeviceTaint_To_v1beta2_DeviceTaint is an autogenerated conversion function.
func Convert_resource_DeviceTaint_To_v1beta2_DeviceTaint(in *resource.DeviceTaint, out *resourcev1beta2.DeviceTaint, s conversion.Scope) error {
return autoConvert_resource_DeviceTaint_To_v1beta2_DeviceTaint(in, out, s)
}
func autoConvert_v1beta2_DeviceToleration_To_resource_DeviceToleration(in *resourcev1beta2.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resource.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resource.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_v1beta2_DeviceToleration_To_resource_DeviceToleration is an autogenerated conversion function.
func Convert_v1beta2_DeviceToleration_To_resource_DeviceToleration(in *resourcev1beta2.DeviceToleration, out *resource.DeviceToleration, s conversion.Scope) error {
return autoConvert_v1beta2_DeviceToleration_To_resource_DeviceToleration(in, out, s)
}
func autoConvert_resource_DeviceToleration_To_v1beta2_DeviceToleration(in *resource.DeviceToleration, out *resourcev1beta2.DeviceToleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = resourcev1beta2.DeviceTolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = resourcev1beta2.DeviceTaintEffect(in.Effect)
out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds))
return nil
}
// Convert_resource_DeviceToleration_To_v1beta2_DeviceToleration is an autogenerated conversion function.
func Convert_resource_DeviceToleration_To_v1beta2_DeviceToleration(in *resource.DeviceToleration, out *resourcev1beta2.DeviceToleration, s conversion.Scope) error {
return autoConvert_resource_DeviceToleration_To_v1beta2_DeviceToleration(in, out, s)
}
func autoConvert_v1beta2_ExactDeviceRequest_To_resource_ExactDeviceRequest(in *resourcev1beta2.ExactDeviceRequest, out *resource.ExactDeviceRequest, s conversion.Scope) error {
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resource.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resource.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resource.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_v1beta2_ExactDeviceRequest_To_resource_ExactDeviceRequest is an autogenerated conversion function.
func Convert_v1beta2_ExactDeviceRequest_To_resource_ExactDeviceRequest(in *resourcev1beta2.ExactDeviceRequest, out *resource.ExactDeviceRequest, s conversion.Scope) error {
return autoConvert_v1beta2_ExactDeviceRequest_To_resource_ExactDeviceRequest(in, out, s)
}
func autoConvert_resource_ExactDeviceRequest_To_v1beta2_ExactDeviceRequest(in *resource.ExactDeviceRequest, out *resourcev1beta2.ExactDeviceRequest, s conversion.Scope) error {
out.DeviceClassName = in.DeviceClassName
out.Selectors = *(*[]resourcev1beta2.DeviceSelector)(unsafe.Pointer(&in.Selectors))
out.AllocationMode = resourcev1beta2.DeviceAllocationMode(in.AllocationMode)
out.Count = in.Count
out.AdminAccess = (*bool)(unsafe.Pointer(in.AdminAccess))
out.Tolerations = *(*[]resourcev1beta2.DeviceToleration)(unsafe.Pointer(&in.Tolerations))
out.Capacity = (*resourcev1beta2.CapacityRequirements)(unsafe.Pointer(in.Capacity))
return nil
}
// Convert_resource_ExactDeviceRequest_To_v1beta2_ExactDeviceRequest is an autogenerated conversion function.
func Convert_resource_ExactDeviceRequest_To_v1beta2_ExactDeviceRequest(in *resource.ExactDeviceRequest, out *resourcev1beta2.ExactDeviceRequest, s conversion.Scope) error {
return autoConvert_resource_ExactDeviceRequest_To_v1beta2_ExactDeviceRequest(in, out, s)
}
func autoConvert_v1beta2_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1beta2.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_v1beta2_NetworkDeviceData_To_resource_NetworkDeviceData is an autogenerated conversion function.
func Convert_v1beta2_NetworkDeviceData_To_resource_NetworkDeviceData(in *resourcev1beta2.NetworkDeviceData, out *resource.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_v1beta2_NetworkDeviceData_To_resource_NetworkDeviceData(in, out, s)
}
func autoConvert_resource_NetworkDeviceData_To_v1beta2_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1beta2.NetworkDeviceData, s conversion.Scope) error {
out.InterfaceName = in.InterfaceName
out.IPs = *(*[]string)(unsafe.Pointer(&in.IPs))
out.HardwareAddress = in.HardwareAddress
return nil
}
// Convert_resource_NetworkDeviceData_To_v1beta2_NetworkDeviceData is an autogenerated conversion function.
func Convert_resource_NetworkDeviceData_To_v1beta2_NetworkDeviceData(in *resource.NetworkDeviceData, out *resourcev1beta2.NetworkDeviceData, s conversion.Scope) error {
return autoConvert_resource_NetworkDeviceData_To_v1beta2_NetworkDeviceData(in, out, s)
}
func autoConvert_v1beta2_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1beta2.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_v1beta2_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_v1beta2_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in *resourcev1beta2.OpaqueDeviceConfiguration, out *resource.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_OpaqueDeviceConfiguration_To_resource_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_resource_OpaqueDeviceConfiguration_To_v1beta2_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1beta2.OpaqueDeviceConfiguration, s conversion.Scope) error {
out.Driver = in.Driver
out.Parameters = in.Parameters
return nil
}
// Convert_resource_OpaqueDeviceConfiguration_To_v1beta2_OpaqueDeviceConfiguration is an autogenerated conversion function.
func Convert_resource_OpaqueDeviceConfiguration_To_v1beta2_OpaqueDeviceConfiguration(in *resource.OpaqueDeviceConfiguration, out *resourcev1beta2.OpaqueDeviceConfiguration, s conversion.Scope) error {
return autoConvert_resource_OpaqueDeviceConfiguration_To_v1beta2_OpaqueDeviceConfiguration(in, out, s)
}
func autoConvert_v1beta2_ResourceClaim_To_resource_ResourceClaim(in *resourcev1beta2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ResourceClaim_To_resource_ResourceClaim is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaim_To_resource_ResourceClaim(in *resourcev1beta2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaim_To_resource_ResourceClaim(in, out, s)
}
func autoConvert_resource_ResourceClaim_To_v1beta2_ResourceClaim(in *resource.ResourceClaim, out *resourcev1beta2.ResourceClaim, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaim_To_v1beta2_ResourceClaim is an autogenerated conversion function.
func Convert_resource_ResourceClaim_To_v1beta2_ResourceClaim(in *resource.ResourceClaim, out *resourcev1beta2.ResourceClaim, s conversion.Scope) error {
return autoConvert_resource_ResourceClaim_To_v1beta2_ResourceClaim(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1beta2.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_v1beta2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in *resourcev1beta2.ResourceClaimConsumerReference, out *resource.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimConsumerReference_To_resource_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_resource_ResourceClaimConsumerReference_To_v1beta2_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1beta2.ResourceClaimConsumerReference, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Resource = in.Resource
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}
// Convert_resource_ResourceClaimConsumerReference_To_v1beta2_ResourceClaimConsumerReference is an autogenerated conversion function.
func Convert_resource_ResourceClaimConsumerReference_To_v1beta2_ResourceClaimConsumerReference(in *resource.ResourceClaimConsumerReference, out *resourcev1beta2.ResourceClaimConsumerReference, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimConsumerReference_To_v1beta2_ResourceClaimConsumerReference(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1beta2.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_ResourceClaimList_To_resource_ResourceClaimList is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimList_To_resource_ResourceClaimList(in *resourcev1beta2.ResourceClaimList, out *resource.ResourceClaimList, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimList_To_resource_ResourceClaimList(in, out, s)
}
func autoConvert_resource_ResourceClaimList_To_v1beta2_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1beta2.ResourceClaimList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1beta2.ResourceClaim)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimList_To_v1beta2_ResourceClaimList is an autogenerated conversion function.
func Convert_resource_ResourceClaimList_To_v1beta2_ResourceClaimList(in *resource.ResourceClaimList, out *resourcev1beta2.ResourceClaimList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimList_To_v1beta2_ResourceClaimList(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1beta2.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_v1beta2_DeviceClaim_To_resource_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1beta2.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1beta2.ResourceClaimSpec, s conversion.Scope) error {
if err := Convert_resource_DeviceClaim_To_v1beta2_DeviceClaim(&in.Devices, &out.Devices, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(in *resource.ResourceClaimSpec, out *resourcev1beta2.ResourceClaimSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1beta2.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resource.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1beta2.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimStatus_To_resource_ResourceClaimStatus(in, out, s)
}
func autoConvert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1beta2.ResourceClaimStatus, s conversion.Scope) error {
out.Allocation = (*resourcev1beta2.AllocationResult)(unsafe.Pointer(in.Allocation))
out.ReservedFor = *(*[]resourcev1beta2.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor))
out.Devices = *(*[]resourcev1beta2.AllocatedDeviceStatus)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus is an autogenerated conversion function.
func Convert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1beta2.ResourceClaimStatus, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimStatus_To_v1beta2_ResourceClaimStatus(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1beta2.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in *resourcev1beta2.ResourceClaimTemplate, out *resource.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimTemplate_To_resource_ResourceClaimTemplate(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplate_To_v1beta2_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1beta2.ResourceClaimTemplate, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplate_To_v1beta2_ResourceClaimTemplate is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplate_To_v1beta2_ResourceClaimTemplate(in *resource.ResourceClaimTemplate, out *resourcev1beta2.ResourceClaimTemplate, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplate_To_v1beta2_ResourceClaimTemplate(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1beta2.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in *resourcev1beta2.ResourceClaimTemplateList, out *resource.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimTemplateList_To_resource_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateList_To_v1beta2_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1beta2.ResourceClaimTemplateList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1beta2.ResourceClaimTemplate)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceClaimTemplateList_To_v1beta2_ResourceClaimTemplateList is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateList_To_v1beta2_ResourceClaimTemplateList(in *resource.ResourceClaimTemplateList, out *resourcev1beta2.ResourceClaimTemplateList, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateList_To_v1beta2_ResourceClaimTemplateList(in, out, s)
}
func autoConvert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1beta2.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in *resourcev1beta2.ResourceClaimTemplateSpec, out *resource.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceClaimTemplateSpec_To_resource_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1beta2.ResourceClaimTemplateSpec, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceClaimSpec_To_v1beta2_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec is an autogenerated conversion function.
func Convert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec(in *resource.ResourceClaimTemplateSpec, out *resourcev1beta2.ResourceClaimTemplateSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceClaimTemplateSpec_To_v1beta2_ResourceClaimTemplateSpec(in, out, s)
}
func autoConvert_v1beta2_ResourcePool_To_resource_ResourcePool(in *resourcev1beta2.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_v1beta2_ResourcePool_To_resource_ResourcePool is an autogenerated conversion function.
func Convert_v1beta2_ResourcePool_To_resource_ResourcePool(in *resourcev1beta2.ResourcePool, out *resource.ResourcePool, s conversion.Scope) error {
return autoConvert_v1beta2_ResourcePool_To_resource_ResourcePool(in, out, s)
}
func autoConvert_resource_ResourcePool_To_v1beta2_ResourcePool(in *resource.ResourcePool, out *resourcev1beta2.ResourcePool, s conversion.Scope) error {
out.Name = in.Name
out.Generation = in.Generation
out.ResourceSliceCount = in.ResourceSliceCount
return nil
}
// Convert_resource_ResourcePool_To_v1beta2_ResourcePool is an autogenerated conversion function.
func Convert_resource_ResourcePool_To_v1beta2_ResourcePool(in *resource.ResourcePool, out *resourcev1beta2.ResourcePool, s conversion.Scope) error {
return autoConvert_resource_ResourcePool_To_v1beta2_ResourcePool(in, out, s)
}
func autoConvert_v1beta2_ResourceSlice_To_resource_ResourceSlice(in *resourcev1beta2.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_ResourceSlice_To_resource_ResourceSlice is an autogenerated conversion function.
func Convert_v1beta2_ResourceSlice_To_resource_ResourceSlice(in *resourcev1beta2.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceSlice_To_resource_ResourceSlice(in, out, s)
}
func autoConvert_resource_ResourceSlice_To_v1beta2_ResourceSlice(in *resource.ResourceSlice, out *resourcev1beta2.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceSlice_To_v1beta2_ResourceSlice is an autogenerated conversion function.
func Convert_resource_ResourceSlice_To_v1beta2_ResourceSlice(in *resource.ResourceSlice, out *resourcev1beta2.ResourceSlice, s conversion.Scope) error {
return autoConvert_resource_ResourceSlice_To_v1beta2_ResourceSlice(in, out, s)
}
func autoConvert_v1beta2_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1beta2.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta2_ResourceSliceList_To_resource_ResourceSliceList is an autogenerated conversion function.
func Convert_v1beta2_ResourceSliceList_To_resource_ResourceSliceList(in *resourcev1beta2.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceSliceList_To_resource_ResourceSliceList(in, out, s)
}
func autoConvert_resource_ResourceSliceList_To_v1beta2_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1beta2.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resourcev1beta2.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceSliceList_To_v1beta2_ResourceSliceList is an autogenerated conversion function.
func Convert_resource_ResourceSliceList_To_v1beta2_ResourceSliceList(in *resource.ResourceSliceList, out *resourcev1beta2.ResourceSliceList, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceList_To_v1beta2_ResourceSliceList(in, out, s)
}
func autoConvert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1beta2.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_v1beta2_ResourcePool_To_resource_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Devices = *(*[]resource.Device)(unsafe.Pointer(&in.Devices))
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resource.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
// Convert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec is an autogenerated conversion function.
func Convert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec(in *resourcev1beta2.ResourceSliceSpec, out *resource.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_v1beta2_ResourceSliceSpec_To_resource_ResourceSliceSpec(in, out, s)
}
func autoConvert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1beta2.ResourceSliceSpec, s conversion.Scope) error {
out.Driver = in.Driver
if err := Convert_resource_ResourcePool_To_v1beta2_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
out.NodeName = (*string)(unsafe.Pointer(in.NodeName))
out.NodeSelector = (*corev1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
out.AllNodes = (*bool)(unsafe.Pointer(in.AllNodes))
out.Devices = *(*[]resourcev1beta2.Device)(unsafe.Pointer(&in.Devices))
out.PerDeviceNodeSelection = (*bool)(unsafe.Pointer(in.PerDeviceNodeSelection))
out.SharedCounters = *(*[]resourcev1beta2.CounterSet)(unsafe.Pointer(&in.SharedCounters))
return nil
}
// Convert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec is an autogenerated conversion function.
func Convert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec(in *resource.ResourceSliceSpec, out *resourcev1beta2.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceSpec_To_v1beta2_ResourceSliceSpec(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta2
import (
resourcev1beta2 "k8s.io/api/resource/v1beta2"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceClaim{}, func(obj interface{}) { SetObjectDefaults_ResourceClaim(obj.(*resourcev1beta2.ResourceClaim)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceClaimList{}, func(obj interface{}) { SetObjectDefaults_ResourceClaimList(obj.(*resourcev1beta2.ResourceClaimList)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceClaimTemplate{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplate(obj.(*resourcev1beta2.ResourceClaimTemplate))
})
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceClaimTemplateList{}, func(obj interface{}) {
SetObjectDefaults_ResourceClaimTemplateList(obj.(*resourcev1beta2.ResourceClaimTemplateList))
})
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceSlice{}, func(obj interface{}) { SetObjectDefaults_ResourceSlice(obj.(*resourcev1beta2.ResourceSlice)) })
scheme.AddTypeDefaultingFunc(&resourcev1beta2.ResourceSliceList{}, func(obj interface{}) { SetObjectDefaults_ResourceSliceList(obj.(*resourcev1beta2.ResourceSliceList)) })
return nil
}
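// A minimal sketch of how these defaulters are typically wired up; the
// scheme variable and error handling are illustrative, not part of the
// generated API:
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		panic(err)
//	}
//	claim := &resourcev1beta2.ResourceClaim{}
//	scheme.Default(claim) // applies SetObjectDefaults_ResourceClaim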
func SetObjectDefaults_ResourceClaim(in *resourcev1beta2.ResourceClaim) {
for i := range in.Spec.Devices.Requests {
a := &in.Spec.Devices.Requests[i]
if a.Exactly != nil {
SetDefaults_ExactDeviceRequest(a.Exactly)
for j := range a.Exactly.Tolerations {
b := &a.Exactly.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
}
if in.Status.Allocation != nil {
for i := range in.Status.Allocation.Devices.Results {
a := &in.Status.Allocation.Devices.Results[i]
for j := range a.Tolerations {
b := &a.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
}
}
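// Note on SetObjectDefaults_ResourceClaim above: the inlined checks that
// set Operator to "Equal" come from defaulter-gen expanding a declarative
// default (presumably a +default="Equal" marker on
// DeviceToleration.Operator) rather than calling a separate SetDefaults
// function; the expansion covers tolerations in the spec's requests,
// sub-requests, and allocated results alike.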
func SetObjectDefaults_ResourceClaimList(in *resourcev1beta2.ResourceClaimList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaim(a)
}
}
func SetObjectDefaults_ResourceClaimTemplate(in *resourcev1beta2.ResourceClaimTemplate) {
for i := range in.Spec.Spec.Devices.Requests {
a := &in.Spec.Spec.Devices.Requests[i]
if a.Exactly != nil {
SetDefaults_ExactDeviceRequest(a.Exactly)
for j := range a.Exactly.Tolerations {
b := &a.Exactly.Tolerations[j]
if b.Operator == "" {
b.Operator = "Equal"
}
}
}
for j := range a.FirstAvailable {
b := &a.FirstAvailable[j]
SetDefaults_DeviceSubRequest(b)
for k := range b.Tolerations {
c := &b.Tolerations[k]
if c.Operator == "" {
c.Operator = "Equal"
}
}
}
}
}
func SetObjectDefaults_ResourceClaimTemplateList(in *resourcev1beta2.ResourceClaimTemplateList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceClaimTemplate(a)
}
}
func SetObjectDefaults_ResourceSlice(in *resourcev1beta2.ResourceSlice) {
for i := range in.Spec.Devices {
a := &in.Spec.Devices[i]
for j := range a.Taints {
b := &a.Taints[j]
SetDefaults_DeviceTaint(b)
}
}
}
func SetObjectDefaults_ResourceSliceList(in *resourcev1beta2.ResourceSliceList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ResourceSlice(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package resource
import (
apiresource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) {
*out = *in
if in.ShareID != nil {
in, out := &in.ShareID, &out.ShareID
*out = new(string)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = new(runtime.RawExtension)
(*in).DeepCopyInto(*out)
}
if in.NetworkData != nil {
in, out := &in.NetworkData, &out.NetworkData
*out = new(NetworkDeviceData)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus.
func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus {
if in == nil {
return nil
}
out := new(AllocatedDeviceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
*out = *in
in.Devices.DeepCopyInto(&out.Devices)
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.AllocationTimestamp != nil {
in, out := &in.AllocationTimestamp, &out.AllocationTimestamp
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult.
func (in *AllocationResult) DeepCopy() *AllocationResult {
if in == nil {
return nil
}
out := new(AllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector.
func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector {
if in == nil {
return nil
}
out := new(CELDeviceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapacityRequestPolicy) DeepCopyInto(out *CapacityRequestPolicy) {
*out = *in
if in.Default != nil {
in, out := &in.Default, &out.Default
x := (*in).DeepCopy()
*out = &x
}
if in.ValidValues != nil {
in, out := &in.ValidValues, &out.ValidValues
*out = make([]apiresource.Quantity, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ValidRange != nil {
in, out := &in.ValidRange, &out.ValidRange
*out = new(CapacityRequestPolicyRange)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRequestPolicy.
func (in *CapacityRequestPolicy) DeepCopy() *CapacityRequestPolicy {
if in == nil {
return nil
}
out := new(CapacityRequestPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapacityRequestPolicyRange) DeepCopyInto(out *CapacityRequestPolicyRange) {
*out = *in
if in.Min != nil {
in, out := &in.Min, &out.Min
x := (*in).DeepCopy()
*out = &x
}
if in.Max != nil {
in, out := &in.Max, &out.Max
x := (*in).DeepCopy()
*out = &x
}
if in.Step != nil {
in, out := &in.Step, &out.Step
x := (*in).DeepCopy()
*out = &x
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRequestPolicyRange.
func (in *CapacityRequestPolicyRange) DeepCopy() *CapacityRequestPolicyRange {
if in == nil {
return nil
}
out := new(CapacityRequestPolicyRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapacityRequirements) DeepCopyInto(out *CapacityRequirements) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(map[QualifiedName]apiresource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityRequirements.
func (in *CapacityRequirements) DeepCopy() *CapacityRequirements {
if in == nil {
return nil
}
out := new(CapacityRequirements)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Counter) DeepCopyInto(out *Counter) {
*out = *in
out.Value = in.Value.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Counter.
func (in *Counter) DeepCopy() *Counter {
if in == nil {
return nil
}
out := new(Counter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterSet) DeepCopyInto(out *CounterSet) {
*out = *in
if in.Counters != nil {
in, out := &in.Counters, &out.Counters
*out = make(map[string]Counter, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CounterSet.
func (in *CounterSet) DeepCopy() *CounterSet {
if in == nil {
return nil
}
out := new(CounterSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Device) DeepCopyInto(out *Device) {
*out = *in
if in.Attributes != nil {
in, out := &in.Attributes, &out.Attributes
*out = make(map[QualifiedName]DeviceAttribute, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = make(map[QualifiedName]DeviceCapacity, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.ConsumesCounters != nil {
in, out := &in.ConsumesCounters, &out.ConsumesCounters
*out = make([]DeviceCounterConsumption, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeName != nil {
in, out := &in.NodeName, &out.NodeName
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.AllNodes != nil {
in, out := &in.AllNodes, &out.AllNodes
*out = new(bool)
**out = **in
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]DeviceTaint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.BindsToNode != nil {
in, out := &in.BindsToNode, &out.BindsToNode
*out = new(bool)
**out = **in
}
if in.BindingConditions != nil {
in, out := &in.BindingConditions, &out.BindingConditions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.BindingFailureConditions != nil {
in, out := &in.BindingFailureConditions, &out.BindingFailureConditions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowMultipleAllocations != nil {
in, out := &in.AllowMultipleAllocations, &out.AllowMultipleAllocations
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
func (in *Device) DeepCopy() *Device {
if in == nil {
return nil
}
out := new(Device)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]string, len(*in))
copy(*out, *in)
}
in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration.
func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration {
if in == nil {
return nil
}
out := new(DeviceAllocationConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) {
*out = *in
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]DeviceRequestAllocationResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = make([]DeviceAllocationConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult.
func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult {
if in == nil {
return nil
}
out := new(DeviceAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) {
*out = *in
if in.IntValue != nil {
in, out := &in.IntValue, &out.IntValue
*out = new(int64)
**out = **in
}
if in.BoolValue != nil {
in, out := &in.BoolValue, &out.BoolValue
*out = new(bool)
**out = **in
}
if in.StringValue != nil {
in, out := &in.StringValue, &out.StringValue
*out = new(string)
**out = **in
}
if in.VersionValue != nil {
in, out := &in.VersionValue, &out.VersionValue
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute.
func (in *DeviceAttribute) DeepCopy() *DeviceAttribute {
if in == nil {
return nil
}
out := new(DeviceAttribute)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceCapacity) DeepCopyInto(out *DeviceCapacity) {
*out = *in
out.Value = in.Value.DeepCopy()
if in.RequestPolicy != nil {
in, out := &in.RequestPolicy, &out.RequestPolicy
*out = new(CapacityRequestPolicy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceCapacity.
func (in *DeviceCapacity) DeepCopy() *DeviceCapacity {
if in == nil {
return nil
}
out := new(DeviceCapacity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]DeviceRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Constraints != nil {
in, out := &in.Constraints, &out.Constraints
*out = make([]DeviceConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = make([]DeviceClaimConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim.
func (in *DeviceClaim) DeepCopy() *DeviceClaim {
if in == nil {
return nil
}
out := new(DeviceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]string, len(*in))
copy(*out, *in)
}
in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration.
func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration {
if in == nil {
return nil
}
out := new(DeviceClaimConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClass) DeepCopyInto(out *DeviceClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass.
func (in *DeviceClass) DeepCopy() *DeviceClass {
if in == nil {
return nil
}
out := new(DeviceClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeviceClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) {
*out = *in
in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration.
func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration {
if in == nil {
return nil
}
out := new(DeviceClassConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DeviceClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList.
func (in *DeviceClassList) DeepCopy() *DeviceClassList {
if in == nil {
return nil
}
out := new(DeviceClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeviceClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]DeviceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = make([]DeviceClassConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtendedResourceName != nil {
in, out := &in.ExtendedResourceName, &out.ExtendedResourceName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec.
func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec {
if in == nil {
return nil
}
out := new(DeviceClassSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) {
*out = *in
if in.Opaque != nil {
in, out := &in.Opaque, &out.Opaque
*out = new(OpaqueDeviceConfiguration)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration.
func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration {
if in == nil {
return nil
}
out := new(DeviceConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MatchAttribute != nil {
in, out := &in.MatchAttribute, &out.MatchAttribute
*out = new(FullyQualifiedName)
**out = **in
}
if in.DistinctAttribute != nil {
in, out := &in.DistinctAttribute, &out.DistinctAttribute
*out = new(FullyQualifiedName)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint.
func (in *DeviceConstraint) DeepCopy() *DeviceConstraint {
if in == nil {
return nil
}
out := new(DeviceConstraint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceCounterConsumption) DeepCopyInto(out *DeviceCounterConsumption) {
*out = *in
if in.Counters != nil {
in, out := &in.Counters, &out.Counters
*out = make(map[string]Counter, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceCounterConsumption.
func (in *DeviceCounterConsumption) DeepCopy() *DeviceCounterConsumption {
if in == nil {
return nil
}
out := new(DeviceCounterConsumption)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) {
*out = *in
if in.Exactly != nil {
in, out := &in.Exactly, &out.Exactly
*out = new(ExactDeviceRequest)
(*in).DeepCopyInto(*out)
}
if in.FirstAvailable != nil {
in, out := &in.FirstAvailable, &out.FirstAvailable
*out = make([]DeviceSubRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest.
func (in *DeviceRequest) DeepCopy() *DeviceRequest {
if in == nil {
return nil
}
out := new(DeviceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) {
*out = *in
if in.AdminAccess != nil {
in, out := &in.AdminAccess, &out.AdminAccess
*out = new(bool)
**out = **in
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]DeviceToleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.BindingConditions != nil {
in, out := &in.BindingConditions, &out.BindingConditions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.BindingFailureConditions != nil {
in, out := &in.BindingFailureConditions, &out.BindingFailureConditions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ShareID != nil {
in, out := &in.ShareID, &out.ShareID
*out = new(types.UID)
**out = **in
}
if in.ConsumedCapacity != nil {
in, out := &in.ConsumedCapacity, &out.ConsumedCapacity
*out = make(map[QualifiedName]apiresource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult.
func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult {
if in == nil {
return nil
}
out := new(DeviceRequestAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) {
*out = *in
if in.CEL != nil {
in, out := &in.CEL, &out.CEL
*out = new(CELDeviceSelector)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector.
func (in *DeviceSelector) DeepCopy() *DeviceSelector {
if in == nil {
return nil
}
out := new(DeviceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceSubRequest) DeepCopyInto(out *DeviceSubRequest) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]DeviceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]DeviceToleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = new(CapacityRequirements)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSubRequest.
func (in *DeviceSubRequest) DeepCopy() *DeviceSubRequest {
if in == nil {
return nil
}
out := new(DeviceSubRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceTaint) DeepCopyInto(out *DeviceTaint) {
*out = *in
if in.TimeAdded != nil {
in, out := &in.TimeAdded, &out.TimeAdded
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaint.
func (in *DeviceTaint) DeepCopy() *DeviceTaint {
if in == nil {
return nil
}
out := new(DeviceTaint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceTaintRule) DeepCopyInto(out *DeviceTaintRule) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaintRule.
func (in *DeviceTaintRule) DeepCopy() *DeviceTaintRule {
if in == nil {
return nil
}
out := new(DeviceTaintRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeviceTaintRule) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceTaintRuleList) DeepCopyInto(out *DeviceTaintRuleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DeviceTaintRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaintRuleList.
func (in *DeviceTaintRuleList) DeepCopy() *DeviceTaintRuleList {
if in == nil {
return nil
}
out := new(DeviceTaintRuleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeviceTaintRuleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceTaintRuleSpec) DeepCopyInto(out *DeviceTaintRuleSpec) {
*out = *in
if in.DeviceSelector != nil {
in, out := &in.DeviceSelector, &out.DeviceSelector
*out = new(DeviceTaintSelector)
(*in).DeepCopyInto(*out)
}
in.Taint.DeepCopyInto(&out.Taint)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaintRuleSpec.
func (in *DeviceTaintRuleSpec) DeepCopy() *DeviceTaintRuleSpec {
if in == nil {
return nil
}
out := new(DeviceTaintRuleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceTaintSelector) DeepCopyInto(out *DeviceTaintSelector) {
*out = *in
if in.DeviceClassName != nil {
in, out := &in.DeviceClassName, &out.DeviceClassName
*out = new(string)
**out = **in
}
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(string)
**out = **in
}
if in.Pool != nil {
in, out := &in.Pool, &out.Pool
*out = new(string)
**out = **in
}
if in.Device != nil {
in, out := &in.Device, &out.Device
*out = new(string)
**out = **in
}
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]DeviceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaintSelector.
func (in *DeviceTaintSelector) DeepCopy() *DeviceTaintSelector {
if in == nil {
return nil
}
out := new(DeviceTaintSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceToleration) DeepCopyInto(out *DeviceToleration) {
*out = *in
if in.TolerationSeconds != nil {
in, out := &in.TolerationSeconds, &out.TolerationSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceToleration.
func (in *DeviceToleration) DeepCopy() *DeviceToleration {
if in == nil {
return nil
}
out := new(DeviceToleration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExactDeviceRequest) DeepCopyInto(out *ExactDeviceRequest) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]DeviceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdminAccess != nil {
in, out := &in.AdminAccess, &out.AdminAccess
*out = new(bool)
**out = **in
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]DeviceToleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = new(CapacityRequirements)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExactDeviceRequest.
func (in *ExactDeviceRequest) DeepCopy() *ExactDeviceRequest {
if in == nil {
return nil
}
out := new(ExactDeviceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) {
*out = *in
if in.IPs != nil {
in, out := &in.IPs, &out.IPs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData.
func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData {
if in == nil {
return nil
}
out := new(NetworkDeviceData)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) {
*out = *in
in.Parameters.DeepCopyInto(&out.Parameters)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration.
func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration {
if in == nil {
return nil
}
out := new(OpaqueDeviceConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
if in == nil {
return nil
}
out := new(ResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaim) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference.
func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference {
if in == nil {
return nil
}
out := new(ResourceClaimConsumerReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList.
func (in *ResourceClaimList) DeepCopy() *ResourceClaimList {
if in == nil {
return nil
}
out := new(ResourceClaimList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
*out = *in
in.Devices.DeepCopyInto(&out.Devices)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec.
func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec {
if in == nil {
return nil
}
out := new(ResourceClaimSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
*out = *in
if in.Allocation != nil {
in, out := &in.Allocation, &out.Allocation
*out = new(AllocationResult)
(*in).DeepCopyInto(*out)
}
if in.ReservedFor != nil {
in, out := &in.ReservedFor, &out.ReservedFor
*out = make([]ResourceClaimConsumerReference, len(*in))
copy(*out, *in)
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]AllocatedDeviceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus.
func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus {
if in == nil {
return nil
}
out := new(ResourceClaimStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate.
func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate {
if in == nil {
return nil
}
out := new(ResourceClaimTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaimTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList.
func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList {
if in == nil {
return nil
}
out := new(ResourceClaimTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec.
func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec {
if in == nil {
return nil
}
out := new(ResourceClaimTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcePool) DeepCopyInto(out *ResourcePool) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool.
func (in *ResourcePool) DeepCopy() *ResourcePool {
if in == nil {
return nil
}
out := new(ResourcePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
func (in *ResourceSlice) DeepCopy() *ResourceSlice {
if in == nil {
return nil
}
out := new(ResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
if in == nil {
return nil
}
out := new(ResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) {
*out = *in
out.Pool = in.Pool
if in.NodeName != nil {
in, out := &in.NodeName, &out.NodeName
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.AllNodes != nil {
in, out := &in.AllNodes, &out.AllNodes
*out = new(bool)
**out = **in
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]Device, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PerDeviceNodeSelection != nil {
in, out := &in.PerDeviceNodeSelection, &out.PerDeviceNodeSelection
*out = new(bool)
**out = **in
}
if in.SharedCounters != nil {
in, out := &in.SharedCounters, &out.SharedCounters
*out = make([]CounterSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec.
func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec {
if in == nil {
return nil
}
out := new(ResourceSliceSpec)
in.DeepCopyInto(out)
return out
}
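// Illustrative sketch, not part of the generated file: DeepCopyInto
// allocates fresh memory for pointer fields, so mutating a copy never
// aliases the original. The internal import path is assumed to be this
// package's path, k8s.io/kubernetes/pkg/apis/resource.
package main
import (
	"fmt"
	resource "k8s.io/kubernetes/pkg/apis/resource"
)
func main() {
	seconds := int64(5)
	orig := &resource.DeviceToleration{TolerationSeconds: &seconds}
	cp := orig.DeepCopy()
	*cp.TolerationSeconds = 30
	// Still 5: DeepCopy duplicated the pointed-to value, not the pointer.
	fmt.Println(*orig.TolerationSeconds)
}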
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
"sigs.k8s.io/randfill"
)
// Funcs returns the fuzzer functions for the scheduling api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(s *scheduling.PriorityClass, c randfill.Continue) {
c.FillNoCustom(s)
if s.PreemptionPolicy == nil {
preemptLowerPriority := core.PreemptLowerPriority
s.PreemptionPolicy = &preemptLowerPriority
}
},
}
}
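// Illustrative sketch, assuming randfill keeps the gofuzz-style
// New().Funcs(...).Fill API. The zero CodecFactory is sufficient here
// because the custom func above ignores it; the fuzzer import path is
// assumed to be this package's path, k8s.io/kubernetes/pkg/apis/scheduling/fuzzer.
package main
import (
	"fmt"
	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/apis/scheduling"
	schedulingfuzzer "k8s.io/kubernetes/pkg/apis/scheduling/fuzzer"
	"sigs.k8s.io/randfill"
)
func main() {
	var codecs runtimeserializer.CodecFactory
	f := randfill.New().Funcs(schedulingfuzzer.Funcs(codecs)...)
	pc := scheduling.PriorityClass{}
	f.Fill(&pc)
	// The custom func guarantees a non-nil PreemptionPolicy on every fill.
	fmt.Println(pc.PreemptionPolicy != nil)
}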
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the scheduling API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/apis/scheduling/v1"
"k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1"
"k8s.io/kubernetes/pkg/apis/scheduling/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(scheduling.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
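// Illustrative sketch, not part of the package: Install can also target a
// fresh scheme instead of the global legacyscheme, which is what tests and
// standalone tools typically do. The install import path is assumed to be
// this package's path, k8s.io/kubernetes/pkg/apis/scheduling/install.
package main
import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/scheduling/install"
)
func main() {
	scheme := runtime.NewScheme()
	install.Install(scheme)
	// scheme now recognizes the internal, v1, v1beta1, and v1alpha1
	// scheduling types, with v1 as the highest-priority version.
}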
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "scheduling.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PriorityClass{},
&PriorityClassList{},
)
return nil
}
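// Illustrative sketch, not part of the package: Kind and Resource turn
// unqualified names into group-qualified identifiers.
package main
import (
	"fmt"
	"k8s.io/kubernetes/pkg/apis/scheduling"
)
func main() {
	fmt.Println(scheduling.Kind("PriorityClass"))       // PriorityClass.scheduling.k8s.io
	fmt.Println(scheduling.Resource("priorityclasses")) // priorityclasses.scheduling.k8s.io
}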
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_PriorityClass defaults the PreemptionPolicy of a PriorityClass
// to PreemptLowerPriority when it is unset.
func SetDefaults_PriorityClass(obj *v1.PriorityClass) {
if obj.PreemptionPolicy == nil {
preemptLowerPriority := apiv1.PreemptLowerPriority
obj.PreemptionPolicy = &preemptLowerPriority
}
}
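// Illustrative sketch, not part of the package: a nil PreemptionPolicy is
// replaced with PreemptLowerPriority, while an explicitly set policy is
// left untouched.
package main
import (
	"fmt"
	apiv1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	v1 "k8s.io/kubernetes/pkg/apis/scheduling/v1"
)
func main() {
	pc := &schedulingv1.PriorityClass{}
	v1.SetDefaults_PriorityClass(pc)
	fmt.Println(*pc.PreemptionPolicy == apiv1.PreemptLowerPriority) // true
}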
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/scheduling"
)
// systemPriorityClasses defines the system priority classes that are auto-created at cluster bootstrapping.
// Our API validation logic ensures that any priority class that has a system prefix or whose value
// is higher than HighestUserDefinablePriority matches one of these SystemPriorityClasses.
var systemPriorityClasses = []*v1.PriorityClass{
{
ObjectMeta: metav1.ObjectMeta{
Name: scheduling.SystemNodeCritical,
},
Value: scheduling.SystemCriticalPriority + 1000,
Description: "Used for system critical pods that must not be moved from their current node.",
},
{
ObjectMeta: metav1.ObjectMeta{
Name: scheduling.SystemClusterCritical,
},
Value: scheduling.SystemCriticalPriority,
Description: "Used for system critical pods that must run in the cluster, but can be moved to another node if necessary.",
},
}
// SystemPriorityClasses returns a deep copy of the list of system priority classes.
// If only the names are needed, use SystemPriorityClassNames().
func SystemPriorityClasses() []*v1.PriorityClass {
retval := make([]*v1.PriorityClass, 0, len(systemPriorityClasses))
for _, c := range systemPriorityClasses {
retval = append(retval, c.DeepCopy())
}
return retval
}
// SystemPriorityClassNames returns the names of system priority classes.
func SystemPriorityClassNames() []string {
retval := make([]string, 0, len(systemPriorityClasses))
for _, c := range systemPriorityClasses {
retval = append(retval, c.Name)
}
return retval
}
// IsKnownSystemPriorityClass returns true if one of the system priority classes exactly
// matches the given name, value, and globalDefault; otherwise it returns false and an error.
func IsKnownSystemPriorityClass(name string, value int32, globalDefault bool) (bool, error) {
for _, spc := range systemPriorityClasses {
if spc.Name == name {
if spc.Value != value {
return false, fmt.Errorf("value of %v PriorityClass must be %v", spc.Name, spc.Value)
}
if spc.GlobalDefault != globalDefault {
return false, fmt.Errorf("globalDefault of %v PriorityClass must be %v", spc.Name, spc.GlobalDefault)
}
return true, nil
}
}
return false, fmt.Errorf("%v is not a known system priority class", name)
}
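// Illustrative sketch, not part of the package: checking a candidate class
// against the built-in list. A known name with a mismatched value or
// globalDefault yields an error rather than a silent false.
package main
import (
	"fmt"
	"k8s.io/kubernetes/pkg/apis/scheduling"
	v1 "k8s.io/kubernetes/pkg/apis/scheduling/v1"
)
func main() {
	ok, err := v1.IsKnownSystemPriorityClass(scheduling.SystemNodeCritical, scheduling.SystemCriticalPriority+1000, false)
	fmt.Println(ok, err) // true <nil>
	_, err = v1.IsKnownSystemPriorityClass(scheduling.SystemNodeCritical, 0, false)
	fmt.Println(err != nil) // true: the value does not match
}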
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "scheduling.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &schedulingv1.SchemeBuilder
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
scheduling "k8s.io/kubernetes/pkg/apis/scheduling"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*schedulingv1.PriorityClass)(nil), (*scheduling.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityClass_To_scheduling_PriorityClass(a.(*schedulingv1.PriorityClass), b.(*scheduling.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClass)(nil), (*schedulingv1.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClass_To_v1_PriorityClass(a.(*scheduling.PriorityClass), b.(*schedulingv1.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*schedulingv1.PriorityClassList)(nil), (*scheduling.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PriorityClassList_To_scheduling_PriorityClassList(a.(*schedulingv1.PriorityClassList), b.(*scheduling.PriorityClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClassList)(nil), (*schedulingv1.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClassList_To_v1_PriorityClassList(a.(*scheduling.PriorityClassList), b.(*schedulingv1.PriorityClassList), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_v1_PriorityClass_To_scheduling_PriorityClass is an autogenerated conversion function.
func Convert_v1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
return autoConvert_v1_PriorityClass_To_scheduling_PriorityClass(in, out, s)
}
func autoConvert_scheduling_PriorityClass_To_v1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*corev1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_scheduling_PriorityClass_To_v1_PriorityClass is an autogenerated conversion function.
func Convert_scheduling_PriorityClass_To_v1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1.PriorityClass, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClass_To_v1_PriorityClass(in, out, s)
}
func autoConvert_v1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]scheduling.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_PriorityClassList_To_scheduling_PriorityClassList is an autogenerated conversion function.
func Convert_v1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
return autoConvert_v1_PriorityClassList_To_scheduling_PriorityClassList(in, out, s)
}
func autoConvert_scheduling_PriorityClassList_To_v1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]schedulingv1.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_scheduling_PriorityClassList_To_v1_PriorityClassList is an autogenerated conversion function.
func Convert_scheduling_PriorityClassList_To_v1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1.PriorityClassList, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClassList_To_v1_PriorityClassList(in, out, s)
}
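// Illustrative sketch, not part of the generated file: with the conversions
// registered, a scheme converts between the external v1 and internal
// representations in either direction.
package main
import (
	"fmt"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/scheduling"
	v1 "k8s.io/kubernetes/pkg/apis/scheduling/v1"
)
func main() {
	scheme := runtime.NewScheme()
	if err := v1.RegisterConversions(scheme); err != nil {
		panic(err)
	}
	in := &schedulingv1.PriorityClass{Value: 1000}
	out := &scheduling.PriorityClass{}
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Value) // 1000
}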
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
schedulingv1 "k8s.io/api/scheduling/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&schedulingv1.PriorityClass{}, func(obj interface{}) { SetObjectDefaults_PriorityClass(obj.(*schedulingv1.PriorityClass)) })
scheme.AddTypeDefaultingFunc(&schedulingv1.PriorityClassList{}, func(obj interface{}) { SetObjectDefaults_PriorityClassList(obj.(*schedulingv1.PriorityClassList)) })
return nil
}
func SetObjectDefaults_PriorityClass(in *schedulingv1.PriorityClass) {
SetDefaults_PriorityClass(in)
}
func SetObjectDefaults_PriorityClassList(in *schedulingv1.PriorityClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityClass(a)
}
}
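// Illustrative sketch, not part of the generated file: after
// RegisterDefaults, calling scheme.Default on a list applies the covering
// defaulter to every item.
package main
import (
	"fmt"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/runtime"
	v1 "k8s.io/kubernetes/pkg/apis/scheduling/v1"
)
func main() {
	scheme := runtime.NewScheme()
	if err := v1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	list := &schedulingv1.PriorityClassList{Items: []schedulingv1.PriorityClass{{}, {}}}
	scheme.Default(list)
	fmt.Println(*list.Items[0].PreemptionPolicy, *list.Items[1].PreemptionPolicy)
}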
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_PriorityClass defaults the PreemptionPolicy of a PriorityClass
// to PreemptLowerPriority when it is unset.
func SetDefaults_PriorityClass(obj *v1alpha1.PriorityClass) {
if obj.PreemptionPolicy == nil {
preemptLowerPriority := apiv1.PreemptLowerPriority
obj.PreemptionPolicy = &preemptLowerPriority
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "scheduling.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &schedulingv1alpha1.SchemeBuilder
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
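// Illustrative sketch: Resource qualifies a plain, lowercase resource name
// with the scheduling group, which is how callers build GroupResource values
// for API errors and RBAC checks. The resource name is an assumption here.
package main
import (
"fmt"
schedulingv1alpha1 "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1"
)
func main() {
gr := schedulingv1alpha1.Resource("priorityclasses")
fmt.Println(gr) // priorityclasses.scheduling.k8s.io
}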
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
scheduling "k8s.io/kubernetes/pkg/apis/scheduling"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*schedulingv1alpha1.PriorityClass)(nil), (*scheduling.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PriorityClass_To_scheduling_PriorityClass(a.(*schedulingv1alpha1.PriorityClass), b.(*scheduling.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClass)(nil), (*schedulingv1alpha1.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClass_To_v1alpha1_PriorityClass(a.(*scheduling.PriorityClass), b.(*schedulingv1alpha1.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*schedulingv1alpha1.PriorityClassList)(nil), (*scheduling.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PriorityClassList_To_scheduling_PriorityClassList(a.(*schedulingv1alpha1.PriorityClassList), b.(*scheduling.PriorityClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClassList)(nil), (*schedulingv1alpha1.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClassList_To_v1alpha1_PriorityClassList(a.(*scheduling.PriorityClassList), b.(*schedulingv1alpha1.PriorityClassList), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1alpha1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_v1alpha1_PriorityClass_To_scheduling_PriorityClass is an autogenerated conversion function.
func Convert_v1alpha1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1alpha1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
return autoConvert_v1alpha1_PriorityClass_To_scheduling_PriorityClass(in, out, s)
}
func autoConvert_scheduling_PriorityClass_To_v1alpha1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1alpha1.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*v1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_scheduling_PriorityClass_To_v1alpha1_PriorityClass is an autogenerated conversion function.
func Convert_scheduling_PriorityClass_To_v1alpha1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1alpha1.PriorityClass, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClass_To_v1alpha1_PriorityClass(in, out, s)
}
func autoConvert_v1alpha1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1alpha1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
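// The element types have identical memory layouts, so conversion-gen
// reinterprets the whole slice in place via unsafe.Pointer instead of
// converting each item; it only emits this form when the layouts match.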
out.Items = *(*[]scheduling.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_PriorityClassList_To_scheduling_PriorityClassList is an autogenerated conversion function.
func Convert_v1alpha1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1alpha1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
return autoConvert_v1alpha1_PriorityClassList_To_scheduling_PriorityClassList(in, out, s)
}
func autoConvert_scheduling_PriorityClassList_To_v1alpha1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1alpha1.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]schedulingv1alpha1.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_scheduling_PriorityClassList_To_v1alpha1_PriorityClassList is an autogenerated conversion function.
func Convert_scheduling_PriorityClassList_To_v1alpha1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1alpha1.PriorityClassList, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClassList_To_v1alpha1_PriorityClassList(in, out, s)
}
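// Illustrative sketch: once RegisterConversions has run, a scheme converts
// between the versioned and internal representations in either direction.
// It assumes both the internal scheduling package and this v1alpha1 package
// have been added to the scheme, as the group's install package would do.
package main
import (
"fmt"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
scheduling "k8s.io/kubernetes/pkg/apis/scheduling"
apischedulingv1alpha1 "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1"
)
func main() {
scheme := runtime.NewScheme()
utilruntime.Must(scheduling.AddToScheme(scheme))
utilruntime.Must(apischedulingv1alpha1.AddToScheme(scheme))
in := &schedulingv1alpha1.PriorityClass{Value: 1000, Description: "example"}
out := &scheduling.PriorityClass{}
// Convert dispatches to Convert_v1alpha1_PriorityClass_To_scheduling_PriorityClass.
utilruntime.Must(scheme.Convert(in, out, nil))
fmt.Println(out.Value, out.Description) // 1000 example
}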
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&schedulingv1alpha1.PriorityClass{}, func(obj interface{}) { SetObjectDefaults_PriorityClass(obj.(*schedulingv1alpha1.PriorityClass)) })
scheme.AddTypeDefaultingFunc(&schedulingv1alpha1.PriorityClassList{}, func(obj interface{}) {
SetObjectDefaults_PriorityClassList(obj.(*schedulingv1alpha1.PriorityClassList))
})
return nil
}
func SetObjectDefaults_PriorityClass(in *schedulingv1alpha1.PriorityClass) {
SetDefaults_PriorityClass(in)
}
func SetObjectDefaults_PriorityClassList(in *schedulingv1alpha1.PriorityClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityClass(a)
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/api/scheduling/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_PriorityClass sets defaults for PriorityClass objects.
func SetDefaults_PriorityClass(obj *v1beta1.PriorityClass) {
if obj.PreemptionPolicy == nil {
preemptLowerPriority := apiv1.PreemptLowerPriority
obj.PreemptionPolicy = &preemptLowerPriority
}
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "scheduling.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &schedulingv1beta1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
scheduling "k8s.io/kubernetes/pkg/apis/scheduling"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*schedulingv1beta1.PriorityClass)(nil), (*scheduling.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityClass_To_scheduling_PriorityClass(a.(*schedulingv1beta1.PriorityClass), b.(*scheduling.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClass)(nil), (*schedulingv1beta1.PriorityClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClass_To_v1beta1_PriorityClass(a.(*scheduling.PriorityClass), b.(*schedulingv1beta1.PriorityClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*schedulingv1beta1.PriorityClassList)(nil), (*scheduling.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_PriorityClassList_To_scheduling_PriorityClassList(a.(*schedulingv1beta1.PriorityClassList), b.(*scheduling.PriorityClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*scheduling.PriorityClassList)(nil), (*schedulingv1beta1.PriorityClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_scheduling_PriorityClassList_To_v1beta1_PriorityClassList(a.(*scheduling.PriorityClassList), b.(*schedulingv1beta1.PriorityClassList), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1beta1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1beta1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*core.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_v1beta1_PriorityClass_To_scheduling_PriorityClass is an autogenerated conversion function.
func Convert_v1beta1_PriorityClass_To_scheduling_PriorityClass(in *schedulingv1beta1.PriorityClass, out *scheduling.PriorityClass, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityClass_To_scheduling_PriorityClass(in, out, s)
}
func autoConvert_scheduling_PriorityClass_To_v1beta1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1beta1.PriorityClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Value = in.Value
out.GlobalDefault = in.GlobalDefault
out.Description = in.Description
out.PreemptionPolicy = (*v1.PreemptionPolicy)(unsafe.Pointer(in.PreemptionPolicy))
return nil
}
// Convert_scheduling_PriorityClass_To_v1beta1_PriorityClass is an autogenerated conversion function.
func Convert_scheduling_PriorityClass_To_v1beta1_PriorityClass(in *scheduling.PriorityClass, out *schedulingv1beta1.PriorityClass, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClass_To_v1beta1_PriorityClass(in, out, s)
}
func autoConvert_v1beta1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1beta1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]scheduling.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_PriorityClassList_To_scheduling_PriorityClassList is an autogenerated conversion function.
func Convert_v1beta1_PriorityClassList_To_scheduling_PriorityClassList(in *schedulingv1beta1.PriorityClassList, out *scheduling.PriorityClassList, s conversion.Scope) error {
return autoConvert_v1beta1_PriorityClassList_To_scheduling_PriorityClassList(in, out, s)
}
func autoConvert_scheduling_PriorityClassList_To_v1beta1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1beta1.PriorityClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]schedulingv1beta1.PriorityClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_scheduling_PriorityClassList_To_v1beta1_PriorityClassList is an autogenerated conversion function.
func Convert_scheduling_PriorityClassList_To_v1beta1_PriorityClassList(in *scheduling.PriorityClassList, out *schedulingv1beta1.PriorityClassList, s conversion.Scope) error {
return autoConvert_scheduling_PriorityClassList_To_v1beta1_PriorityClassList(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&schedulingv1beta1.PriorityClass{}, func(obj interface{}) { SetObjectDefaults_PriorityClass(obj.(*schedulingv1beta1.PriorityClass)) })
scheme.AddTypeDefaultingFunc(&schedulingv1beta1.PriorityClassList{}, func(obj interface{}) { SetObjectDefaults_PriorityClassList(obj.(*schedulingv1beta1.PriorityClassList)) })
return nil
}
func SetObjectDefaults_PriorityClass(in *schedulingv1beta1.PriorityClass) {
SetDefaults_PriorityClass(in)
}
func SetObjectDefaults_PriorityClassList(in *schedulingv1beta1.PriorityClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PriorityClass(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package scheduling
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.PreemptionPolicy != nil {
in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
*out = new(core.PreemptionPolicy)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass.
func (in *PriorityClass) DeepCopy() *PriorityClass {
if in == nil {
return nil
}
out := new(PriorityClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PriorityClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList.
func (in *PriorityClassList) DeepCopy() *PriorityClassList {
if in == nil {
return nil
}
out := new(PriorityClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PriorityClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
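// Illustrative sketch: DeepCopy returns a fully independent object, so
// mutating a pointer field on the copy leaves the original untouched.
package main
import (
"fmt"
core "k8s.io/kubernetes/pkg/apis/core"
scheduling "k8s.io/kubernetes/pkg/apis/scheduling"
)
func main() {
policy := core.PreemptNever
orig := &scheduling.PriorityClass{PreemptionPolicy: &policy}
cp := orig.DeepCopy()
// The copy owns its own PreemptionPolicy pointer.
*cp.PreemptionPolicy = core.PreemptLowerPriority
fmt.Println(*orig.PreemptionPolicy, *cp.PreemptionPolicy) // Never PreemptLowerPriority
}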
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/storage"
)
// Funcs returns the fuzzer functions for the storage API group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *storage.StorageClass, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimDelete, api.PersistentVolumeReclaimRetain}
obj.ReclaimPolicy = &reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
bindingModes := []storage.VolumeBindingMode{storage.VolumeBindingImmediate, storage.VolumeBindingWaitForFirstConsumer}
obj.VolumeBindingMode = &bindingModes[c.Rand.Intn(len(bindingModes))]
},
func(obj *storage.CSIDriver, c randfill.Continue) {
c.FillNoCustom(obj) // fuzz self without calling this function again
// Custom fuzzing for volume modes.
switch c.Rand.Intn(7) {
case 0:
obj.Spec.VolumeLifecycleModes = nil
case 1:
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{}
case 2:
// Invalid mode.
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecycleMode(fmt.Sprintf("%d", c.Rand.Int31())),
}
case 3:
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecyclePersistent,
}
case 4:
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecycleEphemeral,
}
case 5:
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecyclePersistent,
storage.VolumeLifecycleEphemeral,
}
case 6:
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecycleEphemeral,
storage.VolumeLifecyclePersistent,
}
}
// match defaulting
if obj.Spec.AttachRequired == nil {
obj.Spec.AttachRequired = new(bool)
*(obj.Spec.AttachRequired) = true
}
if obj.Spec.PodInfoOnMount == nil {
obj.Spec.PodInfoOnMount = new(bool)
*(obj.Spec.PodInfoOnMount) = false
}
if obj.Spec.StorageCapacity == nil {
obj.Spec.StorageCapacity = new(bool)
*(obj.Spec.StorageCapacity) = false
}
if obj.Spec.FSGroupPolicy == nil {
obj.Spec.FSGroupPolicy = new(storage.FSGroupPolicy)
*obj.Spec.FSGroupPolicy = storage.ReadWriteOnceWithFSTypeFSGroupPolicy
}
if obj.Spec.RequiresRepublish == nil {
obj.Spec.RequiresRepublish = new(bool)
*(obj.Spec.RequiresRepublish) = false
}
if len(obj.Spec.VolumeLifecycleModes) == 0 {
obj.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
storage.VolumeLifecyclePersistent,
}
}
if obj.Spec.SELinuxMount == nil {
obj.Spec.SELinuxMount = new(bool)
*(obj.Spec.SELinuxMount) = false
}
},
}
}
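// Illustrative sketch, assuming randfill keeps the gofuzz-style API it is
// derived from (New/Funcs/Fill): wiring the custom Funcs above into a filler
// and generating a randomized StorageClass. These particular funcs ignore
// the codec factory, so a zero value is passed for it.
package main
import (
"fmt"
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/storage"
storagefuzzer "k8s.io/kubernetes/pkg/apis/storage/fuzzer"
)
func main() {
filler := randfill.New().Funcs(storagefuzzer.Funcs(runtimeserializer.CodecFactory{})...)
obj := &storage.StorageClass{}
filler.Fill(obj)
// The custom func always sets these two pointers, so they are safe to deref.
fmt.Println(*obj.ReclaimPolicy, *obj.VolumeBindingMode)
}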
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the storage API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/apis/storage/v1"
"k8s.io/kubernetes/pkg/apis/storage/v1alpha1"
"k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(storage.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}
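// Illustrative sketch: installing the group into a fresh scheme and reading
// back the version priority configured above; v1 is expected to come first.
package main
import (
"fmt"
runtime "k8s.io/apimachinery/pkg/runtime"
storageinstall "k8s.io/kubernetes/pkg/apis/storage/install"
)
func main() {
scheme := runtime.NewScheme()
storageinstall.Install(scheme)
for _, gv := range scheme.PrioritizedVersionsForGroup("storage.k8s.io") {
fmt.Println(gv.Version) // v1, then v1beta1, then v1alpha1
}
}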
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "storage.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&StorageClass{},
&StorageClassList{},
&VolumeAttachment{},
&VolumeAttachmentList{},
&CSINode{},
&CSINodeList{},
&CSIDriver{},
&CSIDriverList{},
&CSIStorageCapacity{},
&CSIStorageCapacityList{},
&VolumeAttributesClass{},
&VolumeAttributesClassList{},
)
return nil
}
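// Illustrative sketch: Kind and Resource qualify plain names with the storage
// group; the kind and resource strings below are assumptions for the example.
package main
import (
"fmt"
"k8s.io/kubernetes/pkg/apis/storage"
)
func main() {
fmt.Println(storage.Kind("StorageClass"))       // StorageClass.storage.k8s.io
fmt.Println(storage.Resource("storageclasses")) // storageclasses.storage.k8s.io
}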
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_StorageClass(obj *storagev1.StorageClass) {
if obj.ReclaimPolicy == nil {
obj.ReclaimPolicy = new(v1.PersistentVolumeReclaimPolicy)
*obj.ReclaimPolicy = v1.PersistentVolumeReclaimDelete
}
if obj.VolumeBindingMode == nil {
obj.VolumeBindingMode = new(storagev1.VolumeBindingMode)
*obj.VolumeBindingMode = storagev1.VolumeBindingImmediate
}
}
func SetDefaults_CSIDriver(obj *storagev1.CSIDriver) {
if obj.Spec.AttachRequired == nil {
obj.Spec.AttachRequired = new(bool)
*(obj.Spec.AttachRequired) = true
}
if obj.Spec.PodInfoOnMount == nil {
obj.Spec.PodInfoOnMount = new(bool)
*(obj.Spec.PodInfoOnMount) = false
}
if obj.Spec.StorageCapacity == nil {
obj.Spec.StorageCapacity = new(bool)
*(obj.Spec.StorageCapacity) = false
}
if obj.Spec.FSGroupPolicy == nil {
obj.Spec.FSGroupPolicy = new(storagev1.FSGroupPolicy)
*obj.Spec.FSGroupPolicy = storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
}
if len(obj.Spec.VolumeLifecycleModes) == 0 {
obj.Spec.VolumeLifecycleModes = append(obj.Spec.VolumeLifecycleModes, storagev1.VolumeLifecyclePersistent)
}
if obj.Spec.RequiresRepublish == nil {
obj.Spec.RequiresRepublish = new(bool)
*(obj.Spec.RequiresRepublish) = false
}
if obj.Spec.SELinuxMount == nil && utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
obj.Spec.SELinuxMount = new(bool)
*(obj.Spec.SELinuxMount) = false
}
}
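// Illustrative sketch: on an empty spec, SetDefaults_CSIDriver fills every
// nil field with its documented default. SELinuxMount is only defaulted while
// its feature gate is enabled, so it is not asserted here.
package main
import (
"fmt"
storagev1 "k8s.io/api/storage/v1"
apistoragev1 "k8s.io/kubernetes/pkg/apis/storage/v1"
)
func main() {
d := &storagev1.CSIDriver{}
apistoragev1.SetDefaults_CSIDriver(d)
fmt.Println(*d.Spec.AttachRequired, *d.Spec.PodInfoOnMount) // true false
fmt.Println(d.Spec.VolumeLifecycleModes)                    // [Persistent]
}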
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "storage.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &storagev1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
storage "k8s.io/kubernetes/pkg/apis/storage"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*storagev1.CSIDriver)(nil), (*storage.CSIDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIDriver_To_storage_CSIDriver(a.(*storagev1.CSIDriver), b.(*storage.CSIDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriver)(nil), (*storagev1.CSIDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriver_To_v1_CSIDriver(a.(*storage.CSIDriver), b.(*storagev1.CSIDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSIDriverList)(nil), (*storage.CSIDriverList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIDriverList_To_storage_CSIDriverList(a.(*storagev1.CSIDriverList), b.(*storage.CSIDriverList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriverList)(nil), (*storagev1.CSIDriverList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriverList_To_v1_CSIDriverList(a.(*storage.CSIDriverList), b.(*storagev1.CSIDriverList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSIDriverSpec)(nil), (*storage.CSIDriverSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIDriverSpec_To_storage_CSIDriverSpec(a.(*storagev1.CSIDriverSpec), b.(*storage.CSIDriverSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriverSpec)(nil), (*storagev1.CSIDriverSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriverSpec_To_v1_CSIDriverSpec(a.(*storage.CSIDriverSpec), b.(*storagev1.CSIDriverSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSINode)(nil), (*storage.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSINode_To_storage_CSINode(a.(*storagev1.CSINode), b.(*storage.CSINode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINode)(nil), (*storagev1.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINode_To_v1_CSINode(a.(*storage.CSINode), b.(*storagev1.CSINode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSINodeDriver)(nil), (*storage.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSINodeDriver_To_storage_CSINodeDriver(a.(*storagev1.CSINodeDriver), b.(*storage.CSINodeDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeDriver)(nil), (*storagev1.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeDriver_To_v1_CSINodeDriver(a.(*storage.CSINodeDriver), b.(*storagev1.CSINodeDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSINodeList)(nil), (*storage.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSINodeList_To_storage_CSINodeList(a.(*storagev1.CSINodeList), b.(*storage.CSINodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeList)(nil), (*storagev1.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeList_To_v1_CSINodeList(a.(*storage.CSINodeList), b.(*storagev1.CSINodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSINodeSpec)(nil), (*storage.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(a.(*storagev1.CSINodeSpec), b.(*storage.CSINodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeSpec)(nil), (*storagev1.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(a.(*storage.CSINodeSpec), b.(*storagev1.CSINodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSIStorageCapacity)(nil), (*storage.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIStorageCapacity_To_storage_CSIStorageCapacity(a.(*storagev1.CSIStorageCapacity), b.(*storage.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacity)(nil), (*storagev1.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacity_To_v1_CSIStorageCapacity(a.(*storage.CSIStorageCapacity), b.(*storagev1.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.CSIStorageCapacityList)(nil), (*storage.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(a.(*storagev1.CSIStorageCapacityList), b.(*storage.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacityList)(nil), (*storagev1.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacityList_To_v1_CSIStorageCapacityList(a.(*storage.CSIStorageCapacityList), b.(*storagev1.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.StorageClass)(nil), (*storage.StorageClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StorageClass_To_storage_StorageClass(a.(*storagev1.StorageClass), b.(*storage.StorageClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.StorageClass)(nil), (*storagev1.StorageClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_StorageClass_To_v1_StorageClass(a.(*storage.StorageClass), b.(*storagev1.StorageClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.StorageClassList)(nil), (*storage.StorageClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StorageClassList_To_storage_StorageClassList(a.(*storagev1.StorageClassList), b.(*storage.StorageClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.StorageClassList)(nil), (*storagev1.StorageClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_StorageClassList_To_v1_StorageClassList(a.(*storage.StorageClassList), b.(*storagev1.StorageClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.TokenRequest)(nil), (*storage.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TokenRequest_To_storage_TokenRequest(a.(*storagev1.TokenRequest), b.(*storage.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.TokenRequest)(nil), (*storagev1.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_TokenRequest_To_v1_TokenRequest(a.(*storage.TokenRequest), b.(*storagev1.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttachment)(nil), (*storage.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(a.(*storagev1.VolumeAttachment), b.(*storage.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachment)(nil), (*storagev1.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(a.(*storage.VolumeAttachment), b.(*storagev1.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttachmentList)(nil), (*storage.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(a.(*storagev1.VolumeAttachmentList), b.(*storage.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentList)(nil), (*storagev1.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(a.(*storage.VolumeAttachmentList), b.(*storagev1.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttachmentSource)(nil), (*storage.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(a.(*storagev1.VolumeAttachmentSource), b.(*storage.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSource)(nil), (*storagev1.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(a.(*storage.VolumeAttachmentSource), b.(*storagev1.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttachmentSpec)(nil), (*storage.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(a.(*storagev1.VolumeAttachmentSpec), b.(*storage.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSpec)(nil), (*storagev1.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(a.(*storage.VolumeAttachmentSpec), b.(*storagev1.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttachmentStatus)(nil), (*storage.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(a.(*storagev1.VolumeAttachmentStatus), b.(*storage.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentStatus)(nil), (*storagev1.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(a.(*storage.VolumeAttachmentStatus), b.(*storagev1.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttributesClass)(nil), (*storage.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttributesClass_To_storage_VolumeAttributesClass(a.(*storagev1.VolumeAttributesClass), b.(*storage.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClass)(nil), (*storagev1.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClass_To_v1_VolumeAttributesClass(a.(*storage.VolumeAttributesClass), b.(*storagev1.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeAttributesClassList)(nil), (*storage.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(a.(*storagev1.VolumeAttributesClassList), b.(*storage.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClassList)(nil), (*storagev1.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClassList_To_v1_VolumeAttributesClassList(a.(*storage.VolumeAttributesClassList), b.(*storagev1.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeError)(nil), (*storage.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeError_To_storage_VolumeError(a.(*storagev1.VolumeError), b.(*storage.VolumeError), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeError)(nil), (*storagev1.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeError_To_v1_VolumeError(a.(*storage.VolumeError), b.(*storagev1.VolumeError), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1.VolumeNodeResources)(nil), (*storage.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(a.(*storagev1.VolumeNodeResources), b.(*storage.VolumeNodeResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeNodeResources)(nil), (*storagev1.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(a.(*storage.VolumeNodeResources), b.(*storagev1.VolumeNodeResources), scope)
}); err != nil {
return err
}
return nil
}
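// Note: AddGeneratedConversionFunc registers these as low-priority,
// generator-owned conversions; a hand-written conversion for the same type
// pair, registered with AddConversionFunc, takes precedence over them.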
func autoConvert_v1_CSIDriver_To_storage_CSIDriver(in *storagev1.CSIDriver, out *storage.CSIDriver, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_CSIDriverSpec_To_storage_CSIDriverSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_CSIDriver_To_storage_CSIDriver is an autogenerated conversion function.
func Convert_v1_CSIDriver_To_storage_CSIDriver(in *storagev1.CSIDriver, out *storage.CSIDriver, s conversion.Scope) error {
return autoConvert_v1_CSIDriver_To_storage_CSIDriver(in, out, s)
}
func autoConvert_storage_CSIDriver_To_v1_CSIDriver(in *storage.CSIDriver, out *storagev1.CSIDriver, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_CSIDriverSpec_To_v1_CSIDriverSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_storage_CSIDriver_To_v1_CSIDriver is an autogenerated conversion function.
func Convert_storage_CSIDriver_To_v1_CSIDriver(in *storage.CSIDriver, out *storagev1.CSIDriver, s conversion.Scope) error {
return autoConvert_storage_CSIDriver_To_v1_CSIDriver(in, out, s)
}
func autoConvert_v1_CSIDriverList_To_storage_CSIDriverList(in *storagev1.CSIDriverList, out *storage.CSIDriverList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storage.CSIDriver, len(*in))
for i := range *in {
if err := Convert_v1_CSIDriver_To_storage_CSIDriver(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_CSIDriverList_To_storage_CSIDriverList is an autogenerated conversion function.
func Convert_v1_CSIDriverList_To_storage_CSIDriverList(in *storagev1.CSIDriverList, out *storage.CSIDriverList, s conversion.Scope) error {
return autoConvert_v1_CSIDriverList_To_storage_CSIDriverList(in, out, s)
}
func autoConvert_storage_CSIDriverList_To_v1_CSIDriverList(in *storage.CSIDriverList, out *storagev1.CSIDriverList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storagev1.CSIDriver, len(*in))
for i := range *in {
if err := Convert_storage_CSIDriver_To_v1_CSIDriver(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_storage_CSIDriverList_To_v1_CSIDriverList is an autogenerated conversion function.
func Convert_storage_CSIDriverList_To_v1_CSIDriverList(in *storage.CSIDriverList, out *storagev1.CSIDriverList, s conversion.Scope) error {
return autoConvert_storage_CSIDriverList_To_v1_CSIDriverList(in, out, s)
}
func autoConvert_v1_CSIDriverSpec_To_storage_CSIDriverSpec(in *storagev1.CSIDriverSpec, out *storage.CSIDriverSpec, s conversion.Scope) error {
out.AttachRequired = (*bool)(unsafe.Pointer(in.AttachRequired))
out.PodInfoOnMount = (*bool)(unsafe.Pointer(in.PodInfoOnMount))
out.VolumeLifecycleModes = *(*[]storage.VolumeLifecycleMode)(unsafe.Pointer(&in.VolumeLifecycleModes))
out.StorageCapacity = (*bool)(unsafe.Pointer(in.StorageCapacity))
out.FSGroupPolicy = (*storage.FSGroupPolicy)(unsafe.Pointer(in.FSGroupPolicy))
out.TokenRequests = *(*[]storage.TokenRequest)(unsafe.Pointer(&in.TokenRequests))
out.RequiresRepublish = (*bool)(unsafe.Pointer(in.RequiresRepublish))
out.SELinuxMount = (*bool)(unsafe.Pointer(in.SELinuxMount))
out.NodeAllocatableUpdatePeriodSeconds = (*int64)(unsafe.Pointer(in.NodeAllocatableUpdatePeriodSeconds))
return nil
}
// Convert_v1_CSIDriverSpec_To_storage_CSIDriverSpec is an autogenerated conversion function.
func Convert_v1_CSIDriverSpec_To_storage_CSIDriverSpec(in *storagev1.CSIDriverSpec, out *storage.CSIDriverSpec, s conversion.Scope) error {
return autoConvert_v1_CSIDriverSpec_To_storage_CSIDriverSpec(in, out, s)
}
func autoConvert_storage_CSIDriverSpec_To_v1_CSIDriverSpec(in *storage.CSIDriverSpec, out *storagev1.CSIDriverSpec, s conversion.Scope) error {
out.AttachRequired = (*bool)(unsafe.Pointer(in.AttachRequired))
out.FSGroupPolicy = (*storagev1.FSGroupPolicy)(unsafe.Pointer(in.FSGroupPolicy))
out.PodInfoOnMount = (*bool)(unsafe.Pointer(in.PodInfoOnMount))
out.VolumeLifecycleModes = *(*[]storagev1.VolumeLifecycleMode)(unsafe.Pointer(&in.VolumeLifecycleModes))
out.StorageCapacity = (*bool)(unsafe.Pointer(in.StorageCapacity))
out.TokenRequests = *(*[]storagev1.TokenRequest)(unsafe.Pointer(&in.TokenRequests))
out.RequiresRepublish = (*bool)(unsafe.Pointer(in.RequiresRepublish))
out.SELinuxMount = (*bool)(unsafe.Pointer(in.SELinuxMount))
out.NodeAllocatableUpdatePeriodSeconds = (*int64)(unsafe.Pointer(in.NodeAllocatableUpdatePeriodSeconds))
return nil
}
// Convert_storage_CSIDriverSpec_To_v1_CSIDriverSpec is an autogenerated conversion function.
func Convert_storage_CSIDriverSpec_To_v1_CSIDriverSpec(in *storage.CSIDriverSpec, out *storagev1.CSIDriverSpec, s conversion.Scope) error {
return autoConvert_storage_CSIDriverSpec_To_v1_CSIDriverSpec(in, out, s)
}
func autoConvert_v1_CSINode_To_storage_CSINode(in *storagev1.CSINode, out *storage.CSINode, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1_CSINode_To_storage_CSINode is an autogenerated conversion function.
func Convert_v1_CSINode_To_storage_CSINode(in *storagev1.CSINode, out *storage.CSINode, s conversion.Scope) error {
return autoConvert_v1_CSINode_To_storage_CSINode(in, out, s)
}
func autoConvert_storage_CSINode_To_v1_CSINode(in *storage.CSINode, out *storagev1.CSINode, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_storage_CSINode_To_v1_CSINode is an autogenerated conversion function.
func Convert_storage_CSINode_To_v1_CSINode(in *storage.CSINode, out *storagev1.CSINode, s conversion.Scope) error {
return autoConvert_storage_CSINode_To_v1_CSINode(in, out, s)
}
func autoConvert_v1_CSINodeDriver_To_storage_CSINodeDriver(in *storagev1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error {
out.Name = in.Name
out.NodeID = in.NodeID
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
out.Allocatable = (*storage.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
return nil
}
// Convert_v1_CSINodeDriver_To_storage_CSINodeDriver is an autogenerated conversion function.
func Convert_v1_CSINodeDriver_To_storage_CSINodeDriver(in *storagev1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error {
return autoConvert_v1_CSINodeDriver_To_storage_CSINodeDriver(in, out, s)
}
func autoConvert_storage_CSINodeDriver_To_v1_CSINodeDriver(in *storage.CSINodeDriver, out *storagev1.CSINodeDriver, s conversion.Scope) error {
out.Name = in.Name
out.NodeID = in.NodeID
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
out.Allocatable = (*storagev1.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
return nil
}
// Convert_storage_CSINodeDriver_To_v1_CSINodeDriver is an autogenerated conversion function.
func Convert_storage_CSINodeDriver_To_v1_CSINodeDriver(in *storage.CSINodeDriver, out *storagev1.CSINodeDriver, s conversion.Scope) error {
return autoConvert_storage_CSINodeDriver_To_v1_CSINodeDriver(in, out, s)
}
func autoConvert_v1_CSINodeList_To_storage_CSINodeList(in *storagev1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.CSINode)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_CSINodeList_To_storage_CSINodeList is an autogenerated conversion function.
func Convert_v1_CSINodeList_To_storage_CSINodeList(in *storagev1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error {
return autoConvert_v1_CSINodeList_To_storage_CSINodeList(in, out, s)
}
func autoConvert_storage_CSINodeList_To_v1_CSINodeList(in *storage.CSINodeList, out *storagev1.CSINodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1.CSINode)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_CSINodeList_To_v1_CSINodeList is an autogenerated conversion function.
func Convert_storage_CSINodeList_To_v1_CSINodeList(in *storage.CSINodeList, out *storagev1.CSINodeList, s conversion.Scope) error {
return autoConvert_storage_CSINodeList_To_v1_CSINodeList(in, out, s)
}
func autoConvert_v1_CSINodeSpec_To_storage_CSINodeSpec(in *storagev1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error {
out.Drivers = *(*[]storage.CSINodeDriver)(unsafe.Pointer(&in.Drivers))
return nil
}
// Convert_v1_CSINodeSpec_To_storage_CSINodeSpec is an autogenerated conversion function.
func Convert_v1_CSINodeSpec_To_storage_CSINodeSpec(in *storagev1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error {
return autoConvert_v1_CSINodeSpec_To_storage_CSINodeSpec(in, out, s)
}
func autoConvert_storage_CSINodeSpec_To_v1_CSINodeSpec(in *storage.CSINodeSpec, out *storagev1.CSINodeSpec, s conversion.Scope) error {
out.Drivers = *(*[]storagev1.CSINodeDriver)(unsafe.Pointer(&in.Drivers))
return nil
}
// Convert_storage_CSINodeSpec_To_v1_CSINodeSpec is an autogenerated conversion function.
func Convert_storage_CSINodeSpec_To_v1_CSINodeSpec(in *storage.CSINodeSpec, out *storagev1.CSINodeSpec, s conversion.Scope) error {
return autoConvert_storage_CSINodeSpec_To_v1_CSINodeSpec(in, out, s)
}
func autoConvert_v1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*metav1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_v1_CSIStorageCapacity_To_storage_CSIStorageCapacity is an autogenerated conversion function.
func Convert_v1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_v1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in, out, s)
}
func autoConvert_storage_CSIStorageCapacity_To_v1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*metav1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_storage_CSIStorageCapacity_To_v1_CSIStorageCapacity is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacity_To_v1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacity_To_v1_CSIStorageCapacity(in, out, s)
}
func autoConvert_v1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_v1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_v1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in, out, s)
}
func autoConvert_storage_CSIStorageCapacityList_To_v1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_CSIStorageCapacityList_To_v1_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacityList_To_v1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacityList_To_v1_CSIStorageCapacityList(in, out, s)
}
func autoConvert_v1_StorageClass_To_storage_StorageClass(in *storagev1.StorageClass, out *storage.StorageClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Provisioner = in.Provisioner
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
out.ReclaimPolicy = (*core.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy))
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion))
out.VolumeBindingMode = (*storage.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode))
out.AllowedTopologies = *(*[]core.TopologySelectorTerm)(unsafe.Pointer(&in.AllowedTopologies))
return nil
}
// Convert_v1_StorageClass_To_storage_StorageClass is an autogenerated conversion function.
func Convert_v1_StorageClass_To_storage_StorageClass(in *storagev1.StorageClass, out *storage.StorageClass, s conversion.Scope) error {
return autoConvert_v1_StorageClass_To_storage_StorageClass(in, out, s)
}
func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *storagev1.StorageClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Provisioner = in.Provisioner
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
out.ReclaimPolicy = (*corev1.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy))
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion))
out.VolumeBindingMode = (*storagev1.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode))
out.AllowedTopologies = *(*[]corev1.TopologySelectorTerm)(unsafe.Pointer(&in.AllowedTopologies))
return nil
}
// Convert_storage_StorageClass_To_v1_StorageClass is an autogenerated conversion function.
func Convert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *storagev1.StorageClass, s conversion.Scope) error {
return autoConvert_storage_StorageClass_To_v1_StorageClass(in, out, s)
}
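// Illustrative sketch (editorial, not generated): once RegisterConversions
// has been applied to a scheme, callers typically go through scheme.Convert
// instead of invoking the generated functions directly. The provisioner
// value below is a placeholder for the example.
func exampleConvertStorageClass(scheme *runtime.Scheme) error {
in := &storagev1.StorageClass{Provisioner: "example.com/provisioner"}
out := &storage.StorageClass{}
// Dispatches to Convert_v1_StorageClass_To_storage_StorageClass above.
return scheme.Convert(in, out, nil)
}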
func autoConvert_v1_StorageClassList_To_storage_StorageClassList(in *storagev1.StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_StorageClassList_To_storage_StorageClassList is an autogenerated conversion function.
func Convert_v1_StorageClassList_To_storage_StorageClassList(in *storagev1.StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
return autoConvert_v1_StorageClassList_To_storage_StorageClassList(in, out, s)
}
func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *storagev1.StorageClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1.StorageClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_StorageClassList_To_v1_StorageClassList is an autogenerated conversion function.
func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *storagev1.StorageClassList, s conversion.Scope) error {
return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s)
}
func autoConvert_v1_TokenRequest_To_storage_TokenRequest(in *storagev1.TokenRequest, out *storage.TokenRequest, s conversion.Scope) error {
out.Audience = in.Audience
out.ExpirationSeconds = (*int64)(unsafe.Pointer(in.ExpirationSeconds))
return nil
}
// Convert_v1_TokenRequest_To_storage_TokenRequest is an autogenerated conversion function.
func Convert_v1_TokenRequest_To_storage_TokenRequest(in *storagev1.TokenRequest, out *storage.TokenRequest, s conversion.Scope) error {
return autoConvert_v1_TokenRequest_To_storage_TokenRequest(in, out, s)
}
func autoConvert_storage_TokenRequest_To_v1_TokenRequest(in *storage.TokenRequest, out *storagev1.TokenRequest, s conversion.Scope) error {
out.Audience = in.Audience
out.ExpirationSeconds = (*int64)(unsafe.Pointer(in.ExpirationSeconds))
return nil
}
// Convert_storage_TokenRequest_To_v1_TokenRequest is an autogenerated conversion function.
func Convert_storage_TokenRequest_To_v1_TokenRequest(in *storage.TokenRequest, out *storagev1.TokenRequest, s conversion.Scope) error {
return autoConvert_storage_TokenRequest_To_v1_TokenRequest(in, out, s)
}
func autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function.
func Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
return autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s)
}
func autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_storage_VolumeAttachment_To_v1_VolumeAttachment is an autogenerated conversion function.
func Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1.VolumeAttachment, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in, out, s)
}
func autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storage.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function.
func Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s)
}
func autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storagev1.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in, out, s)
}
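// Note (editorial, not generated): unlike the slice casts used elsewhere in
// this file, VolumeAttachment items are converted element by element because
// each element may carry an InlineVolumeSpec, whose internal (core) and
// versioned (core/v1) representations differ in layout and so cannot be
// reinterpreted with unsafe.Pointer.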
func autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(core.PersistentVolumeSpec)
if err := apiscorev1.Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(corev1.PersistentVolumeSpec)
if err := apiscorev1.Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in, out, s)
}
func autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storagev1.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storagev1.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_v1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_v1_VolumeAttributesClass_To_storage_VolumeAttributesClass is an autogenerated conversion function.
func Convert_v1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_v1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in, out, s)
}
func autoConvert_storage_VolumeAttributesClass_To_v1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_storage_VolumeAttributesClass_To_v1_VolumeAttributesClass is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClass_To_v1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClass_To_v1_VolumeAttributesClass(in, out, s)
}
func autoConvert_v1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_v1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_v1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in, out, s)
}
func autoConvert_storage_VolumeAttributesClassList_To_v1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_VolumeAttributesClassList_To_v1_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClassList_To_v1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClassList_To_v1_VolumeAttributesClassList(in, out, s)
}
func autoConvert_v1_VolumeError_To_storage_VolumeError(in *storagev1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_v1_VolumeError_To_storage_VolumeError is an autogenerated conversion function.
func Convert_v1_VolumeError_To_storage_VolumeError(in *storagev1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
return autoConvert_v1_VolumeError_To_storage_VolumeError(in, out, s)
}
func autoConvert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *storagev1.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_storage_VolumeError_To_v1_VolumeError is an autogenerated conversion function.
func Convert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *storagev1.VolumeError, s conversion.Scope) error {
return autoConvert_storage_VolumeError_To_v1_VolumeError(in, out, s)
}
func autoConvert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in *storagev1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
out.Count = (*int32)(unsafe.Pointer(in.Count))
return nil
}
// Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources is an autogenerated conversion function.
func Convert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in *storagev1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
return autoConvert_v1_VolumeNodeResources_To_storage_VolumeNodeResources(in, out, s)
}
func autoConvert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in *storage.VolumeNodeResources, out *storagev1.VolumeNodeResources, s conversion.Scope) error {
out.Count = (*int32)(unsafe.Pointer(in.Count))
return nil
}
// Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources is an autogenerated conversion function.
func Convert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in *storage.VolumeNodeResources, out *storagev1.VolumeNodeResources, s conversion.Scope) error {
return autoConvert_storage_VolumeNodeResources_To_v1_VolumeNodeResources(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&storagev1.CSIDriver{}, func(obj interface{}) { SetObjectDefaults_CSIDriver(obj.(*storagev1.CSIDriver)) })
scheme.AddTypeDefaultingFunc(&storagev1.CSIDriverList{}, func(obj interface{}) { SetObjectDefaults_CSIDriverList(obj.(*storagev1.CSIDriverList)) })
scheme.AddTypeDefaultingFunc(&storagev1.StorageClass{}, func(obj interface{}) { SetObjectDefaults_StorageClass(obj.(*storagev1.StorageClass)) })
scheme.AddTypeDefaultingFunc(&storagev1.StorageClassList{}, func(obj interface{}) { SetObjectDefaults_StorageClassList(obj.(*storagev1.StorageClassList)) })
scheme.AddTypeDefaultingFunc(&storagev1.VolumeAttachment{}, func(obj interface{}) { SetObjectDefaults_VolumeAttachment(obj.(*storagev1.VolumeAttachment)) })
scheme.AddTypeDefaultingFunc(&storagev1.VolumeAttachmentList{}, func(obj interface{}) { SetObjectDefaults_VolumeAttachmentList(obj.(*storagev1.VolumeAttachmentList)) })
return nil
}
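// Illustrative sketch (editorial, not generated): applying the registered
// defaulters through a standalone scheme. In a real API server this wiring
// is done by the install packages instead.
func exampleApplyDefaults() {
scheme := runtime.NewScheme()
if err := RegisterDefaults(scheme); err != nil {
panic(err)
}
sc := &storagev1.StorageClass{}
// Default dispatches to SetObjectDefaults_StorageClass registered above.
scheme.Default(sc)
}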
func SetObjectDefaults_CSIDriver(in *storagev1.CSIDriver) {
SetDefaults_CSIDriver(in)
}
func SetObjectDefaults_CSIDriverList(in *storagev1.CSIDriverList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_CSIDriver(a)
}
}
func SetObjectDefaults_StorageClass(in *storagev1.StorageClass) {
SetDefaults_StorageClass(in)
}
func SetObjectDefaults_StorageClassList(in *storagev1.StorageClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_StorageClass(a)
}
}
func SetObjectDefaults_VolumeAttachment(in *storagev1.VolumeAttachment) {
if in.Spec.Source.InlineVolumeSpec != nil {
apiscorev1.SetDefaults_ResourceList(&in.Spec.Source.InlineVolumeSpec.Capacity)
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath != nil {
apiscorev1.SetDefaults_HostPathVolumeSource(in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath)
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool = "rbd"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser = "admin"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := corev1.AzureDataDiskCachingMode(corev1.AzureDataDiskCachingReadWrite)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind == nil {
ptrVar1 := corev1.AzureDataDiskKind(corev1.AzureSharedBlobDisk)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType = "xfs"
}
}
}
}
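// Note (editorial, not generated): defaulter-gen inlines the defaults for
// every persistent-volume source reachable through InlineVolumeSpec, which
// is why the RBD, iSCSI, AzureDisk, and ScaleIO defaults above mirror the
// core/v1 PersistentVolume defaulters.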
func SetObjectDefaults_VolumeAttachmentList(in *storagev1.VolumeAttachmentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_VolumeAttachment(a)
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "storage.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &storagev1alpha1.SchemeBuilder
// AddToScheme is a common registration function for mapping package-scoped group and version keys to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
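// Illustrative sketch (editorial, not generated): consumers register the
// storage/v1alpha1 types by calling AddToScheme on a runtime.Scheme, which
// replays every function accumulated in the shared SchemeBuilder, including
// the generated conversions and defaulters in this package:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		// handle the registration error
//	}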
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
apicorev1 "k8s.io/api/core/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
storage "k8s.io/kubernetes/pkg/apis/storage"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.CSIStorageCapacity)(nil), (*storage.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CSIStorageCapacity_To_storage_CSIStorageCapacity(a.(*storagev1alpha1.CSIStorageCapacity), b.(*storage.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacity)(nil), (*storagev1alpha1.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacity_To_v1alpha1_CSIStorageCapacity(a.(*storage.CSIStorageCapacity), b.(*storagev1alpha1.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.CSIStorageCapacityList)(nil), (*storage.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(a.(*storagev1alpha1.CSIStorageCapacityList), b.(*storage.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacityList)(nil), (*storagev1alpha1.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacityList_To_v1alpha1_CSIStorageCapacityList(a.(*storage.CSIStorageCapacityList), b.(*storagev1alpha1.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttachment)(nil), (*storage.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(a.(*storagev1alpha1.VolumeAttachment), b.(*storage.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachment)(nil), (*storagev1alpha1.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(a.(*storage.VolumeAttachment), b.(*storagev1alpha1.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttachmentList)(nil), (*storage.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(a.(*storagev1alpha1.VolumeAttachmentList), b.(*storage.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentList)(nil), (*storagev1alpha1.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(a.(*storage.VolumeAttachmentList), b.(*storagev1alpha1.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttachmentSource)(nil), (*storage.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(a.(*storagev1alpha1.VolumeAttachmentSource), b.(*storage.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSource)(nil), (*storagev1alpha1.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(a.(*storage.VolumeAttachmentSource), b.(*storagev1alpha1.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttachmentSpec)(nil), (*storage.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(a.(*storagev1alpha1.VolumeAttachmentSpec), b.(*storage.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSpec)(nil), (*storagev1alpha1.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(a.(*storage.VolumeAttachmentSpec), b.(*storagev1alpha1.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttachmentStatus)(nil), (*storage.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(a.(*storagev1alpha1.VolumeAttachmentStatus), b.(*storage.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentStatus)(nil), (*storagev1alpha1.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(a.(*storage.VolumeAttachmentStatus), b.(*storagev1alpha1.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttributesClass)(nil), (*storage.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttributesClass_To_storage_VolumeAttributesClass(a.(*storagev1alpha1.VolumeAttributesClass), b.(*storage.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClass)(nil), (*storagev1alpha1.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClass_To_v1alpha1_VolumeAttributesClass(a.(*storage.VolumeAttributesClass), b.(*storagev1alpha1.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeAttributesClassList)(nil), (*storage.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(a.(*storagev1alpha1.VolumeAttributesClassList), b.(*storage.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClassList)(nil), (*storagev1alpha1.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClassList_To_v1alpha1_VolumeAttributesClassList(a.(*storage.VolumeAttributesClassList), b.(*storagev1alpha1.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1alpha1.VolumeError)(nil), (*storage.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeError_To_storage_VolumeError(a.(*storagev1alpha1.VolumeError), b.(*storage.VolumeError), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeError)(nil), (*storagev1alpha1.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeError_To_v1alpha1_VolumeError(a.(*storage.VolumeError), b.(*storagev1alpha1.VolumeError), scope)
}); err != nil {
return err
}
return nil
}
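// Illustrative sketch (editorial, not generated): with the conversions
// registered, a round trip through scheme.Convert looks like this; the
// class name is a placeholder.
func exampleConvertCSIStorageCapacity(scheme *runtime.Scheme) error {
in := &storagev1alpha1.CSIStorageCapacity{StorageClassName: "example-class"}
out := &storage.CSIStorageCapacity{}
return scheme.Convert(in, out, nil)
}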
func autoConvert_v1alpha1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1alpha1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*v1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_v1alpha1_CSIStorageCapacity_To_storage_CSIStorageCapacity is an autogenerated conversion function.
func Convert_v1alpha1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1alpha1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_v1alpha1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in, out, s)
}
func autoConvert_storage_CSIStorageCapacity_To_v1alpha1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1alpha1.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*v1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_storage_CSIStorageCapacity_To_v1alpha1_CSIStorageCapacity is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacity_To_v1alpha1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1alpha1.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacity_To_v1alpha1_CSIStorageCapacity(in, out, s)
}
func autoConvert_v1alpha1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1alpha1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_v1alpha1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1alpha1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_v1alpha1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in, out, s)
}
func autoConvert_storage_CSIStorageCapacityList_To_v1alpha1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1alpha1.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1alpha1.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_CSIStorageCapacityList_To_v1alpha1_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacityList_To_v1alpha1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1alpha1.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacityList_To_v1alpha1_CSIStorageCapacityList(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1alpha1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1alpha1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s)
}
func autoConvert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1alpha1.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment is an autogenerated conversion function.
func Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1alpha1.VolumeAttachment, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1alpha1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storage.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1alpha1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s)
}
func autoConvert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1alpha1.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storagev1alpha1.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1alpha1.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1alpha1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(core.PersistentVolumeSpec)
if err := corev1.Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1alpha1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1alpha1.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(apicorev1.PersistentVolumeSpec)
if err := corev1.Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1alpha1.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1alpha1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1alpha1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1alpha1.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1alpha1.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1alpha1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1alpha1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1alpha1.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storagev1alpha1.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storagev1alpha1.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1alpha1.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1alpha1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_v1alpha1_VolumeAttributesClass_To_storage_VolumeAttributesClass is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1alpha1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in, out, s)
}
func autoConvert_storage_VolumeAttributesClass_To_v1alpha1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1alpha1.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_storage_VolumeAttributesClass_To_v1alpha1_VolumeAttributesClass is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClass_To_v1alpha1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1alpha1.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClass_To_v1alpha1_VolumeAttributesClass(in, out, s)
}
func autoConvert_v1alpha1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1alpha1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_v1alpha1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1alpha1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in, out, s)
}
func autoConvert_storage_VolumeAttributesClassList_To_v1alpha1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1alpha1.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1alpha1.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_VolumeAttributesClassList_To_v1alpha1_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClassList_To_v1alpha1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1alpha1.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClassList_To_v1alpha1_VolumeAttributesClassList(in, out, s)
}
func autoConvert_v1alpha1_VolumeError_To_storage_VolumeError(in *storagev1alpha1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_v1alpha1_VolumeError_To_storage_VolumeError is an autogenerated conversion function.
func Convert_v1alpha1_VolumeError_To_storage_VolumeError(in *storagev1alpha1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeError_To_storage_VolumeError(in, out, s)
}
func autoConvert_storage_VolumeError_To_v1alpha1_VolumeError(in *storage.VolumeError, out *storagev1alpha1.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_storage_VolumeError_To_v1alpha1_VolumeError is an autogenerated conversion function.
func Convert_storage_VolumeError_To_v1alpha1_VolumeError(in *storage.VolumeError, out *storagev1alpha1.VolumeError, s conversion.Scope) error {
return autoConvert_storage_VolumeError_To_v1alpha1_VolumeError(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&storagev1alpha1.VolumeAttachment{}, func(obj interface{}) { SetObjectDefaults_VolumeAttachment(obj.(*storagev1alpha1.VolumeAttachment)) })
scheme.AddTypeDefaultingFunc(&storagev1alpha1.VolumeAttachmentList{}, func(obj interface{}) {
SetObjectDefaults_VolumeAttachmentList(obj.(*storagev1alpha1.VolumeAttachmentList))
})
return nil
}
func SetObjectDefaults_VolumeAttachment(in *storagev1alpha1.VolumeAttachment) {
if in.Spec.Source.InlineVolumeSpec != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Source.InlineVolumeSpec.Capacity)
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath)
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool = "rbd"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser = "admin"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType = "xfs"
}
}
}
}
func SetObjectDefaults_VolumeAttachmentList(in *storagev1alpha1.VolumeAttachmentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_VolumeAttachment(a)
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_StorageClass(obj *storagev1beta1.StorageClass) {
if obj.ReclaimPolicy == nil {
obj.ReclaimPolicy = new(v1.PersistentVolumeReclaimPolicy)
*obj.ReclaimPolicy = v1.PersistentVolumeReclaimDelete
}
if obj.VolumeBindingMode == nil {
obj.VolumeBindingMode = new(storagev1beta1.VolumeBindingMode)
*obj.VolumeBindingMode = storagev1beta1.VolumeBindingImmediate
}
}
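// Illustrative sketch (editorial, not generated): the effect of the
// defaulter above on an empty object.
func exampleStorageClassDefaults() {
sc := &storagev1beta1.StorageClass{}
SetDefaults_StorageClass(sc)
// Now *sc.ReclaimPolicy == v1.PersistentVolumeReclaimDelete and
// *sc.VolumeBindingMode == storagev1beta1.VolumeBindingImmediate.
}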
func SetDefaults_CSIDriver(obj *storagev1beta1.CSIDriver) {
if obj.Spec.AttachRequired == nil {
obj.Spec.AttachRequired = new(bool)
*(obj.Spec.AttachRequired) = true
}
if obj.Spec.PodInfoOnMount == nil {
obj.Spec.PodInfoOnMount = new(bool)
*(obj.Spec.PodInfoOnMount) = false
}
if obj.Spec.StorageCapacity == nil {
obj.Spec.StorageCapacity = new(bool)
*(obj.Spec.StorageCapacity) = false
}
if obj.Spec.FSGroupPolicy == nil {
obj.Spec.FSGroupPolicy = new(storagev1beta1.FSGroupPolicy)
*obj.Spec.FSGroupPolicy = storagev1beta1.ReadWriteOnceWithFSTypeFSGroupPolicy
}
if len(obj.Spec.VolumeLifecycleModes) == 0 {
obj.Spec.VolumeLifecycleModes = append(obj.Spec.VolumeLifecycleModes, storagev1beta1.VolumeLifecyclePersistent)
}
if obj.Spec.RequiresRepublish == nil {
obj.Spec.RequiresRepublish = new(bool)
*(obj.Spec.RequiresRepublish) = false
}
if obj.Spec.SELinuxMount == nil && utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
obj.Spec.SELinuxMount = new(bool)
*(obj.Spec.SELinuxMount) = false
}
}
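// Note (editorial, not generated): the SELinuxMount default above is applied
// only while the SELinuxMountReadWriteOncePod feature gate is enabled, so a
// nil SELinuxMount survives defaulting on clusters with the gate disabled.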
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "storage.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group-qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
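// exampleResource is an illustrative sketch, not part of the original file:
// Resource qualifies a bare resource name with this package's group.
func exampleResource() schema.GroupResource {
	// Yields schema.GroupResource{Group: "storage.k8s.io", Resource: "csidrivers"}.
	return Resource("csidrivers")
}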
var (
localSchemeBuilder = &storagev1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
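// Illustrative usage, not part of the original file: a caller typically
// builds a scheme and installs these types and defaulters together, e.g.
//
//	scheme := runtime.NewScheme() // k8s.io/apimachinery/pkg/runtime
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}
//	scheme.Default(obj) // applies the defaulters registered above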
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
storage "k8s.io/kubernetes/pkg/apis/storage"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSIDriver)(nil), (*storage.CSIDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSIDriver_To_storage_CSIDriver(a.(*storagev1beta1.CSIDriver), b.(*storage.CSIDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriver)(nil), (*storagev1beta1.CSIDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriver_To_v1beta1_CSIDriver(a.(*storage.CSIDriver), b.(*storagev1beta1.CSIDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSIDriverList)(nil), (*storage.CSIDriverList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSIDriverList_To_storage_CSIDriverList(a.(*storagev1beta1.CSIDriverList), b.(*storage.CSIDriverList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriverList)(nil), (*storagev1beta1.CSIDriverList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriverList_To_v1beta1_CSIDriverList(a.(*storage.CSIDriverList), b.(*storagev1beta1.CSIDriverList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSIDriverSpec)(nil), (*storage.CSIDriverSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec(a.(*storagev1beta1.CSIDriverSpec), b.(*storage.CSIDriverSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIDriverSpec)(nil), (*storagev1beta1.CSIDriverSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec(a.(*storage.CSIDriverSpec), b.(*storagev1beta1.CSIDriverSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSINode)(nil), (*storage.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSINode_To_storage_CSINode(a.(*storagev1beta1.CSINode), b.(*storage.CSINode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINode)(nil), (*storagev1beta1.CSINode)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINode_To_v1beta1_CSINode(a.(*storage.CSINode), b.(*storagev1beta1.CSINode), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSINodeDriver)(nil), (*storage.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver(a.(*storagev1beta1.CSINodeDriver), b.(*storage.CSINodeDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeDriver)(nil), (*storagev1beta1.CSINodeDriver)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver(a.(*storage.CSINodeDriver), b.(*storagev1beta1.CSINodeDriver), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSINodeList)(nil), (*storage.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSINodeList_To_storage_CSINodeList(a.(*storagev1beta1.CSINodeList), b.(*storage.CSINodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeList)(nil), (*storagev1beta1.CSINodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeList_To_v1beta1_CSINodeList(a.(*storage.CSINodeList), b.(*storagev1beta1.CSINodeList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSINodeSpec)(nil), (*storage.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec(a.(*storagev1beta1.CSINodeSpec), b.(*storage.CSINodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSINodeSpec)(nil), (*storagev1beta1.CSINodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec(a.(*storage.CSINodeSpec), b.(*storagev1beta1.CSINodeSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSIStorageCapacity)(nil), (*storage.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSIStorageCapacity_To_storage_CSIStorageCapacity(a.(*storagev1beta1.CSIStorageCapacity), b.(*storage.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacity)(nil), (*storagev1beta1.CSIStorageCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacity_To_v1beta1_CSIStorageCapacity(a.(*storage.CSIStorageCapacity), b.(*storagev1beta1.CSIStorageCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.CSIStorageCapacityList)(nil), (*storage.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(a.(*storagev1beta1.CSIStorageCapacityList), b.(*storage.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.CSIStorageCapacityList)(nil), (*storagev1beta1.CSIStorageCapacityList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_CSIStorageCapacityList_To_v1beta1_CSIStorageCapacityList(a.(*storage.CSIStorageCapacityList), b.(*storagev1beta1.CSIStorageCapacityList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.StorageClass)(nil), (*storage.StorageClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StorageClass_To_storage_StorageClass(a.(*storagev1beta1.StorageClass), b.(*storage.StorageClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.StorageClass)(nil), (*storagev1beta1.StorageClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_StorageClass_To_v1beta1_StorageClass(a.(*storage.StorageClass), b.(*storagev1beta1.StorageClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.StorageClassList)(nil), (*storage.StorageClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StorageClassList_To_storage_StorageClassList(a.(*storagev1beta1.StorageClassList), b.(*storage.StorageClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.StorageClassList)(nil), (*storagev1beta1.StorageClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_StorageClassList_To_v1beta1_StorageClassList(a.(*storage.StorageClassList), b.(*storagev1beta1.StorageClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.TokenRequest)(nil), (*storage.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_TokenRequest_To_storage_TokenRequest(a.(*storagev1beta1.TokenRequest), b.(*storage.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.TokenRequest)(nil), (*storagev1beta1.TokenRequest)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_TokenRequest_To_v1beta1_TokenRequest(a.(*storage.TokenRequest), b.(*storagev1beta1.TokenRequest), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttachment)(nil), (*storage.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment(a.(*storagev1beta1.VolumeAttachment), b.(*storage.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachment)(nil), (*storagev1beta1.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment(a.(*storage.VolumeAttachment), b.(*storagev1beta1.VolumeAttachment), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttachmentList)(nil), (*storage.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttachmentList_To_storage_VolumeAttachmentList(a.(*storagev1beta1.VolumeAttachmentList), b.(*storage.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentList)(nil), (*storagev1beta1.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentList_To_v1beta1_VolumeAttachmentList(a.(*storage.VolumeAttachmentList), b.(*storagev1beta1.VolumeAttachmentList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttachmentSource)(nil), (*storage.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(a.(*storagev1beta1.VolumeAttachmentSource), b.(*storage.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSource)(nil), (*storagev1beta1.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource(a.(*storage.VolumeAttachmentSource), b.(*storagev1beta1.VolumeAttachmentSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttachmentSpec)(nil), (*storage.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(a.(*storagev1beta1.VolumeAttachmentSpec), b.(*storage.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSpec)(nil), (*storagev1beta1.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec(a.(*storage.VolumeAttachmentSpec), b.(*storagev1beta1.VolumeAttachmentSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttachmentStatus)(nil), (*storage.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(a.(*storagev1beta1.VolumeAttachmentStatus), b.(*storage.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentStatus)(nil), (*storagev1beta1.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus(a.(*storage.VolumeAttachmentStatus), b.(*storagev1beta1.VolumeAttachmentStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttributesClass)(nil), (*storage.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttributesClass_To_storage_VolumeAttributesClass(a.(*storagev1beta1.VolumeAttributesClass), b.(*storage.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClass)(nil), (*storagev1beta1.VolumeAttributesClass)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClass_To_v1beta1_VolumeAttributesClass(a.(*storage.VolumeAttributesClass), b.(*storagev1beta1.VolumeAttributesClass), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeAttributesClassList)(nil), (*storage.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(a.(*storagev1beta1.VolumeAttributesClassList), b.(*storage.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeAttributesClassList)(nil), (*storagev1beta1.VolumeAttributesClassList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeAttributesClassList_To_v1beta1_VolumeAttributesClassList(a.(*storage.VolumeAttributesClassList), b.(*storagev1beta1.VolumeAttributesClassList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeError)(nil), (*storage.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeError_To_storage_VolumeError(a.(*storagev1beta1.VolumeError), b.(*storage.VolumeError), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeError)(nil), (*storagev1beta1.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeError_To_v1beta1_VolumeError(a.(*storage.VolumeError), b.(*storagev1beta1.VolumeError), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storagev1beta1.VolumeNodeResources)(nil), (*storage.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(a.(*storagev1beta1.VolumeNodeResources), b.(*storage.VolumeNodeResources), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*storage.VolumeNodeResources)(nil), (*storagev1beta1.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(a.(*storage.VolumeNodeResources), b.(*storagev1beta1.VolumeNodeResources), scope)
}); err != nil {
return err
}
return nil
}
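// exampleConvert is an illustrative sketch, not generated code: once
// RegisterConversions has run (wired up via init above), a scheme converts
// between the versioned and internal representations.
func exampleConvert(scheme *runtime.Scheme) (*storage.CSIDriver, error) {
	in := &storagev1beta1.CSIDriver{}
	out := &storage.CSIDriver{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}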
func autoConvert_v1beta1_CSIDriver_To_storage_CSIDriver(in *storagev1beta1.CSIDriver, out *storage.CSIDriver, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_CSIDriver_To_storage_CSIDriver is an autogenerated conversion function.
func Convert_v1beta1_CSIDriver_To_storage_CSIDriver(in *storagev1beta1.CSIDriver, out *storage.CSIDriver, s conversion.Scope) error {
return autoConvert_v1beta1_CSIDriver_To_storage_CSIDriver(in, out, s)
}
func autoConvert_storage_CSIDriver_To_v1beta1_CSIDriver(in *storage.CSIDriver, out *storagev1beta1.CSIDriver, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_storage_CSIDriver_To_v1beta1_CSIDriver is an autogenerated conversion function.
func Convert_storage_CSIDriver_To_v1beta1_CSIDriver(in *storage.CSIDriver, out *storagev1beta1.CSIDriver, s conversion.Scope) error {
return autoConvert_storage_CSIDriver_To_v1beta1_CSIDriver(in, out, s)
}
func autoConvert_v1beta1_CSIDriverList_To_storage_CSIDriverList(in *storagev1beta1.CSIDriverList, out *storage.CSIDriverList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storage.CSIDriver, len(*in))
for i := range *in {
if err := Convert_v1beta1_CSIDriver_To_storage_CSIDriver(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_CSIDriverList_To_storage_CSIDriverList is an autogenerated conversion function.
func Convert_v1beta1_CSIDriverList_To_storage_CSIDriverList(in *storagev1beta1.CSIDriverList, out *storage.CSIDriverList, s conversion.Scope) error {
return autoConvert_v1beta1_CSIDriverList_To_storage_CSIDriverList(in, out, s)
}
func autoConvert_storage_CSIDriverList_To_v1beta1_CSIDriverList(in *storage.CSIDriverList, out *storagev1beta1.CSIDriverList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storagev1beta1.CSIDriver, len(*in))
for i := range *in {
if err := Convert_storage_CSIDriver_To_v1beta1_CSIDriver(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_storage_CSIDriverList_To_v1beta1_CSIDriverList is an autogenerated conversion function.
func Convert_storage_CSIDriverList_To_v1beta1_CSIDriverList(in *storage.CSIDriverList, out *storagev1beta1.CSIDriverList, s conversion.Scope) error {
return autoConvert_storage_CSIDriverList_To_v1beta1_CSIDriverList(in, out, s)
}
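// Editorial note, not generated: the casts through unsafe.Pointer in the
// functions below are safe only because conversion-gen has verified that the
// source and destination field types share an identical memory layout; they
// let the conversion alias the data instead of allocating and copying it.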
func autoConvert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec(in *storagev1beta1.CSIDriverSpec, out *storage.CSIDriverSpec, s conversion.Scope) error {
out.AttachRequired = (*bool)(unsafe.Pointer(in.AttachRequired))
out.PodInfoOnMount = (*bool)(unsafe.Pointer(in.PodInfoOnMount))
out.VolumeLifecycleModes = *(*[]storage.VolumeLifecycleMode)(unsafe.Pointer(&in.VolumeLifecycleModes))
out.StorageCapacity = (*bool)(unsafe.Pointer(in.StorageCapacity))
out.FSGroupPolicy = (*storage.FSGroupPolicy)(unsafe.Pointer(in.FSGroupPolicy))
out.TokenRequests = *(*[]storage.TokenRequest)(unsafe.Pointer(&in.TokenRequests))
out.RequiresRepublish = (*bool)(unsafe.Pointer(in.RequiresRepublish))
out.SELinuxMount = (*bool)(unsafe.Pointer(in.SELinuxMount))
out.NodeAllocatableUpdatePeriodSeconds = (*int64)(unsafe.Pointer(in.NodeAllocatableUpdatePeriodSeconds))
return nil
}
// Convert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec is an autogenerated conversion function.
func Convert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec(in *storagev1beta1.CSIDriverSpec, out *storage.CSIDriverSpec, s conversion.Scope) error {
return autoConvert_v1beta1_CSIDriverSpec_To_storage_CSIDriverSpec(in, out, s)
}
func autoConvert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec(in *storage.CSIDriverSpec, out *storagev1beta1.CSIDriverSpec, s conversion.Scope) error {
out.AttachRequired = (*bool)(unsafe.Pointer(in.AttachRequired))
out.FSGroupPolicy = (*storagev1beta1.FSGroupPolicy)(unsafe.Pointer(in.FSGroupPolicy))
out.PodInfoOnMount = (*bool)(unsafe.Pointer(in.PodInfoOnMount))
out.VolumeLifecycleModes = *(*[]storagev1beta1.VolumeLifecycleMode)(unsafe.Pointer(&in.VolumeLifecycleModes))
out.StorageCapacity = (*bool)(unsafe.Pointer(in.StorageCapacity))
out.TokenRequests = *(*[]storagev1beta1.TokenRequest)(unsafe.Pointer(&in.TokenRequests))
out.RequiresRepublish = (*bool)(unsafe.Pointer(in.RequiresRepublish))
out.SELinuxMount = (*bool)(unsafe.Pointer(in.SELinuxMount))
out.NodeAllocatableUpdatePeriodSeconds = (*int64)(unsafe.Pointer(in.NodeAllocatableUpdatePeriodSeconds))
return nil
}
// Convert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec is an autogenerated conversion function.
func Convert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec(in *storage.CSIDriverSpec, out *storagev1beta1.CSIDriverSpec, s conversion.Scope) error {
return autoConvert_storage_CSIDriverSpec_To_v1beta1_CSIDriverSpec(in, out, s)
}
func autoConvert_v1beta1_CSINode_To_storage_CSINode(in *storagev1beta1.CSINode, out *storage.CSINode, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_CSINode_To_storage_CSINode is an autogenerated conversion function.
func Convert_v1beta1_CSINode_To_storage_CSINode(in *storagev1beta1.CSINode, out *storage.CSINode, s conversion.Scope) error {
return autoConvert_v1beta1_CSINode_To_storage_CSINode(in, out, s)
}
func autoConvert_storage_CSINode_To_v1beta1_CSINode(in *storage.CSINode, out *storagev1beta1.CSINode, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_storage_CSINode_To_v1beta1_CSINode is an autogenerated conversion function.
func Convert_storage_CSINode_To_v1beta1_CSINode(in *storage.CSINode, out *storagev1beta1.CSINode, s conversion.Scope) error {
return autoConvert_storage_CSINode_To_v1beta1_CSINode(in, out, s)
}
func autoConvert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver(in *storagev1beta1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error {
out.Name = in.Name
out.NodeID = in.NodeID
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
out.Allocatable = (*storage.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
return nil
}
// Convert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver is an autogenerated conversion function.
func Convert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver(in *storagev1beta1.CSINodeDriver, out *storage.CSINodeDriver, s conversion.Scope) error {
return autoConvert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver(in, out, s)
}
func autoConvert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver(in *storage.CSINodeDriver, out *storagev1beta1.CSINodeDriver, s conversion.Scope) error {
out.Name = in.Name
out.NodeID = in.NodeID
out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
out.Allocatable = (*storagev1beta1.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
return nil
}
// Convert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver is an autogenerated conversion function.
func Convert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver(in *storage.CSINodeDriver, out *storagev1beta1.CSINodeDriver, s conversion.Scope) error {
return autoConvert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver(in, out, s)
}
func autoConvert_v1beta1_CSINodeList_To_storage_CSINodeList(in *storagev1beta1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.CSINode)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_CSINodeList_To_storage_CSINodeList is an autogenerated conversion function.
func Convert_v1beta1_CSINodeList_To_storage_CSINodeList(in *storagev1beta1.CSINodeList, out *storage.CSINodeList, s conversion.Scope) error {
return autoConvert_v1beta1_CSINodeList_To_storage_CSINodeList(in, out, s)
}
func autoConvert_storage_CSINodeList_To_v1beta1_CSINodeList(in *storage.CSINodeList, out *storagev1beta1.CSINodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1beta1.CSINode)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_CSINodeList_To_v1beta1_CSINodeList is an autogenerated conversion function.
func Convert_storage_CSINodeList_To_v1beta1_CSINodeList(in *storage.CSINodeList, out *storagev1beta1.CSINodeList, s conversion.Scope) error {
return autoConvert_storage_CSINodeList_To_v1beta1_CSINodeList(in, out, s)
}
func autoConvert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec(in *storagev1beta1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error {
out.Drivers = *(*[]storage.CSINodeDriver)(unsafe.Pointer(&in.Drivers))
return nil
}
// Convert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec is an autogenerated conversion function.
func Convert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec(in *storagev1beta1.CSINodeSpec, out *storage.CSINodeSpec, s conversion.Scope) error {
return autoConvert_v1beta1_CSINodeSpec_To_storage_CSINodeSpec(in, out, s)
}
func autoConvert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec(in *storage.CSINodeSpec, out *storagev1beta1.CSINodeSpec, s conversion.Scope) error {
out.Drivers = *(*[]storagev1beta1.CSINodeDriver)(unsafe.Pointer(&in.Drivers))
return nil
}
// Convert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec is an autogenerated conversion function.
func Convert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec(in *storage.CSINodeSpec, out *storagev1beta1.CSINodeSpec, s conversion.Scope) error {
return autoConvert_storage_CSINodeSpec_To_v1beta1_CSINodeSpec(in, out, s)
}
func autoConvert_v1beta1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1beta1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*v1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_v1beta1_CSIStorageCapacity_To_storage_CSIStorageCapacity is an autogenerated conversion function.
func Convert_v1beta1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in *storagev1beta1.CSIStorageCapacity, out *storage.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_v1beta1_CSIStorageCapacity_To_storage_CSIStorageCapacity(in, out, s)
}
func autoConvert_storage_CSIStorageCapacity_To_v1beta1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1beta1.CSIStorageCapacity, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeTopology = (*v1.LabelSelector)(unsafe.Pointer(in.NodeTopology))
out.StorageClassName = in.StorageClassName
out.Capacity = (*resource.Quantity)(unsafe.Pointer(in.Capacity))
out.MaximumVolumeSize = (*resource.Quantity)(unsafe.Pointer(in.MaximumVolumeSize))
return nil
}
// Convert_storage_CSIStorageCapacity_To_v1beta1_CSIStorageCapacity is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacity_To_v1beta1_CSIStorageCapacity(in *storage.CSIStorageCapacity, out *storagev1beta1.CSIStorageCapacity, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacity_To_v1beta1_CSIStorageCapacity(in, out, s)
}
func autoConvert_v1beta1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1beta1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_v1beta1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in *storagev1beta1.CSIStorageCapacityList, out *storage.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_v1beta1_CSIStorageCapacityList_To_storage_CSIStorageCapacityList(in, out, s)
}
func autoConvert_storage_CSIStorageCapacityList_To_v1beta1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1beta1.CSIStorageCapacityList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1beta1.CSIStorageCapacity)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_CSIStorageCapacityList_To_v1beta1_CSIStorageCapacityList is an autogenerated conversion function.
func Convert_storage_CSIStorageCapacityList_To_v1beta1_CSIStorageCapacityList(in *storage.CSIStorageCapacityList, out *storagev1beta1.CSIStorageCapacityList, s conversion.Scope) error {
return autoConvert_storage_CSIStorageCapacityList_To_v1beta1_CSIStorageCapacityList(in, out, s)
}
func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *storagev1beta1.StorageClass, out *storage.StorageClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Provisioner = in.Provisioner
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
out.ReclaimPolicy = (*core.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy))
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion))
out.VolumeBindingMode = (*storage.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode))
out.AllowedTopologies = *(*[]core.TopologySelectorTerm)(unsafe.Pointer(&in.AllowedTopologies))
return nil
}
// Convert_v1beta1_StorageClass_To_storage_StorageClass is an autogenerated conversion function.
func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *storagev1beta1.StorageClass, out *storage.StorageClass, s conversion.Scope) error {
return autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in, out, s)
}
func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *storagev1beta1.StorageClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Provisioner = in.Provisioner
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
out.ReclaimPolicy = (*corev1.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy))
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion))
out.VolumeBindingMode = (*storagev1beta1.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode))
out.AllowedTopologies = *(*[]corev1.TopologySelectorTerm)(unsafe.Pointer(&in.AllowedTopologies))
return nil
}
// Convert_storage_StorageClass_To_v1beta1_StorageClass is an autogenerated conversion function.
func Convert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *storagev1beta1.StorageClass, s conversion.Scope) error {
return autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in, out, s)
}
func autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in *storagev1beta1.StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_StorageClassList_To_storage_StorageClassList is an autogenerated conversion function.
func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *storagev1beta1.StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
return autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in, out, s)
}
func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *storagev1beta1.StorageClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1beta1.StorageClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_StorageClassList_To_v1beta1_StorageClassList is an autogenerated conversion function.
func Convert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *storagev1beta1.StorageClassList, s conversion.Scope) error {
return autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in, out, s)
}
func autoConvert_v1beta1_TokenRequest_To_storage_TokenRequest(in *storagev1beta1.TokenRequest, out *storage.TokenRequest, s conversion.Scope) error {
out.Audience = in.Audience
out.ExpirationSeconds = (*int64)(unsafe.Pointer(in.ExpirationSeconds))
return nil
}
// Convert_v1beta1_TokenRequest_To_storage_TokenRequest is an autogenerated conversion function.
func Convert_v1beta1_TokenRequest_To_storage_TokenRequest(in *storagev1beta1.TokenRequest, out *storage.TokenRequest, s conversion.Scope) error {
return autoConvert_v1beta1_TokenRequest_To_storage_TokenRequest(in, out, s)
}
func autoConvert_storage_TokenRequest_To_v1beta1_TokenRequest(in *storage.TokenRequest, out *storagev1beta1.TokenRequest, s conversion.Scope) error {
out.Audience = in.Audience
out.ExpirationSeconds = (*int64)(unsafe.Pointer(in.ExpirationSeconds))
return nil
}
// Convert_storage_TokenRequest_To_v1beta1_TokenRequest is an autogenerated conversion function.
func Convert_storage_TokenRequest_To_v1beta1_TokenRequest(in *storage.TokenRequest, out *storagev1beta1.TokenRequest, s conversion.Scope) error {
return autoConvert_storage_TokenRequest_To_v1beta1_TokenRequest(in, out, s)
}
func autoConvert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1beta1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment(in *storagev1beta1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s)
}
func autoConvert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1beta1.VolumeAttachment, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment is an autogenerated conversion function.
func Convert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment(in *storage.VolumeAttachment, out *storagev1beta1.VolumeAttachment, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment(in, out, s)
}
func autoConvert_v1beta1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1beta1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storage.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_v1beta1_VolumeAttachment_To_storage_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1beta1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *storagev1beta1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s)
}
func autoConvert_storage_VolumeAttachmentList_To_v1beta1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1beta1.VolumeAttachmentList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]storagev1beta1.VolumeAttachment, len(*in))
for i := range *in {
if err := Convert_storage_VolumeAttachment_To_v1beta1_VolumeAttachment(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_storage_VolumeAttachmentList_To_v1beta1_VolumeAttachmentList is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentList_To_v1beta1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *storagev1beta1.VolumeAttachmentList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentList_To_v1beta1_VolumeAttachmentList(in, out, s)
}
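// Editorial note, not generated: VolumeAttachmentSource is not converted with
// an unsafe cast because InlineVolumeSpec points at a PersistentVolumeSpec,
// whose internal and v1 layouts differ, so the generator emits the
// field-by-field conversion through apiscorev1 seen below instead.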
func autoConvert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1beta1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(core.PersistentVolumeSpec)
if err := apiscorev1.Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *storagev1beta1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1beta1.VolumeAttachmentSource, s conversion.Scope) error {
out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName))
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(corev1.PersistentVolumeSpec)
if err := apiscorev1.Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(*in, *out, s); err != nil {
return err
}
} else {
out.InlineVolumeSpec = nil
}
return nil
}
// Convert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *storagev1beta1.VolumeAttachmentSource, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource(in, out, s)
}
func autoConvert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1beta1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_v1beta1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *storagev1beta1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1beta1.VolumeAttachmentSpec, s conversion.Scope) error {
out.Attacher = in.Attacher
if err := Convert_storage_VolumeAttachmentSource_To_v1beta1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.NodeName = in.NodeName
return nil
}
// Convert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *storagev1beta1.VolumeAttachmentSpec, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentSpec_To_v1beta1_VolumeAttachmentSpec(in, out, s)
}
func autoConvert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1beta1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *storagev1beta1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1beta1.VolumeAttachmentStatus, s conversion.Scope) error {
out.Attached = in.Attached
out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata))
out.AttachError = (*storagev1beta1.VolumeError)(unsafe.Pointer(in.AttachError))
out.DetachError = (*storagev1beta1.VolumeError)(unsafe.Pointer(in.DetachError))
return nil
}
// Convert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus is an autogenerated conversion function.
func Convert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *storagev1beta1.VolumeAttachmentStatus, s conversion.Scope) error {
return autoConvert_storage_VolumeAttachmentStatus_To_v1beta1_VolumeAttachmentStatus(in, out, s)
}
func autoConvert_v1beta1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1beta1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_v1beta1_VolumeAttributesClass_To_storage_VolumeAttributesClass is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in *storagev1beta1.VolumeAttributesClass, out *storage.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttributesClass_To_storage_VolumeAttributesClass(in, out, s)
}
func autoConvert_storage_VolumeAttributesClass_To_v1beta1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1beta1.VolumeAttributesClass, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.DriverName = in.DriverName
out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters))
return nil
}
// Convert_storage_VolumeAttributesClass_To_v1beta1_VolumeAttributesClass is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClass_To_v1beta1_VolumeAttributesClass(in *storage.VolumeAttributesClass, out *storagev1beta1.VolumeAttributesClass, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClass_To_v1beta1_VolumeAttributesClass(in, out, s)
}
func autoConvert_v1beta1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1beta1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storage.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1beta1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_v1beta1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in *storagev1beta1.VolumeAttributesClassList, out *storage.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeAttributesClassList_To_storage_VolumeAttributesClassList(in, out, s)
}
func autoConvert_storage_VolumeAttributesClassList_To_v1beta1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1beta1.VolumeAttributesClassList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]storagev1beta1.VolumeAttributesClass)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_storage_VolumeAttributesClassList_To_v1beta1_VolumeAttributesClassList is an autogenerated conversion function.
func Convert_storage_VolumeAttributesClassList_To_v1beta1_VolumeAttributesClassList(in *storage.VolumeAttributesClassList, out *storagev1beta1.VolumeAttributesClassList, s conversion.Scope) error {
return autoConvert_storage_VolumeAttributesClassList_To_v1beta1_VolumeAttributesClassList(in, out, s)
}
func autoConvert_v1beta1_VolumeError_To_storage_VolumeError(in *storagev1beta1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_v1beta1_VolumeError_To_storage_VolumeError is an autogenerated conversion function.
func Convert_v1beta1_VolumeError_To_storage_VolumeError(in *storagev1beta1.VolumeError, out *storage.VolumeError, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeError_To_storage_VolumeError(in, out, s)
}
func autoConvert_storage_VolumeError_To_v1beta1_VolumeError(in *storage.VolumeError, out *storagev1beta1.VolumeError, s conversion.Scope) error {
out.Time = in.Time
out.Message = in.Message
out.ErrorCode = (*int32)(unsafe.Pointer(in.ErrorCode))
return nil
}
// Convert_storage_VolumeError_To_v1beta1_VolumeError is an autogenerated conversion function.
func Convert_storage_VolumeError_To_v1beta1_VolumeError(in *storage.VolumeError, out *storagev1beta1.VolumeError, s conversion.Scope) error {
return autoConvert_storage_VolumeError_To_v1beta1_VolumeError(in, out, s)
}
func autoConvert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in *storagev1beta1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
out.Count = (*int32)(unsafe.Pointer(in.Count))
return nil
}
// Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources is an autogenerated conversion function.
func Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in *storagev1beta1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
return autoConvert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in, out, s)
}
func autoConvert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in *storage.VolumeNodeResources, out *storagev1beta1.VolumeNodeResources, s conversion.Scope) error {
out.Count = (*int32)(unsafe.Pointer(in.Count))
return nil
}
// Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources is an autogenerated conversion function.
func Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in *storage.VolumeNodeResources, out *storagev1beta1.VolumeNodeResources, s conversion.Scope) error {
return autoConvert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&storagev1beta1.CSIDriver{}, func(obj interface{}) { SetObjectDefaults_CSIDriver(obj.(*storagev1beta1.CSIDriver)) })
scheme.AddTypeDefaultingFunc(&storagev1beta1.CSIDriverList{}, func(obj interface{}) { SetObjectDefaults_CSIDriverList(obj.(*storagev1beta1.CSIDriverList)) })
scheme.AddTypeDefaultingFunc(&storagev1beta1.StorageClass{}, func(obj interface{}) { SetObjectDefaults_StorageClass(obj.(*storagev1beta1.StorageClass)) })
scheme.AddTypeDefaultingFunc(&storagev1beta1.StorageClassList{}, func(obj interface{}) { SetObjectDefaults_StorageClassList(obj.(*storagev1beta1.StorageClassList)) })
scheme.AddTypeDefaultingFunc(&storagev1beta1.VolumeAttachment{}, func(obj interface{}) { SetObjectDefaults_VolumeAttachment(obj.(*storagev1beta1.VolumeAttachment)) })
scheme.AddTypeDefaultingFunc(&storagev1beta1.VolumeAttachmentList{}, func(obj interface{}) {
SetObjectDefaults_VolumeAttachmentList(obj.(*storagev1beta1.VolumeAttachmentList))
})
return nil
}
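// exampleDefaulting is an illustrative sketch, not generated code: after
// RegisterDefaults has been applied to a scheme, scheme.Default invokes the
// covering defaulter registered for the object's concrete type.
func exampleDefaulting(scheme *runtime.Scheme) {
	sc := &storagev1beta1.StorageClass{}
	scheme.Default(sc) // calls SetObjectDefaults_StorageClass
}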
func SetObjectDefaults_CSIDriver(in *storagev1beta1.CSIDriver) {
SetDefaults_CSIDriver(in)
}
func SetObjectDefaults_CSIDriverList(in *storagev1beta1.CSIDriverList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_CSIDriver(a)
}
}
func SetObjectDefaults_StorageClass(in *storagev1beta1.StorageClass) {
SetDefaults_StorageClass(in)
}
func SetObjectDefaults_StorageClassList(in *storagev1beta1.StorageClassList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_StorageClass(a)
}
}
func SetObjectDefaults_VolumeAttachment(in *storagev1beta1.VolumeAttachment) {
if in.Spec.Source.InlineVolumeSpec != nil {
corev1.SetDefaults_ResourceList(&in.Spec.Source.InlineVolumeSpec.Capacity)
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath != nil {
corev1.SetDefaults_HostPathVolumeSource(in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.HostPath)
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RBDPool = "rbd"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.RadosUser = "admin"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.FSType = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO != nil {
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType == "" {
in.Spec.Source.InlineVolumeSpec.PersistentVolumeSource.ScaleIO.FSType = "xfs"
}
}
}
}
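// exampleInlineSpecDefaulting is an illustrative sketch, not generated code:
// defaulting a VolumeAttachment whose source carries an inline RBD volume
// spec. The attacher name and monitor address are hypothetical placeholders.
func exampleInlineSpecDefaulting() {
	va := &storagev1beta1.VolumeAttachment{
		Spec: storagev1beta1.VolumeAttachmentSpec{
			Attacher: "example.com/csi-driver",
			Source: storagev1beta1.VolumeAttachmentSource{
				InlineVolumeSpec: &v1.PersistentVolumeSpec{
					PersistentVolumeSource: v1.PersistentVolumeSource{
						RBD: &v1.RBDPersistentVolumeSource{
							CephMonitors: []string{"10.0.0.1:6789"},
							RBDImage:     "image",
						},
					},
				},
			},
		},
	}
	SetObjectDefaults_VolumeAttachment(va)
	// Now RBDPool == "rbd", RadosUser == "admin", and
	// Keyring == "/etc/ceph/keyring" on the inline RBD source.
}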
func SetObjectDefaults_VolumeAttachmentList(in *storagev1beta1.VolumeAttachmentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_VolumeAttachment(a)
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package storage
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriver) DeepCopyInto(out *CSIDriver) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver.
func (in *CSIDriver) DeepCopy() *CSIDriver {
if in == nil {
return nil
}
out := new(CSIDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriver) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
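// exampleDeepCopy is an illustrative sketch, not generated code: DeepCopy
// yields a fully independent object, so mutating the copy never aliases the
// original's ObjectMeta or Spec.
func exampleDeepCopy(in *CSIDriver) *CSIDriver {
	clone := in.DeepCopy()
	clone.Name = "clone" // leaves in.Name untouched
	return clone
}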
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSIDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList.
func (in *CSIDriverList) DeepCopy() *CSIDriverList {
if in == nil {
return nil
}
out := new(CSIDriverList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIDriverList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
*out = *in
if in.AttachRequired != nil {
in, out := &in.AttachRequired, &out.AttachRequired
*out = new(bool)
**out = **in
}
if in.FSGroupPolicy != nil {
in, out := &in.FSGroupPolicy, &out.FSGroupPolicy
*out = new(FSGroupPolicy)
**out = **in
}
if in.PodInfoOnMount != nil {
in, out := &in.PodInfoOnMount, &out.PodInfoOnMount
*out = new(bool)
**out = **in
}
if in.VolumeLifecycleModes != nil {
in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes
*out = make([]VolumeLifecycleMode, len(*in))
copy(*out, *in)
}
if in.StorageCapacity != nil {
in, out := &in.StorageCapacity, &out.StorageCapacity
*out = new(bool)
**out = **in
}
if in.TokenRequests != nil {
in, out := &in.TokenRequests, &out.TokenRequests
*out = make([]TokenRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RequiresRepublish != nil {
in, out := &in.RequiresRepublish, &out.RequiresRepublish
*out = new(bool)
**out = **in
}
if in.SELinuxMount != nil {
in, out := &in.SELinuxMount, &out.SELinuxMount
*out = new(bool)
**out = **in
}
if in.NodeAllocatableUpdatePeriodSeconds != nil {
in, out := &in.NodeAllocatableUpdatePeriodSeconds, &out.NodeAllocatableUpdatePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec.
func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec {
if in == nil {
return nil
}
out := new(CSIDriverSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINode) DeepCopyInto(out *CSINode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode.
func (in *CSINode) DeepCopy() *CSINode {
if in == nil {
return nil
}
out := new(CSINode)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) {
*out = *in
if in.TopologyKeys != nil {
in, out := &in.TopologyKeys, &out.TopologyKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Allocatable != nil {
in, out := &in.Allocatable, &out.Allocatable
*out = new(VolumeNodeResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver.
func (in *CSINodeDriver) DeepCopy() *CSINodeDriver {
if in == nil {
return nil
}
out := new(CSINodeDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeList) DeepCopyInto(out *CSINodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSINode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList.
func (in *CSINodeList) DeepCopy() *CSINodeList {
if in == nil {
return nil
}
out := new(CSINodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSINodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) {
*out = *in
if in.Drivers != nil {
in, out := &in.Drivers, &out.Drivers
*out = make([]CSINodeDriver, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec.
func (in *CSINodeSpec) DeepCopy() *CSINodeSpec {
if in == nil {
return nil
}
out := new(CSINodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIStorageCapacity) DeepCopyInto(out *CSIStorageCapacity) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.NodeTopology != nil {
in, out := &in.NodeTopology, &out.NodeTopology
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
x := (*in).DeepCopy()
*out = &x
}
if in.MaximumVolumeSize != nil {
in, out := &in.MaximumVolumeSize, &out.MaximumVolumeSize
x := (*in).DeepCopy()
*out = &x
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIStorageCapacity.
func (in *CSIStorageCapacity) DeepCopy() *CSIStorageCapacity {
if in == nil {
return nil
}
out := new(CSIStorageCapacity)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIStorageCapacity) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSIStorageCapacityList) DeepCopyInto(out *CSIStorageCapacityList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CSIStorageCapacity, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIStorageCapacityList.
func (in *CSIStorageCapacityList) DeepCopy() *CSIStorageCapacityList {
if in == nil {
return nil
}
out := new(CSIStorageCapacityList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CSIStorageCapacityList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClass) DeepCopyInto(out *StorageClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ReclaimPolicy != nil {
in, out := &in.ReclaimPolicy, &out.ReclaimPolicy
*out = new(core.PersistentVolumeReclaimPolicy)
**out = **in
}
if in.MountOptions != nil {
in, out := &in.MountOptions, &out.MountOptions
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowVolumeExpansion != nil {
in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion
*out = new(bool)
**out = **in
}
if in.VolumeBindingMode != nil {
in, out := &in.VolumeBindingMode, &out.VolumeBindingMode
*out = new(VolumeBindingMode)
**out = **in
}
if in.AllowedTopologies != nil {
in, out := &in.AllowedTopologies, &out.AllowedTopologies
*out = make([]core.TopologySelectorTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass.
func (in *StorageClass) DeepCopy() *StorageClass {
if in == nil {
return nil
}
out := new(StorageClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClassList) DeepCopyInto(out *StorageClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StorageClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList.
func (in *StorageClassList) DeepCopy() *StorageClassList {
if in == nil {
return nil
}
out := new(StorageClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StorageClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
*out = *in
if in.ExpirationSeconds != nil {
in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest.
func (in *TokenRequest) DeepCopy() *TokenRequest {
if in == nil {
return nil
}
out := new(TokenRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment.
func (in *VolumeAttachment) DeepCopy() *VolumeAttachment {
if in == nil {
return nil
}
out := new(VolumeAttachment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttachment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VolumeAttachment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList.
func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList {
if in == nil {
return nil
}
out := new(VolumeAttachmentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) {
*out = *in
if in.PersistentVolumeName != nil {
in, out := &in.PersistentVolumeName, &out.PersistentVolumeName
*out = new(string)
**out = **in
}
if in.InlineVolumeSpec != nil {
in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec
*out = new(core.PersistentVolumeSpec)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource.
func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource {
if in == nil {
return nil
}
out := new(VolumeAttachmentSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec.
func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec {
if in == nil {
return nil
}
out := new(VolumeAttachmentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) {
*out = *in
if in.AttachmentMetadata != nil {
in, out := &in.AttachmentMetadata, &out.AttachmentMetadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AttachError != nil {
in, out := &in.AttachError, &out.AttachError
*out = new(VolumeError)
(*in).DeepCopyInto(*out)
}
if in.DetachError != nil {
in, out := &in.DetachError, &out.DetachError
*out = new(VolumeError)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus.
func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
if in == nil {
return nil
}
out := new(VolumeAttachmentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass.
func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass {
if in == nil {
return nil
}
out := new(VolumeAttributesClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VolumeAttributesClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList.
func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList {
if in == nil {
return nil
}
out := new(VolumeAttributesClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeError) DeepCopyInto(out *VolumeError) {
*out = *in
in.Time.DeepCopyInto(&out.Time)
if in.ErrorCode != nil {
in, out := &in.ErrorCode, &out.ErrorCode
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError.
func (in *VolumeError) DeepCopy() *VolumeError {
if in == nil {
return nil
}
out := new(VolumeError)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) {
*out = *in
if in.Count != nil {
in, out := &in.Count, &out.Count
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources.
func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources {
if in == nil {
return nil
}
out := new(VolumeNodeResources)
in.DeepCopyInto(out)
return out
}
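// Illustrative sketch (not part of the generated source; field values are
// hypothetical): deep copies make it safe to mutate an object obtained from a
// shared cache or informer without affecting other readers.
//
//	original := &StorageClass{Parameters: map[string]string{"type": "gp2"}}
//	clone := original.DeepCopy()
//	clone.Parameters["type"] = "gp3" // original.Parameters is unchanged
//
// DeepCopyObject returns the copy as a runtime.Object, the interface that
// generic machinery such as codecs and caches operates on.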
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package capabilities
import (
"sync"
)
// Capabilities defines the set of capabilities available within the system.
// For now these are global. Eventually they may be per-user
type Capabilities struct {
AllowPrivileged bool
// Pod sources from which to allow privileged capabilities like host networking, sharing the host
// IPC namespace, and sharing the host PID namespace.
PrivilegedSources PrivilegedSources
// PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach)
PerConnectionBandwidthLimitBytesPerSec int64
}
// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types
// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace.
type PrivilegedSources struct {
// List of pod sources for which using host network is allowed.
HostNetworkSources []string
// List of pod sources for which using host pid namespace is allowed.
HostPIDSources []string
// List of pod sources for which using host ipc is allowed.
HostIPCSources []string
}
var capInstance struct {
once sync.Once
lock sync.Mutex
capabilities *Capabilities
}
// Initialize the capability set. This can only be done once per binary; subsequent calls are ignored.
func Initialize(c Capabilities) {
// Only do this once
capInstance.once.Do(func() {
capInstance.capabilities = &c
})
}
// Setup the capability set. It wraps Initialize to improve usability.
func Setup(allowPrivileged bool, perConnectionBytesPerSec int64) {
Initialize(Capabilities{
AllowPrivileged: allowPrivileged,
PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
})
}
// ResetForTest resets the capabilities to their uninitialized state for testing purposes.
// This function should only be called from tests.
func ResetForTest() {
capInstance.lock.Lock()
defer capInstance.lock.Unlock()
capInstance.capabilities = nil
capInstance.once = sync.Once{}
}
// Get returns a read-only copy of the system capabilities.
func Get() Capabilities {
capInstance.lock.Lock()
defer capInstance.lock.Unlock()
// This nil check lazily applies the defaults without clobbering a capability set that was already installed via Initialize or Setup
if capInstance.capabilities == nil {
Initialize(Capabilities{
AllowPrivileged: false,
PrivilegedSources: PrivilegedSources{
HostNetworkSources: []string{},
HostPIDSources: []string{},
HostIPCSources: []string{},
},
})
}
return *capInstance.capabilities
}
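// Illustrative sketch (assumes a caller importing this package as
// "capabilities"): the capability set is installed once at startup and read
// concurrently afterwards.
//
//	capabilities.Setup(true /* allowPrivileged */, 0 /* no bandwidth limit */)
//	capabilities.Setup(false, 0) // ignored: Initialize only runs once
//	if capabilities.Get().AllowPrivileged {
//		// privileged pods are permitted
//	}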
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"sigs.k8s.io/randfill"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// Funcs returns the fuzzer functions for the kube-controller-manager APIs.
func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *kubectrlmgrconfig.KubeControllerManagerConfiguration, c randfill.Continue) {
c.FillNoCustom(obj)
obj.Generic.Address = fmt.Sprintf("%d.%d.%d.%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256))
obj.Generic.ClientConnection.ContentType = fmt.Sprintf("%s/%s.%s.%s", c.String(0), c.String(0), c.String(0), c.String(0))
if obj.Generic.LeaderElection.ResourceLock == "" {
obj.Generic.LeaderElection.ResourceLock = "leases"
}
obj.Generic.Controllers = []string{c.String(0)}
if obj.KubeCloudShared.ClusterName == "" {
obj.KubeCloudShared.ClusterName = "kubernetes"
}
obj.CSRSigningController.ClusterSigningCertFile = fmt.Sprintf("/%s", c.String(0))
obj.CSRSigningController.ClusterSigningKeyFile = fmt.Sprintf("/%s", c.String(0))
obj.PersistentVolumeBinderController.VolumeConfiguration.FlexVolumePluginDir = fmt.Sprintf("/%s", c.String(0))
obj.TTLAfterFinishedController.ConcurrentTTLSyncs = c.Int31()
},
}
}
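// Illustrative sketch (hypothetical wiring, assuming the helpers in
// k8s.io/apimachinery/pkg/api/apitesting/fuzzer): these override functions are
// typically handed to FuzzerFor together with a codec factory, producing a
// filler that generates valid-looking configuration objects for round-trip
// serialization tests.
//
//	f := fuzzer.FuzzerFor(Funcs, rand.NewSource(1), codecs)
//	obj := &kubectrlmgrconfig.KubeControllerManagerConfiguration{}
//	f.Fill(obj) // deterministic pseudo-random, but well-formed, field values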
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubecontrollermanager.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeControllerManagerConfiguration{},
)
return nil
}
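// Illustrative sketch: a fresh scheme picks up the internal type via the
// exported AddToScheme (error handling shown for completeness).
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}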
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/controller/apis/config"
"k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1"
)
var (
// Scheme defines methods for serializing and deserializing API objects.
Scheme = runtime.NewScheme()
// Codecs provides methods for retrieving codecs and serializers for specific
// versions and content types.
Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
AddToScheme(Scheme)
}
// AddToScheme registers the API group and adds types to a scheme
func AddToScheme(scheme *runtime.Scheme) {
utilruntime.Must(config.AddToScheme(scheme))
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
}
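// Illustrative sketch (assumes data holds a KubeControllerManagerConfiguration
// serialized in a registered version): the codec factory above decodes it
// straight into the internal representation.
//
//	obj, gvk, err := Codecs.UniversalDecoder().Decode(data, nil, nil)
//	_ = gvk // the group/version/kind the bytes were recognized as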
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kruntime "k8s.io/apimachinery/pkg/runtime"
serviceconfigv1alpha1 "k8s.io/cloud-provider/controllers/service/config/v1alpha1"
cmconfigv1alpha1 "k8s.io/controller-manager/config/v1alpha1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
csrsigningconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/certificates/signer/config/v1alpha1"
cronjobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/cronjob/config/v1alpha1"
daemonconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/daemon/config/v1alpha1"
deploymentconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/deployment/config/v1alpha1"
endpointconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpoint/config/v1alpha1"
endpointsliceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslice/config/v1alpha1"
endpointslicemirroringconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config/v1alpha1"
garbagecollectorconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/garbagecollector/config/v1alpha1"
jobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/job/config/v1alpha1"
namespaceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/namespace/config/v1alpha1"
nodeipamconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodeipam/config/v1alpha1"
nodelifecycleconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodelifecycle/config/v1alpha1"
poautosclerconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podautoscaler/config/v1alpha1"
podgcconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podgc/config/v1alpha1"
replicasetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replicaset/config/v1alpha1"
replicationconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replication/config/v1alpha1"
resourcequotaconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/resourcequota/config/v1alpha1"
serviceaccountconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/serviceaccount/config/v1alpha1"
statefulsetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/statefulset/config/v1alpha1"
ttlafterfinishedconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config/v1alpha1"
validatingadmissionpolicystatusv1alpha1 "k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus/config/v1alpha1"
attachdetachconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config/v1alpha1"
ephemeralvolumeconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config/v1alpha1"
persistentvolumeconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1"
)
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_KubeControllerManagerConfiguration(obj *kubectrlmgrconfigv1alpha1.KubeControllerManagerConfiguration) {
// These defaults override the recommended defaults from the componentbaseconfigv1alpha1 package, which are applied automatically
// These client-connection defaults are specific to the kube-controller-manager
if obj.Generic.ClientConnection.QPS == 0.0 {
obj.Generic.ClientConnection.QPS = 20.0
}
if obj.Generic.ClientConnection.Burst == 0 {
obj.Generic.ClientConnection.Burst = 30
}
// Use the default RecommendedDefaultGenericControllerManagerConfiguration options
cmconfigv1alpha1.RecommendedDefaultGenericControllerManagerConfiguration(&obj.Generic)
// Use the default RecommendedDefaultAttachDetachControllerConfiguration options
attachdetachconfigv1alpha1.RecommendedDefaultAttachDetachControllerConfiguration(&obj.AttachDetachController)
// Use the default RecommendedDefaultCSRSigningControllerConfiguration options
csrsigningconfigv1alpha1.RecommendedDefaultCSRSigningControllerConfiguration(&obj.CSRSigningController)
// Use the default RecommendedDefaultDaemonSetControllerConfiguration options
daemonconfigv1alpha1.RecommendedDefaultDaemonSetControllerConfiguration(&obj.DaemonSetController)
// Use the default RecommendedDefaultDeploymentControllerConfiguration options
deploymentconfigv1alpha1.RecommendedDefaultDeploymentControllerConfiguration(&obj.DeploymentController)
// Use the default RecommendedDefaultStatefulSetControllerConfiguration options
statefulsetconfigv1alpha1.RecommendedDefaultStatefulSetControllerConfiguration(&obj.StatefulSetController)
// Use the default RecommendedDefaultEndpointControllerConfiguration options
endpointconfigv1alpha1.RecommendedDefaultEndpointControllerConfiguration(&obj.EndpointController)
// Use the default RecommendedDefaultEndpointSliceControllerConfiguration options
endpointsliceconfigv1alpha1.RecommendedDefaultEndpointSliceControllerConfiguration(&obj.EndpointSliceController)
// Use the default RecommendedDefaultEndpointSliceMirroringControllerConfiguration options
endpointslicemirroringconfigv1alpha1.RecommendedDefaultEndpointSliceMirroringControllerConfiguration(&obj.EndpointSliceMirroringController)
// Use the default RecommendedDefaultEphemeralVolumeControllerConfiguration options
ephemeralvolumeconfigv1alpha1.RecommendedDefaultEphemeralVolumeControllerConfiguration(&obj.EphemeralVolumeController)
// Use the default RecommendedDefaultGarbageCollectorControllerConfiguration options
garbagecollectorconfigv1alpha1.RecommendedDefaultGarbageCollectorControllerConfiguration(&obj.GarbageCollectorController)
// Use the default RecommendedDefaultJobControllerConfiguration options
jobconfigv1alpha1.RecommendedDefaultJobControllerConfiguration(&obj.JobController)
// Use the default RecommendedDefaultCronJobControllerConfiguration options
cronjobconfigv1alpha1.RecommendedDefaultCronJobControllerConfiguration(&obj.CronJobController)
// Use the default RecommendedDefaultNamespaceControllerConfiguration options
namespaceconfigv1alpha1.RecommendedDefaultNamespaceControllerConfiguration(&obj.NamespaceController)
// Use the default RecommendedDefaultNodeIPAMControllerConfiguration options
nodeipamconfigv1alpha1.RecommendedDefaultNodeIPAMControllerConfiguration(&obj.NodeIPAMController)
// Use the default RecommendedDefaultHPAControllerConfiguration options
poautosclerconfigv1alpha1.RecommendedDefaultHPAControllerConfiguration(&obj.HPAController)
// Use the default RecommendedDefaultNodeLifecycleControllerConfiguration options
nodelifecycleconfigv1alpha1.RecommendedDefaultNodeLifecycleControllerConfiguration(&obj.NodeLifecycleController)
// Use the default RecommendedDefaultPodGCControllerConfiguration options
podgcconfigv1alpha1.RecommendedDefaultPodGCControllerConfiguration(&obj.PodGCController)
// Use the default RecommendedDefaultReplicaSetControllerConfiguration options
replicasetconfigv1alpha1.RecommendedDefaultReplicaSetControllerConfiguration(&obj.ReplicaSetController)
// Use the default RecommendedDefaultReplicationControllerConfiguration options
replicationconfigv1alpha1.RecommendedDefaultReplicationControllerConfiguration(&obj.ReplicationController)
// Use the default RecommendedDefaultResourceQuotaControllerConfiguration options
resourcequotaconfigv1alpha1.RecommendedDefaultResourceQuotaControllerConfiguration(&obj.ResourceQuotaController)
// Use the default RecommendedDefaultServiceControllerConfiguration options
serviceconfigv1alpha1.RecommendedDefaultServiceControllerConfiguration(&obj.ServiceController)
// Use the default RecommendedDefaultLegacySATokenCleanerConfiguration options
serviceaccountconfigv1alpha1.RecommendedDefaultLegacySATokenCleanerConfiguration(&obj.LegacySATokenCleaner)
// Use the default RecommendedDefaultSAControllerConfiguration options
serviceaccountconfigv1alpha1.RecommendedDefaultSAControllerConfiguration(&obj.SAController)
// Use the default RecommendedDefaultTTLAfterFinishedControllerConfiguration options
ttlafterfinishedconfigv1alpha1.RecommendedDefaultTTLAfterFinishedControllerConfiguration(&obj.TTLAfterFinishedController)
// Use the default RecommendedDefaultPersistentVolumeBinderControllerConfiguration options
persistentvolumeconfigv1alpha1.RecommendedDefaultPersistentVolumeBinderControllerConfiguration(&obj.PersistentVolumeBinderController)
// Use the default RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration options
validatingadmissionpolicystatusv1alpha1.RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration(&obj.ValidatingAdmissionPolicyStatusController)
}
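// Illustrative sketch: the client-connection defaults above apply only to zero
// values, so explicit settings survive defaulting.
//
//	obj := &kubectrlmgrconfigv1alpha1.KubeControllerManagerConfiguration{}
//	SetDefaults_KubeControllerManagerConfiguration(obj)
//	// obj.Generic.ClientConnection.QPS == 20.0, Burst == 30
//	obj.Generic.ClientConnection.QPS = 50.0
//	SetDefaults_KubeControllerManagerConfiguration(obj)
//	// QPS stays 50.0: only the zero value is replaced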
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// GroupName is the group name used in this package
const GroupName = "kubecontrollermanager.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &kubectrlmgrconfigv1alpha1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
cloudproviderconfigv1alpha1 "k8s.io/cloud-provider/config/v1alpha1"
serviceconfigv1alpha1 "k8s.io/cloud-provider/controllers/service/config/v1alpha1"
controllermanagerconfigv1alpha1 "k8s.io/controller-manager/config/v1alpha1"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/apis/config"
signerconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/certificates/signer/config/v1alpha1"
cronjobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/cronjob/config/v1alpha1"
daemonconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/daemon/config/v1alpha1"
deploymentconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/deployment/config/v1alpha1"
endpointconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpoint/config/v1alpha1"
endpointsliceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslice/config/v1alpha1"
endpointslicemirroringconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config/v1alpha1"
garbagecollectorconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/garbagecollector/config/v1alpha1"
jobconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/job/config/v1alpha1"
namespaceconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/namespace/config/v1alpha1"
nodeipamconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodeipam/config/v1alpha1"
nodelifecycleconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/nodelifecycle/config/v1alpha1"
podautoscalerconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podautoscaler/config/v1alpha1"
podgcconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/podgc/config/v1alpha1"
replicasetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replicaset/config/v1alpha1"
replicationconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/replication/config/v1alpha1"
resourcequotaconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/resourcequota/config/v1alpha1"
serviceaccountconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/serviceaccount/config/v1alpha1"
statefulsetconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/statefulset/config/v1alpha1"
ttlafterfinishedconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config/v1alpha1"
validatingadmissionpolicystatusconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus/config/v1alpha1"
attachdetachconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config/v1alpha1"
ephemeralconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config/v1alpha1"
persistentvolumeconfigv1alpha1 "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.DeprecatedControllerConfiguration)(nil), (*config.DeprecatedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration(a.(*configv1alpha1.DeprecatedControllerConfiguration), b.(*config.DeprecatedControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.DeprecatedControllerConfiguration)(nil), (*configv1alpha1.DeprecatedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration(a.(*config.DeprecatedControllerConfiguration), b.(*configv1alpha1.DeprecatedControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.KubeControllerManagerConfiguration)(nil), (*config.KubeControllerManagerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeControllerManagerConfiguration_To_config_KubeControllerManagerConfiguration(a.(*configv1alpha1.KubeControllerManagerConfiguration), b.(*config.KubeControllerManagerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeControllerManagerConfiguration)(nil), (*configv1alpha1.KubeControllerManagerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeControllerManagerConfiguration_To_v1alpha1_KubeControllerManagerConfiguration(a.(*config.KubeControllerManagerConfiguration), b.(*configv1alpha1.KubeControllerManagerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration(in *configv1alpha1.DeprecatedControllerConfiguration, out *config.DeprecatedControllerConfiguration, s conversion.Scope) error {
return nil
}
// Convert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration(in *configv1alpha1.DeprecatedControllerConfiguration, out *config.DeprecatedControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration(in, out, s)
}
func autoConvert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration(in *config.DeprecatedControllerConfiguration, out *configv1alpha1.DeprecatedControllerConfiguration, s conversion.Scope) error {
return nil
}
// Convert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration is an autogenerated conversion function.
func Convert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration(in *config.DeprecatedControllerConfiguration, out *configv1alpha1.DeprecatedControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration(in, out, s)
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_KubeControllerManagerConfiguration_To_config_KubeControllerManagerConfiguration(in *configv1alpha1.KubeControllerManagerConfiguration, out *config.KubeControllerManagerConfiguration, s conversion.Scope) error {
if err := controllermanagerconfigv1alpha1.Convert_v1alpha1_GenericControllerManagerConfiguration_To_config_GenericControllerManagerConfiguration(&in.Generic, &out.Generic, s); err != nil {
return err
}
if err := cloudproviderconfigv1alpha1.Convert_v1alpha1_KubeCloudSharedConfiguration_To_config_KubeCloudSharedConfiguration(&in.KubeCloudShared, &out.KubeCloudShared, s); err != nil {
return err
}
if err := attachdetachconfigv1alpha1.Convert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration(&in.AttachDetachController, &out.AttachDetachController, s); err != nil {
return err
}
if err := signerconfigv1alpha1.Convert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(&in.CSRSigningController, &out.CSRSigningController, s); err != nil {
return err
}
if err := daemonconfigv1alpha1.Convert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration(&in.DaemonSetController, &out.DaemonSetController, s); err != nil {
return err
}
if err := deploymentconfigv1alpha1.Convert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration(&in.DeploymentController, &out.DeploymentController, s); err != nil {
return err
}
if err := statefulsetconfigv1alpha1.Convert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration(&in.StatefulSetController, &out.StatefulSetController, s); err != nil {
return err
}
if err := Convert_v1alpha1_DeprecatedControllerConfiguration_To_config_DeprecatedControllerConfiguration(&in.DeprecatedController, &out.DeprecatedController, s); err != nil {
return err
}
if err := endpointconfigv1alpha1.Convert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration(&in.EndpointController, &out.EndpointController, s); err != nil {
return err
}
if err := endpointsliceconfigv1alpha1.Convert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration(&in.EndpointSliceController, &out.EndpointSliceController, s); err != nil {
return err
}
if err := endpointslicemirroringconfigv1alpha1.Convert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration(&in.EndpointSliceMirroringController, &out.EndpointSliceMirroringController, s); err != nil {
return err
}
if err := ephemeralconfigv1alpha1.Convert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration(&in.EphemeralVolumeController, &out.EphemeralVolumeController, s); err != nil {
return err
}
if err := garbagecollectorconfigv1alpha1.Convert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration(&in.GarbageCollectorController, &out.GarbageCollectorController, s); err != nil {
return err
}
if err := podautoscalerconfigv1alpha1.Convert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(&in.HPAController, &out.HPAController, s); err != nil {
return err
}
if err := jobconfigv1alpha1.Convert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration(&in.JobController, &out.JobController, s); err != nil {
return err
}
if err := cronjobconfigv1alpha1.Convert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration(&in.CronJobController, &out.CronJobController, s); err != nil {
return err
}
if err := serviceaccountconfigv1alpha1.Convert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration(&in.LegacySATokenCleaner, &out.LegacySATokenCleaner, s); err != nil {
return err
}
if err := namespaceconfigv1alpha1.Convert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration(&in.NamespaceController, &out.NamespaceController, s); err != nil {
return err
}
if err := nodeipamconfigv1alpha1.Convert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(&in.NodeIPAMController, &out.NodeIPAMController, s); err != nil {
return err
}
if err := nodelifecycleconfigv1alpha1.Convert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(&in.NodeLifecycleController, &out.NodeLifecycleController, s); err != nil {
return err
}
if err := persistentvolumeconfigv1alpha1.Convert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration(&in.PersistentVolumeBinderController, &out.PersistentVolumeBinderController, s); err != nil {
return err
}
if err := podgcconfigv1alpha1.Convert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration(&in.PodGCController, &out.PodGCController, s); err != nil {
return err
}
if err := replicasetconfigv1alpha1.Convert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration(&in.ReplicaSetController, &out.ReplicaSetController, s); err != nil {
return err
}
if err := replicationconfigv1alpha1.Convert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration(&in.ReplicationController, &out.ReplicationController, s); err != nil {
return err
}
if err := resourcequotaconfigv1alpha1.Convert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration(&in.ResourceQuotaController, &out.ResourceQuotaController, s); err != nil {
return err
}
if err := serviceaccountconfigv1alpha1.Convert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration(&in.SAController, &out.SAController, s); err != nil {
return err
}
if err := serviceconfigv1alpha1.Convert_v1alpha1_ServiceControllerConfiguration_To_config_ServiceControllerConfiguration(&in.ServiceController, &out.ServiceController, s); err != nil {
return err
}
if err := ttlafterfinishedconfigv1alpha1.Convert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration(&in.TTLAfterFinishedController, &out.TTLAfterFinishedController, s); err != nil {
return err
}
if err := validatingadmissionpolicystatusconfigv1alpha1.Convert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration(&in.ValidatingAdmissionPolicyStatusController, &out.ValidatingAdmissionPolicyStatusController, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_KubeControllerManagerConfiguration_To_config_KubeControllerManagerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeControllerManagerConfiguration_To_config_KubeControllerManagerConfiguration(in *configv1alpha1.KubeControllerManagerConfiguration, out *config.KubeControllerManagerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeControllerManagerConfiguration_To_config_KubeControllerManagerConfiguration(in, out, s)
}
func autoConvert_config_KubeControllerManagerConfiguration_To_v1alpha1_KubeControllerManagerConfiguration(in *config.KubeControllerManagerConfiguration, out *configv1alpha1.KubeControllerManagerConfiguration, s conversion.Scope) error {
if err := controllermanagerconfigv1alpha1.Convert_config_GenericControllerManagerConfiguration_To_v1alpha1_GenericControllerManagerConfiguration(&in.Generic, &out.Generic, s); err != nil {
return err
}
if err := cloudproviderconfigv1alpha1.Convert_config_KubeCloudSharedConfiguration_To_v1alpha1_KubeCloudSharedConfiguration(&in.KubeCloudShared, &out.KubeCloudShared, s); err != nil {
return err
}
if err := attachdetachconfigv1alpha1.Convert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration(&in.AttachDetachController, &out.AttachDetachController, s); err != nil {
return err
}
if err := signerconfigv1alpha1.Convert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(&in.CSRSigningController, &out.CSRSigningController, s); err != nil {
return err
}
if err := daemonconfigv1alpha1.Convert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration(&in.DaemonSetController, &out.DaemonSetController, s); err != nil {
return err
}
if err := deploymentconfigv1alpha1.Convert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration(&in.DeploymentController, &out.DeploymentController, s); err != nil {
return err
}
if err := statefulsetconfigv1alpha1.Convert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration(&in.StatefulSetController, &out.StatefulSetController, s); err != nil {
return err
}
if err := Convert_config_DeprecatedControllerConfiguration_To_v1alpha1_DeprecatedControllerConfiguration(&in.DeprecatedController, &out.DeprecatedController, s); err != nil {
return err
}
if err := endpointconfigv1alpha1.Convert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration(&in.EndpointController, &out.EndpointController, s); err != nil {
return err
}
if err := endpointsliceconfigv1alpha1.Convert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration(&in.EndpointSliceController, &out.EndpointSliceController, s); err != nil {
return err
}
if err := endpointslicemirroringconfigv1alpha1.Convert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration(&in.EndpointSliceMirroringController, &out.EndpointSliceMirroringController, s); err != nil {
return err
}
if err := ephemeralconfigv1alpha1.Convert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration(&in.EphemeralVolumeController, &out.EphemeralVolumeController, s); err != nil {
return err
}
if err := garbagecollectorconfigv1alpha1.Convert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration(&in.GarbageCollectorController, &out.GarbageCollectorController, s); err != nil {
return err
}
if err := podautoscalerconfigv1alpha1.Convert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(&in.HPAController, &out.HPAController, s); err != nil {
return err
}
if err := jobconfigv1alpha1.Convert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration(&in.JobController, &out.JobController, s); err != nil {
return err
}
if err := cronjobconfigv1alpha1.Convert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration(&in.CronJobController, &out.CronJobController, s); err != nil {
return err
}
if err := serviceaccountconfigv1alpha1.Convert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration(&in.LegacySATokenCleaner, &out.LegacySATokenCleaner, s); err != nil {
return err
}
if err := namespaceconfigv1alpha1.Convert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration(&in.NamespaceController, &out.NamespaceController, s); err != nil {
return err
}
if err := nodeipamconfigv1alpha1.Convert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration(&in.NodeIPAMController, &out.NodeIPAMController, s); err != nil {
return err
}
if err := nodelifecycleconfigv1alpha1.Convert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(&in.NodeLifecycleController, &out.NodeLifecycleController, s); err != nil {
return err
}
if err := persistentvolumeconfigv1alpha1.Convert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration(&in.PersistentVolumeBinderController, &out.PersistentVolumeBinderController, s); err != nil {
return err
}
if err := podgcconfigv1alpha1.Convert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration(&in.PodGCController, &out.PodGCController, s); err != nil {
return err
}
if err := replicasetconfigv1alpha1.Convert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration(&in.ReplicaSetController, &out.ReplicaSetController, s); err != nil {
return err
}
if err := replicationconfigv1alpha1.Convert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration(&in.ReplicationController, &out.ReplicationController, s); err != nil {
return err
}
if err := resourcequotaconfigv1alpha1.Convert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration(&in.ResourceQuotaController, &out.ResourceQuotaController, s); err != nil {
return err
}
if err := serviceaccountconfigv1alpha1.Convert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration(&in.SAController, &out.SAController, s); err != nil {
return err
}
if err := serviceconfigv1alpha1.Convert_config_ServiceControllerConfiguration_To_v1alpha1_ServiceControllerConfiguration(&in.ServiceController, &out.ServiceController, s); err != nil {
return err
}
if err := ttlafterfinishedconfigv1alpha1.Convert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration(&in.TTLAfterFinishedController, &out.TTLAfterFinishedController, s); err != nil {
return err
}
if err := validatingadmissionpolicystatusconfigv1alpha1.Convert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration(&in.ValidatingAdmissionPolicyStatusController, &out.ValidatingAdmissionPolicyStatusController, s); err != nil {
return err
}
return nil
}
// Convert_config_KubeControllerManagerConfiguration_To_v1alpha1_KubeControllerManagerConfiguration is an autogenerated conversion function.
func Convert_config_KubeControllerManagerConfiguration_To_v1alpha1_KubeControllerManagerConfiguration(in *config.KubeControllerManagerConfiguration, out *configv1alpha1.KubeControllerManagerConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeControllerManagerConfiguration_To_v1alpha1_KubeControllerManagerConfiguration(in, out, s)
}
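// Illustrative sketch (assumes a scheme on which both the internal and the
// v1alpha1 AddToScheme functions have been called): with the conversions
// registered, runtime.Scheme.Convert translates between representations.
//
//	internal := &config.KubeControllerManagerConfiguration{}
//	versioned := &configv1alpha1.KubeControllerManagerConfiguration{}
//	if err := scheme.Convert(versioned, internal, nil); err != nil {
//		// conversion failed
//	}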
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
cloudproviderconfigv1alpha1 "k8s.io/cloud-provider/config/v1alpha1"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&configv1alpha1.KubeControllerManagerConfiguration{}, func(obj interface{}) {
SetObjectDefaults_KubeControllerManagerConfiguration(obj.(*configv1alpha1.KubeControllerManagerConfiguration))
})
return nil
}
func SetObjectDefaults_KubeControllerManagerConfiguration(in *configv1alpha1.KubeControllerManagerConfiguration) {
SetDefaults_KubeControllerManagerConfiguration(in)
cloudproviderconfigv1alpha1.SetDefaults_KubeCloudSharedConfiguration(&in.KubeCloudShared)
}
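
// Illustrative usage sketch, not part of the generated file: a consumer
// typically registers these defaulters on a scheme and then relies on
// scheme.Default to apply them to decoded objects, e.g.:
//
//	scheme := runtime.NewScheme()
//	_ = RegisterDefaults(scheme)
//	cfg := &configv1alpha1.KubeControllerManagerConfiguration{}
//	scheme.Default(cfg) // invokes SetObjectDefaults_KubeControllerManagerConfiguration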
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeprecatedControllerConfiguration) DeepCopyInto(out *DeprecatedControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedControllerConfiguration.
func (in *DeprecatedControllerConfiguration) DeepCopy() *DeprecatedControllerConfiguration {
if in == nil {
return nil
}
out := new(DeprecatedControllerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeControllerManagerConfiguration) DeepCopyInto(out *KubeControllerManagerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Generic.DeepCopyInto(&out.Generic)
out.KubeCloudShared = in.KubeCloudShared
out.AttachDetachController = in.AttachDetachController
out.CSRSigningController = in.CSRSigningController
out.DaemonSetController = in.DaemonSetController
out.DeploymentController = in.DeploymentController
out.StatefulSetController = in.StatefulSetController
out.DeprecatedController = in.DeprecatedController
out.EndpointController = in.EndpointController
out.EndpointSliceController = in.EndpointSliceController
out.EndpointSliceMirroringController = in.EndpointSliceMirroringController
out.EphemeralVolumeController = in.EphemeralVolumeController
in.GarbageCollectorController.DeepCopyInto(&out.GarbageCollectorController)
out.HPAController = in.HPAController
out.JobController = in.JobController
out.CronJobController = in.CronJobController
out.LegacySATokenCleaner = in.LegacySATokenCleaner
out.NamespaceController = in.NamespaceController
out.NodeIPAMController = in.NodeIPAMController
out.NodeLifecycleController = in.NodeLifecycleController
out.PersistentVolumeBinderController = in.PersistentVolumeBinderController
out.PodGCController = in.PodGCController
out.ReplicaSetController = in.ReplicaSetController
out.ReplicationController = in.ReplicationController
out.ResourceQuotaController = in.ResourceQuotaController
out.SAController = in.SAController
out.ServiceController = in.ServiceController
out.TTLAfterFinishedController = in.TTLAfterFinishedController
out.ValidatingAdmissionPolicyStatusController = in.ValidatingAdmissionPolicyStatusController
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfiguration.
func (in *KubeControllerManagerConfiguration) DeepCopy() *KubeControllerManagerConfiguration {
if in == nil {
return nil
}
out := new(KubeControllerManagerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeControllerManagerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
csrsigningconfig "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
)
// Important! The public back-and-forth conversion functions for the
// CSRSigningControllerConfiguration types in this package need to be manually
// exposed like this so that other packages that reference this package can
// call these conversion functions in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(in *v1alpha1.CSRSigningControllerConfiguration, out *csrsigningconfig.CSRSigningControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(in, out, s)
}
// Convert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration is an autogenerated conversion function.
func Convert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(in *csrsigningconfig.CSRSigningControllerConfiguration, out *v1alpha1.CSRSigningControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultCSRSigningControllerConfiguration defaults a pointer to a
// CSRSigningControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, because defaulting in the scheme is done as part of conversion
// and there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it from the `SetDefaults_` method of your wrapper
// struct of this type.
func RecommendedDefaultCSRSigningControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.CSRSigningControllerConfiguration) {
zero := metav1.Duration{}
if obj.ClusterSigningDuration == zero {
obj.ClusterSigningDuration = metav1.Duration{Duration: 365 * 24 * time.Hour}
}
}
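
// What follows is an illustrative sketch, not part of the original source: a
// hypothetical consumer embedding CSRSigningControllerConfiguration would wire
// the recommended defaulter into its own `SetDefaults_` method, as described in
// the comment above. The wrapper type name is an assumption for the example.
type exampleWrapperConfiguration struct {
	CSRSigning kubectrlmgrconfigv1alpha1.CSRSigningControllerConfiguration
}

func SetDefaults_exampleWrapperConfiguration(obj *exampleWrapperConfiguration) {
	// Opt in to the recommended defaults for the embedded config.
	RecommendedDefaultCSRSigningControllerConfiguration(&obj.CSRSigning)
}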
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.CSRSigningConfiguration)(nil), (*config.CSRSigningConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(a.(*configv1alpha1.CSRSigningConfiguration), b.(*config.CSRSigningConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CSRSigningConfiguration)(nil), (*configv1alpha1.CSRSigningConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(a.(*config.CSRSigningConfiguration), b.(*configv1alpha1.CSRSigningConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.CSRSigningControllerConfiguration)(nil), (*configv1alpha1.CSRSigningControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(a.(*config.CSRSigningControllerConfiguration), b.(*configv1alpha1.CSRSigningControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.CSRSigningControllerConfiguration)(nil), (*config.CSRSigningControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(a.(*configv1alpha1.CSRSigningControllerConfiguration), b.(*config.CSRSigningControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
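
// Illustrative usage sketch, not part of the generated file: once these
// conversions are registered (normally via localSchemeBuilder at package init),
// the scheme can convert between the versioned and internal types, e.g.:
//
//	scheme := runtime.NewScheme()
//	_ = RegisterConversions(scheme)
//	in := &configv1alpha1.CSRSigningConfiguration{CertFile: "ca.crt", KeyFile: "ca.key"}
//	out := &config.CSRSigningConfiguration{}
//	_ = scheme.Convert(in, out, nil)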
func autoConvert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in *configv1alpha1.CSRSigningConfiguration, out *config.CSRSigningConfiguration, s conversion.Scope) error {
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
return nil
}
// Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in *configv1alpha1.CSRSigningConfiguration, out *config.CSRSigningConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(in, out, s)
}
func autoConvert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in *config.CSRSigningConfiguration, out *configv1alpha1.CSRSigningConfiguration, s conversion.Scope) error {
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
return nil
}
// Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration is an autogenerated conversion function.
func Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in *config.CSRSigningConfiguration, out *configv1alpha1.CSRSigningConfiguration, s conversion.Scope) error {
return autoConvert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(in, out, s)
}
func autoConvert_v1alpha1_CSRSigningControllerConfiguration_To_config_CSRSigningControllerConfiguration(in *configv1alpha1.CSRSigningControllerConfiguration, out *config.CSRSigningControllerConfiguration, s conversion.Scope) error {
out.ClusterSigningCertFile = in.ClusterSigningCertFile
out.ClusterSigningKeyFile = in.ClusterSigningKeyFile
if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeletServingSignerConfiguration, &out.KubeletServingSignerConfiguration, s); err != nil {
return err
}
if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeletClientSignerConfiguration, &out.KubeletClientSignerConfiguration, s); err != nil {
return err
}
if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.KubeAPIServerClientSignerConfiguration, &out.KubeAPIServerClientSignerConfiguration, s); err != nil {
return err
}
if err := Convert_v1alpha1_CSRSigningConfiguration_To_config_CSRSigningConfiguration(&in.LegacyUnknownSignerConfiguration, &out.LegacyUnknownSignerConfiguration, s); err != nil {
return err
}
out.ClusterSigningDuration = in.ClusterSigningDuration
return nil
}
func autoConvert_config_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration(in *config.CSRSigningControllerConfiguration, out *configv1alpha1.CSRSigningControllerConfiguration, s conversion.Scope) error {
out.ClusterSigningCertFile = in.ClusterSigningCertFile
out.ClusterSigningKeyFile = in.ClusterSigningKeyFile
if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeletServingSignerConfiguration, &out.KubeletServingSignerConfiguration, s); err != nil {
return err
}
if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeletClientSignerConfiguration, &out.KubeletClientSignerConfiguration, s); err != nil {
return err
}
if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.KubeAPIServerClientSignerConfiguration, &out.KubeAPIServerClientSignerConfiguration, s); err != nil {
return err
}
if err := Convert_config_CSRSigningConfiguration_To_v1alpha1_CSRSigningConfiguration(&in.LegacyUnknownSignerConfiguration, &out.LegacyUnknownSignerConfiguration, s); err != nil {
return err
}
out.ClusterSigningDuration = in.ClusterSigningDuration
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSRSigningConfiguration) DeepCopyInto(out *CSRSigningConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSRSigningConfiguration.
func (in *CSRSigningConfiguration) DeepCopy() *CSRSigningConfiguration {
if in == nil {
return nil
}
out := new(CSRSigningConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CSRSigningControllerConfiguration) DeepCopyInto(out *CSRSigningControllerConfiguration) {
*out = *in
out.KubeletServingSignerConfiguration = in.KubeletServingSignerConfiguration
out.KubeletClientSignerConfiguration = in.KubeletClientSignerConfiguration
out.KubeAPIServerClientSignerConfiguration = in.KubeAPIServerClientSignerConfiguration
out.LegacyUnknownSignerConfiguration = in.LegacyUnknownSignerConfiguration
out.ClusterSigningDuration = in.ClusterSigningDuration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSRSigningControllerConfiguration.
func (in *CSRSigningControllerConfiguration) DeepCopy() *CSRSigningControllerConfiguration {
if in == nil {
return nil
}
out := new(CSRSigningControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"encoding/json"
"fmt"
"sync"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
)
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func(ctx context.Context) error
}
func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc(ctx)
}
})
return m.canAdoptErr
}
// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// - Adopt orphans if the match function returns true.
// - Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt, release func(context.Context, metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOfNoCopy(obj)
if controllerRef != nil {
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore.
return false, nil
}
if match(obj) {
// We already own it and the selector matches.
// Return true (successfully claimed) before checking deletion timestamp.
// We're still allowed to claim things we already own while being deleted
// because doing so requires taking no actions.
return true, nil
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else released it, or there was a transient error.
// The controller should requeue and try again if it's still stale.
return false, err
}
// Successfully released.
return false, nil
}
// It's an orphan.
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
if obj.GetDeletionTimestamp() != nil {
// Ignore if the object is being deleted
return false, nil
}
if len(m.Controller.GetNamespace()) > 0 && m.Controller.GetNamespace() != obj.GetNamespace() {
		// Ignore if the namespace doesn't match
return false, nil
}
// Selector matches. Try to adopt.
if err := adopt(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else claimed it first, or there was a transient error.
// The controller should requeue and try again if it's still orphaned.
return false, err
}
// Successfully adopted.
return true, nil
}
type PodControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
podControl PodControlInterface
finalizers []string
}
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
finalizers ...string,
) *PodControllerRefManager {
return &PodControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
podControl: podControl,
finalizers: finalizers,
}
}
// ClaimPods tries to take ownership of a list of Pods.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of Pods that you now own is returned.
func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
var claimed []*v1.Pod
var errlist []error
match := func(obj metav1.Object) bool {
pod := obj.(*v1.Pod)
// Check selector first so filters only run on potentially matching Pods.
if !m.Selector.Matches(labels.Set(pod.Labels)) {
return false
}
for _, filter := range filters {
if !filter(pod) {
return false
}
}
return true
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptPod(ctx, obj.(*v1.Pod))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleasePod(ctx, obj.(*v1.Pod))
}
for _, pod := range pods {
ok, err := m.ClaimObject(ctx, pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, pod)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
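
// exampleClaimPods is an illustrative sketch, not part of the original source:
// it shows how a controller's sync pass would typically use
// PodControllerRefManager. The parameters stand in for state a real controller
// (here assumed to be a ReplicaSet controller) already holds.
func exampleClaimPods(ctx context.Context, podControl PodControlInterface, rs *apps.ReplicaSet, selector labels.Selector, pods []*v1.Pod) ([]*v1.Pod, error) {
	canAdopt := func(ctx context.Context) error {
		// A production controller would re-fetch rs from the API server here
		// (see RecheckDeletionTimestamp below); this sketch only checks the
		// cached copy.
		if rs.DeletionTimestamp != nil {
			return fmt.Errorf("ReplicaSet %v/%v is being deleted", rs.Namespace, rs.Name)
		}
		return nil
	}
	m := NewPodControllerRefManager(podControl, rs, selector, apps.SchemeGroupVersion.WithKind("ReplicaSet"), canAdopt)
	// Orphans matching the selector are adopted; owned pods that no longer
	// match are released. The returned slice is what the controller now owns.
	return m.ClaimPods(ctx, pods)
}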
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, pod.UID, m.finalizers...)
if err != nil {
return err
}
return m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
}
// ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(ctx context.Context, pod *v1.Pod) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching pod to remove its controllerRef", "pod", klog.KObj(pod), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(pod.UID, []types.UID{m.Controller.GetUID()}, m.finalizers...)
if err != nil {
return err
}
err = m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the pod no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the pod
// has no owner reference, 2. the uid of the pod doesn't
// match, which means the pod is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the pod has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
// ReplicaSetControllerRefManager is used to manage the controllerRef of ReplicaSets.
// Three methods are defined on this object: 1. ClaimReplicaSets, 2. AdoptReplicaSet,
// and 3. ReleaseReplicaSet, which are used to classify ReplicaSets into appropriate
// categories and adopt or release them accordingly. See the comments on these
// functions for more details.
type ReplicaSetControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
rsControl RSControlInterface
}
// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rsControl: rsControl,
}
}
// ClaimReplicaSets tries to take ownership of a list of ReplicaSets.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicaSets that you now own is
// returned.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(ctx context.Context, sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
var claimed []*apps.ReplicaSet
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptReplicaSet(ctx, obj.(*apps.ReplicaSet))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseReplicaSet(ctx, obj.(*apps.ReplicaSet))
}
for _, rs := range sets {
ok, err := m.ClaimObject(ctx, rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, rs)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs *apps.ReplicaSet) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, rs.UID)
if err != nil {
return err
}
return m.rsControl.PatchReplicaSet(ctx, rs.Namespace, rs.Name, patchBytes)
}
// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(ctx context.Context, replicaSet *apps.ReplicaSet) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching ReplicaSet to remove its controllerRef", "replicaSet", klog.KObj(replicaSet), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(replicaSet.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}
err = m.rsControl.PatchReplicaSet(ctx, replicaSet.Namespace, replicaSet.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ReplicaSet no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the ReplicaSet
// has no owner reference, 2. the uid of the ReplicaSet doesn't
// match, which means the ReplicaSet is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error {
return func(ctx context.Context) error {
obj, err := getObject(ctx)
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
if obj.GetDeletionTimestamp() != nil {
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
}
return nil
}
}
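
// exampleCanAdoptFunc is an illustrative sketch, not part of the original
// source: it shows how RecheckDeletionTimestamp is typically composed with a
// "fetch the latest object" closure to build the CanAdopt hook passed to the
// ref manager constructors above. The fresh parameter is an assumed stand-in
// for a live GET against the API server.
func exampleCanAdoptFunc(rs *apps.ReplicaSet, fresh func(ctx context.Context) (*apps.ReplicaSet, error)) func(context.Context) error {
	return RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
		recent, err := fresh(ctx)
		if err != nil {
			return nil, err
		}
		if recent.UID != rs.UID {
			return nil, fmt.Errorf("original ReplicaSet %v/%v is gone: got uid %v, wanted %v", rs.Namespace, rs.Name, recent.UID, rs.UID)
		}
		return recent, nil
	})
}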
// ControllerRevisionControllerRefManager is used to manage the controllerRef of ControllerRevisions.
// Three methods are defined on this object: 1. ClaimControllerRevisions,
// 2. AdoptControllerRevision, and 3. ReleaseControllerRevision, which are used to
// classify ControllerRevisions into appropriate categories and adopt or release
// them accordingly. See the comments on these functions for more details.
type ControllerRevisionControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
crControl ControllerRevisionControlInterface
}
// NewControllerRevisionControllerRefManager returns a ControllerRevisionControllerRefManager that exposes
// methods to manage the controllerRef of ControllerRevisions.
//
// The canAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If canAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
// ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
func NewControllerRevisionControllerRefManager(
crControl ControllerRevisionControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func(ctx context.Context) error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
crControl: crControl,
}
}
// ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(ctx context.Context, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptControllerRevision(ctx, obj.(*apps.ControllerRevision))
}
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseControllerRevision(ctx, obj.(*apps.ControllerRevision))
}
for _, h := range histories {
ok, err := m.ClaimObject(ctx, h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, h)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
patchBytes, err := ownerRefControllerPatch(m.Controller, m.controllerKind, history.UID)
if err != nil {
return err
}
return m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
}
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Patching ControllerRevision to remove its controllerRef", "controllerRevision", klog.KObj(history), "gvk", m.controllerKind, "controller", m.Controller.GetName())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(history.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}
err = m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ControllerRevision no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the ControllerRevision
// has no owner reference, 2. the uid of the ControllerRevision doesn't
// match, which means the ControllerRevision is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
type objectForAddOwnerRefPatch struct {
Metadata objectMetaForPatch `json:"metadata"`
}
type objectMetaForPatch struct {
OwnerReferences []metav1.OwnerReference `json:"ownerReferences"`
UID types.UID `json:"uid"`
Finalizers []string `json:"finalizers,omitempty"`
}
func ownerRefControllerPatch(controller metav1.Object, controllerKind schema.GroupVersionKind, uid types.UID, finalizers ...string) ([]byte, error) {
blockOwnerDeletion := true
isController := true
addControllerPatch := objectForAddOwnerRefPatch{
Metadata: objectMetaForPatch{
UID: uid,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: controllerKind.GroupVersion().String(),
Kind: controllerKind.Kind,
Name: controller.GetName(),
UID: controller.GetUID(),
Controller: &isController,
BlockOwnerDeletion: &blockOwnerDeletion,
},
},
Finalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&addControllerPatch)
if err != nil {
return nil, err
}
return patchBytes, nil
}
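
// For reference, an illustrative adoption patch as produced by
// ownerRefControllerPatch for a hypothetical apps/v1 Deployment owner named
// "nginx" (UIDs elided) looks like:
//
//	{"metadata":{"ownerReferences":[{"apiVersion":"apps/v1","kind":"Deployment",
//	"name":"nginx","uid":"<owner-uid>","controller":true,
//	"blockOwnerDeletion":true}],"uid":"<dependent-uid>"}}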
type objectForDeleteOwnerRefStrategicMergePatch struct {
Metadata objectMetaForMergePatch `json:"metadata"`
}
type objectMetaForMergePatch struct {
UID types.UID `json:"uid"`
OwnerReferences []map[string]string `json:"ownerReferences"`
DeleteFinalizers []string `json:"$deleteFromPrimitiveList/finalizers,omitempty"`
}
func GenerateDeleteOwnerRefStrategicMergeBytes(dependentUID types.UID, ownerUIDs []types.UID, finalizers ...string) ([]byte, error) {
var ownerReferences []map[string]string
for _, ownerUID := range ownerUIDs {
ownerReferences = append(ownerReferences, ownerReference(ownerUID, "delete"))
}
patch := objectForDeleteOwnerRefStrategicMergePatch{
Metadata: objectMetaForMergePatch{
UID: dependentUID,
OwnerReferences: ownerReferences,
DeleteFinalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&patch)
if err != nil {
return nil, err
}
return patchBytes, nil
}
func ownerReference(uid types.UID, patchType string) map[string]string {
return map[string]string{
"$patch": patchType,
"uid": string(uid),
}
}
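
// For reference, an illustrative release patch as produced by
// GenerateDeleteOwnerRefStrategicMergeBytes for a single owner UID looks like:
//
//	{"metadata":{"uid":"<dependent-uid>","ownerReferences":[{"$patch":"delete","uid":"<owner-uid>"}]}}
//
// The "$patch":"delete" directive tells the strategic-merge-patch machinery to
// remove the matching ownerReference entry rather than merge it.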
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"hash/fnv"
"math"
"sync"
"sync/atomic"
"time"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
clientretry "k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/helper"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/features"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/utils/clock"
"k8s.io/utils/ptr"
"k8s.io/klog/v2"
)
const (
	// If a watch drops a delete event for a pod, it'll take this long
	// before a dormant controller waiting for those events is woken up anyway. It is
	// specifically targeted at the case where some problem prevents an update
	// of expectations, without which the controller could stay asleep forever. This should
	// be set based on the expected latency of watch events.
//
// Currently a controller can service (create *and* observe the watch events for said
// creation) about 10 pods a second, so it takes about 1 min to service
// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
// When batching pod creates, SlowStartInitialBatchSize is the size of the
// initial batch. The size of each successive batch is twice the size of
// the previous batch. For example, for a value of 1, batch sizes would be
// 1, 2, 4, 8, ... and for a value of 10, batch sizes would be
// 10, 20, 40, 80, ... Setting the value higher means that quota denials
// will result in more doomed API calls and associated event spam. Setting
// the value lower will result in more API call round trip periods for
// large batches.
//
// Given a number of pods to start "N":
// The number of doomed calls per sync once quota is exceeded is given by:
// min(N,SlowStartInitialBatchSize)
// The number of batches is given by:
// 1+floor(log_2(ceil(N/SlowStartInitialBatchSize)))
SlowStartInitialBatchSize = 1
// PodNodeNameKeyIndex is the name of the index used by PodInformer to index pods by their node name.
PodNodeNameKeyIndex = "spec.nodeName"
	// OrphanPodIndexKey is the key under which all orphan pods (pods without a
	// controller owner) are indexed.
	OrphanPodIndexKey = "_ORPHAN_POD"
	// PodControllerUIDIndex is the name of the Pod store's index function,
	// which indexes pods by their controllerRef UID.
	PodControllerUIDIndex = "podControllerUID"
)
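
// slowStartBatchSketch is an illustrative sketch, not part of the original
// source: it shows the doubling pattern described for SlowStartInitialBatchSize
// above. Work is issued in batches of initialBatchSize, 2x, 4x, ... so that a
// systemic failure (e.g. quota denial) wastes at most one batch of doomed
// calls. It assumes the Go 1.21+ built-in min.
func slowStartBatchSketch(count, initialBatchSize int, fn func() error) (int, error) {
	remaining := count
	successes := 0
	for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
		errCh := make(chan error, batchSize)
		var wg sync.WaitGroup
		wg.Add(batchSize)
		for i := 0; i < batchSize; i++ {
			go func() {
				defer wg.Done()
				if err := fn(); err != nil {
					errCh <- err
				}
			}()
		}
		wg.Wait()
		successes += batchSize - len(errCh)
		if len(errCh) > 0 {
			// Stop doubling on the first failed batch and surface one error.
			return successes, <-errCh
		}
		remaining -= batchSize
	}
	return successes, nil
}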
var UpdateTaintBackoff = wait.Backoff{
Steps: 5,
Duration: 100 * time.Millisecond,
Jitter: 1.0,
}
var UpdateLabelBackoff = wait.Backoff{
Steps: 5,
Duration: 100 * time.Millisecond,
Jitter: 1.0,
}
var (
KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
podPhaseToOrdinal = map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2}
)
type ResyncPeriodFunc func() time.Duration
// NoResyncPeriodFunc returns 0 for the resync period, for cases where resyncing is not needed.
func NoResyncPeriodFunc() time.Duration {
return 0
}
// StaticResyncPeriodFunc returns a ResyncPeriodFunc that always returns the specified resync period.
func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc {
return func() time.Duration {
return resyncPeriod
}
}
// Expectations are a way for controllers to tell the controller manager what they expect. e.g.:
// ControllerExpectations: {
// controller1: expects 2 adds in 2 minutes
// controller2: expects 2 dels in 2 minutes
// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met
// }
//
// Implementation:
// ControlleeExpectations = a pair of atomic counters tracking a controllee's creations/deletions
// ControllerExpectationsStore = TTLStore + a ControlleeExpectations per controller
//
// * Once set, expectations can only be lowered
// * A controller isn't synced until its expectations are either fulfilled or expired
// * Controllers that don't set expectations will get woken up for every matching controllee

// ExpKeyFunc parses the key out of a ControlleeExpectations object.
var ExpKeyFunc = func(obj interface{}) (string, error) {
if e, ok := obj.(*ControlleeExpectations); ok {
return e.key, nil
}
return "", fmt.Errorf("could not find key for obj %#v", obj)
}
// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations.
// Only abstracted out for testing.
// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different
// types of controllers, because the keys might conflict across types.
type ControllerExpectationsInterface interface {
GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error)
SatisfiedExpectations(logger klog.Logger, controllerKey string) bool
DeleteExpectations(logger klog.Logger, controllerKey string)
SetExpectations(logger klog.Logger, controllerKey string, add, del int) error
ExpectCreations(logger klog.Logger, controllerKey string, adds int) error
ExpectDeletions(logger klog.Logger, controllerKey string, dels int) error
CreationObserved(logger klog.Logger, controllerKey string)
DeletionObserved(logger klog.Logger, controllerKey string)
RaiseExpectations(logger klog.Logger, controllerKey string, add, del int)
LowerExpectations(logger klog.Logger, controllerKey string, add, del int)
}
// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync.
type ControllerExpectations struct {
cache.Store
}
// GetExpectations returns the ControlleeExpectations of the given controller.
func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) {
exp, exists, err := r.GetByKey(controllerKey)
if err == nil && exists {
return exp.(*ControlleeExpectations), true, nil
}
return nil, false, err
}
// DeleteExpectations deletes the expectations of the given controller from the TTLStore.
func (r *ControllerExpectations) DeleteExpectations(logger klog.Logger, controllerKey string) {
if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
if err := r.Delete(exp); err != nil {
logger.V(2).Info("Error deleting expectations", "controller", controllerKey, "err", err)
}
}
}
// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed.
// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller
// manager.
func (r *ControllerExpectations) SatisfiedExpectations(logger klog.Logger, controllerKey string) bool {
if exp, exists, err := r.GetExpectations(controllerKey); exists {
if exp.Fulfilled() {
logger.V(4).Info("Controller expectations fulfilled", "expectations", exp)
return true
} else if exp.isExpired() {
logger.V(4).Info("Controller expectations expired", "expectations", exp)
return true
} else {
logger.V(4).Info("Controller still waiting on expectations", "expectations", exp)
return false
}
} else if err != nil {
logger.V(2).Info("Error encountered while checking expectations, forcing sync", "err", err)
} else {
// When a new controller is created, it doesn't have expectations.
// When it doesn't see expected watch events for > TTL, the expectations expire.
// - In this case it wakes up, creates/deletes controllees, and sets expectations again.
// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
// - In this case it continues without setting expectations till it needs to create/delete controllees.
logger.V(4).Info("Controller either never recorded expectations, or the ttl expired", "controller", controllerKey)
}
	// Trigger a sync if we either encountered an error (which shouldn't happen since we're
	// getting from a local store) or this controller hasn't established expectations.
return true
}
// TODO: Extend ExpirationCache to support explicit expiration.
// TODO: Make this possible to disable in tests.
// TODO: Support injection of clock.
func (exp *ControlleeExpectations) isExpired() bool {
return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
}
// SetExpectations registers new expectations for the given controller. Forgets existing expectations.
func (r *ControllerExpectations) SetExpectations(logger klog.Logger, controllerKey string, add, del int) error {
exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()}
logger.V(4).Info("Setting expectations", "expectations", exp)
return r.Add(exp)
}
func (r *ControllerExpectations) ExpectCreations(logger klog.Logger, controllerKey string, adds int) error {
return r.SetExpectations(logger, controllerKey, adds, 0)
}
func (r *ControllerExpectations) ExpectDeletions(logger klog.Logger, controllerKey string, dels int) error {
return r.SetExpectations(logger, controllerKey, 0, dels)
}
// LowerExpectations decrements the expectation counts of the given controller.
func (r *ControllerExpectations) LowerExpectations(logger klog.Logger, controllerKey string, add, del int) {
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
exp.Add(int64(-add), int64(-del))
// The expectations might've been modified since the update on the previous line.
logger.V(4).Info("Lowered expectations", "expectations", exp)
}
}
// RaiseExpectations increments the expectation counts of the given controller.
func (r *ControllerExpectations) RaiseExpectations(logger klog.Logger, controllerKey string, add, del int) {
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
exp.Add(int64(add), int64(del))
// The expectations might've been modified since the update on the previous line.
logger.V(4).Info("Raised expectations", "expectations", exp)
}
}
// CreationObserved atomically decrements the `add` expectation count of the given controller.
func (r *ControllerExpectations) CreationObserved(logger klog.Logger, controllerKey string) {
r.LowerExpectations(logger, controllerKey, 1, 0)
}
// DeletionObserved atomically decrements the `del` expectation count of the given controller.
func (r *ControllerExpectations) DeletionObserved(logger klog.Logger, controllerKey string) {
r.LowerExpectations(logger, controllerKey, 0, 1)
}
// ControlleeExpectations tracks controllee creates/deletes.
type ControlleeExpectations struct {
// Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
add int64
del int64
key string
timestamp time.Time
}
// Add increments the add and del counters.
func (e *ControlleeExpectations) Add(add, del int64) {
atomic.AddInt64(&e.add, add)
atomic.AddInt64(&e.del, del)
}
// Fulfilled returns true if this expectation has been fulfilled.
func (e *ControlleeExpectations) Fulfilled() bool {
// TODO: think about why this line being atomic doesn't matter
return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0
}
// GetExpectations returns the add and del expectations of the controllee.
func (e *ControlleeExpectations) GetExpectations() (int64, int64) {
return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del)
}
// MarshalLog makes a thread-safe copy of the values of the expectations that
// can be used for logging.
func (e *ControlleeExpectations) MarshalLog() interface{} {
return struct {
add int64
del int64
key string
}{
add: atomic.LoadInt64(&e.add),
del: atomic.LoadInt64(&e.del),
key: e.key,
}
}
// NewControllerExpectations returns a store for ControllerExpectations.
func NewControllerExpectations() *ControllerExpectations {
return &ControllerExpectations{cache.NewStore(ExpKeyFunc)}
}
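
// exampleExpectationsFlow is an illustrative sketch, not part of the original
// source: it walks through the lifecycle described above. The sync loop
// records how many creates it is about to issue, the informer handlers
// decrement the count as watch events arrive, and the next sync proceeds once
// expectations are satisfied (or have expired).
func exampleExpectationsFlow(logger klog.Logger, exp *ControllerExpectations, controllerKey string) {
	// Sync pass: about to create 3 pods, record that before issuing creates.
	if err := exp.ExpectCreations(logger, controllerKey, 3); err != nil {
		utilruntime.HandleError(err)
		return
	}
	// Informer add handler: each observed creation lowers the add count.
	for i := 0; i < 3; i++ {
		exp.CreationObserved(logger, controllerKey)
	}
	// With all 3 observed, SatisfiedExpectations now reports true and the
	// controller is eligible for another sync pass.
	_ = exp.SatisfiedExpectations(logger, controllerKey)
}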
// UIDSetKeyFunc parses the key out of a UIDSet.
var UIDSetKeyFunc = func(obj interface{}) (string, error) {
if u, ok := obj.(*UIDSet); ok {
return u.key, nil
}
return "", fmt.Errorf("could not find key for obj %#v", obj)
}
// UIDSet holds a key and a set of UIDs. Used by the
// UIDTrackingControllerExpectations to remember which UIDs it has seen and
// which it is still waiting for.
type UIDSet struct {
sets.String
key string
}
// UIDTrackingControllerExpectations tracks the UID of the pods it deletes.
// This cache is needed over plain old expectations to safely handle graceful
// deletion. The desired behavior is to treat an update that sets the
// DeletionTimestamp on an object as a delete. To do so consistently, one needs
// to remember the expected deletes so they aren't double counted.
// TODO: Track creates as well (#22599)
type UIDTrackingControllerExpectations struct {
ControllerExpectationsInterface
// TODO: There is a much nicer way to do this that involves a single store,
// a lock per entry, and a ControlleeExpectationsInterface type.
uidStoreLock sync.Mutex
// Store used for the UIDs associated with any expectation tracked via the
// ControllerExpectationsInterface.
uidStore cache.Store
}
// GetUIDs is a convenience method to avoid exposing the set of expected uids.
// The returned set is not thread safe; all modifications must be made while
// holding the uidStoreLock.
func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String {
if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists {
return uid.(*UIDSet).String
}
return nil
}
// ExpectDeletions records expectations for the given deleteKeys, against the given controller.
func (u *UIDTrackingControllerExpectations) ExpectDeletions(logger klog.Logger, rcKey string, deletedKeys []string) error {
expectedUIDs := sets.NewString()
for _, k := range deletedKeys {
expectedUIDs.Insert(k)
}
logger.V(4).Info("Controller waiting on deletions", "controller", rcKey, "keys", deletedKeys)
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
logger.Error(nil, "Clobbering existing delete keys", "keys", existing)
}
if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
return err
}
return u.ControllerExpectationsInterface.ExpectDeletions(logger, rcKey, expectedUIDs.Len())
}
// DeletionObserved records the given deleteKey as a deletion, for the given rc.
func (u *UIDTrackingControllerExpectations) DeletionObserved(logger klog.Logger, rcKey, deleteKey string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
uids := u.GetUIDs(rcKey)
if uids != nil && uids.Has(deleteKey) {
logger.V(4).Info("Controller received delete for pod", "controller", rcKey, "key", deleteKey)
u.ControllerExpectationsInterface.DeletionObserved(logger, rcKey)
uids.Delete(deleteKey)
}
}
// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
// underlying ControllerExpectationsInterface.
func (u *UIDTrackingControllerExpectations) DeleteExpectations(logger klog.Logger, rcKey string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
u.ControllerExpectationsInterface.DeleteExpectations(logger, rcKey)
if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
if err := u.uidStore.Delete(uidExp); err != nil {
logger.V(2).Info("Error deleting uid expectations", "controller", rcKey, "err", err)
}
}
}
// NewUIDTrackingControllerExpectations returns a wrapper around
// ControllerExpectations that is aware of deleteKeys.
func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
}
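
// exampleUIDTrackedDeletions is an illustrative sketch, not part of the
// original source: when issuing graceful deletes, a controller records the
// exact pod keys it expects to disappear so that a DeletionTimestamp update
// and the eventual delete event for the same pod aren't double counted.
func exampleUIDTrackedDeletions(logger klog.Logger, exp *UIDTrackingControllerExpectations, rcKey string, podKeys []string) error {
	// Sync pass: record the keys of the pods being deleted.
	if err := exp.ExpectDeletions(logger, rcKey, podKeys); err != nil {
		return err
	}
	// Informer handlers: each key is observed exactly once, whether via a
	// DeletionTimestamp-setting update or the final delete event.
	for _, k := range podKeys {
		exp.DeletionObserved(logger, rcKey, k)
	}
	return nil
}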
// Reasons for pod events
const (
	// FailedCreatePodReason is added in an event and in a replica set condition
	// when a pod for a replica set fails to be created.
FailedCreatePodReason = "FailedCreate"
// SuccessfulCreatePodReason is added in an event when a pod for a replica set
// is successfully created.
SuccessfulCreatePodReason = "SuccessfulCreate"
	// FailedDeletePodReason is added in an event and in a replica set condition
	// when a pod for a replica set fails to be deleted.
FailedDeletePodReason = "FailedDelete"
// SuccessfulDeletePodReason is added in an event when a pod for a replica set
// is successfully deleted.
SuccessfulDeletePodReason = "SuccessfulDelete"
)
// RSControlInterface is an interface that knows how to patch
// ReplicaSets. It is used by the deployment controller to ease
// testing of the actions that it takes.
type RSControlInterface interface {
PatchReplicaSet(ctx context.Context, namespace, name string, data []byte) error
}
// RealRSControl is the default implementation of RSControlInterface.
type RealRSControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ RSControlInterface = &RealRSControl{}
func (r RealRSControl) PatchReplicaSet(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
// TODO: merge the controller revision interface in controller_history.go with this one
// ControllerRevisionControlInterface is an interface that knows how to patch
// ControllerRevisions. It is used by the daemonset controller to ease testing
// of actions that it takes.
type ControllerRevisionControlInterface interface {
PatchControllerRevision(ctx context.Context, namespace, name string, data []byte) error
}
// RealControllerRevisionControl is the default implementation of ControllerRevisionControlInterface.
type RealControllerRevisionControl struct {
KubeClient clientset.Interface
}
var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{}
func (r RealControllerRevisionControl) PatchControllerRevision(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
// PodControlInterface is an interface that knows how to add or delete pods.
// It is implemented as an interface to allow testing.
type PodControlInterface interface {
// CreatePods creates new pods according to the spec, and sets object as the pod's controller.
CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
// CreatePodsWithGenerateName creates new pods according to the spec, sets object as the pod's controller and sets pod's generateName.
CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error
// DeletePod deletes the pod identified by podID.
DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error
// PatchPod patches the pod.
PatchPod(ctx context.Context, namespace, name string, data []byte) error
}
// RealPodControl is the default implementation of PodControlInterface.
type RealPodControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ PodControlInterface = &RealPodControl{}
func getPodsLabelSet(template *v1.PodTemplateSpec) labels.Set {
desiredLabels := make(labels.Set)
for k, v := range template.Labels {
desiredLabels[k] = v
}
return desiredLabels
}
func getPodsFinalizers(template *v1.PodTemplateSpec) []string {
desiredFinalizers := make([]string, len(template.Finalizers))
copy(desiredFinalizers, template.Finalizers)
return desiredFinalizers
}
func getPodsAnnotationSet(template *v1.PodTemplateSpec) labels.Set {
desiredAnnotations := make(labels.Set)
for k, v := range template.Annotations {
desiredAnnotations[k] = v
}
return desiredAnnotations
}
func getPodsPrefix(controllerName string) string {
// use the dash (if the name isn't too long) to make the pod name a bit prettier
prefix := fmt.Sprintf("%s-", controllerName)
if len(validation.ValidatePodName(prefix, true)) != 0 {
prefix = controllerName
}
return prefix
}
func validateControllerRef(controllerRef *metav1.OwnerReference) error {
if controllerRef == nil {
return fmt.Errorf("controllerRef is nil")
}
if len(controllerRef.APIVersion) == 0 {
return fmt.Errorf("controllerRef has empty APIVersion")
}
if len(controllerRef.Kind) == 0 {
return fmt.Errorf("controllerRef has empty Kind")
}
if controllerRef.Controller == nil || !*controllerRef.Controller {
return fmt.Errorf("controllerRef.Controller is not set to true")
}
if controllerRef.BlockOwnerDeletion == nil || !*controllerRef.BlockOwnerDeletion {
return fmt.Errorf("controllerRef.BlockOwnerDeletion is not set")
}
return nil
}
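// exampleControllerRef is a hedged sketch, not part of the original file: it
// builds an OwnerReference that passes validateControllerRef, which requires
// Controller and BlockOwnerDeletion to be non-nil and true. The owner identity
// below is hypothetical.
func exampleControllerRef() *metav1.OwnerReference {
	isController := true
	blockOwnerDeletion := true
	return &metav1.OwnerReference{
		APIVersion:         "apps/v1",
		Kind:               "ReplicaSet",
		Name:               "my-rs",                                // hypothetical owner name
		UID:                "11111111-2222-3333-4444-555555555555", // hypothetical UID
		Controller:         &isController,
		BlockOwnerDeletion: &blockOwnerDeletion,
	}
}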
func (r RealPodControl) CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error {
return r.CreatePodsWithGenerateName(ctx, namespace, template, controllerObject, controllerRef, "")
}
func (r RealPodControl) CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error {
if err := validateControllerRef(controllerRef); err != nil {
return err
}
pod, err := GetPodFromTemplate(template, controllerObject, controllerRef)
if err != nil {
return err
}
if len(generateName) > 0 {
pod.ObjectMeta.GenerateName = generateName
}
return r.createPods(ctx, namespace, pod, controllerObject)
}
func (r RealPodControl) PatchPod(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}
func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *metav1.OwnerReference) (*v1.Pod, error) {
desiredLabels := getPodsLabelSet(template)
desiredFinalizers := getPodsFinalizers(template)
desiredAnnotations := getPodsAnnotationSet(template)
accessor, err := meta.Accessor(parentObject)
if err != nil {
return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err)
}
prefix := getPodsPrefix(accessor.GetName())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: desiredLabels,
Annotations: desiredAnnotations,
GenerateName: prefix,
Finalizers: desiredFinalizers,
},
}
if controllerRef != nil {
pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
}
pod.Spec = *template.Spec.DeepCopy()
return pod, nil
}
func (r RealPodControl) createPods(ctx context.Context, namespace string, pod *v1.Pod, object runtime.Object) error {
if len(labels.Set(pod.Labels)) == 0 {
return fmt.Errorf("unable to create pods, no labels")
}
newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
// only send an event if the namespace isn't terminating
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
}
return err
}
logger := klog.FromContext(ctx)
accessor, err := meta.Accessor(object)
if err != nil {
logger.Error(err, "parentObject does not have ObjectMeta")
return nil
}
logger.V(4).Info("Controller created pod", "controller", accessor.GetName(), "pod", klog.KObj(newPod))
r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
return nil
}
func (r RealPodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
accessor, err := meta.Accessor(object)
if err != nil {
return fmt.Errorf("object does not have ObjectMeta, %v", err)
}
logger := klog.FromContext(ctx)
logger.V(2).Info("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
if err := r.KubeClient.CoreV1().Pods(namespace).Delete(ctx, podID, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
logger.V(4).Info("Pod has already been deleted.", "pod", klog.KRef(namespace, podID))
return err
}
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
return fmt.Errorf("unable to delete pods: %v", err)
}
r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
return nil
}
type FakePodControl struct {
sync.Mutex
Templates []v1.PodTemplateSpec
ControllerRefs []metav1.OwnerReference
DeletePodName []string
Patches [][]byte
Err error
CreateLimit int
CreateCallCount int
}
var _ PodControlInterface = &FakePodControl{}
func (f *FakePodControl) PatchPod(ctx context.Context, namespace, name string, data []byte) error {
f.Lock()
defer f.Unlock()
f.Patches = append(f.Patches, data)
if f.Err != nil {
return f.Err
}
return nil
}
func (f *FakePodControl) CreatePods(ctx context.Context, namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
return f.CreatePodsWithGenerateName(ctx, namespace, spec, object, controllerRef, "")
}
func (f *FakePodControl) CreatePodsWithGenerateName(ctx context.Context, namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateNamePrefix string) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
spec.GenerateName = generateNamePrefix
f.Templates = append(f.Templates, *spec)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
return f.Err
}
return nil
}
func (f *FakePodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
f.Lock()
defer f.Unlock()
f.DeletePodName = append(f.DeletePodName, podID)
if f.Err != nil {
return f.Err
}
return nil
}
func (f *FakePodControl) Clear() {
f.Lock()
defer f.Unlock()
f.DeletePodName = []string{}
f.Templates = []v1.PodTemplateSpec{}
f.ControllerRefs = []metav1.OwnerReference{}
f.Patches = [][]byte{}
f.CreateLimit = 0
f.CreateCallCount = 0
}
// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
type ByLogging []*v1.Pod
func (s ByLogging) Len() int { return len(s) }
func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByLogging) Less(i, j int) bool {
// 1. assigned < unassigned
if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
return len(s[i].Spec.NodeName) > 0
}
// 2. PodRunning < PodUnknown < PodPending
if s[i].Status.Phase != s[j].Status.Phase {
return podPhaseToOrdinal[s[i].Status.Phase] > podPhaseToOrdinal[s[j].Status.Phase]
}
// 3. ready < not ready
if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
return podutil.IsPodReady(s[i])
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for more time < less time < empty time
if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) {
readyTime1 := podReadyTime(s[i])
readyTime2 := podReadyTime(s[j])
if !readyTime1.Equal(readyTime2) {
return afterOrZero(readyTime2, readyTime1)
}
}
// 5. Pods with containers with higher restart counts < lower restart counts
if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
return *res
}
// 6. older pods < newer pods < empty timestamp pods
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
}
return false
}
// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
type ActivePods []*v1.Pod
func (s ActivePods) Len() int { return len(s) }
func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ActivePods) Less(i, j int) bool {
// 1. Unassigned < assigned
// If only one of the pods is unassigned, the unassigned one is smaller
if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
return len(s[i].Spec.NodeName) == 0
}
// 2. PodPending < PodUnknown < PodRunning
if podPhaseToOrdinal[s[i].Status.Phase] != podPhaseToOrdinal[s[j].Status.Phase] {
return podPhaseToOrdinal[s[i].Status.Phase] < podPhaseToOrdinal[s[j].Status.Phase]
}
// 3. Not ready < ready
// If only one of the pods is not ready, the not ready one is smaller
if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
return !podutil.IsPodReady(s[i])
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for empty time < less time < more time
// If both pods are ready, the latest ready one is smaller
if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) {
readyTime1 := podReadyTime(s[i])
readyTime2 := podReadyTime(s[j])
if !readyTime1.Equal(readyTime2) {
return afterOrZero(readyTime1, readyTime2)
}
}
// 5. Pods with containers with higher restart counts < lower restart counts
if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
return *res
}
// 6. Empty creation time pods < newer pods < older pods
if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
}
return false
}
// ActivePodsWithRanks is a sortable list of pods and a list of corresponding
// ranks which will be considered during sorting. The two lists must have equal
// length. After sorting, the pods will be ordered as follows, applying each
// rule in turn until one matches:
//
// 1. If only one of the pods is assigned to a node, the pod that is not
// assigned comes before the pod that is.
// 2. If the pods' phases differ, a pending pod comes before a pod whose phase
// is unknown, and a pod whose phase is unknown comes before a running pod.
// 3. If exactly one of the pods is ready, the pod that is not ready comes
// before the ready pod.
// 4. If controller.kubernetes.io/pod-deletion-cost annotation is set, then
// the pod with the lower value will come first.
// 5. If the pods' ranks differ, the pod with greater rank comes before the pod
// with lower rank.
// 6. If both pods are ready but have not been ready for the same amount of
// time, the pod that has been ready for a shorter amount of time comes
// before the pod that has been ready for longer.
// 7. If one pod has a container that has restarted more than any container in
// the other pod, the pod with the container with more restarts comes
// before the other pod.
// 8. If the pods' creation times differ, the pod that was created more recently
// comes before the older pod.
//
// In 6 and 8, times are compared on a logarithmic scale. This allows a level
// of randomness among equivalent Pods when sorting. If two pods have the same
// logarithmic rank, they are sorted by UID to provide a pseudorandom order.
//
// If none of these rules matches, the second pod comes before the first pod.
//
// The intention of this ordering is to put pods that should be preferred for
// deletion first in the list.
type ActivePodsWithRanks struct {
// Pods is a list of pods.
Pods []*v1.Pod
// Rank is a ranking of pods. This ranking is used during sorting when
// comparing two pods that are both scheduled, in the same phase, and
// have the same ready status.
Rank []int
// Now is a reference timestamp for doing logarithmic timestamp comparisons.
// If zero, comparison happens without scaling.
Now metav1.Time
}
func (s ActivePodsWithRanks) Len() int {
return len(s.Pods)
}
func (s ActivePodsWithRanks) Swap(i, j int) {
s.Pods[i], s.Pods[j] = s.Pods[j], s.Pods[i]
s.Rank[i], s.Rank[j] = s.Rank[j], s.Rank[i]
}
// Less compares two pods with corresponding ranks and returns true if the first
// one should be preferred for deletion.
func (s ActivePodsWithRanks) Less(i, j int) bool {
// 1. Unassigned < assigned
// If only one of the pods is unassigned, the unassigned one is smaller
if s.Pods[i].Spec.NodeName != s.Pods[j].Spec.NodeName && (len(s.Pods[i].Spec.NodeName) == 0 || len(s.Pods[j].Spec.NodeName) == 0) {
return len(s.Pods[i].Spec.NodeName) == 0
}
// 2. PodPending < PodUnknown < PodRunning
if podPhaseToOrdinal[s.Pods[i].Status.Phase] != podPhaseToOrdinal[s.Pods[j].Status.Phase] {
return podPhaseToOrdinal[s.Pods[i].Status.Phase] < podPhaseToOrdinal[s.Pods[j].Status.Phase]
}
// 3. Not ready < ready
// If only one of the pods is not ready, the not ready one is smaller
if podutil.IsPodReady(s.Pods[i]) != podutil.IsPodReady(s.Pods[j]) {
return !podutil.IsPodReady(s.Pods[i])
}
// 4. lower pod-deletion-cost < higher pod-deletion cost
if utilfeature.DefaultFeatureGate.Enabled(features.PodDeletionCost) {
pi, _ := helper.GetDeletionCostFromPodAnnotations(s.Pods[i].Annotations)
pj, _ := helper.GetDeletionCostFromPodAnnotations(s.Pods[j].Annotations)
if pi != pj {
return pi < pj
}
}
// 5. Doubled up < not doubled up
// If one of the two pods is on the same node as one or more additional
// ready pods that belong to the same replicaset, whichever pod has more
// colocated ready pods is less
if s.Rank[i] != s.Rank[j] {
return s.Rank[i] > s.Rank[j]
}
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065
// 6. Been ready for empty time < less time < more time
// If both pods are ready, the latest ready one is smaller
if podutil.IsPodReady(s.Pods[i]) && podutil.IsPodReady(s.Pods[j]) {
readyTime1 := podReadyTime(s.Pods[i])
readyTime2 := podReadyTime(s.Pods[j])
if !readyTime1.Equal(readyTime2) {
if !utilfeature.DefaultFeatureGate.Enabled(features.LogarithmicScaleDown) {
return afterOrZero(readyTime1, readyTime2)
} else {
if s.Now.IsZero() || readyTime1.IsZero() || readyTime2.IsZero() {
return afterOrZero(readyTime1, readyTime2)
}
rankDiff := logarithmicRankDiff(*readyTime1, *readyTime2, s.Now)
if rankDiff == 0 {
return s.Pods[i].UID < s.Pods[j].UID
}
return rankDiff < 0
}
}
}
// 7. Pods with containers with higher restart counts < lower restart counts
if res := compareMaxContainerRestarts(s.Pods[i], s.Pods[j]); res != nil {
return *res
}
// 8. Empty creation time pods < newer pods < older pods
if !s.Pods[i].CreationTimestamp.Equal(&s.Pods[j].CreationTimestamp) {
if !utilfeature.DefaultFeatureGate.Enabled(features.LogarithmicScaleDown) {
return afterOrZero(&s.Pods[i].CreationTimestamp, &s.Pods[j].CreationTimestamp)
} else {
if s.Now.IsZero() || s.Pods[i].CreationTimestamp.IsZero() || s.Pods[j].CreationTimestamp.IsZero() {
return afterOrZero(&s.Pods[i].CreationTimestamp, &s.Pods[j].CreationTimestamp)
}
rankDiff := logarithmicRankDiff(s.Pods[i].CreationTimestamp, s.Pods[j].CreationTimestamp, s.Now)
if rankDiff == 0 {
return s.Pods[i].UID < s.Pods[j].UID
}
return rankDiff < 0
}
}
return false
}
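// exampleSortForDeletion is a hedged sketch, not part of the original file:
// it pairs pods with their (hypothetical) colocation ranks and sorts them so
// that the preferred deletion candidates come first. It assumes the standard
// library sort package is imported.
func exampleSortForDeletion(pods []*v1.Pod, ranks []int) []*v1.Pod {
	// Pods and Rank must have equal length; Now enables logarithmic
	// timestamp comparison when the LogarithmicScaleDown gate is on.
	sort.Sort(ActivePodsWithRanks{Pods: pods, Rank: ranks, Now: metav1.Now()})
	return pods
}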
// afterOrZero checks if time t1 is after time t2; if one of them
// is zero, the zero time is seen as after non-zero time.
func afterOrZero(t1, t2 *metav1.Time) bool {
if t1.Time.IsZero() || t2.Time.IsZero() {
return t1.Time.IsZero()
}
return t1.After(t2.Time)
}
// logarithmicRankDiff calculates the base-2 logarithmic ranks of 2 timestamps,
// compared to the current timestamp
func logarithmicRankDiff(t1, t2, now metav1.Time) int64 {
d1 := now.Sub(t1.Time)
d2 := now.Sub(t2.Time)
r1 := int64(-1)
r2 := int64(-1)
if d1 > 0 {
r1 = int64(math.Log2(float64(d1)))
}
if d2 > 0 {
r2 = int64(math.Log2(float64(d2)))
}
return r1 - r2
}
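// exampleLogarithmicRankDiff is a hedged sketch, not part of the original
// file: it illustrates the power-of-two bucketing. Two pods whose ages fall
// into the same base-2 bucket get a rank diff of 0, while a pod roughly four
// times as old lands two buckets higher.
func exampleLogarithmicRankDiff() {
	now := metav1.Now()
	t1 := metav1.NewTime(now.Add(-10 * time.Minute))
	t2 := metav1.NewTime(now.Add(-11 * time.Minute)) // same base-2 bucket as t1
	t3 := metav1.NewTime(now.Add(-40 * time.Minute)) // two buckets older than t1
	fmt.Println(logarithmicRankDiff(t1, t2, now)) // 0
	fmt.Println(logarithmicRankDiff(t1, t3, now)) // -2: t1 is younger
}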
func podReadyTime(pod *v1.Pod) *metav1.Time {
if podutil.IsPodReady(pod) {
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
return &c.LastTransitionTime
}
}
}
return &metav1.Time{}
}
func maxContainerRestarts(pod *v1.Pod) (regularRestarts, sidecarRestarts int) {
for _, c := range pod.Status.ContainerStatuses {
regularRestarts = max(regularRestarts, int(c.RestartCount))
}
names := sets.New[string]()
for _, c := range pod.Spec.InitContainers {
if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
names.Insert(c.Name)
}
}
for _, c := range pod.Status.InitContainerStatuses {
if names.Has(c.Name) {
sidecarRestarts = max(sidecarRestarts, int(c.RestartCount))
}
}
return
}
// compareMaxContainerRestarts returns a *bool to encode a three-way comparison:
// true: pi has a higher container restart count.
// false: pj has a higher container restart count.
// nil: both have the same container restart count.
func compareMaxContainerRestarts(pi *v1.Pod, pj *v1.Pod) *bool {
regularRestartsI, sidecarRestartsI := maxContainerRestarts(pi)
regularRestartsJ, sidecarRestartsJ := maxContainerRestarts(pj)
if regularRestartsI != regularRestartsJ {
res := regularRestartsI > regularRestartsJ
return &res
}
// If pods have the same restart count, an attempt is made to compare the restart counts of sidecar containers.
if sidecarRestartsI != sidecarRestartsJ {
res := sidecarRestartsI > sidecarRestartsJ
return &res
}
return nil
}
// FilterClaimedPods returns pods that are controlled by the controller and match the selector.
func FilterClaimedPods(controller metav1.Object, selector labels.Selector, pods []*v1.Pod) []*v1.Pod {
var result []*v1.Pod
for _, pod := range pods {
if !metav1.IsControlledBy(pod, controller) {
// It's an orphan or owned by someone else.
continue
}
if selector.Matches(labels.Set(pod.Labels)) {
result = append(result, pod)
}
}
return result
}
// FilterActivePods returns pods that have not terminated.
func FilterActivePods(logger klog.Logger, pods []*v1.Pod) []*v1.Pod {
var result []*v1.Pod
for _, p := range pods {
if IsPodActive(p) {
result = append(result, p)
}
}
return result
}
func FilterTerminatingPods(pods []*v1.Pod) []*v1.Pod {
var result []*v1.Pod
for _, p := range pods {
if IsPodTerminating(p) {
result = append(result, p)
}
}
return result
}
func CountTerminatingPods(pods []*v1.Pod) int32 {
numberOfTerminatingPods := 0
for _, p := range pods {
if IsPodTerminating(p) {
numberOfTerminatingPods += 1
}
}
return int32(numberOfTerminatingPods)
}
// nextPodAvailabilityCheck implements logic similar to podutil.IsPodAvailable
func nextPodAvailabilityCheck(pod *v1.Pod, minReadySeconds int32, now time.Time) *time.Duration {
if !podutil.IsPodReady(pod) || minReadySeconds <= 0 {
return nil
}
c := podutil.GetPodReadyCondition(pod.Status)
if c.LastTransitionTime.IsZero() {
return nil
}
minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
nextCheck := c.LastTransitionTime.Add(minReadySecondsDuration).Sub(now)
if nextCheck > 0 {
return ptr.To(nextCheck)
}
return nil
}
// findMinNextPodAvailabilitySimpleCheck finds a duration when the next availability check should occur. It also returns the
// first pod affected by the future availability recalculation (there might be more pods if they became ready at the same time;
// this helps to implement FindMinNextPodAvailabilityCheck).
func findMinNextPodAvailabilitySimpleCheck(pods []*v1.Pod, minReadySeconds int32, now time.Time) (*time.Duration, *v1.Pod) {
var minAvailabilityCheck *time.Duration
var checkPod *v1.Pod
for _, p := range pods {
nextCheck := nextPodAvailabilityCheck(p, minReadySeconds, now)
if nextCheck != nil && (minAvailabilityCheck == nil || *nextCheck < *minAvailabilityCheck) {
minAvailabilityCheck = nextCheck
checkPod = p
}
}
return minAvailabilityCheck, checkPod
}
// FindMinNextPodAvailabilityCheck finds a duration after which the next availability check should occur.
// Availability should be checked relative to the same time at which the status evaluation/update occurred
// (e.g. .status.availableReplicas), which is why lastOwnerStatusEvaluation is passed in. This ensures that
// we do not skip any pods that might have become available since the owner status evaluation
// (findMinNextPodAvailabilitySimpleCheck would return nil for them at a later time).
// The clock is then used to calculate a more precise time for the next availability check.
func FindMinNextPodAvailabilityCheck(pods []*v1.Pod, minReadySeconds int32, lastOwnerStatusEvaluation time.Time, clock clock.PassiveClock) *time.Duration {
nextCheckAccordingToOwnerStatusEvaluation, checkPod := findMinNextPodAvailabilitySimpleCheck(pods, minReadySeconds, lastOwnerStatusEvaluation)
if nextCheckAccordingToOwnerStatusEvaluation == nil || checkPod == nil {
return nil
}
// There must be a nextCheck. We try to calculate a more precise value for the next availability check.
// Check the earliest pod to avoid being preempted by a later pod.
if updatedNextCheck := nextPodAvailabilityCheck(checkPod, minReadySeconds, clock.Now()); updatedNextCheck != nil {
// There is a delay since the last Now() call (lastOwnerStatusEvaluation). Use the updatedNextCheck.
return updatedNextCheck
}
// Fall back to 0 (immediate check) in case the last nextPodAvailabilityCheck call (with a refreshed Now) returns nil, as we might be past the check.
return ptr.To(time.Duration(0))
}
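// A hedged usage sketch, not part of the original file: the caller requeues
// its sync key after the returned duration, for example with a delaying
// workqueue, so availability is re-evaluated exactly when the earliest pod
// crosses minReadySeconds:
//
//	if d := FindMinNextPodAvailabilityCheck(pods, minReadySeconds, lastEval, clock); d != nil {
//		queue.AddAfter(key, *d)
//	}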
func IsPodActive(p *v1.Pod) bool {
return v1.PodSucceeded != p.Status.Phase &&
v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil
}
func IsPodTerminating(p *v1.Pod) bool {
return !podutil.IsPodTerminal(p) &&
p.DeletionTimestamp != nil
}
// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet {
activeFilter := func(rs *apps.ReplicaSet) bool {
return rs != nil && *(rs.Spec.Replicas) > 0
}
return FilterReplicaSets(replicaSets, activeFilter)
}
type filterRS func(rs *apps.ReplicaSet) bool
// FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn).
func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet {
var filtered []*apps.ReplicaSet
for i := range RSes {
if filterFn(RSes[i]) {
filtered = append(filtered, RSes[i])
}
}
return filtered
}
// AddPodNodeNameIndexer adds an indexer for Pod's nodeName to the given PodInformer.
// This indexer is used to efficiently look up pods by their node name.
func AddPodNodeNameIndexer(podInformer cache.SharedIndexInformer) error {
if _, exists := podInformer.GetIndexer().GetIndexers()[PodNodeNameKeyIndex]; exists {
// indexer already exists, do nothing
return nil
}
return podInformer.AddIndexers(cache.Indexers{
PodNodeNameKeyIndex: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
if len(pod.Spec.NodeName) == 0 {
return []string{}, nil
}
return []string{pod.Spec.NodeName}, nil
},
})
}
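// examplePodsOnNode is a hedged sketch, not part of the original file: after
// registering the node-name indexer, pods on a node can be listed without a
// full cache scan. The informer is assumed to come from the caller.
func examplePodsOnNode(podInformer cache.SharedIndexInformer, nodeName string) ([]interface{}, error) {
	if err := AddPodNodeNameIndexer(podInformer); err != nil {
		return nil, err
	}
	// ByIndex returns the cached objects whose index value equals nodeName.
	return podInformer.GetIndexer().ByIndex(PodNodeNameKeyIndex, nodeName)
}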
// OrphanPodIndexKeyForNamespace returns the orphan pod index key for a specific namespace.
func OrphanPodIndexKeyForNamespace(namespace string) string {
return OrphanPodIndexKey + "/" + namespace
}
// AddPodControllerUIDIndexer adds an indexer for Pod's controllerRef.UID to the given PodInformer.
// This indexer is used to efficiently look up pods by their ControllerRef.UID
func AddPodControllerUIDIndexer(podInformer cache.SharedIndexInformer) error {
if _, exists := podInformer.GetIndexer().GetIndexers()[PodControllerUIDIndex]; exists {
// indexer already exists, do nothing
return nil
}
return podInformer.AddIndexers(cache.Indexers{
PodControllerUIDIndex: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return nil, nil
}
// Get the ControllerRef of the Pod to check if it's managed by a controller
if ref := metav1.GetControllerOf(pod); ref != nil {
return []string{string(ref.UID)}, nil
}
// If the Pod has no controller (i.e., it's orphaned), index it with the OrphanPodIndexKeyForNamespace
// This helps identify orphan pods for reconciliation and adoption by controllers
return []string{OrphanPodIndexKeyForNamespace(pod.Namespace)}, nil
},
})
}
// FilterPodsByOwner gets the Pods managed by an owner or orphan Pods in the owner's namespace
func FilterPodsByOwner(podIndexer cache.Indexer, owner *metav1.ObjectMeta) ([]*v1.Pod, error) {
result := []*v1.Pod{}
// Iterate over two keys:
// - the UID of the owner, which identifies Pods that are controlled by the owner
// - the OrphanPodIndexKey, which identifies orphaned Pods in the owner's namespace and might be adopted by the owner later
for _, key := range []string{string(owner.UID), OrphanPodIndexKeyForNamespace(owner.Namespace)} {
pods, err := podIndexer.ByIndex(PodControllerUIDIndex, key)
if err != nil {
return nil, err
}
for _, obj := range pods {
pod, ok := obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type in pod indexer: %v", obj))
continue
}
result = append(result, pod)
}
}
return result, nil
}
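// exampleListOwnedPods is a hedged sketch, not part of the original file:
// after registering the controllerRef.UID indexer on a shared pod informer,
// a controller can fetch its own pods plus same-namespace orphans (adoption
// candidates) in one call. The informer and owner are assumed to come from
// the caller.
func exampleListOwnedPods(podInformer cache.SharedIndexInformer, owner *metav1.ObjectMeta) ([]*v1.Pod, error) {
	if err := AddPodControllerUIDIndexer(podInformer); err != nil {
		return nil, err
	}
	return FilterPodsByOwner(podInformer.GetIndexer(), owner)
}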
// PodKey returns a key unique to the given pod within a cluster.
// It's used so we consistently use the same key scheme in this module.
// It does exactly what cache.MetaNamespaceKeyFunc would have done
// except there's no possibility for error since we know the exact type.
func PodKey(pod *v1.Pod) string {
return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
}
// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
type ControllersByCreationTimestamp []*v1.ReplicationController
func (o ControllersByCreationTimestamp) Len() int { return len(o) }
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ControllersByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsBySizeOlder []*apps.ReplicaSet
func (o ReplicaSetsBySizeOlder) Len() int { return len(o) }
func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {
if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
return ReplicaSetsByCreationTimestamp(o).Less(i, j)
}
return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
}
// ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from new to old replica sets.
type ReplicaSetsBySizeNewer []*apps.ReplicaSet
func (o ReplicaSetsBySizeNewer) Len() int { return len(o) }
func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
return ReplicaSetsByCreationTimestamp(o).Less(j, i)
}
return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
}
// AddOrUpdateTaintOnNode adds taints to the node. If any taint was added, it issues
// API calls to update the node; otherwise it makes no API calls. Returns an error if any call fails.
func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
option.ResourceVersion = "0"
firstTry = false
}
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to update taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
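// exampleTaintNode is a hedged sketch, not part of the original file: it adds
// a NoSchedule taint, with the conflict-retry loop handled inside
// AddOrUpdateTaintOnNode. The taint key and value are hypothetical.
func exampleTaintNode(ctx context.Context, c clientset.Interface, nodeName string) error {
	taint := &v1.Taint{
		Key:    "example.com/maintenance", // hypothetical taint key
		Value:  "true",
		Effect: v1.TaintEffectNoSchedule,
	}
	return AddOrUpdateTaintOnNode(ctx, c, nodeName, taint)
}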
// RemoveTaintOffNode is for cleaning up taints temporarily added to a node;
// it won't fail if the target taint doesn't exist or has already been removed.
// If passed a node, it checks whether there's anything to be done; if the taint
// is not present, it makes no API calls.
func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for _, taint := range taints {
if taintutils.TaintExists(node.Spec.Taints, taint) {
match = true
break
}
}
if !match {
return nil
}
}
firstTry := true
return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
option.ResourceVersion = "0"
firstTry = false
}
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to remove taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
// PatchNodeTaints patches a node's taints.
// The ResourceVersion is stripped from the diff base so that the generated patch sets a
// resourceVersion, making the Patch request check for conflicts over .spec.taints.
// This is needed because .spec.taints specifies neither patchMergeKey nor patchStrategy,
// and adding them is no longer an option for compatibility reasons. Other patch strategies
// work for adding new taints but do not solve the problem of taint removal.
oldNodeNoRV := oldNode.DeepCopy()
oldNodeNoRV.ResourceVersion = ""
oldDataNoRV, err := json.Marshal(&oldNodeNoRV)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNodeNoRV, nodeName, err)
}
newTaints := newNode.Spec.Taints
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldDataNoRV, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
// ComputeHash returns a hash value calculated from the pod template and
// a collisionCount, which is mixed in to avoid hash collisions. The hash is
// safe-encoded to avoid producing bad words.
func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string {
podTemplateSpecHasher := fnv.New32a()
hashutil.DeepHashObject(podTemplateSpecHasher, *template)
// Add collisionCount in the hash if it exists.
if collisionCount != nil {
collisionCountBytes := make([]byte, 8)
binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
podTemplateSpecHasher.Write(collisionCountBytes)
}
return rand.SafeEncodeString(fmt.Sprint(podTemplateSpecHasher.Sum32()))
}
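// exampleComputeHash is a hedged sketch, not part of the original file: this
// is the pattern deployments use to derive a ReplicaSet name suffix, bumping
// collisionCount on the rare hash collision to obtain a fresh hash.
func exampleComputeHash(template *v1.PodTemplateSpec) string {
	var collisionCount int32 // incremented by the caller on a detected collision
	return ComputeHash(template, &collisionCount)
}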
func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, labelsToUpdate map[string]string) error {
firstTry := true
return clientretry.RetryOnConflict(UpdateLabelBackoff, func() error {
var err error
var node *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
option.ResourceVersion = "0"
firstTry = false
}
node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, option)
if err != nil {
return err
}
// Make a copy of the node and update the labels.
newNode := node.DeepCopy()
if newNode.Labels == nil {
newNode.Labels = make(map[string]string)
}
for key, value := range labelsToUpdate {
newNode.Labels[key] = value
}
oldData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err)
}
newData, err := json.Marshal(newNode)
if err != nil {
return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil {
return fmt.Errorf("failed to create a two-way merge patch: %v", err)
}
if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to patch the node: %v", err)
}
return nil
})
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/cronjob/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with CronJobControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration(in *v1alpha1.CronJobControllerConfiguration, out *config.CronJobControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration(in, out, s)
}
// Convert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration is an autogenerated conversion function.
func Convert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration(in *config.CronJobControllerConfiguration, out *v1alpha1.CronJobControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration(in, out, s)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultCronJobControllerConfiguration defaults a pointer to a
// CronJobControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultCronJobControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.CronJobControllerConfiguration) {
if obj.ConcurrentCronJobSyncs == 0 {
obj.ConcurrentCronJobSyncs = 5
}
}
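// A hedged sketch, not part of the original file, of the consumption pattern
// described above: a hypothetical wrapper type embedding this configuration
// invokes the recommended defaulting from its own `SetDefaults_` function.
//
//	func SetDefaults_MyControllerManagerConfiguration(obj *MyControllerManagerConfiguration) {
//		RecommendedDefaultCronJobControllerConfiguration(&obj.CronJobController)
//	}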
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/cronjob/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.CronJobControllerConfiguration)(nil), (*configv1alpha1.CronJobControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration(a.(*config.CronJobControllerConfiguration), b.(*configv1alpha1.CronJobControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.CronJobControllerConfiguration)(nil), (*config.CronJobControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration(a.(*configv1alpha1.CronJobControllerConfiguration), b.(*config.CronJobControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_CronJobControllerConfiguration_To_config_CronJobControllerConfiguration(in *configv1alpha1.CronJobControllerConfiguration, out *config.CronJobControllerConfiguration, s conversion.Scope) error {
out.ConcurrentCronJobSyncs = in.ConcurrentCronJobSyncs
return nil
}
func autoConvert_config_CronJobControllerConfiguration_To_v1alpha1_CronJobControllerConfiguration(in *config.CronJobControllerConfiguration, out *configv1alpha1.CronJobControllerConfiguration, s conversion.Scope) error {
out.ConcurrentCronJobSyncs = in.ConcurrentCronJobSyncs
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobControllerConfiguration) DeepCopyInto(out *CronJobControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobControllerConfiguration.
func (in *CronJobControllerConfiguration) DeepCopy() *CronJobControllerConfiguration {
if in == nil {
return nil
}
out := new(CronJobControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with DaemonSetControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration(in *v1alpha1.DaemonSetControllerConfiguration, out *daemonconfig.DaemonSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration(in, out, s)
}
// Convert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration is an autogenerated conversion function.
func Convert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration(in *daemonconfig.DaemonSetControllerConfiguration, out *v1alpha1.DaemonSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultDaemonSetControllerConfiguration defaults a pointer to a
// DaemonSetControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultDaemonSetControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.DaemonSetControllerConfiguration) {
if obj.ConcurrentDaemonSetSyncs == 0 {
obj.ConcurrentDaemonSetSyncs = 2
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/daemon/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.DaemonSetControllerConfiguration)(nil), (*configv1alpha1.DaemonSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration(a.(*config.DaemonSetControllerConfiguration), b.(*configv1alpha1.DaemonSetControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.DaemonSetControllerConfiguration)(nil), (*config.DaemonSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration(a.(*configv1alpha1.DaemonSetControllerConfiguration), b.(*config.DaemonSetControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DaemonSetControllerConfiguration_To_config_DaemonSetControllerConfiguration(in *configv1alpha1.DaemonSetControllerConfiguration, out *config.DaemonSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs
return nil
}
func autoConvert_config_DaemonSetControllerConfiguration_To_v1alpha1_DaemonSetControllerConfiguration(in *config.DaemonSetControllerConfiguration, out *configv1alpha1.DaemonSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetControllerConfiguration) DeepCopyInto(out *DaemonSetControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetControllerConfiguration.
func (in *DaemonSetControllerConfiguration) DeepCopy() *DaemonSetControllerConfiguration {
if in == nil {
return nil
}
out := new(DaemonSetControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
deploymentconfig "k8s.io/kubernetes/pkg/controller/deployment/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with DeploymentControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration(in *v1alpha1.DeploymentControllerConfiguration, out *deploymentconfig.DeploymentControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration(in, out, s)
}
// Convert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration is an autogenerated conversion function.
func Convert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration(in *deploymentconfig.DeploymentControllerConfiguration, out *v1alpha1.DeploymentControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultDeploymentControllerConfiguration defaults a pointer to a
// DeploymentControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want for
// their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of conversion and there
// would be no easy way to opt out. Instead, if you want to use this defaulting
// method, run it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultDeploymentControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.DeploymentControllerConfiguration) {
if obj.ConcurrentDeploymentSyncs == 0 {
obj.ConcurrentDeploymentSyncs = 5
}
}
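// Illustrative sketch (not part of the original file): a consumer embedding this
// type would typically invoke the recommended defaulting from its own wrapper's
// SetDefaults_ function. MyControllerManagerConfiguration and its DeploymentController
// field below are hypothetical names used only for illustration.
//
//	func SetDefaults_MyControllerManagerConfiguration(obj *MyControllerManagerConfiguration) {
//		RecommendedDefaultDeploymentControllerConfiguration(&obj.DeploymentController)
//	}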
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/deployment/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.DeploymentControllerConfiguration)(nil), (*configv1alpha1.DeploymentControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration(a.(*config.DeploymentControllerConfiguration), b.(*configv1alpha1.DeploymentControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.DeploymentControllerConfiguration)(nil), (*config.DeploymentControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration(a.(*configv1alpha1.DeploymentControllerConfiguration), b.(*config.DeploymentControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
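// Illustrative sketch (not part of the original file): once RegisterConversions has
// populated a scheme, conversions between the versioned and internal types can be
// invoked generically via runtime.Scheme.Convert.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err)
//	}
//	in := &configv1alpha1.DeploymentControllerConfiguration{ConcurrentDeploymentSyncs: 5}
//	out := &config.DeploymentControllerConfiguration{}
//	if err := scheme.Convert(in, out, nil); err != nil {
//		panic(err)
//	}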
func autoConvert_v1alpha1_DeploymentControllerConfiguration_To_config_DeploymentControllerConfiguration(in *configv1alpha1.DeploymentControllerConfiguration, out *config.DeploymentControllerConfiguration, s conversion.Scope) error {
out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs
return nil
}
func autoConvert_config_DeploymentControllerConfiguration_To_v1alpha1_DeploymentControllerConfiguration(in *config.DeploymentControllerConfiguration, out *configv1alpha1.DeploymentControllerConfiguration, s conversion.Scope) error {
out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentControllerConfiguration) DeepCopyInto(out *DeploymentControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentControllerConfiguration.
func (in *DeploymentControllerConfiguration) DeepCopy() *DeploymentControllerConfiguration {
if in == nil {
return nil
}
out := new(DeploymentControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/utils/integer"
)
const (
// RevisionAnnotation is the revision annotation of a deployment's replica sets, which records its rollout sequence
RevisionAnnotation = "deployment.kubernetes.io/revision"
// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
// in its replica sets. Helps in separating scaling events from the rollout process and for
// determining if the new replica set for a deployment is really saturated.
DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
// proportions in case the deployment has surge replicas.
MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
// RollbackRevisionNotFound is the rollback event reason used when the revision to roll back to is not found
RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
// RollbackTemplateUnchanged is the rollback event reason used when the rollback template is unchanged
RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
// RollbackDone is the rollback event reason used when a rollback completes
RollbackDone = "DeploymentRollback"
// Reasons for deployment conditions
//
// Progressing:
// ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
// of the rollout process.
ReplicaSetUpdatedReason = "ReplicaSetUpdated"
// FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
FailedRSCreateReason = "ReplicaSetCreateError"
// NewReplicaSetReason is added in a deployment when it creates a new replica set.
NewReplicaSetReason = "NewReplicaSetCreated"
// FoundNewRSReason is added in a deployment when it adopts an existing replica set.
FoundNewRSReason = "FoundNewReplicaSet"
// NewRSAvailableReason is added in a deployment when its newest replica set is made available,
// i.e. the number of new pods that have passed readiness checks and run for at least minReadySeconds
// is at least the minimum number of available pods that need to run for the deployment.
NewRSAvailableReason = "NewReplicaSetAvailable"
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
// PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
// estimated once a deployment is paused.
PausedDeployReason = "DeploymentPaused"
// ResumedDeployReason is added in a deployment when it is resumed. Useful for not accidentally
// failing deployments that were paused amidst a rollout and are bounded by a deadline.
ResumedDeployReason = "DeploymentResumed"
//
// Available:
// MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
MinimumReplicasAvailable = "MinimumReplicasAvailable"
// MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
// available.
MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
)
// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
return &apps.DeploymentCondition{
Type: condType,
Status: status,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason, the status is left unchanged.
func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
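// Illustrative usage (not part of the original file): constructing a Progressing
// condition and setting it on a deployment status, using only identifiers defined
// in this package and its imports; d is assumed to be an *apps.Deployment.
//
//	cond := NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue,
//		ReplicaSetUpdatedReason, "ReplicaSet is progressing.")
//	SetDeploymentCondition(&d.Status, *cond)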
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition {
var newConditions []apps.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentConditionType(cond.Type),
Status: cond.Status,
LastTransitionTime: cond.LastTransitionTime,
LastUpdateTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
}
}
// SetDeploymentRevision updates the revision for a deployment.
func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
updated := false
if deployment.Annotations == nil {
deployment.Annotations = make(map[string]string)
}
if deployment.Annotations[RevisionAnnotation] != revision {
deployment.Annotations[RevisionAnnotation] = revision
updated = true
}
return updated
}
// MaxRevision finds the highest revision in the replica sets
func MaxRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
max := int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information could not be parsed
logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v > max {
max = v
}
}
return max
}
// LastRevision finds the second max revision number in all replica sets (the last revision)
func LastRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
max, secMax := int64(0), int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information could not be parsed
logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v >= max {
secMax = max
max = v
} else if v > secMax {
secMax = v
}
}
return secMax
}
// Revision returns the revision number of the input object.
func Revision(obj runtime.Object) (int64, error) {
acc, err := meta.Accessor(obj)
if err != nil {
return 0, err
}
v, ok := acc.GetAnnotations()[RevisionAnnotation]
if !ok {
return 0, nil
}
return strconv.ParseInt(v, 10, 64)
}
// SetNewReplicaSetAnnotations sets the new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if the replica set's annotations changed.
func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
logger := klog.FromContext(ctx)
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
// Then, update replica set's revision annotation
if newRS.Annotations == nil {
newRS.Annotations = make(map[string]string)
}
oldRevision, ok := newRS.Annotations[RevisionAnnotation]
// The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
// of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision has been updated, and
// newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision.
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
logger.Info("Updating replica set revision OldRevision not int", "err", err)
return false
}
// If the RS annotation is empty then initialise it to 0
oldRevisionInt = 0
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
logger.Info("Updating replica set revision NewRevision not int", "err", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
logger.V(4).Info("Updating replica set revision", "replicaSet", klog.KObj(newRS), "newRevision", newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
// for historical information.
if ok && oldRevisionInt < newRevisionInt {
revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation]
oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
if len(oldRevisions[0]) == 0 {
newRS.Annotations[RevisionHistoryAnnotation] = oldRevision
} else {
totalLen := len(revisionHistoryAnnotation) + len(oldRevision) + 1
// index for the starting position in oldRevisions
start := 0
for totalLen > revHistoryLimitInChars && start < len(oldRevisions) {
totalLen = totalLen - len(oldRevisions[start]) - 1
start++
}
if totalLen <= revHistoryLimitInChars {
oldRevisions = append(oldRevisions[start:], oldRevision)
newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
} else {
logger.Info("Not appending revision due to revision history length limit reached", "revisionHistoryLimit", revHistoryLimitInChars)
}
}
}
// If the new replica set is about to be created, we need to add replica annotations to it.
if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
annotationChanged = true
}
return annotationChanged
}
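// Worked example (illustrative): suppose newRS carries revision "3" and revision
// history "1,2", and is being updated to newRevision "5" with a sufficiently large
// revHistoryLimitInChars. The revision annotation becomes "5" and the old revision
// is appended, so the history annotation becomes "1,2,3". If appending would exceed
// revHistoryLimitInChars, the oldest entries are dropped from the front first; if
// even that cannot make it fit, nothing is appended.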
var annotationsToSkip = map[string]bool{
v1.LastAppliedConfigAnnotation: true,
RevisionAnnotation: true,
RevisionHistoryAnnotation: true,
DesiredReplicasAnnotation: true,
MaxReplicasAnnotation: true,
apps.DeprecatedRollbackTo: true,
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied?
//
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
return annotationsToSkip[key]
}
// copyDeploymentAnnotationsToReplicaSet copies the deployment's annotations to the replica set's annotations,
// and returns true if the replica set's annotations changed.
// Note that apply and revision annotations are not copied.
func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
rsAnnotationsChanged := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
for k, v := range deployment.Annotations {
// newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated
// by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of
// deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable.
if _, exist := rs.Annotations[k]; skipCopyAnnotation(k) || (exist && rs.Annotations[k] == v) {
continue
}
rs.Annotations[k] = v
rsAnnotationsChanged = true
}
return rsAnnotationsChanged
}
// SetDeploymentAnnotationsTo sets the deployment's annotations to the given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
for k, v := range rollbackToRS.Annotations {
if !skipCopyAnnotation(k) {
deployment.Annotations[k] = v
}
}
}
func getSkippedAnnotations(annotations map[string]string) map[string]string {
skippedAnnotations := make(map[string]string)
for k, v := range annotations {
if skipCopyAnnotation(k) {
skippedAnnotations[k] = v
}
}
return skippedAnnotations
}
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
if newRS == nil && len(oldRSs) == 0 {
return nil
}
sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs)))
allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
switch len(allRSs) {
case 0:
// If there is no active replica set then we should return the newest.
if newRS != nil {
return newRS
}
return oldRSs[0]
case 1:
return allRSs[0]
default:
return nil
}
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
func GetDesiredReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getNonNegativeInt32FromAnnotationVerbose(logger, rs, DesiredReplicasAnnotation)
}
func getMaxReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getNonNegativeInt32FromAnnotationVerbose(logger, rs, MaxReplicasAnnotation)
}
func getNonNegativeInt32FromAnnotationVerbose(logger klog.Logger, rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
value, ok, err := getNonNegativeInt32FromAnnotation(rs, annotationKey)
if err != nil {
logger.V(2).Info("Could not convert the value with annotation key for the replica set", "annotationValue", rs.Annotations[annotationKey], "annotationKey", annotationKey, "replicaSet", klog.KObj(rs))
}
return value, ok
}
func getNonNegativeInt32FromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool, error) {
annotationValue, ok := rs.Annotations[annotationKey]
if !ok {
return int32(0), false, nil
}
intValue, err := strconv.ParseUint(annotationValue, 10, 32)
if err != nil {
return int32(0), false, err
}
if intValue > math.MaxInt32 {
return int32(0), false, fmt.Errorf("value %d is out of range (higher than %d)", intValue, math.MaxInt32)
}
return int32(intValue), true, nil
}
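// Illustrative behavior (not part of the original file): a missing annotation
// yields (0, false, nil); a negative or non-numeric value yields a parse error
// from strconv.ParseUint; a value such as "3000000000", which fits in a uint32
// but exceeds math.MaxInt32, yields the explicit out-of-range error.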
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
updated := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
rs.Annotations[DesiredReplicasAnnotation] = desiredString
updated = true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
rs.Annotations[MaxReplicasAnnotation] = maxString
updated = true
}
return updated
}
// ReplicasAnnotationsNeedUpdate returns true if the replicas annotations need to be updated
func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
if rs.Annotations == nil {
return true
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
return true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
return true
}
return false
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func MaxUnavailable(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
if maxUnavailable > *deployment.Spec.Replicas {
return *deployment.Spec.Replicas
}
return maxUnavailable
}
// MinAvailable returns the minimum number of pods that must be available for a given deployment
func MinAvailable(deployment *apps.Deployment) int32 {
if !IsRollingUpdate(deployment) {
return int32(0)
}
return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
}
// MaxSurge returns the maximum surge pods a rolling deployment can take.
func MaxSurge(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) {
return int32(0)
}
// Error caught by validation
maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxSurge
}
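// Worked example (illustrative): for a RollingUpdate deployment with
// spec.replicas=10, maxSurge="25%" and maxUnavailable="25%":
//
//	MaxSurge       -> 3 (25% of 10, rounded up)
//	MaxUnavailable -> 2 (25% of 10, rounded down)
//	MinAvailable   -> 8 (10 - MaxUnavailable)
//
// The rounding directions come from ResolveFenceposts further below.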
// GetReplicaSetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs to be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetReplicaSetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}
rsFraction := getReplicaSetFraction(logger, *rs, d)
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
// Use the minimum between the replica set fraction and the maximum allowed replicas
// when scaling up. This way we ensure we will not scale up more than the allowed
// replicas we can add.
return min(rsFraction, allowed)
}
// Use the maximum between the replica set fraction and the maximum allowed replicas
// when scaling down. This way we ensure we will not scale down more than the allowed
// replicas we can remove.
return max(rsFraction, allowed)
}
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(logger klog.Logger, rs apps.ReplicaSet, d apps.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
}
deploymentMaxReplicas := *(d.Spec.Replicas) + MaxSurge(d)
deploymentMaxReplicasBeforeScale, ok := getMaxReplicasAnnotation(logger, &rs)
if !ok || deploymentMaxReplicasBeforeScale == 0 {
// If we cannot find the annotation then fallback to the current deployment size.
// This can occur if someone tampers with the annotation (removes it, sets it to an invalid value, or to 0).
// Note that this will not be an accurate proportion estimate if other replica sets have different values
// (which would mean that the deployment was scaled at some point), but we will at least stay within
// limits due to the min-max comparisons in GetReplicaSetProportion.
deploymentMaxReplicasBeforeScale = d.Status.Replicas
if deploymentMaxReplicasBeforeScale == 0 {
// Rare situation: missing annotation; some actor has removed it and pods are failing to be created.
return 0
}
}
// We should never proportionally scale up from zero (see GetReplicaSetProportion), which means rs.spec.replicas will never be zero here.
scaleBase := *(rs.Spec.Replicas)
// deploymentMaxReplicasBeforeScale should normally be a positive value, and we have made sure that it is not a zero.
newRSsize := (float64(scaleBase * deploymentMaxReplicas)) / float64(deploymentMaxReplicasBeforeScale)
return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}
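// Worked example (illustrative): a deployment previously sized at 10 replicas with
// no surge stamps max-replicas=10 onto its replica sets. If the deployment is then
// scaled to 20 replicas (deploymentMaxReplicas=20), a replica set currently at 6
// replicas gets newRSsize = 6*20/10 = 12, so its fraction is 12-6 = +6 replicas.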
// RsListFromClient returns an RsListFunc that wraps the given client.
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
if err != nil {
return nil, err
}
var ret []*apps.ReplicaSet
for i := range rsList.Items {
ret = append(ret, &rsList.Items[i])
}
return ret, err
}
}
// TODO: switch RsListFunc and podListFunc to full namespacers
// RsListFunc returns the ReplicaSets in the given namespace that match the given metav1.ListOptions.
type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
// podListFunc returns the PodList in the given namespace that matches the given metav1.ListOptions.
type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
// ListReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*apps.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
}
}
return owned, nil
}
// ListPods returns a list of pods the given deployment targets.
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSets().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getPodList(namespace, options)
if err != nil {
return all, err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make(map[types.UID]bool, len(rsList))
for _, rs := range rsList {
rsMap[rs.UID] = true
}
owned := &v1.PodList{Items: make([]v1.Pod, 0, len(all.Items))}
for i := range all.Items {
pod := &all.Items[i]
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil && rsMap[controllerRef.UID] {
owned.Items = append(owned.Items, *pod)
}
}
return owned, nil
}
// EqualIgnoreHash returns true if two given PodTemplateSpecs are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
// In rare cases, such as after cluster upgrades, a Deployment may end up
// having more than one new ReplicaSet with the same template as its own,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
return rsList[i]
}
}
// new ReplicaSet does not exist.
return nil
}
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first returned set of old replica sets doesn't include the ones with no pods, and the second returned set includes all old replica sets.
func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) {
var requiredRSs []*apps.ReplicaSet
var allRSs []*apps.ReplicaSet
newRS := FindNewReplicaSet(deployment, rsList)
for _, rs := range rsList {
// Filter out new replica set
if newRS != nil && rs.UID == newRS.UID {
continue
}
allRSs = append(allRSs, rs)
if *(rs.Spec.Replicas) != 0 {
requiredRSs = append(requiredRSs, rs)
}
}
return requiredRSs, allRSs
}
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
deployment.Spec.Template.ObjectMeta.Labels,
apps.DefaultDeploymentUniqueLabelKey)
return deployment
}
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReplicas += *(rs.Spec.Replicas)
}
}
return totalReplicas
}
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalActualReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalActualReplicas += rs.Status.Replicas
}
}
return totalActualReplicas
}
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReadyReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReadyReplicas += rs.Status.ReadyReplicas
}
}
return totalReadyReplicas
}
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalAvailableReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalAvailableReplicas += rs.Status.AvailableReplicas
}
}
return totalAvailableReplicas
}
// GetTerminatingReplicaCountForReplicaSets returns the number of terminating pods for all replica sets,
// or nil if any replica set has been synced by the controller but does not report its terminating count.
func GetTerminatingReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) *int32 {
terminatingReplicas := int32(0)
for _, rs := range replicaSets {
switch {
case rs == nil:
// No-op
case rs.Status.ObservedGeneration == 0 && rs.Status.TerminatingReplicas == nil:
// ReplicaSets that have never been synced by the controller don't contribute to TerminatingReplicas
case rs.Status.TerminatingReplicas == nil:
// If any replicaset synced by the controller hasn't reported TerminatingReplicas, we cannot calculate a sum
return nil
default:
terminatingReplicas += *rs.Status.TerminatingReplicas
}
}
return &terminatingReplicas
}
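// Illustrative semantics (not part of the original file): given rs1 reporting
// TerminatingReplicas=2, rs2 reporting 1, and rs3 never synced by the controller
// (ObservedGeneration==0 and TerminatingReplicas==nil), the result points to 3.
// If rs3 had instead been synced but reported a nil TerminatingReplicas, the
// result would be nil because no meaningful sum can be computed.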
// IsRollingUpdate returns true if the strategy type is a rolling update.
func IsRollingUpdate(deployment *apps.Deployment) bool {
return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
}
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.Replicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
newStatus.ObservedGeneration >= deployment.Generation
}
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become ready or available, or old pods are scaled down, then we
// consider the deployment to be progressing.
func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
oldStatus := deployment.Status
// Old replicas that need to be scaled down
oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas
newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas
return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) ||
(newStatusOldReplicas < oldStatusOldReplicas) ||
newStatus.ReadyReplicas > deployment.Status.ReadyReplicas ||
newStatus.AvailableReplicas > deployment.Status.AvailableReplicas
}
// used for unit testing
var nowFn = func() time.Time { return time.Now() }
// DeploymentTimedOut considers a deployment to have timed out once the condition that reports its
// progress is older than progressDeadlineSeconds, or once a Progressing condition with reason
// TimedOutReason already exists.
func DeploymentTimedOut(ctx context.Context, deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
if !HasProgressDeadline(deployment) {
return false
}
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing)
if condition == nil {
return false
}
// If the previous condition has been a successful rollout then we shouldn't try to
// estimate any progress. Scenario:
//
// * progressDeadlineSeconds is smaller than the difference between now and the time
// the last rollout finished in the past.
// * the creation of a new ReplicaSet triggers a resync of the Deployment prior to the
// cached copy of the Deployment getting updated with the status.condition that indicates
// the creation of the new ReplicaSet.
//
// The Deployment will be resynced and eventually its Progressing condition will catch
// up with the state of the world.
if condition.Reason == NewRSAvailableReason {
return false
}
if condition.Reason == TimedOutReason {
return true
}
logger := klog.FromContext(ctx)
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
from := condition.LastUpdateTime
now := nowFn()
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)
logger.V(4).Info("Deployment timed out from last progress check", "deployment", klog.KObj(deployment), "timeout", timedOut, "from", from, "now", now)
return timedOut
}
// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) {
switch deployment.Spec.Strategy.Type {
case apps.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
return 0, err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods {
// Cannot scale up.
return *(newRS.Spec.Replicas), nil
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
scaleUpCount = min(scaleUpCount, *(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case apps.RecreateDeploymentStrategyType:
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
}
}
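// Worked example (illustrative): for a RollingUpdate deployment with
// spec.replicas=10 and maxSurge=2, if all replica sets currently total 11 pods,
// then maxTotalPods=12 and scaleUpCount=1. With newRS at 5 replicas the scale-up
// is further capped at min(1, 10-5)=1, so NewRSNewReplicas returns 6.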
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
if rs == nil {
return false
}
desiredString := rs.Annotations[DesiredReplicasAnnotation]
desired, err := strconv.Atoi(desiredString)
if err != nil {
return false
}
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) &&
int32(desired) == *(deployment.Spec.Replicas) &&
rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
}
// WaitForObservedDeployment polls for the deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns an error if polling times out.
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
return wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := getDeploymentFunc()
if err != nil {
return false, err
}
return deployment.Status.ObservedGeneration >= desiredGeneration, nil
})
}
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true)
if err != nil {
return 0, 0, err
}
unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false)
if err != nil {
return 0, 0, err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
return int32(surge), int32(unavailable), nil
}
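// Worked example (illustrative): with desired=2, maxSurge="0%" and
// maxUnavailable="1%", surge rounds up to 0 and unavailable rounds down to 0;
// because both fenceposts resolved to zero, unavailable is bumped to 1 and the
// result is (0, 1, nil), matching the "2 desired, max unavailable 1%, surge 0%"
// case listed above.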
// HasProgressDeadline checks if the Deployment d is expected to surface the reason
// "ProgressDeadlineExceeded" when the Deployment progress takes longer than expected time.
func HasProgressDeadline(d *apps.Deployment) bool {
return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32
}
// HasRevisionHistoryLimit checks if the Deployment d is expected to keep a specified number of
// old replicaSets. These replicaSets are mainly kept with the purpose of rollback.
// The RevisionHistoryLimit can start from 0 (no retained ReplicaSets). When set to math.MaxInt32,
// the Deployment will keep all revisions.
func HasRevisionHistoryLimit(d *apps.Deployment) bool {
return d.Spec.RevisionHistoryLimit != nil && *d.Spec.RevisionHistoryLimit != math.MaxInt32
}
// GetDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef
// will actually manage it.
// Returns an error only if no matching Deployments are found.
func GetDeploymentsForReplicaSet(deploymentLister appslisters.DeploymentLister, rs *apps.ReplicaSet) ([]*apps.Deployment, error) {
if len(rs.Labels) == 0 {
return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
}
// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
dList, err := deploymentLister.Deployments(rs.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
var deployments []*apps.Deployment
for _, d := range dList {
selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
// This object has an invalid selector, so it does not match the ReplicaSet
continue
}
// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
continue
}
deployments = append(deployments, d)
}
if len(deployments) == 0 {
return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
}
return deployments, nil
}
// ReplicaSetsByRevision sorts a list of ReplicaSets by revision, using their creation timestamp or name as a tie breaker.
// By using the creation timestamp, this sorts from old to new replica sets.
type ReplicaSetsByRevision []*apps.ReplicaSet
func (o ReplicaSetsByRevision) Len() int { return len(o) }
func (o ReplicaSetsByRevision) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByRevision) Less(i, j int) bool {
revision1, err1 := Revision(o[i])
revision2, err2 := Revision(o[j])
if err1 != nil || err2 != nil || revision1 == revision2 {
return controller.ReplicaSetsByCreationTimestamp(o).Less(i, j)
}
return revision1 < revision2
}
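// Illustrative usage (not part of the original file): sorting replica sets from
// oldest to newest revision, falling back to creation timestamp (and then name)
// on ties or revision parse errors:
//
//	sort.Sort(ReplicaSetsByRevision(allRSs))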
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package util
import (
"context"
"time"
fuzz "github.com/AdaLogics/go-fuzz-headers"
apps "k8s.io/api/apps/v1"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2"
)
var (
functionsToCall = map[int]string{
0: "FuzzSetDeploymentCondition",
1: "FuzzRemoveDeploymentCondition",
2: "FuzzSetDeploymentRevision",
3: "FuzzMaxAndLastRevision",
4: "FuzzSetNewReplicaSetAnnotations",
5: "FuzzSetDeploymentAnnotationsTo",
6: "FuzzFindActiveOrLatest",
7: "FuzzGetDesiredReplicasAnnotation",
8: "FuzzSetReplicasAnnotations",
9: "FuzzReplicasAnnotationsNeedUpdate",
10: "FuzzMaxUnavailable",
11: "FuzzMinAvailable",
12: "FuzzMaxSurge",
13: "FuzzFindNewReplicaSet",
14: "FuzzFindOldReplicaSets",
15: "FuzzGetReplicaCountForReplicaSets",
16: "FuzzGetActualReplicaCountForReplicaSets",
17: "FuzzGetReadyReplicaCountForReplicaSets",
18: "FuzzGetAvailableReplicaCountForReplicaSets",
19: "FuzzNewRSNewReplicas",
20: "FuzzIsSaturated",
21: "FuzzResolveFenceposts",
22: "FuzzGetDeploymentsForReplicaSet",
}
)
func FuzzEntireDeploymentUtil(data []byte) int {
if len(data) < 10 {
return 0
}
functionToCall := int(data[0])
switch functionsToCall[functionToCall%len(functionsToCall)] {
case "FuzzSetDeploymentCondition":
return FuzzSetDeploymentCondition(data[1:])
case "FuzzRemoveDeploymentCondition":
return FuzzRemoveDeploymentCondition(data[1:])
case "FuzzSetDeploymentRevision":
return FuzzSetDeploymentRevision(data[1:])
case "FuzzMaxAndLastRevision":
return FuzzMaxAndLastRevision(data[1:])
case "FuzzSetNewReplicaSetAnnotations":
return FuzzSetNewReplicaSetAnnotations(data[1:])
case "FuzzSetDeploymentAnnotationsTo":
return FuzzSetDeploymentAnnotationsTo(data[1:])
case "FuzzFindActiveOrLatest":
return FuzzFindActiveOrLatest(data[1:])
case "FuzzGetDesiredReplicasAnnotation":
return FuzzGetDesiredReplicasAnnotation(data[1:])
case "FuzzSetReplicasAnnotations":
return FuzzSetReplicasAnnotations(data[1:])
case "FuzzReplicasAnnotationsNeedUpdate":
return FuzzReplicasAnnotationsNeedUpdate(data[1:])
case "FuzzMaxUnavailable":
return FuzzMaxUnavailable(data[1:])
case "FuzzMinAvailable":
return FuzzMinAvailable(data[1:])
case "FuzzMaxSurge":
return FuzzMaxSurge(data[1:])
case "FuzzFindNewReplicaSet":
return FuzzFindNewReplicaSet(data[1:])
case "FuzzFindOldReplicaSets":
return FuzzFindOldReplicaSets(data[1:])
case "FuzzGetReplicaCountForReplicaSets":
return FuzzGetReplicaCountForReplicaSets(data[1:])
case "FuzzGetActualReplicaCountForReplicaSets":
return FuzzGetActualReplicaCountForReplicaSets(data[1:])
case "FuzzGetReadyReplicaCountForReplicaSets":
return FuzzGetReadyReplicaCountForReplicaSets(data[1:])
case "FuzzGetAvailableReplicaCountForReplicaSets":
return FuzzGetAvailableReplicaCountForReplicaSets(data[1:])
case "FuzzNewRSNewReplicas":
return FuzzNewRSNewReplicas(data[1:])
case "FuzzIsSaturated":
return FuzzIsSaturated(data[1:])
case "FuzzResolveFenceposts":
return FuzzResolveFenceposts(data[1:])
case "FuzzGetDeploymentsForReplicaSet":
return FuzzGetDeploymentsForReplicaSet(data[1:])
}
return 1
}
func FuzzSetDeploymentCondition(data []byte) int {
// Not supported
return 1
}
func FuzzRemoveDeploymentCondition(data []byte) int {
// Not supported
return 1
}
func FuzzSetDeploymentRevision(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
revision, err := f.GetString()
if err != nil {
return 0
}
SetDeploymentRevision(deployment, revision)
return 1
}
func FuzzMaxAndLastRevision(data []byte) int {
f := fuzz.NewConsumer(data)
allRSs := make([]*apps.ReplicaSet, 0)
err := f.CreateSlice(&allRSs)
if err != nil {
return 0
}
max, err := f.GetBool()
if err != nil {
return 0
}
logger := klog.Background()
if max {
_ = MaxRevision(logger, allRSs)
} else {
_ = LastRevision(logger, allRSs)
}
return 1
}
func FuzzSetNewReplicaSetAnnotations(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
newRS := &apps.ReplicaSet{}
err = f.GenerateStruct(newRS)
if err != nil {
return 0
}
newRevision, err := f.GetString()
if err != nil {
return 0
}
exists, err := f.GetBool()
if err != nil {
return 0
}
revHistoryLimitInChars, err := f.GetInt()
if err != nil {
return 0
}
SetNewReplicaSetAnnotations(context.Background(), deployment, newRS, newRevision, exists, revHistoryLimitInChars)
return 1
}
func FuzzSetDeploymentAnnotationsTo(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
rollbackToRS := &apps.ReplicaSet{}
err = f.GenerateStruct(rollbackToRS)
if err != nil {
return 0
}
SetDeploymentAnnotationsTo(deployment, rollbackToRS)
return 1
}
func FuzzFindActiveOrLatest(data []byte) int {
f := fuzz.NewConsumer(data)
newRS := &apps.ReplicaSet{}
err := f.GenerateStruct(newRS)
if err != nil {
return 0
}
oldRSs := make([]*apps.ReplicaSet, 0)
err = f.CreateSlice(&oldRSs)
if err != nil {
return 0
}
_ = FindActiveOrLatest(newRS, oldRSs)
return 1
}
func FuzzGetDesiredReplicasAnnotation(data []byte) int {
f := fuzz.NewConsumer(data)
rs := &apps.ReplicaSet{}
err := f.GenerateStruct(rs)
if err != nil {
return 0
}
_, _ = GetDesiredReplicasAnnotation(klog.FromContext(context.Background()), rs)
return 1
}
func FuzzSetReplicasAnnotations(data []byte) int {
f := fuzz.NewConsumer(data)
rs := &apps.ReplicaSet{}
err := f.GenerateStruct(rs)
if err != nil {
return 0
}
desiredReplicas, err := f.GetInt()
if err != nil {
return 0
}
maxReplicas, err := f.GetInt()
if err != nil {
return 0
}
SetReplicasAnnotations(rs, int32(desiredReplicas), int32(maxReplicas))
return 1
}
func FuzzReplicasAnnotationsNeedUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
rs := &apps.ReplicaSet{}
err := f.GenerateStruct(rs)
if err != nil {
return 0
}
desiredReplicas, err := f.GetInt()
if err != nil {
return 0
}
maxReplicas, err := f.GetInt()
if err != nil {
return 0
}
ReplicasAnnotationsNeedUpdate(rs, int32(desiredReplicas), int32(maxReplicas))
return 1
}
func FuzzMaxUnavailable(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := apps.Deployment{}
err := f.GenerateStruct(&deployment)
if err != nil {
return 0
}
_ = MaxUnavailable(deployment)
return 1
}
func FuzzMinAvailable(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
_ = MinAvailable(deployment)
return 1
}
func FuzzMaxSurge(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := apps.Deployment{}
err := f.GenerateStruct(&deployment)
if err != nil {
return 0
}
_ = MaxSurge(deployment)
return 1
}
func FuzzFindNewReplicaSet(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
rsList := make([]*apps.ReplicaSet, 0)
err = f.CreateSlice(&rsList)
if err != nil {
return 0
}
_ = FindNewReplicaSet(deployment, rsList)
return 1
}
func FuzzFindOldReplicaSets(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
rsList := make([]*apps.ReplicaSet, 0)
err = f.CreateSlice(&rsList)
if err != nil {
return 0
}
_, _ = FindOldReplicaSets(deployment, rsList)
return 1
}
func FuzzGetReplicaCountForReplicaSets(data []byte) int {
f := fuzz.NewConsumer(data)
replicaSets := make([]*apps.ReplicaSet, 0)
err := f.CreateSlice(&replicaSets)
if err != nil {
return 0
}
_ = GetReplicaCountForReplicaSets(replicaSets)
return 1
}
func FuzzGetActualReplicaCountForReplicaSets(data []byte) int {
f := fuzz.NewConsumer(data)
replicaSets := make([]*apps.ReplicaSet, 0)
err := f.CreateSlice(&replicaSets)
if err != nil {
return 0
}
_ = GetActualReplicaCountForReplicaSets(replicaSets)
return 1
}
func FuzzGetReadyReplicaCountForReplicaSets(data []byte) int {
f := fuzz.NewConsumer(data)
replicaSets := make([]*apps.ReplicaSet, 0)
err := f.CreateSlice(&replicaSets)
if err != nil {
return 0
}
_ = GetReadyReplicaCountForReplicaSets(replicaSets)
return 1
}
func FuzzGetAvailableReplicaCountForReplicaSets(data []byte) int {
f := fuzz.NewConsumer(data)
replicaSets := make([]*apps.ReplicaSet, 0)
err := f.CreateSlice(&replicaSets)
if err != nil {
return 0
}
_ = GetAvailableReplicaCountForReplicaSets(replicaSets)
return 1
}
func FuzzNewRSNewReplicas(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
allRSs := make([]*apps.ReplicaSet, 0)
err = f.CreateSlice(&allRSs)
if err != nil {
return 0
}
newRS := &apps.ReplicaSet{}
err = f.GenerateStruct(newRS)
if err != nil {
return 0
}
_, _ = NewRSNewReplicas(deployment, allRSs, newRS)
return 1
}
func FuzzIsSaturated(data []byte) int {
f := fuzz.NewConsumer(data)
deployment := &apps.Deployment{}
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
rs := &apps.ReplicaSet{}
err = f.GenerateStruct(rs)
if err != nil {
return 0
}
_ = IsSaturated(deployment, rs)
return 1
}
func FuzzResolveFenceposts(data []byte) int {
f := fuzz.NewConsumer(data)
maxSurge := &intstrutil.IntOrString{}
err := f.GenerateStruct(maxSurge)
if err != nil {
return 0
}
maxUnavailable := &intstrutil.IntOrString{}
err = f.GenerateStruct(maxUnavailable)
if err != nil {
return 0
}
desired, err := f.GetInt()
if err != nil {
return 0
}
_, _, _ = ResolveFenceposts(maxSurge, maxUnavailable, int32(desired))
return 1
}
func FuzzGetDeploymentsForReplicaSet(data []byte) int {
fakeInformerFactory := informers.NewSharedInformerFactory(&fake.Clientset{}, 0*time.Second)
f := fuzz.NewConsumer(data)
rs := &apps.ReplicaSet{}
err := f.GenerateStruct(rs)
if err != nil {
return 0
}
_, _ = GetDeploymentsForReplicaSet(fakeInformerFactory.Apps().V1().Deployments().Lister(), rs)
return 1
}
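// The harnesses above follow the go-fuzz/OSS-Fuzz calling convention: they
// return 1 when the fuzzed arguments were successfully constructed and the
// target was exercised, and 0 when the raw bytes could not be decoded, which
// the engine uses to prioritize corpus entries. A minimal sketch (assuming
// this package is importable; the corpus file path is hypothetical) of
// replaying a single input by hand, e.g. to reproduce a crash outside the
// fuzzing engine:
//
//	data, err := os.ReadFile("testdata/crasher.bin")
//	if err != nil {
//	    panic(err)
//	}
//	_ = FuzzMaxUnavailable(data)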
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
endpointconfig "k8s.io/kubernetes/pkg/controller/endpoint/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with EndpointControllerConfiguration types need to be manually exposed like this so that
// other packages referencing this package can call these conversion functions from
// autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration(in *v1alpha1.EndpointControllerConfiguration, out *endpointconfig.EndpointControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration(in, out, s)
}
// Convert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration is an autogenerated conversion function.
func Convert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration(in *endpointconfig.EndpointControllerConfiguration, out *v1alpha1.EndpointControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultEndpointControllerConfiguration defaults a pointer to an
// EndpointControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultEndpointControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.EndpointControllerConfiguration) {
if obj.ConcurrentEndpointSyncs == 0 {
obj.ConcurrentEndpointSyncs = 5
}
}
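// A minimal sketch (not part of this package; the wrapper type and function
// names are hypothetical) of wiring this helper into a consumer's own
// defaulting, as the comment above suggests:
//
//	type MyControllerManagerConfiguration struct {
//	    EndpointController kubectrlmgrconfigv1alpha1.EndpointControllerConfiguration
//	}
//
//	func SetDefaults_MyControllerManagerConfiguration(obj *MyControllerManagerConfiguration) {
//	    RecommendedDefaultEndpointControllerConfiguration(&obj.EndpointController)
//	}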
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/endpoint/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
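// Note: the GroupResource conversions below are registered as generated
// functions, while the EndpointControllerConfiguration conversions are
// registered via AddConversionFunc because their public entry points are the
// manually exposed wrappers in conversion.go above.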
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.EndpointControllerConfiguration)(nil), (*configv1alpha1.EndpointControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration(a.(*config.EndpointControllerConfiguration), b.(*configv1alpha1.EndpointControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.EndpointControllerConfiguration)(nil), (*config.EndpointControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration(a.(*configv1alpha1.EndpointControllerConfiguration), b.(*config.EndpointControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_EndpointControllerConfiguration_To_config_EndpointControllerConfiguration(in *configv1alpha1.EndpointControllerConfiguration, out *config.EndpointControllerConfiguration, s conversion.Scope) error {
out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return nil
}
func autoConvert_config_EndpointControllerConfiguration_To_v1alpha1_EndpointControllerConfiguration(in *config.EndpointControllerConfiguration, out *configv1alpha1.EndpointControllerConfiguration, s conversion.Scope) error {
out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
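// A minimal sketch (hypothetical helper, using only the imports above) of
// exercising these registered conversions through a runtime.Scheme:
//
//	func exampleConvert() error {
//	    scheme := runtime.NewScheme()
//	    if err := RegisterConversions(scheme); err != nil {
//	        return err
//	    }
//	    in := &configv1alpha1.EndpointControllerConfiguration{ConcurrentEndpointSyncs: 5}
//	    out := &config.EndpointControllerConfiguration{}
//	    return scheme.Convert(in, out, nil)
//	}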
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointControllerConfiguration) DeepCopyInto(out *EndpointControllerConfiguration) {
*out = *in
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointControllerConfiguration.
func (in *EndpointControllerConfiguration) DeepCopy() *EndpointControllerConfiguration {
if in == nil {
return nil
}
out := new(EndpointControllerConfiguration)
in.DeepCopyInto(out)
return out
}
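// A minimal usage sketch: the generated DeepCopy yields an independent value,
// so mutating the copy leaves the original untouched.
//
//	orig := &EndpointControllerConfiguration{ConcurrentEndpointSyncs: 5}
//	cp := orig.DeepCopy()
//	cp.ConcurrentEndpointSyncs = 10 // orig.ConcurrentEndpointSyncs is still 5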
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
endpointsliceconfig "k8s.io/kubernetes/pkg/controller/endpointslice/config"
)
// Important! The public back-and-forth conversion functions for the types in
// this package with EndpointSliceControllerConfiguration types need to be
// manually exposed like this so that other packages referencing this package
// can call these conversion functions from autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration(in *v1alpha1.EndpointSliceControllerConfiguration, out *endpointsliceconfig.EndpointSliceControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration(in, out, s)
}
// Convert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration is an autogenerated conversion function.
func Convert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration(in *endpointsliceconfig.EndpointSliceControllerConfiguration, out *v1alpha1.EndpointSliceControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultEndpointSliceControllerConfiguration defaults a pointer to
// an EndpointSliceControllerConfiguration struct. This will set the recommended
// default values, but they may be subject to change between API versions. This
// function is intentionally not registered in the scheme as a "normal"
// `SetDefaults_Foo` function, to allow consumers of this type to set whatever
// defaults they want for their embedded configs. Forcing consumers to use these
// defaults would be problematic, as defaulting in the scheme is done as part of
// the conversion and there would be no easy way to opt out. Instead, if you want
// to use this defaulting method, call it from the `SetDefaults_` method of your
// wrapper struct.
func RecommendedDefaultEndpointSliceControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.EndpointSliceControllerConfiguration) {
if obj.ConcurrentServiceEndpointSyncs == 0 {
obj.ConcurrentServiceEndpointSyncs = 5
}
if obj.MaxEndpointsPerSlice == 0 {
obj.MaxEndpointsPerSlice = 100
}
}
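// Note the zero-value caveat in this defaulting style: a caller cannot
// explicitly configure a field to 0, because 0 is indistinguishable from
// "unset" and is overwritten. A minimal illustration:
//
//	cfg := kubectrlmgrconfigv1alpha1.EndpointSliceControllerConfiguration{MaxEndpointsPerSlice: 0}
//	RecommendedDefaultEndpointSliceControllerConfiguration(&cfg)
//	// cfg.MaxEndpointsPerSlice is now 100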
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/endpointslice/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.EndpointSliceControllerConfiguration)(nil), (*configv1alpha1.EndpointSliceControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration(a.(*config.EndpointSliceControllerConfiguration), b.(*configv1alpha1.EndpointSliceControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.EndpointSliceControllerConfiguration)(nil), (*config.EndpointSliceControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration(a.(*configv1alpha1.EndpointSliceControllerConfiguration), b.(*config.EndpointSliceControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_EndpointSliceControllerConfiguration_To_config_EndpointSliceControllerConfiguration(in *configv1alpha1.EndpointSliceControllerConfiguration, out *config.EndpointSliceControllerConfiguration, s conversion.Scope) error {
out.ConcurrentServiceEndpointSyncs = in.ConcurrentServiceEndpointSyncs
out.MaxEndpointsPerSlice = in.MaxEndpointsPerSlice
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return nil
}
func autoConvert_config_EndpointSliceControllerConfiguration_To_v1alpha1_EndpointSliceControllerConfiguration(in *config.EndpointSliceControllerConfiguration, out *configv1alpha1.EndpointSliceControllerConfiguration, s conversion.Scope) error {
out.ConcurrentServiceEndpointSyncs = in.ConcurrentServiceEndpointSyncs
out.MaxEndpointsPerSlice = in.MaxEndpointsPerSlice
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSliceControllerConfiguration) DeepCopyInto(out *EndpointSliceControllerConfiguration) {
*out = *in
out.EndpointUpdatesBatchPeriod = in.EndpointUpdatesBatchPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceControllerConfiguration.
func (in *EndpointSliceControllerConfiguration) DeepCopy() *EndpointSliceControllerConfiguration {
if in == nil {
return nil
}
out := new(EndpointSliceControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
endpointslicemirroringconfig "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config"
)
// Important! The public back-and-forth conversion functions for the types in
// this package with EndpointSliceMirroringControllerConfiguration types need to
// be manually exposed like this so that other packages referencing this package
// can call these conversion functions from autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration(in *v1alpha1.EndpointSliceMirroringControllerConfiguration, out *endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration(in, out, s)
}
// Convert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration is an autogenerated conversion function.
func Convert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration(in *endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration, out *v1alpha1.EndpointSliceMirroringControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration(in, out, s)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultEndpointSliceMirroringControllerConfiguration defaults a
// pointer to an EndpointSliceMirroringControllerConfiguration struct. This will
// set the recommended default values, but they may be subject to change between
// API versions. This function is intentionally not registered in the scheme as
// a "normal" `SetDefaults_Foo` function, to allow consumers of this type to set
// whatever defaults they want for their embedded configs. Forcing consumers to
// use these defaults would be problematic, as defaulting in the scheme is done
// as part of the conversion and there would be no easy way to opt out. Instead,
// if you want to use this defaulting method, call it from the `SetDefaults_`
// method of your wrapper struct.
func RecommendedDefaultEndpointSliceMirroringControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.EndpointSliceMirroringControllerConfiguration) {
if obj.MirroringConcurrentServiceEndpointSyncs == 0 {
obj.MirroringConcurrentServiceEndpointSyncs = 5
}
if obj.MirroringMaxEndpointsPerSubset == 0 {
obj.MirroringMaxEndpointsPerSubset = 1000
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.EndpointSliceMirroringControllerConfiguration)(nil), (*configv1alpha1.EndpointSliceMirroringControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration(a.(*config.EndpointSliceMirroringControllerConfiguration), b.(*configv1alpha1.EndpointSliceMirroringControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.EndpointSliceMirroringControllerConfiguration)(nil), (*config.EndpointSliceMirroringControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration(a.(*configv1alpha1.EndpointSliceMirroringControllerConfiguration), b.(*config.EndpointSliceMirroringControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_EndpointSliceMirroringControllerConfiguration_To_config_EndpointSliceMirroringControllerConfiguration(in *configv1alpha1.EndpointSliceMirroringControllerConfiguration, out *config.EndpointSliceMirroringControllerConfiguration, s conversion.Scope) error {
out.MirroringConcurrentServiceEndpointSyncs = in.MirroringConcurrentServiceEndpointSyncs
out.MirroringMaxEndpointsPerSubset = in.MirroringMaxEndpointsPerSubset
out.MirroringEndpointUpdatesBatchPeriod = in.MirroringEndpointUpdatesBatchPeriod
return nil
}
func autoConvert_config_EndpointSliceMirroringControllerConfiguration_To_v1alpha1_EndpointSliceMirroringControllerConfiguration(in *config.EndpointSliceMirroringControllerConfiguration, out *configv1alpha1.EndpointSliceMirroringControllerConfiguration, s conversion.Scope) error {
out.MirroringConcurrentServiceEndpointSyncs = in.MirroringConcurrentServiceEndpointSyncs
out.MirroringMaxEndpointsPerSubset = in.MirroringMaxEndpointsPerSubset
out.MirroringEndpointUpdatesBatchPeriod = in.MirroringEndpointUpdatesBatchPeriod
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointSliceMirroringControllerConfiguration) DeepCopyInto(out *EndpointSliceMirroringControllerConfiguration) {
*out = *in
out.MirroringEndpointUpdatesBatchPeriod = in.MirroringEndpointUpdatesBatchPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceMirroringControllerConfiguration.
func (in *EndpointSliceMirroringControllerConfiguration) DeepCopy() *EndpointSliceMirroringControllerConfiguration {
if in == nil {
return nil
}
out := new(EndpointSliceMirroringControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/garbagecollector/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with GarbageCollectorControllerConfiguration types need to be manually exposed like this
// so that other packages referencing this package can call these conversion functions from
// autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration(in *v1alpha1.GarbageCollectorControllerConfiguration, out *config.GarbageCollectorControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration(in, out, s)
}
// Convert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration is an autogenerated conversion function.
func Convert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration(in *config.GarbageCollectorControllerConfiguration, out *v1alpha1.GarbageCollectorControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/utils/ptr"
)
// RecommendedDefaultGarbageCollectorControllerConfiguration defaults a pointer to a
// GarbageCollectorControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultGarbageCollectorControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.GarbageCollectorControllerConfiguration) {
if obj.EnableGarbageCollector == nil {
obj.EnableGarbageCollector = ptr.To(true)
}
if obj.ConcurrentGCSyncs == 0 {
obj.ConcurrentGCSyncs = 20
}
}
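// Because EnableGarbageCollector is a *bool, an unset field (nil) is
// distinguishable from an explicit false, so a caller can opt out of garbage
// collection while still receiving the other defaults. A minimal illustration:
//
//	cfg := kubectrlmgrconfigv1alpha1.GarbageCollectorControllerConfiguration{
//	    EnableGarbageCollector: ptr.To(false), // survives defaulting
//	}
//	RecommendedDefaultGarbageCollectorControllerConfiguration(&cfg)
//	// *cfg.EnableGarbageCollector == false, cfg.ConcurrentGCSyncs == 20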
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*config.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_config_GroupResource(a.(*configv1alpha1.GroupResource), b.(*config.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_GroupResource_To_v1alpha1_GroupResource(a.(*config.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.GarbageCollectorControllerConfiguration)(nil), (*configv1alpha1.GarbageCollectorControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration(a.(*config.GarbageCollectorControllerConfiguration), b.(*configv1alpha1.GarbageCollectorControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.GarbageCollectorControllerConfiguration)(nil), (*config.GarbageCollectorControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration(a.(*configv1alpha1.GarbageCollectorControllerConfiguration), b.(*config.GarbageCollectorControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GarbageCollectorControllerConfiguration_To_config_GarbageCollectorControllerConfiguration(in *configv1alpha1.GarbageCollectorControllerConfiguration, out *config.GarbageCollectorControllerConfiguration, s conversion.Scope) error {
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableGarbageCollector, &out.EnableGarbageCollector, s); err != nil {
return err
}
out.ConcurrentGCSyncs = in.ConcurrentGCSyncs
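// The two GroupResource types have identical memory layouts, so conversion-gen
// emits a zero-copy reinterpretation of the slice header via unsafe.Pointer
// instead of converting element by element.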
out.GCIgnoredResources = *(*[]config.GroupResource)(unsafe.Pointer(&in.GCIgnoredResources))
return nil
}
func autoConvert_config_GarbageCollectorControllerConfiguration_To_v1alpha1_GarbageCollectorControllerConfiguration(in *config.GarbageCollectorControllerConfiguration, out *configv1alpha1.GarbageCollectorControllerConfiguration, s conversion.Scope) error {
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableGarbageCollector, &out.EnableGarbageCollector, s); err != nil {
return err
}
out.ConcurrentGCSyncs = in.ConcurrentGCSyncs
out.GCIgnoredResources = *(*[]configv1alpha1.GroupResource)(unsafe.Pointer(&in.GCIgnoredResources))
return nil
}
func autoConvert_v1alpha1_GroupResource_To_config_GroupResource(in *configv1alpha1.GroupResource, out *config.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_config_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_config_GroupResource(in *configv1alpha1.GroupResource, out *config.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_config_GroupResource(in, out, s)
}
func autoConvert_config_GroupResource_To_v1alpha1_GroupResource(in *config.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_config_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_config_GroupResource_To_v1alpha1_GroupResource(in *config.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_config_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GarbageCollectorControllerConfiguration) DeepCopyInto(out *GarbageCollectorControllerConfiguration) {
*out = *in
if in.GCIgnoredResources != nil {
in, out := &in.GCIgnoredResources, &out.GCIgnoredResources
*out = make([]GroupResource, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GarbageCollectorControllerConfiguration.
func (in *GarbageCollectorControllerConfiguration) DeepCopy() *GarbageCollectorControllerConfiguration {
if in == nil {
return nil
}
out := new(GarbageCollectorControllerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupResource) DeepCopyInto(out *GroupResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource.
func (in *GroupResource) DeepCopy() *GroupResource {
if in == nil {
return nil
}
out := new(GroupResource)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/job/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with JobControllerConfiguration types need to be manually exposed like this so that
// other packages referencing this package can call these conversion functions from
// autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration(in *v1alpha1.JobControllerConfiguration, out *config.JobControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration(in, out, s)
}
// Convert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration is an autogenerated conversion function.
func Convert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration(in *config.JobControllerConfiguration, out *v1alpha1.JobControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultJobControllerConfiguration defaults a pointer to a
// JobControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultJobControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.JobControllerConfiguration) {
if obj.ConcurrentJobSyncs == 0 {
obj.ConcurrentJobSyncs = 5
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/job/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.JobControllerConfiguration)(nil), (*configv1alpha1.JobControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration(a.(*config.JobControllerConfiguration), b.(*configv1alpha1.JobControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.JobControllerConfiguration)(nil), (*config.JobControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration(a.(*configv1alpha1.JobControllerConfiguration), b.(*config.JobControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_JobControllerConfiguration_To_config_JobControllerConfiguration(in *configv1alpha1.JobControllerConfiguration, out *config.JobControllerConfiguration, s conversion.Scope) error {
out.ConcurrentJobSyncs = in.ConcurrentJobSyncs
return nil
}
func autoConvert_config_JobControllerConfiguration_To_v1alpha1_JobControllerConfiguration(in *config.JobControllerConfiguration, out *configv1alpha1.JobControllerConfiguration, s conversion.Scope) error {
out.ConcurrentJobSyncs = in.ConcurrentJobSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobControllerConfiguration) DeepCopyInto(out *JobControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobControllerConfiguration.
func (in *JobControllerConfiguration) DeepCopy() *JobControllerConfiguration {
if in == nil {
return nil
}
out := new(JobControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/namespace/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with NamespaceControllerConfiguration types need to be manually exposed like this so that
// other packages referencing this package can call these conversion functions from
// autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration(in *v1alpha1.NamespaceControllerConfiguration, out *config.NamespaceControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration(in, out, s)
}
// Convert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration is an autogenerated conversion function.
func Convert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration(in *config.NamespaceControllerConfiguration, out *v1alpha1.NamespaceControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultNamespaceControllerConfiguration defaults a pointer to a
// NamespaceControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultNamespaceControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.NamespaceControllerConfiguration) {
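// metav1.Duration is a comparable struct wrapping time.Duration, so comparing
// against its zero value detects an unset sync period.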
zero := metav1.Duration{}
if obj.ConcurrentNamespaceSyncs == 0 {
obj.ConcurrentNamespaceSyncs = 10
}
if obj.NamespaceSyncPeriod == zero {
obj.NamespaceSyncPeriod = metav1.Duration{Duration: 5 * time.Minute}
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/namespace/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.NamespaceControllerConfiguration)(nil), (*configv1alpha1.NamespaceControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration(a.(*config.NamespaceControllerConfiguration), b.(*configv1alpha1.NamespaceControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.NamespaceControllerConfiguration)(nil), (*config.NamespaceControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration(a.(*configv1alpha1.NamespaceControllerConfiguration), b.(*config.NamespaceControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
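// Editorial sketch: RegisterConversions is public precisely so that arbitrary
// schemes can be built. Assuming the registered conversion functions are all that
// is needed for a direct type-pair conversion, a standalone scheme could be used
// like this; the helper name is hypothetical.
func exampleConvertNamespaceControllerConfiguration(in *configv1alpha1.NamespaceControllerConfiguration) (*config.NamespaceControllerConfiguration, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	out := &config.NamespaceControllerConfiguration{}
	// Convert dispatches to the conversion function registered for this type pair.
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}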
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_NamespaceControllerConfiguration_To_config_NamespaceControllerConfiguration(in *configv1alpha1.NamespaceControllerConfiguration, out *config.NamespaceControllerConfiguration, s conversion.Scope) error {
out.NamespaceSyncPeriod = in.NamespaceSyncPeriod
out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs
return nil
}
func autoConvert_config_NamespaceControllerConfiguration_To_v1alpha1_NamespaceControllerConfiguration(in *config.NamespaceControllerConfiguration, out *configv1alpha1.NamespaceControllerConfiguration, s conversion.Scope) error {
out.NamespaceSyncPeriod = in.NamespaceSyncPeriod
out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceControllerConfiguration) DeepCopyInto(out *NamespaceControllerConfiguration) {
*out = *in
out.NamespaceSyncPeriod = in.NamespaceSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceControllerConfiguration.
func (in *NamespaceControllerConfiguration) DeepCopy() *NamespaceControllerConfiguration {
if in == nil {
return nil
}
out := new(NamespaceControllerConfiguration)
in.DeepCopyInto(out)
return out
}
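// Editorial sketch: DeepCopy returns a fully independent copy, so mutating the
// copy never affects the original. For example:
//
//	orig := &NamespaceControllerConfiguration{ConcurrentNamespaceSyncs: 10}
//	cp := orig.DeepCopy()
//	cp.ConcurrentNamespaceSyncs = 20 // orig.ConcurrentNamespaceSyncs is still 10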
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/nodeipam/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the NodeIPAMControllerConfiguration types must be manually
// exposed like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(in *v1alpha1.NodeIPAMControllerConfiguration, out *config.NodeIPAMControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(in, out, s)
}
// Convert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration is an autogenerated conversion function.
func Convert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration(in *config.NodeIPAMControllerConfiguration, out *v1alpha1.NodeIPAMControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration(in, out, s)
}
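// Editorial sketch: with the wrappers above exported, code outside this package
// can invoke the conversion directly. Passing a nil conversion.Scope is assumed
// to be acceptable here because the generated body never uses the scope.
//
//	var in v1alpha1.NodeIPAMControllerConfiguration
//	var out config.NodeIPAMControllerConfiguration
//	if err := Convert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(&in, &out, nil); err != nil {
//	    // handle the conversion error
//	}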
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultNodeIPAMControllerConfiguration applies recommended default
// values to a NodeIPAMControllerConfiguration struct. The defaults may change
// between API versions. This function is intentionally not registered in the
// scheme as a "normal" `SetDefaults_Foo` function, so that consumers of this type
// can set whatever defaults they want for their embedded configs. Forcing
// consumers to use these defaults would be problematic, because defaulting in the
// scheme is done as part of conversion and there would be no easy way to opt out.
// Instead, if you want this defaulting behavior, call this function from the
// `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultNodeIPAMControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.NodeIPAMControllerConfiguration) {
// The default mask size is not set here because the cluster CIDR family must be
// determined before the appropriate mask size can be chosen.
}
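// Editorial sketch (an assumption, not the actual implementation): once the
// cluster CIDR family is known, family-aware defaulting might look like the
// following, where 24 and 64 are illustrative IPv4/IPv6 mask sizes and the
// standard library "net" package is assumed to be imported:
//
//	func defaultNodeCIDRMaskSize(clusterCIDR string, obj *kubectrlmgrconfigv1alpha1.NodeIPAMControllerConfiguration) {
//	    _, ipNet, err := net.ParseCIDR(clusterCIDR)
//	    if err != nil {
//	        return // leave the mask sizes unset if the CIDR cannot be parsed
//	    }
//	    if ipNet.IP.To4() != nil {
//	        if obj.NodeCIDRMaskSizeIPv4 == 0 {
//	            obj.NodeCIDRMaskSizeIPv4 = 24 // illustrative IPv4 default
//	        }
//	    } else if obj.NodeCIDRMaskSizeIPv6 == 0 {
//	        obj.NodeCIDRMaskSizeIPv6 = 64 // illustrative IPv6 default
//	    }
//	}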
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/nodeipam/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.NodeIPAMControllerConfiguration)(nil), (*configv1alpha1.NodeIPAMControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration(a.(*config.NodeIPAMControllerConfiguration), b.(*configv1alpha1.NodeIPAMControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.NodeIPAMControllerConfiguration)(nil), (*config.NodeIPAMControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(a.(*configv1alpha1.NodeIPAMControllerConfiguration), b.(*config.NodeIPAMControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMControllerConfiguration(in *configv1alpha1.NodeIPAMControllerConfiguration, out *config.NodeIPAMControllerConfiguration, s conversion.Scope) error {
out.ServiceCIDR = in.ServiceCIDR
out.SecondaryServiceCIDR = in.SecondaryServiceCIDR
out.NodeCIDRMaskSize = in.NodeCIDRMaskSize
out.NodeCIDRMaskSizeIPv4 = in.NodeCIDRMaskSizeIPv4
out.NodeCIDRMaskSizeIPv6 = in.NodeCIDRMaskSizeIPv6
return nil
}
func autoConvert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMControllerConfiguration(in *config.NodeIPAMControllerConfiguration, out *configv1alpha1.NodeIPAMControllerConfiguration, s conversion.Scope) error {
out.ServiceCIDR = in.ServiceCIDR
out.SecondaryServiceCIDR = in.SecondaryServiceCIDR
out.NodeCIDRMaskSize = in.NodeCIDRMaskSize
out.NodeCIDRMaskSizeIPv4 = in.NodeCIDRMaskSizeIPv4
out.NodeCIDRMaskSizeIPv6 = in.NodeCIDRMaskSizeIPv6
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeIPAMControllerConfiguration) DeepCopyInto(out *NodeIPAMControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeIPAMControllerConfiguration.
func (in *NodeIPAMControllerConfiguration) DeepCopy() *NodeIPAMControllerConfiguration {
if in == nil {
return nil
}
out := new(NodeIPAMControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/nodelifecycle/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the NodeLifecycleControllerConfiguration types must be
// manually exposed like this so that other packages referencing this package can
// call these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(in *v1alpha1.NodeLifecycleControllerConfiguration, out *config.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(in, out, s)
}
// Convert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration is an autogenerated conversion function.
func Convert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(in *config.NodeLifecycleControllerConfiguration, out *v1alpha1.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultNodeLifecycleControllerConfiguration applies recommended
// default values to a NodeLifecycleControllerConfiguration struct. The defaults
// may change between API versions. This function is intentionally not registered
// in the scheme as a "normal" `SetDefaults_Foo` function, so that consumers of
// this type can set whatever defaults they want for their embedded configs.
// Forcing consumers to use these defaults would be problematic, because
// defaulting in the scheme is done as part of conversion and there would be no
// easy way to opt out. Instead, if you want this defaulting behavior, call this
// function from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultNodeLifecycleControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.NodeLifecycleControllerConfiguration) {
zero := metav1.Duration{}
if obj.PodEvictionTimeout == zero {
obj.PodEvictionTimeout = metav1.Duration{Duration: 5 * time.Minute}
}
// NodeMonitorGracePeriod defaults to 50 seconds. This value should exceed the
// sum of HTTP2_PING_TIMEOUT_SECONDS (30s) and HTTP2_READ_IDLE_TIMEOUT_SECONDS
// (15s) used by the HTTP/2 health check, so that the server has adequate time
// to handle slow or idle connections before a node is marked unhealthy.
if obj.NodeMonitorGracePeriod == zero {
obj.NodeMonitorGracePeriod = metav1.Duration{Duration: 50 * time.Second}
}
if obj.NodeStartupGracePeriod == zero {
obj.NodeStartupGracePeriod = metav1.Duration{Duration: 60 * time.Second}
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/nodelifecycle/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.NodeLifecycleControllerConfiguration)(nil), (*configv1alpha1.NodeLifecycleControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(a.(*config.NodeLifecycleControllerConfiguration), b.(*configv1alpha1.NodeLifecycleControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.NodeLifecycleControllerConfiguration)(nil), (*config.NodeLifecycleControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(a.(*configv1alpha1.NodeLifecycleControllerConfiguration), b.(*config.NodeLifecycleControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(in *configv1alpha1.NodeLifecycleControllerConfiguration, out *config.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
out.NodeEvictionRate = in.NodeEvictionRate
out.SecondaryNodeEvictionRate = in.SecondaryNodeEvictionRate
out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod
// WARNING: in.PodEvictionTimeout requires manual conversion: does not exist in peer-type
out.LargeClusterSizeThreshold = in.LargeClusterSizeThreshold
out.UnhealthyZoneThreshold = in.UnhealthyZoneThreshold
return nil
}
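// Editorial sketch: the WARNING above marks a field that has no peer in the
// target type, so conversion-gen leaves it to the hand-written public wrapper;
// here the wrapper simply drops PodEvictionTimeout. If a peer field existed under
// a different name, the wrapper would be the place to map it. Foo, RenamedField,
// and LegacyField below are hypothetical names used only for illustration.
//
//	func Convert_v1alpha1_Foo_To_config_Foo(in *v1alpha1.Foo, out *config.Foo, s conversion.Scope) error {
//	    if err := autoConvert_v1alpha1_Foo_To_config_Foo(in, out, s); err != nil {
//	        return err
//	    }
//	    out.RenamedField = in.LegacyField // hypothetical manual mapping
//	    return nil
//	}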
func autoConvert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(in *config.NodeLifecycleControllerConfiguration, out *configv1alpha1.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
out.NodeEvictionRate = in.NodeEvictionRate
out.SecondaryNodeEvictionRate = in.SecondaryNodeEvictionRate
out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod
out.LargeClusterSizeThreshold = in.LargeClusterSizeThreshold
out.UnhealthyZoneThreshold = in.UnhealthyZoneThreshold
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeLifecycleControllerConfiguration) DeepCopyInto(out *NodeLifecycleControllerConfiguration) {
*out = *in
out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLifecycleControllerConfiguration.
func (in *NodeLifecycleControllerConfiguration) DeepCopy() *NodeLifecycleControllerConfiguration {
if in == nil {
return nil
}
out := new(NodeLifecycleControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/podautoscaler/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the HPAControllerConfiguration types must be manually exposed
// like this so that other packages referencing this package can call these
// conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration is an autogenerated conversion function.
func Convert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(in *config.HPAControllerConfiguration, out *v1alpha1.HPAControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(in, out, s)
}
// Convert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(in *v1alpha1.HPAControllerConfiguration, out *config.HPAControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultHPAControllerConfiguration applies recommended default
// values to an HPAControllerConfiguration struct. The defaults may change
// between API versions. This function is intentionally not registered in the
// scheme as a "normal" `SetDefaults_Foo` function, so that consumers of this type
// can set whatever defaults they want for their embedded configs. Forcing
// consumers to use these defaults would be problematic, because defaulting in the
// scheme is done as part of conversion and there would be no easy way to opt out.
// Instead, if you want this defaulting behavior, call this function from the
// `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultHPAControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.HPAControllerConfiguration) {
zero := metav1.Duration{}
if obj.ConcurrentHorizontalPodAutoscalerSyncs == 0 {
obj.ConcurrentHorizontalPodAutoscalerSyncs = 5
}
if obj.HorizontalPodAutoscalerSyncPeriod == zero {
obj.HorizontalPodAutoscalerSyncPeriod = metav1.Duration{Duration: 15 * time.Second}
}
if obj.HorizontalPodAutoscalerDownscaleStabilizationWindow == zero {
obj.HorizontalPodAutoscalerDownscaleStabilizationWindow = metav1.Duration{Duration: 5 * time.Minute}
}
if obj.HorizontalPodAutoscalerCPUInitializationPeriod == zero {
obj.HorizontalPodAutoscalerCPUInitializationPeriod = metav1.Duration{Duration: 5 * time.Minute}
}
if obj.HorizontalPodAutoscalerInitialReadinessDelay == zero {
obj.HorizontalPodAutoscalerInitialReadinessDelay = metav1.Duration{Duration: 30 * time.Second}
}
if obj.HorizontalPodAutoscalerTolerance == 0 {
obj.HorizontalPodAutoscalerTolerance = 0.1
}
}
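// Editorial sketch: the zero-value guards above mean that only unset fields are
// defaulted; anything the caller has already set is preserved. For example:
//
//	cfg := &kubectrlmgrconfigv1alpha1.HPAControllerConfiguration{
//	    HorizontalPodAutoscalerTolerance: 0.2, // explicitly set, kept as-is
//	}
//	RecommendedDefaultHPAControllerConfiguration(cfg)
//	// cfg.HorizontalPodAutoscalerTolerance == 0.2
//	// cfg.HorizontalPodAutoscalerSyncPeriod == metav1.Duration{Duration: 15 * time.Second}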
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/podautoscaler/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.HPAControllerConfiguration)(nil), (*configv1alpha1.HPAControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(a.(*config.HPAControllerConfiguration), b.(*configv1alpha1.HPAControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.HPAControllerConfiguration)(nil), (*config.HPAControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(a.(*configv1alpha1.HPAControllerConfiguration), b.(*config.HPAControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(in *configv1alpha1.HPAControllerConfiguration, out *config.HPAControllerConfiguration, s conversion.Scope) error {
out.ConcurrentHorizontalPodAutoscalerSyncs = in.ConcurrentHorizontalPodAutoscalerSyncs
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
return nil
}
func autoConvert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(in *config.HPAControllerConfiguration, out *configv1alpha1.HPAControllerConfiguration, s conversion.Scope) error {
out.ConcurrentHorizontalPodAutoscalerSyncs = in.ConcurrentHorizontalPodAutoscalerSyncs
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HPAControllerConfiguration) DeepCopyInto(out *HPAControllerConfiguration) {
*out = *in
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAControllerConfiguration.
func (in *HPAControllerConfiguration) DeepCopy() *HPAControllerConfiguration {
if in == nil {
return nil
}
out := new(HPAControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/podgc/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the PodGCControllerConfiguration types must be manually
// exposed like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration(in *v1alpha1.PodGCControllerConfiguration, out *config.PodGCControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration(in, out, s)
}
// Convert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration is an autogenerated conversion function.
func Convert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration(in *config.PodGCControllerConfiguration, out *v1alpha1.PodGCControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultPodGCControllerConfiguration applies recommended default
// values to a PodGCControllerConfiguration struct. The defaults may change
// between API versions. This function is intentionally not registered in the
// scheme as a "normal" `SetDefaults_Foo` function, so that consumers of this type
// can set whatever defaults they want for their embedded configs. Forcing
// consumers to use these defaults would be problematic, because defaulting in the
// scheme is done as part of conversion and there would be no easy way to opt out.
// Instead, if you want this defaulting behavior, call this function from the
// `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultPodGCControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.PodGCControllerConfiguration) {
if obj.TerminatedPodGCThreshold == 0 {
obj.TerminatedPodGCThreshold = 12500
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/podgc/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.PodGCControllerConfiguration)(nil), (*configv1alpha1.PodGCControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration(a.(*config.PodGCControllerConfiguration), b.(*configv1alpha1.PodGCControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.PodGCControllerConfiguration)(nil), (*config.PodGCControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration(a.(*configv1alpha1.PodGCControllerConfiguration), b.(*config.PodGCControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_PodGCControllerConfiguration_To_config_PodGCControllerConfiguration(in *configv1alpha1.PodGCControllerConfiguration, out *config.PodGCControllerConfiguration, s conversion.Scope) error {
out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
return nil
}
func autoConvert_config_PodGCControllerConfiguration_To_v1alpha1_PodGCControllerConfiguration(in *config.PodGCControllerConfiguration, out *configv1alpha1.PodGCControllerConfiguration, s conversion.Scope) error {
out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodGCControllerConfiguration) DeepCopyInto(out *PodGCControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGCControllerConfiguration.
func (in *PodGCControllerConfiguration) DeepCopy() *PodGCControllerConfiguration {
if in == nil {
return nil
}
out := new(PodGCControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/replicaset/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the ReplicaSetControllerConfiguration types must be manually
// exposed like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration(in *v1alpha1.ReplicaSetControllerConfiguration, out *config.ReplicaSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration(in, out, s)
}
// Convert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration is an autogenerated conversion function.
func Convert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration(in *config.ReplicaSetControllerConfiguration, out *v1alpha1.ReplicaSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultReplicaSetControllerConfiguration applies recommended default
// values to a ReplicaSetControllerConfiguration struct. The defaults may change
// between API versions. This function is intentionally not registered in the
// scheme as a "normal" `SetDefaults_Foo` function, so that consumers of this type
// can set whatever defaults they want for their embedded configs. Forcing
// consumers to use these defaults would be problematic, because defaulting in the
// scheme is done as part of conversion and there would be no easy way to opt out.
// Instead, if you want this defaulting behavior, call this function from the
// `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultReplicaSetControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.ReplicaSetControllerConfiguration) {
if obj.ConcurrentRSSyncs == 0 {
obj.ConcurrentRSSyncs = 5
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/replicaset/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.ReplicaSetControllerConfiguration)(nil), (*configv1alpha1.ReplicaSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration(a.(*config.ReplicaSetControllerConfiguration), b.(*configv1alpha1.ReplicaSetControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.ReplicaSetControllerConfiguration)(nil), (*config.ReplicaSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration(a.(*configv1alpha1.ReplicaSetControllerConfiguration), b.(*config.ReplicaSetControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_ReplicaSetControllerConfiguration_To_config_ReplicaSetControllerConfiguration(in *configv1alpha1.ReplicaSetControllerConfiguration, out *config.ReplicaSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentRSSyncs = in.ConcurrentRSSyncs
return nil
}
func autoConvert_config_ReplicaSetControllerConfiguration_To_v1alpha1_ReplicaSetControllerConfiguration(in *config.ReplicaSetControllerConfiguration, out *configv1alpha1.ReplicaSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentRSSyncs = in.ConcurrentRSSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetControllerConfiguration) DeepCopyInto(out *ReplicaSetControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetControllerConfiguration.
func (in *ReplicaSetControllerConfiguration) DeepCopy() *ReplicaSetControllerConfiguration {
if in == nil {
return nil
}
out := new(ReplicaSetControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/replication/config"
)
// Important! The public back-and-forth conversion functions between the types in
// this package and the ReplicationControllerConfiguration types must be manually
// exposed like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it automatically discovers these
// Convert_* functions in autogenerated code as well.
// Convert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration(in *v1alpha1.ReplicationControllerConfiguration, out *config.ReplicationControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration(in, out, s)
}
// Convert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration is an autogenerated conversion function.
func Convert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration(in *config.ReplicationControllerConfiguration, out *v1alpha1.ReplicationControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultReplicationControllerConfiguration defaults a pointer to a
// ReplicationControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper
// struct of this type. See the illustrative sketch following this function.
func RecommendedDefaultReplicationControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.ReplicationControllerConfiguration) {
if obj.ConcurrentRCSyncs == 0 {
obj.ConcurrentRCSyncs = 5
}
}
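// A minimal sketch of the intended consumption pattern (illustrative only;
// MyControllerManagerConfiguration and SetDefaults_MyControllerManagerConfiguration
// are hypothetical names, not part of this package):
//
//	type MyControllerManagerConfiguration struct {
//		ReplicationController kubectrlmgrconfigv1alpha1.ReplicationControllerConfiguration
//	}
//
//	func SetDefaults_MyControllerManagerConfiguration(obj *MyControllerManagerConfiguration) {
//		// Opt in to the recommended defaults for the embedded config.
//		RecommendedDefaultReplicationControllerConfiguration(&obj.ReplicationController)
//	}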
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/replication/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.ReplicationControllerConfiguration)(nil), (*configv1alpha1.ReplicationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration(a.(*config.ReplicationControllerConfiguration), b.(*configv1alpha1.ReplicationControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.ReplicationControllerConfiguration)(nil), (*config.ReplicationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration(a.(*configv1alpha1.ReplicationControllerConfiguration), b.(*config.ReplicationControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_ReplicationControllerConfiguration_To_config_ReplicationControllerConfiguration(in *configv1alpha1.ReplicationControllerConfiguration, out *config.ReplicationControllerConfiguration, s conversion.Scope) error {
out.ConcurrentRCSyncs = in.ConcurrentRCSyncs
return nil
}
func autoConvert_config_ReplicationControllerConfiguration_To_v1alpha1_ReplicationControllerConfiguration(in *config.ReplicationControllerConfiguration, out *configv1alpha1.ReplicationControllerConfiguration, s conversion.Scope) error {
out.ConcurrentRCSyncs = in.ConcurrentRCSyncs
return nil
}
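// A minimal sketch of how the conversions registered above are typically
// exercised (illustrative only; a fresh scheme is built here instead of the
// package's localSchemeBuilder, and error handling is reduced to panics):
//
//	s := runtime.NewScheme()
//	if err := RegisterConversions(s); err != nil {
//		panic(err)
//	}
//	in := configv1alpha1.ReplicationControllerConfiguration{ConcurrentRCSyncs: 5}
//	out := config.ReplicationControllerConfiguration{}
//	// Convert dispatches to the Convert_* functions registered above.
//	if err := s.Convert(&in, &out, nil); err != nil {
//		panic(err)
//	}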
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerConfiguration) DeepCopyInto(out *ReplicationControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerConfiguration.
func (in *ReplicationControllerConfiguration) DeepCopy() *ReplicationControllerConfiguration {
if in == nil {
return nil
}
out := new(ReplicationControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/resourcequota/config"
)
// Important! The public back-and-forth conversion functions for the
// ResourceQuotaControllerConfiguration types in this package must be exposed
// manually like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it discovers these Convert_*
// functions in autogenerated code automatically as well.
// Convert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration(in *v1alpha1.ResourceQuotaControllerConfiguration, out *config.ResourceQuotaControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration(in, out, s)
}
// Convert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration is an autogenerated conversion function.
func Convert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration(in *config.ResourceQuotaControllerConfiguration, out *v1alpha1.ResourceQuotaControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultResourceQuotaControllerConfiguration defaults a pointer to a
// ResourceQuotaControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper
// struct of this type.
func RecommendedDefaultResourceQuotaControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.ResourceQuotaControllerConfiguration) {
zero := metav1.Duration{}
if obj.ConcurrentResourceQuotaSyncs == 0 {
obj.ConcurrentResourceQuotaSyncs = 5
}
if obj.ResourceQuotaSyncPeriod == zero {
obj.ResourceQuotaSyncPeriod = metav1.Duration{Duration: 5 * time.Minute}
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/resourcequota/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.ResourceQuotaControllerConfiguration)(nil), (*configv1alpha1.ResourceQuotaControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration(a.(*config.ResourceQuotaControllerConfiguration), b.(*configv1alpha1.ResourceQuotaControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.ResourceQuotaControllerConfiguration)(nil), (*config.ResourceQuotaControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration(a.(*configv1alpha1.ResourceQuotaControllerConfiguration), b.(*config.ResourceQuotaControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_ResourceQuotaControllerConfiguration_To_config_ResourceQuotaControllerConfiguration(in *configv1alpha1.ResourceQuotaControllerConfiguration, out *config.ResourceQuotaControllerConfiguration, s conversion.Scope) error {
out.ResourceQuotaSyncPeriod = in.ResourceQuotaSyncPeriod
out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs
return nil
}
func autoConvert_config_ResourceQuotaControllerConfiguration_To_v1alpha1_ResourceQuotaControllerConfiguration(in *config.ResourceQuotaControllerConfiguration, out *configv1alpha1.ResourceQuotaControllerConfiguration, s conversion.Scope) error {
out.ResourceQuotaSyncPeriod = in.ResourceQuotaSyncPeriod
out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaControllerConfiguration) DeepCopyInto(out *ResourceQuotaControllerConfiguration) {
*out = *in
out.ResourceQuotaSyncPeriod = in.ResourceQuotaSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaControllerConfiguration.
func (in *ResourceQuotaControllerConfiguration) DeepCopy() *ResourceQuotaControllerConfiguration {
if in == nil {
return nil
}
out := new(ResourceQuotaControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/serviceaccount/config"
)
// Important! The public back-and-forth conversion functions for the
// SAControllerConfiguration types in this package must be exposed manually
// like this so that other packages referencing this package can call these
// conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it discovers these Convert_*
// functions in autogenerated code automatically as well.
// Convert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration(in *v1alpha1.SAControllerConfiguration, out *config.SAControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration(in, out, s)
}
// Convert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration is an autogenerated conversion function.
func Convert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration(in *config.SAControllerConfiguration, out *v1alpha1.SAControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration(in, out, s)
}
// Convert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration(in *v1alpha1.LegacySATokenCleanerConfiguration, out *config.LegacySATokenCleanerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration(in, out, s)
}
// Convert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration is an autogenerated conversion function.
func Convert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration(in *config.LegacySATokenCleanerConfiguration, out *v1alpha1.LegacySATokenCleanerConfiguration, s conversion.Scope) error {
return autoConvert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultSAControllerConfiguration defaults a pointer to a
// SAControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper
// struct of this type.
func RecommendedDefaultSAControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.SAControllerConfiguration) {
if obj.ConcurrentSATokenSyncs == 0 {
obj.ConcurrentSATokenSyncs = 5
}
}
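// RecommendedDefaultLegacySATokenCleanerConfiguration defaults a pointer to a
// LegacySATokenCleanerConfiguration struct, setting the recommended CleanUpPeriod
// (one year) if none is specified. The same opt-in caveats as for
// RecommendedDefaultSAControllerConfiguration above apply.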
func RecommendedDefaultLegacySATokenCleanerConfiguration(obj *kubectrlmgrconfigv1alpha1.LegacySATokenCleanerConfiguration) {
zero := metav1.Duration{}
if obj.CleanUpPeriod == zero {
obj.CleanUpPeriod = metav1.Duration{Duration: 365 * 24 * time.Hour}
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/serviceaccount/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.LegacySATokenCleanerConfiguration)(nil), (*configv1alpha1.LegacySATokenCleanerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration(a.(*config.LegacySATokenCleanerConfiguration), b.(*configv1alpha1.LegacySATokenCleanerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.SAControllerConfiguration)(nil), (*configv1alpha1.SAControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration(a.(*config.SAControllerConfiguration), b.(*configv1alpha1.SAControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.LegacySATokenCleanerConfiguration)(nil), (*config.LegacySATokenCleanerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration(a.(*configv1alpha1.LegacySATokenCleanerConfiguration), b.(*config.LegacySATokenCleanerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.SAControllerConfiguration)(nil), (*config.SAControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration(a.(*configv1alpha1.SAControllerConfiguration), b.(*config.SAControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_LegacySATokenCleanerConfiguration_To_config_LegacySATokenCleanerConfiguration(in *configv1alpha1.LegacySATokenCleanerConfiguration, out *config.LegacySATokenCleanerConfiguration, s conversion.Scope) error {
out.CleanUpPeriod = in.CleanUpPeriod
return nil
}
func autoConvert_config_LegacySATokenCleanerConfiguration_To_v1alpha1_LegacySATokenCleanerConfiguration(in *config.LegacySATokenCleanerConfiguration, out *configv1alpha1.LegacySATokenCleanerConfiguration, s conversion.Scope) error {
out.CleanUpPeriod = in.CleanUpPeriod
return nil
}
func autoConvert_v1alpha1_SAControllerConfiguration_To_config_SAControllerConfiguration(in *configv1alpha1.SAControllerConfiguration, out *config.SAControllerConfiguration, s conversion.Scope) error {
out.ServiceAccountKeyFile = in.ServiceAccountKeyFile
out.ConcurrentSATokenSyncs = in.ConcurrentSATokenSyncs
out.RootCAFile = in.RootCAFile
return nil
}
func autoConvert_config_SAControllerConfiguration_To_v1alpha1_SAControllerConfiguration(in *config.SAControllerConfiguration, out *configv1alpha1.SAControllerConfiguration, s conversion.Scope) error {
out.ServiceAccountKeyFile = in.ServiceAccountKeyFile
out.ConcurrentSATokenSyncs = in.ConcurrentSATokenSyncs
out.RootCAFile = in.RootCAFile
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LegacySATokenCleanerConfiguration) DeepCopyInto(out *LegacySATokenCleanerConfiguration) {
*out = *in
out.CleanUpPeriod = in.CleanUpPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacySATokenCleanerConfiguration.
func (in *LegacySATokenCleanerConfiguration) DeepCopy() *LegacySATokenCleanerConfiguration {
if in == nil {
return nil
}
out := new(LegacySATokenCleanerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SAControllerConfiguration) DeepCopyInto(out *SAControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SAControllerConfiguration.
func (in *SAControllerConfiguration) DeepCopy() *SAControllerConfiguration {
if in == nil {
return nil
}
out := new(SAControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
statefulsetconfig "k8s.io/kubernetes/pkg/controller/statefulset/config"
)
// Important! The public back-and-forth conversion functions for the
// StatefulSetControllerConfiguration types in this package must be exposed
// manually like this so that other packages referencing this package can call
// these conversion functions from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so that it discovers these Convert_*
// functions in autogenerated code automatically as well.
// Convert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration(in *v1alpha1.StatefulSetControllerConfiguration, out *statefulsetconfig.StatefulSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration(in, out, s)
}
// Convert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration is an autogenerated conversion function.
func Convert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration(in *statefulsetconfig.StatefulSetControllerConfiguration, out *v1alpha1.StatefulSetControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultStatefulSetControllerConfiguration defaults a pointer to a
// StatefulSetControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper
// struct of this type.
func RecommendedDefaultStatefulSetControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.StatefulSetControllerConfiguration) {
if obj.ConcurrentStatefulSetSyncs == 0 {
obj.ConcurrentStatefulSetSyncs = 5
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/statefulset/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.StatefulSetControllerConfiguration)(nil), (*configv1alpha1.StatefulSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration(a.(*config.StatefulSetControllerConfiguration), b.(*configv1alpha1.StatefulSetControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.StatefulSetControllerConfiguration)(nil), (*config.StatefulSetControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration(a.(*configv1alpha1.StatefulSetControllerConfiguration), b.(*config.StatefulSetControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_StatefulSetControllerConfiguration_To_config_StatefulSetControllerConfiguration(in *configv1alpha1.StatefulSetControllerConfiguration, out *config.StatefulSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentStatefulSetSyncs = in.ConcurrentStatefulSetSyncs
return nil
}
func autoConvert_config_StatefulSetControllerConfiguration_To_v1alpha1_StatefulSetControllerConfiguration(in *config.StatefulSetControllerConfiguration, out *configv1alpha1.StatefulSetControllerConfiguration, s conversion.Scope) error {
out.ConcurrentStatefulSetSyncs = in.ConcurrentStatefulSetSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetControllerConfiguration) DeepCopyInto(out *StatefulSetControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetControllerConfiguration.
func (in *StatefulSetControllerConfiguration) DeepCopy() *StatefulSetControllerConfiguration {
if in == nil {
return nil
}
out := new(StatefulSetControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const taintEvictionControllerSubsystem = "taint_eviction_controller"
var (
// PodDeletionsTotal counts the number of Pods deleted by TaintEvictionController since its start.
PodDeletionsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: taintEvictionControllerSubsystem,
Name: "pod_deletions_total",
Help: "Total number of Pods deleted by TaintEvictionController since its start.",
StabilityLevel: metrics.ALPHA,
},
)
// PodDeletionsLatency tracks the latency, in seconds, between the time when a taint effect has been activated
// for the Pod and its deletion.
PodDeletionsLatency = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: taintEvictionControllerSubsystem,
Name: "pod_deletion_duration_seconds",
Help: "Latency, in seconds, between the time when a taint effect has been activated for the Pod and its deletion via TaintEvictionController.",
Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1, 2.5, 10, 30, 60, 120, 180, 240}, // 5ms to 4m
StabilityLevel: metrics.ALPHA,
},
)
)
var registerMetrics sync.Once
// Register registers TaintEvictionController metrics.
func Register() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(PodDeletionsTotal)
legacyregistry.MustRegister(PodDeletionsLatency)
})
}
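// A minimal sketch of how these metrics are consumed from the controller
// (illustrative only; evictionStart is a hypothetical timestamp captured when
// the taint effect was activated for the Pod):
//
//	metrics.Register() // idempotent; guarded by sync.Once
//	// After a successful Pod deletion:
//	metrics.PodDeletionsTotal.Inc()
//	metrics.PodDeletionsLatency.Observe(time.Since(evictionStart).Seconds())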
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tainteviction
import (
"k8s.io/apimachinery/pkg/types"
)
// NamespacedObject comprises a resource name with a mandatory namespace
// and optional UID. It gets rendered as "<namespace>/<name>[:<uid>]"
// (text output) or as an object (JSON output).
type NamespacedObject struct {
types.NamespacedName
UID types.UID
}
// String returns the general-purpose string representation.
func (n NamespacedObject) String() string {
if n.UID != "" {
return n.Namespace + string(types.Separator) + n.Name + ":" + string(n.UID)
}
return n.Namespace + string(types.Separator) + n.Name
}
// MarshalLog emits a struct containing the required key/value pairs.
func (n NamespacedObject) MarshalLog() interface{} {
return struct {
Name string `json:"name"`
Namespace string `json:"namespace,omitempty"`
UID types.UID `json:"uid,omitempty"`
}{
Name: n.Name,
Namespace: n.Namespace,
UID: n.UID,
}
}
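// A worked example of the text rendering (illustrative only; the UID is a
// hypothetical value):
//
//	obj := NamespacedObject{
//		NamespacedName: types.NamespacedName{Namespace: "default", Name: "mypod"},
//		UID:            "b6f7a03e-0001-0002-0003-000000000004",
//	}
//	obj.String() // "default/mypod:b6f7a03e-0001-0002-0003-000000000004"
//
// With an empty UID the suffix is omitted and the result is just "default/mypod".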
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tainteviction
import (
"context"
"fmt"
"hash/fnv"
"io"
"math"
"sync"
"time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
corev1informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/controller/tainteviction/metrics"
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
utilpod "k8s.io/kubernetes/pkg/util/pod"
)
const (
// TODO (k82cn): Figure out a reasonable number of workers/channels and propagate
// the number of workers up, making it a parameter of the Run() function.
// NodeUpdateChannelSize defines the size of the channel for node update events.
NodeUpdateChannelSize = 10
// UpdateWorkerSize defines the number of workers for node and/or pod updates.
UpdateWorkerSize = 8
podUpdateChannelSize = 1
retries = 5
)
type nodeUpdateItem struct {
nodeName string
}
type podUpdateItem struct {
podName string
podNamespace string
nodeName string
}
func hash(val string, max int) int {
hasher := fnv.New32a()
io.WriteString(hasher, val)
return int(hasher.Sum32() % uint32(max))
}
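// hash is used below to shard update items across the fixed-size worker pool,
// so that all updates for a given node land on the same worker. A minimal
// sketch (illustrative only):
//
//	worker := hash("node-1", UpdateWorkerSize) // deterministic index in [0, UpdateWorkerSize)
//	tc.nodeUpdateChannels[worker] <- nodeUpdateItem{nodeName: "node-1"}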
// GetPodsByNodeNameFunc returns the list of pods assigned to the specified node.
type GetPodsByNodeNameFunc func(nodeName string) ([]*v1.Pod, error)
// Controller listens to Taint/Toleration changes and is responsible for removing Pods
// from Nodes tainted with NoExecute Taints.
type Controller struct {
name string
client clientset.Interface
broadcaster record.EventBroadcaster
recorder record.EventRecorder
podLister corelisters.PodLister
podListerSynced cache.InformerSynced
nodeLister corelisters.NodeLister
nodeListerSynced cache.InformerSynced
getPodsAssignedToNode GetPodsByNodeNameFunc
taintEvictionQueue *TimedWorkerQueue
// keeps a map from nodeName to all noExecute taints on that Node
taintedNodesLock sync.Mutex
taintedNodes map[string][]v1.Taint
nodeUpdateChannels []chan nodeUpdateItem
podUpdateChannels []chan podUpdateItem
nodeUpdateQueue workqueue.TypedInterface[nodeUpdateItem]
podUpdateQueue workqueue.TypedInterface[podUpdateItem]
}
func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName), controllerName string) func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
return func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
ns := args.Object.Namespace
name := args.Object.Name
klog.FromContext(ctx).Info("Deleting pod", "controller", controllerName, "pod", args.Object)
if emitEventFunc != nil {
emitEventFunc(args.Object.NamespacedName)
}
var err error
for i := 0; i < retries; i++ {
err = addConditionAndDeletePod(ctx, c, name, ns)
if err == nil {
metrics.PodDeletionsTotal.Inc()
metrics.PodDeletionsLatency.Observe(time.Since(fireAt).Seconds())
break
}
time.Sleep(10 * time.Millisecond)
}
return err
}
}
func addConditionAndDeletePod(ctx context.Context, c clientset.Interface, name, ns string) (err error) {
pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return err
}
newStatus := pod.Status.DeepCopy()
updated := apipod.UpdatePodCondition(newStatus, &v1.PodCondition{
Type: v1.DisruptionTarget,
ObservedGeneration: apipod.CalculatePodConditionObservedGeneration(&pod.Status, pod.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: "DeletionByTaintManager",
Message: "Taint manager: deleting due to NoExecute taint",
})
if updated {
if _, _, _, err := utilpod.PatchPodStatus(ctx, c, pod.Namespace, pod.Name, pod.UID, pod.Status, *newStatus); err != nil {
return err
}
}
return c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
}
func getNoExecuteTaints(taints []v1.Taint) []v1.Taint {
result := []v1.Taint{}
for i := range taints {
if taints[i].Effect == v1.TaintEffectNoExecute {
result = append(result, taints[i])
}
}
return result
}
// getMinTolerationTime returns the minimal toleration time from the given slice, or -1 if it's infinite.
func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
minTolerationTime := int64(math.MaxInt64)
if len(tolerations) == 0 {
return 0
}
for i := range tolerations {
if tolerations[i].TolerationSeconds != nil {
tolerationSeconds := *(tolerations[i].TolerationSeconds)
if tolerationSeconds <= 0 {
return 0
} else if tolerationSeconds < minTolerationTime {
minTolerationTime = tolerationSeconds
}
}
}
if minTolerationTime == int64(math.MaxInt64) {
return -1
}
return time.Duration(minTolerationTime) * time.Second
}
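// A worked example (illustrative only):
//
//	sixty, threeHundred := int64(60), int64(300)
//	tolerations := []v1.Toleration{
//		{TolerationSeconds: &threeHundred},
//		{}, // nil TolerationSeconds is ignored
//		{TolerationSeconds: &sixty},
//	}
//	getMinTolerationTime(tolerations) // 60 * time.Second
//	getMinTolerationTime(nil)         // 0: no tolerations, evict immediately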
// New creates a new Controller that will use the passed clientset to communicate with the API server.
func New(ctx context.Context, c clientset.Interface, podInformer corev1informers.PodInformer, nodeInformer corev1informers.NodeInformer, controllerName string) (*Controller, error) {
logger := klog.FromContext(ctx)
metrics.Register()
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podIndexer := podInformer.Informer().GetIndexer()
tm := &Controller{
name: controllerName,
client: c,
broadcaster: eventBroadcaster,
recorder: recorder,
podLister: podInformer.Lister(),
podListerSynced: podInformer.Informer().HasSynced,
nodeLister: nodeInformer.Lister(),
nodeListerSynced: nodeInformer.Informer().HasSynced,
getPodsAssignedToNode: func(nodeName string) ([]*v1.Pod, error) {
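// The "spec.nodeName" index is assumed to have been registered on the
// pod informer by the caller; without it, ByIndex returns an error.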
objs, err := podIndexer.ByIndex("spec.nodeName", nodeName)
if err != nil {
return nil, err
}
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
pods = append(pods, pod)
}
return pods, nil
},
taintedNodes: make(map[string][]v1.Taint),
nodeUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[nodeUpdateItem]{Name: "noexec_taint_node"}),
podUpdateQueue: workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[podUpdateItem]{Name: "noexec_taint_pod"}),
}
tm.taintEvictionQueue = CreateWorkerQueue(deletePodHandler(c, tm.emitPodDeletionEvent, tm.name))
_, err := podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pod := obj.(*v1.Pod)
tm.PodUpdated(nil, pod)
},
UpdateFunc: func(prev, obj interface{}) {
prevPod := prev.(*v1.Pod)
newPod := obj.(*v1.Pod)
tm.PodUpdated(prevPod, newPod)
},
DeleteFunc: func(obj interface{}) {
pod, isPod := obj.(*v1.Pod)
// We can get DeletedFinalStateUnknown instead of *v1.Pod here and we need to handle that correctly.
if !isPod {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Error(nil, "Received unexpected object", "object", obj)
return
}
pod, ok = deletedState.Obj.(*v1.Pod)
if !ok {
logger.Error(nil, "DeletedFinalStateUnknown contained non-Pod object", "object", deletedState.Obj)
return
}
}
tm.PodUpdated(pod, nil)
},
})
if err != nil {
return nil, fmt.Errorf("unable to add pod event handler: %w", err)
}
_, err = nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
tm.NodeUpdated(nil, node)
return nil
}),
UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
tm.NodeUpdated(oldNode, newNode)
return nil
}),
DeleteFunc: controllerutil.CreateDeleteNodeHandler(logger, func(node *v1.Node) error {
tm.NodeUpdated(node, nil)
return nil
}),
})
if err != nil {
return nil, fmt.Errorf("unable to add node event handler: %w", err)
}
return tm, nil
}
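// A minimal sketch of wiring the controller (illustrative only; ctx, client,
// and informerFactory are assumed to come from the caller's setup):
//
//	tm, err := New(ctx, client,
//		informerFactory.Core().V1().Pods(),
//		informerFactory.Core().V1().Nodes(),
//		"taint-eviction-controller")
//	if err != nil {
//		klog.Fatalf("failed to create taint eviction controller: %v", err)
//	}
//	informerFactory.Start(ctx.Done())
//	go tm.Run(ctx)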
// Run starts the controller, which runs in a loop until the passed context is cancelled.
func (tc *Controller) Run(ctx context.Context) {
defer utilruntime.HandleCrash()
logger := klog.FromContext(ctx)
logger.Info("Starting", "controller", tc.name)
defer logger.Info("Shutting down controller", "controller", tc.name)
// Start events processing pipeline.
tc.broadcaster.StartStructuredLogging(3)
if tc.client != nil {
logger.Info("Sending events to api server")
tc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: tc.client.CoreV1().Events("")})
} else {
logger.Error(nil, "kubeClient is nil", "controller", tc.name)
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
defer tc.broadcaster.Shutdown()
defer tc.nodeUpdateQueue.ShutDown()
defer tc.podUpdateQueue.ShutDown()
// wait for the cache to be synced
if !cache.WaitForNamedCacheSync(tc.name, ctx.Done(), tc.podListerSynced, tc.nodeListerSynced) {
return
}
for i := 0; i < UpdateWorkerSize; i++ {
tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan nodeUpdateItem, NodeUpdateChannelSize))
tc.podUpdateChannels = append(tc.podUpdateChannels, make(chan podUpdateItem, podUpdateChannelSize))
}
// Functions that are responsible for taking work items out of the workqueues and putting them
// into channels.
go func(stopCh <-chan struct{}) {
for {
nodeUpdate, shutdown := tc.nodeUpdateQueue.Get()
if shutdown {
break
}
hash := hash(nodeUpdate.nodeName, UpdateWorkerSize)
select {
case <-stopCh:
tc.nodeUpdateQueue.Done(nodeUpdate)
return
case tc.nodeUpdateChannels[hash] <- nodeUpdate:
// tc.nodeUpdateQueue.Done is called by the nodeUpdateChannels worker
}
}
}(ctx.Done())
go func(stopCh <-chan struct{}) {
for {
podUpdate, shutdown := tc.podUpdateQueue.Get()
if shutdown {
break
}
// The fact that pods are processed by the same worker as their node is used to
// avoid races between the node worker setting tc.taintedNodes and the pod
// worker reading it to decide whether to delete the pod.
// It's possible that this code is correct even without this assumption.
hash := hash(podUpdate.nodeName, UpdateWorkerSize)
select {
case <-stopCh:
tc.podUpdateQueue.Done(podUpdate)
return
case tc.podUpdateChannels[hash] <- podUpdate:
// tc.podUpdateQueue.Done is called by the podUpdateChannels worker
}
}
}(ctx.Done())
wg := sync.WaitGroup{}
wg.Add(UpdateWorkerSize)
for i := 0; i < UpdateWorkerSize; i++ {
go tc.worker(ctx, i, wg.Done, ctx.Done())
}
wg.Wait()
}
func (tc *Controller) worker(ctx context.Context, worker int, done func(), stopCh <-chan struct{}) {
defer done()
// When processing events we want to prioritize Node updates over Pod updates,
// as NodeUpdates that interest the controller should be handled as soon as
// possible: we don't want the user (or the system) to wait until the PodUpdate
// queue is drained before the controller can start evicting Pods from tainted Nodes.
for {
select {
case <-stopCh:
return
case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
tc.handleNodeUpdate(ctx, nodeUpdate)
tc.nodeUpdateQueue.Done(nodeUpdate)
case podUpdate := <-tc.podUpdateChannels[worker]:
// If we found a Pod update, we need to empty the Node queue first.
priority:
for {
select {
case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
tc.handleNodeUpdate(ctx, nodeUpdate)
tc.nodeUpdateQueue.Done(nodeUpdate)
default:
break priority
}
}
// After Node queue is emptied we process podUpdate.
tc.handlePodUpdate(ctx, podUpdate)
tc.podUpdateQueue.Done(podUpdate)
}
}
}
// PodUpdated is used to notify NoExecuteTaintManager about Pod changes.
func (tc *Controller) PodUpdated(oldPod *v1.Pod, newPod *v1.Pod) {
podName := ""
podNamespace := ""
nodeName := ""
oldTolerations := []v1.Toleration{}
if oldPod != nil {
podName = oldPod.Name
podNamespace = oldPod.Namespace
nodeName = oldPod.Spec.NodeName
oldTolerations = oldPod.Spec.Tolerations
}
newTolerations := []v1.Toleration{}
if newPod != nil {
podName = newPod.Name
podNamespace = newPod.Namespace
nodeName = newPod.Spec.NodeName
newTolerations = newPod.Spec.Tolerations
}
if oldPod != nil && newPod != nil && helper.Semantic.DeepEqual(oldTolerations, newTolerations) && oldPod.Spec.NodeName == newPod.Spec.NodeName {
return
}
updateItem := podUpdateItem{
podName: podName,
podNamespace: podNamespace,
nodeName: nodeName,
}
tc.podUpdateQueue.Add(updateItem)
}
// NodeUpdated is used to notify NoExecuteTaintManager about Node changes.
func (tc *Controller) NodeUpdated(oldNode *v1.Node, newNode *v1.Node) {
nodeName := ""
oldTaints := []v1.Taint{}
if oldNode != nil {
nodeName = oldNode.Name
oldTaints = getNoExecuteTaints(oldNode.Spec.Taints)
}
newTaints := []v1.Taint{}
if newNode != nil {
nodeName = newNode.Name
newTaints = getNoExecuteTaints(newNode.Spec.Taints)
}
if oldNode != nil && newNode != nil && helper.Semantic.DeepEqual(oldTaints, newTaints) {
return
}
updateItem := nodeUpdateItem{
nodeName: nodeName,
}
tc.nodeUpdateQueue.Add(updateItem)
}
func (tc *Controller) cancelWorkWithEvent(logger klog.Logger, nsName types.NamespacedName) {
if tc.taintEvictionQueue.CancelWork(logger, nsName.String()) {
tc.emitCancelPodDeletionEvent(nsName)
}
}
func (tc *Controller) processPodOnNode(
ctx context.Context,
podNamespacedName types.NamespacedName,
nodeName string,
tolerations []v1.Toleration,
taints []v1.Taint,
now time.Time,
) {
logger := klog.FromContext(ctx)
if len(taints) == 0 {
tc.cancelWorkWithEvent(logger, podNamespacedName)
}
allTolerated, usedTolerations := v1helper.GetMatchingTolerations(taints, tolerations)
if !allTolerated {
logger.V(2).Info("Not all taints are tolerated after update for pod on node", "pod", podNamespacedName.String(), "node", klog.KRef("", nodeName))
// We're canceling scheduled work (if any), as we're going to delete the Pod right away.
tc.cancelWorkWithEvent(logger, podNamespacedName)
tc.taintEvictionQueue.AddWork(ctx, NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), now, now)
return
}
minTolerationTime := getMinTolerationTime(usedTolerations)
// getMinTolerationTime returns a negative value to denote infinite toleration.
if minTolerationTime < 0 {
logger.V(4).Info("Current tolerations for pod tolerate forever, cancelling any scheduled deletion", "pod", podNamespacedName.String())
tc.cancelWorkWithEvent(logger, podNamespacedName)
return
}
startTime := now
triggerTime := startTime.Add(minTolerationTime)
scheduledEviction := tc.taintEvictionQueue.GetWorkerUnsafe(podNamespacedName.String())
if scheduledEviction != nil {
startTime = scheduledEviction.CreatedAt
if startTime.Add(minTolerationTime).Before(triggerTime) {
return
}
tc.cancelWorkWithEvent(logger, podNamespacedName)
}
tc.taintEvictionQueue.AddWork(ctx, NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), startTime, triggerTime)
}
func (tc *Controller) handlePodUpdate(ctx context.Context, podUpdate podUpdateItem) {
pod, err := tc.podLister.Pods(podUpdate.podNamespace).Get(podUpdate.podName)
logger := klog.FromContext(ctx)
if err != nil {
if apierrors.IsNotFound(err) {
// Delete
podNamespacedName := types.NamespacedName{Namespace: podUpdate.podNamespace, Name: podUpdate.podName}
logger.V(4).Info("Noticed pod deletion", "pod", podNamespacedName)
tc.cancelWorkWithEvent(logger, podNamespacedName)
return
}
utilruntime.HandleError(fmt.Errorf("could not get pod %s/%s: %v", podUpdate.podName, podUpdate.podNamespace, err))
return
}
// We key the workqueue and shard workers by nodeName. If we don't match the current state we should not be the one processing the current object.
if pod.Spec.NodeName != podUpdate.nodeName {
return
}
// Create or Update
podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
logger.V(4).Info("Noticed pod update", "pod", podNamespacedName)
nodeName := pod.Spec.NodeName
if nodeName == "" {
return
}
taints, ok := func() ([]v1.Taint, bool) {
tc.taintedNodesLock.Lock()
defer tc.taintedNodesLock.Unlock()
taints, ok := tc.taintedNodes[nodeName]
return taints, ok
}()
// It's possible that Node was deleted, or Taints were removed before, which triggered
// eviction cancelling if it was needed.
if !ok {
return
}
tc.processPodOnNode(ctx, podNamespacedName, nodeName, pod.Spec.Tolerations, taints, time.Now())
}
func (tc *Controller) handleNodeUpdate(ctx context.Context, nodeUpdate nodeUpdateItem) {
node, err := tc.nodeLister.Get(nodeUpdate.nodeName)
logger := klog.FromContext(ctx)
if err != nil {
if apierrors.IsNotFound(err) {
// Delete
logger.V(4).Info("Noticed node deletion", "node", klog.KRef("", nodeUpdate.nodeName))
tc.taintedNodesLock.Lock()
defer tc.taintedNodesLock.Unlock()
delete(tc.taintedNodes, nodeUpdate.nodeName)
return
}
utilruntime.HandleError(fmt.Errorf("cannot get node %s: %v", nodeUpdate.nodeName, err))
return
}
// Create or Update
logger.V(4).Info("Noticed node update", "node", klog.KObj(node))
taints := getNoExecuteTaints(node.Spec.Taints)
func() {
tc.taintedNodesLock.Lock()
defer tc.taintedNodesLock.Unlock()
logger.V(4).Info("Updating known taints on node", "node", klog.KObj(node), "taints", taints)
if len(taints) == 0 {
delete(tc.taintedNodes, node.Name)
} else {
tc.taintedNodes[node.Name] = taints
}
}()
// It is critical that we update tc.taintedNodes before we call getPodsAssignedToNode:
// getPodsAssignedToNode can be delayed as long as all future updates to pods call
// tc.PodUpdated, which uses tc.taintedNodes to potentially delete delayed pods.
pods, err := tc.getPodsAssignedToNode(node.Name)
if err != nil {
logger.Error(err, "Failed to get pods assigned to node", "node", klog.KObj(node))
return
}
if len(pods) == 0 {
return
}
// Short circuit, to make this controller a bit faster.
if len(taints) == 0 {
logger.V(4).Info("All taints were removed from the node. Cancelling all evictions...", "node", klog.KObj(node))
for i := range pods {
tc.cancelWorkWithEvent(logger, types.NamespacedName{Namespace: pods[i].Namespace, Name: pods[i].Name})
}
return
}
now := time.Now()
for _, pod := range pods {
podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
tc.processPodOnNode(ctx, podNamespacedName, node.Name, pod.Spec.Tolerations, taints, now)
}
}
func (tc *Controller) emitPodDeletionEvent(nsName types.NamespacedName) {
if tc.recorder == nil {
return
}
ref := &v1.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Name: nsName.Name,
Namespace: nsName.Namespace,
}
tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Marking for deletion Pod %s", nsName.String())
}
func (tc *Controller) emitCancelPodDeletionEvent(nsName types.NamespacedName) {
if tc.recorder == nil {
return
}
ref := &v1.ObjectReference{
APIVersion: "v1",
Kind: "Pod",
Name: nsName.Name,
Namespace: nsName.Namespace,
}
tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Cancelling deletion of Pod %s", nsName.String())
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tainteviction
import (
"context"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
// WorkArgs keeps arguments that will be passed to the function executed by the worker.
type WorkArgs struct {
// Object is the work item. The UID is only set if it was set when adding the work item.
Object NamespacedObject
}
// KeyFromWorkArgs creates a key for the given `WorkArgs`.
//
// The key is the same as the NamespacedName of the object in the work item,
// i.e. the UID is ignored. There cannot be two different
// work items with the same NamespacedName and different UIDs.
func (w *WorkArgs) KeyFromWorkArgs() string {
return w.Object.NamespacedName.String()
}
// NewWorkArgs is a helper function to create new `WorkArgs` without a UID.
func NewWorkArgs(name, namespace string) *WorkArgs {
return &WorkArgs{
Object: NamespacedObject{NamespacedName: types.NamespacedName{Namespace: namespace, Name: name}},
}
}
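// For example, NewWorkArgs("mypod", "default").KeyFromWorkArgs() yields the
// key "default/mypod", i.e. the types.NamespacedName string form.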
// TimedWorker is responsible for executing a function no earlier than the FireAt time.
type TimedWorker struct {
WorkItem *WorkArgs
CreatedAt time.Time
FireAt time.Time
Timer clock.Timer
}
// createWorker creates a TimedWorker that will execute `f` not earlier than `fireAt`.
// Returns nil if the work was started immediately and doesn't need a timer.
func createWorker(ctx context.Context, args *WorkArgs, createdAt time.Time, fireAt time.Time, f func(ctx context.Context, fireAt time.Time, args *WorkArgs) error, clock clock.WithDelayedExecution) *TimedWorker {
delay := fireAt.Sub(createdAt)
logger := klog.FromContext(ctx)
fWithErrorLogging := func() {
err := f(ctx, fireAt, args)
if err != nil {
logger.Error(err, "TaintEvictionController: timed worker failed")
}
}
if delay <= 0 {
go fWithErrorLogging()
return nil
}
timer := clock.AfterFunc(delay, fWithErrorLogging)
return &TimedWorker{
WorkItem: args,
CreatedAt: createdAt,
FireAt: fireAt,
Timer: timer,
}
}
// Cancel cancels the execution of the function by the `TimedWorker`.
func (w *TimedWorker) Cancel() {
if w != nil {
w.Timer.Stop()
}
}
// TimedWorkerQueue keeps a set of TimedWorkers that are still waiting for execution.
type TimedWorkerQueue struct {
sync.Mutex
// map of workers keyed by string returned by 'KeyFromWorkArgs' from the given worker.
// Entries may be nil if the work didn't need a timer and is already running.
workers map[string]*TimedWorker
workFunc func(ctx context.Context, fireAt time.Time, args *WorkArgs) error
clock clock.WithDelayedExecution
}
// CreateWorkerQueue creates a new TimedWorkerQueue for workers that will execute
// given function `f`.
func CreateWorkerQueue(f func(ctx context.Context, fireAt time.Time, args *WorkArgs) error) *TimedWorkerQueue {
return &TimedWorkerQueue{
workers: make(map[string]*TimedWorker),
workFunc: f,
clock: clock.RealClock{},
}
}
func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
return func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
logger := klog.FromContext(ctx)
logger.V(4).Info("Firing worker", "item", key, "firedTime", fireAt)
err := q.workFunc(ctx, fireAt, args)
q.Lock()
defer q.Unlock()
logger.V(4).Info("Worker finished, removing", "item", key, "err", err)
delete(q.workers, key)
return err
}
}
// AddWork adds a work item to the WorkerQueue which will be executed not earlier than `fireAt`.
// If a work item with the same key already exists, the new one is ignored;
// use UpdateWork to replace an existing item.
func (q *TimedWorkerQueue) AddWork(ctx context.Context, args *WorkArgs, createdAt time.Time, fireAt time.Time) {
key := args.KeyFromWorkArgs()
logger := klog.FromContext(ctx)
q.Lock()
defer q.Unlock()
if _, exists := q.workers[key]; exists {
logger.V(4).Info("Trying to add already existing work, skipping", "item", key, "createTime", createdAt, "firedTime", fireAt)
return
}
logger.V(4).Info("Adding TimedWorkerQueue item and to be fired at firedTime", "item", key, "createTime", createdAt, "firedTime", fireAt)
worker := createWorker(ctx, args, createdAt, fireAt, q.getWrappedWorkerFunc(key), q.clock)
q.workers[key] = worker
}
// UpdateWork adds or replaces a work item such that it will be executed not earlier than `fireAt`.
// This is a cheap no-op when the old and new fireAt are the same.
func (q *TimedWorkerQueue) UpdateWork(ctx context.Context, args *WorkArgs, createdAt time.Time, fireAt time.Time) {
key := args.KeyFromWorkArgs()
logger := klog.FromContext(ctx)
q.Lock()
defer q.Unlock()
if worker, exists := q.workers[key]; exists {
if worker == nil {
logger.V(4).Info("Keeping existing work, already in progress", "item", key)
return
}
if worker.FireAt.Compare(fireAt) == 0 {
logger.V(4).Info("Keeping existing work, same time", "item", key, "createTime", worker.CreatedAt, "firedTime", worker.FireAt)
return
}
logger.V(4).Info("Replacing existing work", "item", key, "createTime", worker.CreatedAt, "firedTime", worker.FireAt)
worker.Cancel()
}
logger.V(4).Info("Adding TimedWorkerQueue item and to be fired at firedTime", "item", key, "createTime", createdAt, "firedTime", fireAt)
worker := createWorker(ctx, args, createdAt, fireAt, q.getWrappedWorkerFunc(key), q.clock)
q.workers[key] = worker
}
// CancelWork removes scheduled function execution from the queue. Returns true if work was cancelled.
// The key must be the same as the one returned by WorkArgs.KeyFromWorkArgs, i.e.
// the result of NamespacedName.String.
func (q *TimedWorkerQueue) CancelWork(logger klog.Logger, key string) bool {
q.Lock()
defer q.Unlock()
worker, found := q.workers[key]
result := false
if found {
logger.V(4).Info("Cancelling TimedWorkerQueue item", "item", key, "time", time.Now())
if worker != nil {
result = true
worker.Cancel()
}
delete(q.workers, key)
}
return result
}
// GetWorkerUnsafe returns a TimedWorker corresponding to the given key.
// Unsafe method - workers have attached goroutines which can fire after this function is called.
func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker {
q.Lock()
defer q.Unlock()
return q.workers[key]
}
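// Illustrative usage sketch (hypothetical; such an example would normally live
// in a _test.go file). The work function and delay are made up, and a
// context.Context value named ctx is assumed to be in scope:
//
//	q := CreateWorkerQueue(func(ctx context.Context, fireAt time.Time, args *WorkArgs) error {
//		klog.FromContext(ctx).Info("Evicting", "pod", args.KeyFromWorkArgs())
//		return nil
//	})
//	now := time.Now()
//	args := NewWorkArgs("mypod", "default")
//	// Schedule the work 10s from now; a second AddWork with the same key
//	// before it fires would be a no-op.
//	q.AddWork(ctx, args, now, now.Add(10*time.Second))
//	// CancelWork returns true here because the timer has not fired yet.
//	q.CancelWork(klog.FromContext(ctx), args.KeyFromWorkArgs())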
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/ttlafterfinished/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with TTLAfterFinishedControllerConfiguration types need to be manually exposed like this
// in order for other packages that reference this package to be able to call these conversion
// functions in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration(in *v1alpha1.TTLAfterFinishedControllerConfiguration, out *config.TTLAfterFinishedControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration(in, out, s)
}
// Convert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration is an autogenerated conversion function.
func Convert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration(in *config.TTLAfterFinishedControllerConfiguration, out *v1alpha1.TTLAfterFinishedControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultTTLAfterFinishedControllerConfiguration defaults a pointer to a
// TTLAfterFinishedControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it in your wrapper struct's `SetDefaults_` method.
func RecommendedDefaultTTLAfterFinishedControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.TTLAfterFinishedControllerConfiguration) {
if obj.ConcurrentTTLSyncs <= 0 {
obj.ConcurrentTTLSyncs = 5
}
}
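// Illustrative sketch (the wrapper type and field below are hypothetical): a
// consumer opts into these defaults from its own SetDefaults_ function, e.g.:
//
//	func SetDefaults_MyControllerManagerConfiguration(obj *MyControllerManagerConfiguration) {
//		RecommendedDefaultTTLAfterFinishedControllerConfiguration(&obj.TTLAfterFinishedController)
//	}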
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.TTLAfterFinishedControllerConfiguration)(nil), (*configv1alpha1.TTLAfterFinishedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration(a.(*config.TTLAfterFinishedControllerConfiguration), b.(*configv1alpha1.TTLAfterFinishedControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.TTLAfterFinishedControllerConfiguration)(nil), (*config.TTLAfterFinishedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration(a.(*configv1alpha1.TTLAfterFinishedControllerConfiguration), b.(*config.TTLAfterFinishedControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_TTLAfterFinishedControllerConfiguration_To_config_TTLAfterFinishedControllerConfiguration(in *configv1alpha1.TTLAfterFinishedControllerConfiguration, out *config.TTLAfterFinishedControllerConfiguration, s conversion.Scope) error {
out.ConcurrentTTLSyncs = in.ConcurrentTTLSyncs
return nil
}
func autoConvert_config_TTLAfterFinishedControllerConfiguration_To_v1alpha1_TTLAfterFinishedControllerConfiguration(in *config.TTLAfterFinishedControllerConfiguration, out *configv1alpha1.TTLAfterFinishedControllerConfiguration, s conversion.Scope) error {
out.ConcurrentTTLSyncs = in.ConcurrentTTLSyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TTLAfterFinishedControllerConfiguration) DeepCopyInto(out *TTLAfterFinishedControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLAfterFinishedControllerConfiguration.
func (in *TTLAfterFinishedControllerConfiguration) DeepCopy() *TTLAfterFinishedControllerConfiguration {
if in == nil {
return nil
}
out := new(TTLAfterFinishedControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
appsv1listers "k8s.io/client-go/listers/apps/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/util/format"
nodepkg "k8s.io/kubernetes/pkg/util/node"
"k8s.io/klog/v2"
)
// DeletePods deletes, via the API server, all pods running on the given node,
// and returns true if any pods were deleted or were found pending deletion.
func DeletePods(ctx context.Context, kubeClient clientset.Interface, pods []*v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
remaining := false
var updateErrList []error
logger := klog.FromContext(ctx)
if len(pods) > 0 {
RecordNodeEvent(ctx, recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
}
for i := range pods {
// Defensive check, also needed for tests.
if pods[i].Spec.NodeName != nodeName {
continue
}
// Pod will be modified, so making a copy is required.
pod := pods[i].DeepCopy()
// Set reason and message in the pod object.
if _, err := SetPodTerminationReason(ctx, kubeClient, pod, nodeName); err != nil {
if apierrors.IsConflict(err) {
updateErrList = append(updateErrList,
fmt.Errorf("update status failed for pod %q: %v", format.Pod(pod), err))
continue
}
}
// If the pod has already been marked for deletion, we still report that pods remain.
if pod.DeletionGracePeriodSeconds != nil {
remaining = true
continue
}
// if the pod is managed by a daemonset, ignore it
if _, err := daemonStore.GetPodDaemonSets(pod); err == nil {
// No error means at least one daemonset was found
continue
}
logger.V(2).Info("Starting deletion of pod", "pod", klog.KObj(pod))
recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
// NotFound error means that pod was already deleted.
// There is nothing left to do with this pod.
continue
}
return false, err
}
remaining = true
}
if len(updateErrList) > 0 {
return false, utilerrors.NewAggregate(updateErrList)
}
return remaining, nil
}
// SetPodTerminationReason attempts to set a reason and message in the
// pod status, updates it in the apiserver, and returns an error if it
// encounters one.
func SetPodTerminationReason(ctx context.Context, kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == nodepkg.NodeUnreachablePodReason {
return pod, nil
}
pod.Status.Reason = nodepkg.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(nodepkg.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
return nil, err
}
return updatedPod, nil
}
// MarkPodsNotReady updates the ready status of the given pods running on the
// given node, returning an aggregate error if any update fails.
func MarkPodsNotReady(ctx context.Context, kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Update ready status of pods on node", "node", klog.KRef("", nodeName))
errs := []error{}
for i := range pods {
// Defensive check, also needed for tests.
if pods[i].Spec.NodeName != nodeName {
continue
}
// Pod will be modified, so making a copy is required.
pod := pods[i].DeepCopy()
for _, cond := range pod.Status.Conditions {
if cond.Type != v1.PodReady {
continue
}
cond.Status = v1.ConditionFalse
if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
break
}
logger.V(2).Info("Updating ready status of pod to false", "pod", klog.KObj(pod))
if _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
if apierrors.IsNotFound(err) {
// NotFound error means that pod was already deleted.
// There is nothing left to do with this pod.
continue
}
logger.Info("Failed to update status for pod", "pod", klog.KObj(pod), "err", err)
errs = append(errs, err)
}
// record NodeNotReady event after updateStatus to make sure pod still exists
recorder.Event(pod, v1.EventTypeWarning, "NodeNotReady", "Node is not ready")
break
}
}
return utilerrors.NewAggregate(errs)
}
// RecordNodeEvent records an event related to a node.
func RecordNodeEvent(ctx context.Context, recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
logger := klog.FromContext(ctx)
ref := &v1.ObjectReference{
APIVersion: "v1",
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeUID),
Namespace: "",
}
logger.V(2).Info("Recording event message for node", "event", event, "node", klog.KRef("", nodeName))
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
// RecordNodeStatusChange records an event related to a node status change. (Common to lifecycle and ipam)
func RecordNodeStatusChange(logger klog.Logger, recorder record.EventRecorder, node *v1.Node, newStatus string) {
ref := &v1.ObjectReference{
APIVersion: "v1",
Kind: "Node",
Name: node.Name,
UID: node.UID,
Namespace: "",
}
logger.V(2).Info("Recording status change event message for node", "status", newStatus, "node", node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, v1.EventTypeNormal, newStatus, "Node %s status is now: %s", node.Name, newStatus)
}
// SwapNodeControllerTaint adds taintsToAdd to and removes taintsToRemove from
// the given node, returning true on success and false otherwise.
func SwapNodeControllerTaint(ctx context.Context, kubeClient clientset.Interface, taintsToAdd, taintsToRemove []*v1.Taint, node *v1.Node) bool {
logger := klog.FromContext(ctx)
for _, taintToAdd := range taintsToAdd {
now := metav1.Now()
taintToAdd.TimeAdded = &now
}
err := controller.AddOrUpdateTaintOnNode(ctx, kubeClient, node.Name, taintsToAdd...)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to taint %+v unresponsive Node %q: %v",
taintsToAdd,
node.Name,
err))
return false
}
logger.V(4).Info("Added taint to node", "taint", taintsToAdd, "node", klog.KRef("", node.Name))
err = controller.RemoveTaintOffNode(ctx, kubeClient, node.Name, node, taintsToRemove...)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to remove %+v unneeded taint from unresponsive Node %q: %v",
taintsToRemove,
node.Name,
err))
return false
}
logger.V(4).Info("Made sure that node has no taint", "node", klog.KRef("", node.Name), "taint", taintsToRemove)
return true
}
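// Illustrative usage (the taint variables are hypothetical): the node
// lifecycle controller uses this helper to replace one NoExecute taint with
// another, e.g. swapping a not-ready taint for an unreachable one:
//
//	if !SwapNodeControllerTaint(ctx, kubeClient, []*v1.Taint{unreachableTaint}, []*v1.Taint{notReadyTaint}, node) {
//		return // retry on the next reconciliation
//	}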
// AddOrUpdateLabelsOnNode updates the labels on the node and returns true on
// success and false on failure.
func AddOrUpdateLabelsOnNode(ctx context.Context, kubeClient clientset.Interface, labelsToUpdate map[string]string, node *v1.Node) bool {
logger := klog.FromContext(ctx)
if err := controller.AddOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate); err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to update labels %+v for Node %q: %v",
labelsToUpdate,
node.Name,
err))
return false
}
logger.V(4).Info("Updated labels to node", "label", labelsToUpdate, "node", klog.KRef("", node.Name))
return true
}
// CreateAddNodeHandler creates an add node handler.
func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) {
return func(originalObj interface{}) {
node := originalObj.(*v1.Node).DeepCopy()
if err := f(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %v", err))
}
}
}
// CreateUpdateNodeHandler creates a node update handler. (Common to lifecycle and ipam)
func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldObj, newObj interface{}) {
return func(origOldObj, origNewObj interface{}) {
node := origNewObj.(*v1.Node).DeepCopy()
prevNode := origOldObj.(*v1.Node).DeepCopy()
if err := f(prevNode, node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
}
}
}
// CreateDeleteNodeHandler creates a delete node handler. (Common to lifecycle and ipam)
func CreateDeleteNodeHandler(logger klog.Logger, f func(node *v1.Node) error) func(obj interface{}) {
return func(originalObj interface{}) {
originalNode, isNode := originalObj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and
// we need to handle that correctly. #34692
if !isNode {
deletedState, ok := originalObj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Error(nil, "Received unexpected object", "object", originalObj)
return
}
originalNode, ok = deletedState.Obj.(*v1.Node)
if !ok {
logger.Error(nil, "DeletedFinalStateUnknown contained non-Node object", "object", deletedState.Obj)
return
}
}
node := originalNode.DeepCopy()
if err := f(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
}
}
}
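// Illustrative wiring sketch (the informer and callbacks are hypothetical):
// these factories adapt typed callbacks to the untyped informer interface:
//
//	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
//		AddFunc:    CreateAddNodeHandler(onAdd),
//		UpdateFunc: CreateUpdateNodeHandler(onUpdate),
//		DeleteFunc: CreateDeleteNodeHandler(logger, onDelete),
//	})
//
// where onAdd(*v1.Node) error, onUpdate(old, new *v1.Node) error and
// onDelete(*v1.Node) error are the controller's own handlers.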
// GetNodeCondition extracts the provided condition from the given status.
// It returns the index of the located condition and a pointer to it, or
// -1 and nil if the condition is not present.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
if status == nil {
return -1, nil
}
for i := range status.Conditions {
if status.Conditions[i].Type == conditionType {
return i, &status.Conditions[i]
}
}
return -1, nil
}
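// Illustrative usage:
//
//	_, readyCondition := GetNodeCondition(&node.Status, v1.NodeReady)
//	if readyCondition == nil || readyCondition.Status != v1.ConditionTrue {
//		// Node is not known to be Ready.
//	}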
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// need to be manually exposed like this in order for other packages that reference
// this package to be able to call these conversion functions in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well. This is a limitation that affects all controller
// configurations. This issue was introduced in #72800.
// Convert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration(in *v1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration, out *config.ValidatingAdmissionPolicyStatusControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration(in, out, s)
}
// Convert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration is an autogenerated conversion function.
func Convert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration(in *config.ValidatingAdmissionPolicyStatusControllerConfiguration, out *v1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration(in, out, s)
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration defaults a pointer to a
// ValidatingAdmissionPolicyStatusControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it in your wrapper struct's `SetDefaults_` method.
func RecommendedDefaultValidatingAdmissionPolicyStatusControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration) {
if obj.ConcurrentPolicySyncs == 0 {
obj.ConcurrentPolicySyncs = 5
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.ValidatingAdmissionPolicyStatusControllerConfiguration)(nil), (*configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration(a.(*config.ValidatingAdmissionPolicyStatusControllerConfiguration), b.(*configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration)(nil), (*config.ValidatingAdmissionPolicyStatusControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration(a.(*configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration), b.(*config.ValidatingAdmissionPolicyStatusControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration_To_config_ValidatingAdmissionPolicyStatusControllerConfiguration(in *configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration, out *config.ValidatingAdmissionPolicyStatusControllerConfiguration, s conversion.Scope) error {
out.ConcurrentPolicySyncs = in.ConcurrentPolicySyncs
return nil
}
func autoConvert_config_ValidatingAdmissionPolicyStatusControllerConfiguration_To_v1alpha1_ValidatingAdmissionPolicyStatusControllerConfiguration(in *config.ValidatingAdmissionPolicyStatusControllerConfiguration, out *configv1alpha1.ValidatingAdmissionPolicyStatusControllerConfiguration, s conversion.Scope) error {
out.ConcurrentPolicySyncs = in.ConcurrentPolicySyncs
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingAdmissionPolicyStatusControllerConfiguration) DeepCopyInto(out *ValidatingAdmissionPolicyStatusControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatusControllerConfiguration.
func (in *ValidatingAdmissionPolicyStatusControllerConfiguration) DeepCopy() *ValidatingAdmissionPolicyStatusControllerConfiguration {
if in == nil {
return nil
}
out := new(ValidatingAdmissionPolicyStatusControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with AttachDetachControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration(in *v1alpha1.AttachDetachControllerConfiguration, out *config.AttachDetachControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration(in, out, s)
}
// Convert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration is an autogenerated conversion function.
func Convert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration(in *config.AttachDetachControllerConfiguration, out *v1alpha1.AttachDetachControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultAttachDetachControllerConfiguration defaults a pointer to a
// AttachDetachControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it in your wrapper struct's `SetDefaults_` method.
func RecommendedDefaultAttachDetachControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.AttachDetachControllerConfiguration) {
zero := metav1.Duration{}
if obj.ReconcilerSyncLoopPeriod == zero {
obj.ReconcilerSyncLoopPeriod = metav1.Duration{Duration: 60 * time.Second}
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.AttachDetachControllerConfiguration)(nil), (*configv1alpha1.AttachDetachControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration(a.(*config.AttachDetachControllerConfiguration), b.(*configv1alpha1.AttachDetachControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.AttachDetachControllerConfiguration)(nil), (*config.AttachDetachControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration(a.(*configv1alpha1.AttachDetachControllerConfiguration), b.(*config.AttachDetachControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_AttachDetachControllerConfiguration_To_config_AttachDetachControllerConfiguration(in *configv1alpha1.AttachDetachControllerConfiguration, out *config.AttachDetachControllerConfiguration, s conversion.Scope) error {
out.DisableAttachDetachReconcilerSync = in.DisableAttachDetachReconcilerSync
out.ReconcilerSyncLoopPeriod = in.ReconcilerSyncLoopPeriod
out.DisableForceDetachOnTimeout = in.DisableForceDetachOnTimeout
return nil
}
func autoConvert_config_AttachDetachControllerConfiguration_To_v1alpha1_AttachDetachControllerConfiguration(in *config.AttachDetachControllerConfiguration, out *configv1alpha1.AttachDetachControllerConfiguration, s conversion.Scope) error {
out.DisableAttachDetachReconcilerSync = in.DisableAttachDetachReconcilerSync
out.ReconcilerSyncLoopPeriod = in.ReconcilerSyncLoopPeriod
out.DisableForceDetachOnTimeout = in.DisableForceDetachOnTimeout
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AttachDetachControllerConfiguration) DeepCopyInto(out *AttachDetachControllerConfiguration) {
*out = *in
out.ReconcilerSyncLoopPeriod = in.ReconcilerSyncLoopPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachDetachControllerConfiguration.
func (in *AttachDetachControllerConfiguration) DeepCopy() *AttachDetachControllerConfiguration {
if in == nil {
return nil
}
out := new(AttachDetachControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/volume/ephemeral/config"
)
// Important! The public back-and-forth conversion functions for the types in this package
// with EphemeralVolumeControllerConfiguration types need to be manually exposed like this in order for
// other packages that reference this package to be able to call these conversion functions
// in an autogenerated manner.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration(in *v1alpha1.EphemeralVolumeControllerConfiguration, out *config.EphemeralVolumeControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration(in, out, s)
}
// Convert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration is an autogenerated conversion function.
func Convert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration(in *config.EphemeralVolumeControllerConfiguration, out *v1alpha1.EphemeralVolumeControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration(in, out, s)
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
)
// RecommendedDefaultEphemeralVolumeControllerConfiguration defaults a pointer to a
// EphemeralVolumeControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function, to allow consumers of this type to set whatever defaults they want
// for their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, run it in your wrapper struct's `SetDefaults_` method.
func RecommendedDefaultEphemeralVolumeControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.EphemeralVolumeControllerConfiguration) {
if obj.ConcurrentEphemeralVolumeSyncs == 0 {
obj.ConcurrentEphemeralVolumeSyncs = 5
}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.EphemeralVolumeControllerConfiguration)(nil), (*configv1alpha1.EphemeralVolumeControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration(a.(*config.EphemeralVolumeControllerConfiguration), b.(*configv1alpha1.EphemeralVolumeControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.EphemeralVolumeControllerConfiguration)(nil), (*config.EphemeralVolumeControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration(a.(*configv1alpha1.EphemeralVolumeControllerConfiguration), b.(*config.EphemeralVolumeControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_EphemeralVolumeControllerConfiguration_To_config_EphemeralVolumeControllerConfiguration(in *configv1alpha1.EphemeralVolumeControllerConfiguration, out *config.EphemeralVolumeControllerConfiguration, s conversion.Scope) error {
out.ConcurrentEphemeralVolumeSyncs = in.ConcurrentEphemeralVolumeSyncs
return nil
}
func autoConvert_config_EphemeralVolumeControllerConfiguration_To_v1alpha1_EphemeralVolumeControllerConfiguration(in *config.EphemeralVolumeControllerConfiguration, out *configv1alpha1.EphemeralVolumeControllerConfiguration, s conversion.Scope) error {
out.ConcurrentEphemeralVolumeSyncs = in.ConcurrentEphemeralVolumeSyncs
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralVolumeControllerConfiguration) DeepCopyInto(out *EphemeralVolumeControllerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeControllerConfiguration.
func (in *EphemeralVolumeControllerConfiguration) DeepCopy() *EphemeralVolumeControllerConfiguration {
if in == nil {
return nil
}
out := new(EphemeralVolumeControllerConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
)
// Important! The public back-and-forth conversion functions between the types in this package
// and the PersistentVolumeBinderControllerConfiguration types must be manually exposed like this
// so that other packages that reference this package can call these conversion functions
// from their own autogenerated code.
// TODO: Fix the bug in conversion-gen so it automatically discovers these Convert_* functions
// in autogenerated code as well.
// Convert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration(in *v1alpha1.PersistentVolumeBinderControllerConfiguration, out *config.PersistentVolumeBinderControllerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration(in, out, s)
}
// Convert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration is an autogenerated conversion function.
func Convert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration(in *config.PersistentVolumeBinderControllerConfiguration, out *v1alpha1.PersistentVolumeBinderControllerConfiguration, s conversion.Scope) error {
return autoConvert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration(in, out, s)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
"k8s.io/utils/ptr"
)
// RecommendedDefaultPersistentVolumeBinderControllerConfiguration defaults a pointer to a
// PersistentVolumeBinderControllerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want for
// their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultPersistentVolumeBinderControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.PersistentVolumeBinderControllerConfiguration) {
zero := metav1.Duration{}
if obj.PVClaimBinderSyncPeriod == zero {
obj.PVClaimBinderSyncPeriod = metav1.Duration{Duration: 15 * time.Second}
}
// Use the default VolumeConfiguration options.
RecommendedDefaultVolumeConfiguration(&obj.VolumeConfiguration)
}
// RecommendedDefaultVolumeConfiguration defaults a pointer to a VolumeConfiguration
// struct. This will set the recommended default values, but they may be subject to
// change between API versions. This function is intentionally not registered in the
// scheme as a "normal" `SetDefaults_Foo` function to allow consumers of this type to
// set whatever defaults they want for their embedded configs. Forcing consumers to
// use these defaults would be problematic, as defaulting in the scheme is done as
// part of the conversion and there would be no easy way to opt out. Instead, if you
// want to use this defaulting method, call it from the `SetDefaults_` method of your
// wrapper struct.
func RecommendedDefaultVolumeConfiguration(obj *kubectrlmgrconfigv1alpha1.VolumeConfiguration) {
if obj.EnableHostPathProvisioning == nil {
obj.EnableHostPathProvisioning = ptr.To(false)
}
if obj.EnableDynamicProvisioning == nil {
obj.EnableDynamicProvisioning = ptr.To(true)
}
if obj.FlexVolumePluginDir == "" {
obj.FlexVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
}
// Use the default PersistentVolumeRecyclerConfiguration options.
RecommendedDefaultPersistentVolumeRecyclerConfiguration(&obj.PersistentVolumeRecyclerConfiguration)
}
// RecommendedDefaultPersistentVolumeRecyclerConfiguration defaults a pointer to a
// PersistentVolumeRecyclerConfiguration struct. This will set the recommended default
// values, but they may be subject to change between API versions. This function
// is intentionally not registered in the scheme as a "normal" `SetDefaults_Foo`
// function to allow consumers of this type to set whatever defaults they want for
// their embedded configs. Forcing consumers to use these defaults would be
// problematic, as defaulting in the scheme is done as part of the conversion and
// there would be no easy way to opt out. Instead, if you want to use this
// defaulting method, call it from the `SetDefaults_` method of your wrapper struct.
func RecommendedDefaultPersistentVolumeRecyclerConfiguration(obj *kubectrlmgrconfigv1alpha1.PersistentVolumeRecyclerConfiguration) {
if obj.MaximumRetry == 0 {
obj.MaximumRetry = 3
}
if obj.MinimumTimeoutNFS == 0 {
obj.MinimumTimeoutNFS = 300
}
if obj.IncrementTimeoutNFS == 0 {
obj.IncrementTimeoutNFS = 30
}
if obj.MinimumTimeoutHostPath == 0 {
obj.MinimumTimeoutHostPath = 60
}
if obj.IncrementTimeoutHostPath == 0 {
obj.IncrementTimeoutHostPath = 30
}
}
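// Illustrative sketch (not part of the upstream source): applying the binder
// defaults cascades through the embedded configs via the calls above.
//
//	cfg := &kubectrlmgrconfigv1alpha1.PersistentVolumeBinderControllerConfiguration{}
//	RecommendedDefaultPersistentVolumeBinderControllerConfiguration(cfg)
//	// cfg.PVClaimBinderSyncPeriod == metav1.Duration{Duration: 15 * time.Second}
//	// *cfg.VolumeConfiguration.EnableDynamicProvisioning == true
//	// cfg.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry == 3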
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
config "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.GroupResource)(nil), (*v1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_GroupResource_To_v1_GroupResource(a.(*configv1alpha1.GroupResource), b.(*v1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.GroupResource)(nil), (*configv1alpha1.GroupResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_GroupResource_To_v1alpha1_GroupResource(a.(*v1.GroupResource), b.(*configv1alpha1.GroupResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.PersistentVolumeRecyclerConfiguration)(nil), (*config.PersistentVolumeRecyclerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration(a.(*configv1alpha1.PersistentVolumeRecyclerConfiguration), b.(*config.PersistentVolumeRecyclerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.PersistentVolumeRecyclerConfiguration)(nil), (*configv1alpha1.PersistentVolumeRecyclerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration(a.(*config.PersistentVolumeRecyclerConfiguration), b.(*configv1alpha1.PersistentVolumeRecyclerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.VolumeConfiguration)(nil), (*config.VolumeConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration(a.(*configv1alpha1.VolumeConfiguration), b.(*config.VolumeConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.VolumeConfiguration)(nil), (*configv1alpha1.VolumeConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration(a.(*config.VolumeConfiguration), b.(*configv1alpha1.VolumeConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.PersistentVolumeBinderControllerConfiguration)(nil), (*configv1alpha1.PersistentVolumeBinderControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration(a.(*config.PersistentVolumeBinderControllerConfiguration), b.(*configv1alpha1.PersistentVolumeBinderControllerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.PersistentVolumeBinderControllerConfiguration)(nil), (*config.PersistentVolumeBinderControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration(a.(*configv1alpha1.PersistentVolumeBinderControllerConfiguration), b.(*config.PersistentVolumeBinderControllerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1alpha1_GroupResource_To_v1_GroupResource is an autogenerated conversion function.
func Convert_v1alpha1_GroupResource_To_v1_GroupResource(in *configv1alpha1.GroupResource, out *v1.GroupResource, s conversion.Scope) error {
return autoConvert_v1alpha1_GroupResource_To_v1_GroupResource(in, out, s)
}
func autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
return nil
}
// Convert_v1_GroupResource_To_v1alpha1_GroupResource is an autogenerated conversion function.
func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, out *configv1alpha1.GroupResource, s conversion.Scope) error {
return autoConvert_v1_GroupResource_To_v1alpha1_GroupResource(in, out, s)
}
func autoConvert_v1alpha1_PersistentVolumeBinderControllerConfiguration_To_config_PersistentVolumeBinderControllerConfiguration(in *configv1alpha1.PersistentVolumeBinderControllerConfiguration, out *config.PersistentVolumeBinderControllerConfiguration, s conversion.Scope) error {
out.PVClaimBinderSyncPeriod = in.PVClaimBinderSyncPeriod
if err := Convert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration(&in.VolumeConfiguration, &out.VolumeConfiguration, s); err != nil {
return err
}
return nil
}
func autoConvert_config_PersistentVolumeBinderControllerConfiguration_To_v1alpha1_PersistentVolumeBinderControllerConfiguration(in *config.PersistentVolumeBinderControllerConfiguration, out *configv1alpha1.PersistentVolumeBinderControllerConfiguration, s conversion.Scope) error {
out.PVClaimBinderSyncPeriod = in.PVClaimBinderSyncPeriod
if err := Convert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration(&in.VolumeConfiguration, &out.VolumeConfiguration, s); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration(in *configv1alpha1.PersistentVolumeRecyclerConfiguration, out *config.PersistentVolumeRecyclerConfiguration, s conversion.Scope) error {
out.MaximumRetry = in.MaximumRetry
out.MinimumTimeoutNFS = in.MinimumTimeoutNFS
out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS
out.IncrementTimeoutNFS = in.IncrementTimeoutNFS
out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath
out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath
out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath
return nil
}
// Convert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration(in *configv1alpha1.PersistentVolumeRecyclerConfiguration, out *config.PersistentVolumeRecyclerConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration(in, out, s)
}
func autoConvert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration(in *config.PersistentVolumeRecyclerConfiguration, out *configv1alpha1.PersistentVolumeRecyclerConfiguration, s conversion.Scope) error {
out.MaximumRetry = in.MaximumRetry
out.MinimumTimeoutNFS = in.MinimumTimeoutNFS
out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS
out.IncrementTimeoutNFS = in.IncrementTimeoutNFS
out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath
out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath
out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath
return nil
}
// Convert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration is an autogenerated conversion function.
func Convert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration(in *config.PersistentVolumeRecyclerConfiguration, out *configv1alpha1.PersistentVolumeRecyclerConfiguration, s conversion.Scope) error {
return autoConvert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration(in, out, s)
}
func autoConvert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration(in *configv1alpha1.VolumeConfiguration, out *config.VolumeConfiguration, s conversion.Scope) error {
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableHostPathProvisioning, &out.EnableHostPathProvisioning, s); err != nil {
return err
}
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableDynamicProvisioning, &out.EnableDynamicProvisioning, s); err != nil {
return err
}
if err := Convert_v1alpha1_PersistentVolumeRecyclerConfiguration_To_config_PersistentVolumeRecyclerConfiguration(&in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, s); err != nil {
return err
}
out.FlexVolumePluginDir = in.FlexVolumePluginDir
return nil
}
// Convert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration(in *configv1alpha1.VolumeConfiguration, out *config.VolumeConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_VolumeConfiguration_To_config_VolumeConfiguration(in, out, s)
}
func autoConvert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration(in *config.VolumeConfiguration, out *configv1alpha1.VolumeConfiguration, s conversion.Scope) error {
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableHostPathProvisioning, &out.EnableHostPathProvisioning, s); err != nil {
return err
}
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableDynamicProvisioning, &out.EnableDynamicProvisioning, s); err != nil {
return err
}
if err := Convert_config_PersistentVolumeRecyclerConfiguration_To_v1alpha1_PersistentVolumeRecyclerConfiguration(&in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, s); err != nil {
return err
}
out.FlexVolumePluginDir = in.FlexVolumePluginDir
return nil
}
// Convert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration is an autogenerated conversion function.
func Convert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration(in *config.VolumeConfiguration, out *configv1alpha1.VolumeConfiguration, s conversion.Scope) error {
return autoConvert_config_VolumeConfiguration_To_v1alpha1_VolumeConfiguration(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeBinderControllerConfiguration) DeepCopyInto(out *PersistentVolumeBinderControllerConfiguration) {
*out = *in
out.PVClaimBinderSyncPeriod = in.PVClaimBinderSyncPeriod
out.VolumeConfiguration = in.VolumeConfiguration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeBinderControllerConfiguration.
func (in *PersistentVolumeBinderControllerConfiguration) DeepCopy() *PersistentVolumeBinderControllerConfiguration {
if in == nil {
return nil
}
out := new(PersistentVolumeBinderControllerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeRecyclerConfiguration) DeepCopyInto(out *PersistentVolumeRecyclerConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeRecyclerConfiguration.
func (in *PersistentVolumeRecyclerConfiguration) DeepCopy() *PersistentVolumeRecyclerConfiguration {
if in == nil {
return nil
}
out := new(PersistentVolumeRecyclerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeConfiguration) DeepCopyInto(out *VolumeConfiguration) {
*out = *in
out.PersistentVolumeRecyclerConfiguration = in.PersistentVolumeRecyclerConfiguration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeConfiguration.
func (in *VolumeConfiguration) DeepCopy() *VolumeConfiguration {
if in == nil {
return nil
}
out := new(VolumeConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentialprovider
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"k8s.io/klog/v2"
)
const (
maxReadLength = 10 * 1 << 20 // 10MB
)
// DockerConfigJSON represents ~/.docker/config.json file info
// see https://github.com/docker/docker/pull/12009
type DockerConfigJSON struct {
Auths DockerConfig `json:"auths"`
// +optional
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
}
// DockerConfig represents the config file used by the docker CLI.
// This config represents the credentials that should be used
// when pulling images from specific image repositories.
type DockerConfig map[string]DockerConfigEntry
// DockerConfigEntry wraps a docker config as an entry
type DockerConfigEntry struct {
Username string
Password string
Email string
Provider DockerConfigProvider
}
var (
preferredPathLock sync.Mutex
preferredPath = ""
workingDirPath = ""
homeDirPath, _ = os.UserHomeDir()
rootDirPath = "/"
homeJSONDirPath = filepath.Join(homeDirPath, ".docker")
rootJSONDirPath = filepath.Join(rootDirPath, ".docker")
configFileName = ".dockercfg"
configJSONFileName = "config.json"
)
// SetPreferredDockercfgPath sets the preferred docker config path
func SetPreferredDockercfgPath(path string) {
preferredPathLock.Lock()
defer preferredPathLock.Unlock()
preferredPath = path
}
// GetPreferredDockercfgPath returns the preferred docker config path
func GetPreferredDockercfgPath() string {
preferredPathLock.Lock()
defer preferredPathLock.Unlock()
return preferredPath
}
// DefaultDockercfgPaths returns the default search paths for .dockercfg
func DefaultDockercfgPaths() []string {
return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath}
}
// DefaultDockerConfigJSONPaths returns the default search paths for .docker/config.json
func DefaultDockerConfigJSONPaths() []string {
return []string{GetPreferredDockercfgPath(), workingDirPath, homeJSONDirPath, rootJSONDirPath}
}
// ReadDockercfgFile attempts to read a legacy dockercfg file from the given paths.
// If searchPaths is empty, the default paths are used.
func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) {
if len(searchPaths) == 0 {
searchPaths = DefaultDockercfgPaths()
}
for _, configPath := range searchPaths {
absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName))
if err != nil {
klog.Errorf("while trying to canonicalize %s: %v", configPath, err)
continue
}
klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation)
contents, err := os.ReadFile(absDockerConfigFileLocation)
if os.IsNotExist(err) {
continue
}
if err != nil {
klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err)
continue
}
cfg, err := ReadDockerConfigFileFromBytes(contents)
if err != nil {
klog.V(4).Infof("couldn't get the config from %q contents: %v", absDockerConfigFileLocation, err)
continue
}
klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
return cfg, nil
}
return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", searchPaths)
}
// ReadDockerConfigJSONFile attempts to read a docker config.json file from the given paths.
// If searchPaths is empty, the default paths are used.
func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error) {
if len(searchPaths) == 0 {
searchPaths = DefaultDockerConfigJSONPaths()
}
for _, configPath := range searchPaths {
absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJSONFileName))
if err != nil {
klog.Errorf("while trying to canonicalize %s: %v", configPath, err)
continue
}
klog.V(4).Infof("looking for %s at %s", configJSONFileName, absDockerConfigFileLocation)
cfg, err = ReadSpecificDockerConfigJSONFile(absDockerConfigFileLocation)
if err != nil {
if !os.IsNotExist(err) {
klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err)
}
continue
}
klog.V(4).Infof("found valid %s at %s", configJSONFileName, absDockerConfigFileLocation)
return cfg, nil
}
return nil, fmt.Errorf("couldn't find valid %s after checking in %v", configJSONFileName, searchPaths)
}
// ReadSpecificDockerConfigJSONFile attempts to read a docker config.json file from the given file path.
func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) {
var contents []byte
if contents, err = os.ReadFile(filePath); err != nil {
return nil, err
}
return readDockerConfigJSONFileFromBytes(contents)
}
// ReadDockerConfigFile reads a docker config file from the default paths,
// trying config.json first and falling back to the legacy .dockercfg format.
func ReadDockerConfigFile() (cfg DockerConfig, err error) {
if cfg, err := ReadDockerConfigJSONFile(nil); err == nil {
return cfg, nil
}
// Can't find latest config file so check for the old one
return ReadDockercfgFile(nil)
}
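// Usage sketch (illustrative; the preferred path is an assumption, not a default):
//
//	SetPreferredDockercfgPath("/var/lib/kubelet")
//	cfg, err := ReadDockerConfigFile()
//	if err != nil {
//		// neither config.json nor .dockercfg was found in any search path
//	}
//
// In both formats the preferred path is consulted first, followed by the
// working directory, the home directory, and finally the root directory.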
// HTTPError wraps a non-StatusOK error code as an error.
type HTTPError struct {
StatusCode int
URL string
}
// Error implements error
func (he *HTTPError) Error() string {
return fmt.Sprintf("http status code: %d while fetching url %s",
he.StatusCode, he.URL)
}
// ReadURL reads the contents from the given URL, limiting the response body to maxReadLength bytes
func ReadURL(url string, client *http.Client, header *http.Header) (body []byte, err error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
if header != nil {
req.Header = *header
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
klog.V(2).InfoS("Failed to read URL", "statusCode", resp.StatusCode, "URL", url)
return nil, &HTTPError{
StatusCode: resp.StatusCode,
URL: url,
}
}
limitedReader := &io.LimitedReader{R: resp.Body, N: maxReadLength}
contents, err := io.ReadAll(limitedReader)
if err != nil {
return nil, err
}
if limitedReader.N <= 0 {
return nil, errors.New("the read limit is reached")
}
return contents, nil
}
// ReadDockerConfigFileFromBytes reads a docker config file from the given bytes
func ReadDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
if err = json.Unmarshal(contents, &cfg); err != nil {
return nil, errors.New("error occurred while trying to unmarshal json")
}
return
}
func readDockerConfigJSONFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
var cfgJSON DockerConfigJSON
if err = json.Unmarshal(contents, &cfgJSON); err != nil {
return nil, errors.New("error occurred while trying to unmarshal json")
}
cfg = cfgJSON.Auths
return
}
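// The two on-disk formats differ only in the top-level wrapper (illustrative JSON):
//
//	.dockercfg:  {"registry.example.com": {"auth": "..."}}
//	config.json: {"auths": {"registry.example.com": {"auth": "..."}}}
//
// which is why this helper unwraps the "auths" object before returning the same
// DockerConfig map that ReadDockerConfigFileFromBytes parses directly.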
// dockerConfigEntryWithAuth is used solely for deserializing the Auth field
// into a DockerConfigEntry during JSON deserialization.
type dockerConfigEntryWithAuth struct {
// +optional
Username string `json:"username,omitempty"`
// +optional
Password string `json:"password,omitempty"`
// +optional
Email string `json:"email,omitempty"`
// +optional
Auth string `json:"auth,omitempty"`
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (ident *DockerConfigEntry) UnmarshalJSON(data []byte) error {
var tmp dockerConfigEntryWithAuth
err := json.Unmarshal(data, &tmp)
if err != nil {
return err
}
ident.Username = tmp.Username
ident.Password = tmp.Password
ident.Email = tmp.Email
if len(tmp.Auth) == 0 {
return nil
}
ident.Username, ident.Password, err = decodeDockerConfigFieldAuth(tmp.Auth)
return err
}
// MarshalJSON implements the json.Marshaler interface.
func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) {
toEncode := dockerConfigEntryWithAuth{ident.Username, ident.Password, ident.Email, ""}
toEncode.Auth = encodeDockerConfigFieldAuth(ident.Username, ident.Password)
return json.Marshal(toEncode)
}
// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
// username and a password. The format of the auth field is base64(<username>:<password>).
func decodeDockerConfigFieldAuth(field string) (username, password string, err error) {
var decoded []byte
// StdEncoding can only decode a padded string,
// while RawStdEncoding can only decode an unpadded string.
if strings.HasSuffix(strings.TrimSpace(field), "=") {
// decode padded data
decoded, err = base64.StdEncoding.DecodeString(field)
} else {
// decode unpadded data
decoded, err = base64.RawStdEncoding.DecodeString(field)
}
if err != nil {
return
}
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
err = fmt.Errorf("unable to parse auth field, must be formatted as base64(username:password)")
return
}
username = parts[0]
password = parts[1]
return
}
func encodeDockerConfigFieldAuth(username, password string) string {
fieldValue := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(fieldValue))
}
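// Round-trip sketch (example values only):
//
//	auth := encodeDockerConfigFieldAuth("user", "pass") // "dXNlcjpwYXNz"
//	u, p, err := decodeDockerConfigFieldAuth(auth)      // "user", "pass", nil
//
// Encoding always uses the padded StdEncoding, while decoding accepts both
// padded and unpadded input by checking for a trailing "=".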
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentialprovider
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"net"
"net/url"
"path/filepath"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
)
// DockerKeyring tracks a set of docker registry credentials, maintaining a
// reverse index across the registry endpoints. A registry endpoint is made
// up of a host (e.g. registry.example.com), but it may also contain a path
// (e.g. registry.example.com/foo). This index is important for two reasons:
// - registry endpoints may overlap, and when this happens we must find the
// most specific match for a given image
// - iterating a map does not yield predictable results
type DockerKeyring interface {
Lookup(image string) ([]TrackedAuthConfig, bool)
}
// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring
type BasicDockerKeyring struct {
index []string
creds map[string][]TrackedAuthConfig
}
// providersDockerKeyring is an implementation of DockerKeyring that
// materializes its dockercfg based on a set of dockerConfigProviders.
type providersDockerKeyring struct {
Providers []DockerConfigProvider
}
// TrackedAuthConfig wraps the AuthConfig and adds information about the source
// of the credentials.
type TrackedAuthConfig struct {
AuthConfig
AuthConfigHash string
Source *CredentialSource
}
// NewTrackedAuthConfig initializes the TrackedAuthConfig structure by adding
// the source information to the supplied AuthConfig. It also computes a hash of the
// AuthConfig and keeps it in the returned structure.
//
// The supplied CredentialSource is only used when the "KubeletEnsureSecretPulledImages"
// feature gate is enabled; the same applies to computing the hash.
func NewTrackedAuthConfig(c *AuthConfig, src *CredentialSource) *TrackedAuthConfig {
if c == nil {
panic("cannot construct TrackedAuthConfig with a nil AuthConfig")
}
authConfig := &TrackedAuthConfig{
AuthConfig: *c,
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
authConfig.Source = src
authConfig.AuthConfigHash = hashAuthConfig(c)
}
return authConfig
}
type CredentialSource struct {
Secret *SecretCoordinates
ServiceAccount *ServiceAccountCoordinates
}
type SecretCoordinates struct {
UID string
Namespace string
Name string
}
type ServiceAccountCoordinates struct {
UID string
Namespace string
Name string
}
// AuthConfig contains authorization information for connecting to a Registry
// This type mirrors "github.com/docker/docker/api/types.AuthConfig"
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
// Add inserts the docker config `cfg` into the basic docker keyring. It attaches
// the `src` information that describes where the docker config `cfg` comes from.
// `src` is nil if the docker config is globally available on the node.
func (dk *BasicDockerKeyring) Add(src *CredentialSource, cfg DockerConfig) {
if dk.index == nil {
dk.index = make([]string, 0)
dk.creds = make(map[string][]TrackedAuthConfig)
}
for loc, ident := range cfg {
creds := AuthConfig{
Username: ident.Username,
Password: ident.Password,
Email: ident.Email,
}
value := loc
if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") {
value = "https://" + value
}
parsed, err := url.Parse(value)
if err != nil {
klog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err)
continue
}
// The docker client allows exact matches:
// foo.bar.com/namespace
// Or hostname matches:
// foo.bar.com
// It also considers /v2/ and /v1/ equivalent to the hostname
// See ResolveAuthConfig in docker/registry/auth.go.
effectivePath := parsed.Path
if strings.HasPrefix(effectivePath, "/v2/") || strings.HasPrefix(effectivePath, "/v1/") {
effectivePath = effectivePath[3:]
}
var key string
if (len(effectivePath) > 0) && (effectivePath != "/") {
key = parsed.Host + effectivePath
} else {
key = parsed.Host
}
trackedCreds := NewTrackedAuthConfig(&creds, src)
dk.creds[key] = append(dk.creds[key], *trackedCreds)
dk.index = append(dk.index, key)
}
eliminateDupes := sets.NewString(dk.index...)
dk.index = eliminateDupes.List()
// Update the index used to identify which credentials to use for a given
// image. The index is reverse-sorted so more specific paths are matched
// first. For example, for the image "quay.io/coreos/etcd", credentials for
// "quay.io/coreos" should match before those for "quay.io".
sort.Sort(sort.Reverse(sort.StringSlice(dk.index)))
}
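// A minimal sketch of the resulting ordering (example values):
//
//	var kr BasicDockerKeyring
//	kr.Add(nil, DockerConfig{ // nil source: node-wide credentials
//		"quay.io":        {Username: "broad"},
//		"quay.io/coreos": {Username: "specific"},
//	})
//	// kr.index == []string{"quay.io/coreos", "quay.io"}, so a Lookup for
//	// "quay.io/coreos/etcd" returns the "specific" credentials first.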
const (
defaultRegistryHost = "index.docker.io"
defaultRegistryKey = defaultRegistryHost + "/v1/"
)
// isDefaultRegistryMatch determines whether the given image will
// pull from the default registry (DockerHub) based on the
// characteristics of its name.
func isDefaultRegistryMatch(image string) bool {
parts := strings.SplitN(image, "/", 2)
if len(parts[0]) == 0 {
return false
}
if len(parts) == 1 {
// e.g. library/ubuntu
return true
}
if parts[0] == "docker.io" || parts[0] == "index.docker.io" {
// resolve docker.io/image and index.docker.io/image as default registry
return true
}
// From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/
// Docker looks for either a “.” (domain separator) or “:” (port separator)
// to learn that the first part of the repository name is a location and not
// a user name.
return !strings.ContainsAny(parts[0], ".:")
}
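// For example (illustrative):
//
//	isDefaultRegistryMatch("library/ubuntu")           // true: no registry host
//	isDefaultRegistryMatch("docker.io/library/ubuntu") // true: explicit default registry
//	isDefaultRegistryMatch("registry.example.com/app") // false: "." marks a host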
// ParseSchemelessURL parses a schemeless URL and returns a url.URL.
// url.Parse requires a scheme, but ours don't have one, so we add a
// scheme to make url.Parse happy, then clear out the resulting scheme.
func ParseSchemelessURL(schemelessURL string) (*url.URL, error) {
parsed, err := url.Parse("https://" + schemelessURL)
if err != nil {
return nil, err
}
// clear out the resulting scheme
parsed.Scheme = ""
return parsed, nil
}
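// For instance (illustrative values):
//
//	u, _ := ParseSchemelessURL("registry.example.com:5000/foo")
//	// u.Scheme == "", u.Host == "registry.example.com:5000", u.Path == "/foo"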
// SplitURL splits the URL's host name into its dot-separated parts and also returns the port
func SplitURL(url *url.URL) (parts []string, port string) {
host, port, err := net.SplitHostPort(url.Host)
if err != nil {
// could not parse port
host, port = url.Host, ""
}
return strings.Split(host, "."), port
}
// URLsMatchStr is a wrapper for URLsMatch, operating on strings instead of URLs.
func URLsMatchStr(glob string, target string) (bool, error) {
globURL, err := ParseSchemelessURL(glob)
if err != nil {
return false, err
}
targetURL, err := ParseSchemelessURL(target)
if err != nil {
return false, err
}
return URLsMatch(globURL, targetURL)
}
// URLsMatch checks whether the given target URL matches the glob URL, which may have
// glob wildcards in the host name.
//
// Examples:
//
// globURL=*.docker.io, targetURL=blah.docker.io => match
// globURL=*.docker.io, targetURL=not.right.io => no match
//
// Note that we don't support wildcards in ports and paths yet.
func URLsMatch(globURL *url.URL, targetURL *url.URL) (bool, error) {
globURLParts, globPort := SplitURL(globURL)
targetURLParts, targetPort := SplitURL(targetURL)
if globPort != targetPort {
// port doesn't match
return false, nil
}
if len(globURLParts) != len(targetURLParts) {
// host name does not have the same number of parts
return false, nil
}
if !strings.HasPrefix(targetURL.Path, globURL.Path) {
// the path of the credential must be a prefix
return false, nil
}
for k, globURLPart := range globURLParts {
targetURLPart := targetURLParts[k]
matched, err := filepath.Match(globURLPart, targetURLPart)
if err != nil {
return false, err
}
if !matched {
// glob mismatch for some part
return false, nil
}
}
// everything matches
return true, nil
}
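// Usage sketch of the string form (example values):
//
//	ok, _ := URLsMatchStr("*.docker.io", "blah.docker.io") // true
//	ok, _ = URLsMatchStr("*.docker.io", "not.right.io")    // false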
// Lookup implements the DockerKeyring method for fetching credentials based on image name.
// Multiple credentials may be returned if there are multiple potentially valid credentials
// available. This allows for rotation.
func (dk *BasicDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
// range over the index as iterating over a map does not provide a predictable ordering
ret := []TrackedAuthConfig{}
for _, k := range dk.index {
// both k and image are schemeless URLs because even though schemes are allowed
// in the credential configurations, we remove them in Add.
if matched, _ := URLsMatchStr(k, image); matched {
ret = append(ret, dk.creds[k]...)
}
}
if len(ret) > 0 {
return ret, true
}
// Use credentials for the default registry if provided and appropriate
if isDefaultRegistryMatch(image) {
if auth, ok := dk.creds[defaultRegistryHost]; ok {
return auth, true
}
}
return []TrackedAuthConfig{}, false
}
// Lookup implements the DockerKeyring method for fetching credentials
// based on image name.
func (dk *providersDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
keyring := &BasicDockerKeyring{}
for _, p := range dk.Providers {
keyring.Add(nil, p.Provide(image))
}
return keyring.Lookup(image)
}
// FakeKeyring is a fake keyring that returns a preset set of credentials
type FakeKeyring struct {
auth []TrackedAuthConfig
ok bool
}
// Lookup implements the DockerKeyring method for fetching credentials based on image name
// It returns the preconfigured fake auth and ok values.
func (f *FakeKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
return f.auth, f.ok
}
// UnionDockerKeyring delegates to a set of keyrings.
type UnionDockerKeyring []DockerKeyring
// Lookup implements the DockerKeyring method for fetching credentials based on image name.
// It returns the merged credentials from every sub-keyring.
func (k UnionDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
authConfigs := []TrackedAuthConfig{}
for _, subKeyring := range k {
if subKeyring == nil {
continue
}
currAuthResults, _ := subKeyring.Lookup(image)
authConfigs = append(authConfigs, currAuthResults...)
}
return authConfigs, (len(authConfigs) > 0)
}
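// Composition sketch (the sub-keyrings here are hypothetical):
//
//	union := UnionDockerKeyring{nodeKeyring, pluginKeyring}
//	creds, found := union.Lookup("registry.example.com/app")
//
// Nil sub-keyrings are skipped, and found is true only when at least one
// sub-keyring returned credentials.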
func hashAuthConfig(creds *AuthConfig) string {
credBytes, err := json.Marshal(creds)
if err != nil {
return ""
}
hash := sha256.New()
hash.Write(credBytes)
return hex.EncodeToString(hash.Sum(nil))
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
var (
validCacheTypes = sets.New[string](
string(kubeletconfig.ServiceAccountServiceAccountTokenCacheType),
string(kubeletconfig.TokenServiceAccountTokenCacheType),
)
)
// readCredentialProviderConfig receives a path to a config file or directory.
// If the path is a directory, it reads all "*.json", "*.yaml" and "*.yml" files in lexicographic order,
// decodes them, and merges their entries into a single CredentialProviderConfig object.
// If the path is a file, it decodes the file into a CredentialProviderConfig object directly.
// It also returns the SHA256 hash of all the raw file content. This hash is exposed via metrics
// as an external API to allow monitoring of configuration changes.
// The hash format follows container digest conventions (sha256:hexstring) for consistency.
func readCredentialProviderConfig(configPath string) (*kubeletconfig.CredentialProviderConfig, string, error) {
if configPath == "" {
return nil, "", fmt.Errorf("credential provider config path is empty")
}
fileInfo, err := os.Stat(configPath)
if err != nil {
return nil, "", fmt.Errorf("unable to access path %q: %w", configPath, err)
}
var configs []*kubeletconfig.CredentialProviderConfig
var configFiles []string
if fileInfo.IsDir() {
entries, err := os.ReadDir(configPath)
if err != nil {
return nil, "", fmt.Errorf("unable to read directory %q: %w", configPath, err)
}
// Filter and sort *.json/*.yaml/*.yml files in lexicographic order
for _, entry := range entries {
ext := filepath.Ext(entry.Name())
if !entry.IsDir() && (ext == ".json" || ext == ".yaml" || ext == ".yml") {
configFiles = append(configFiles, filepath.Join(configPath, entry.Name()))
}
}
sort.Strings(configFiles)
if len(configFiles) == 0 {
return nil, "", fmt.Errorf("no configuration files found in directory %q", configPath)
}
} else {
configFiles = append(configFiles, configPath)
}
hasher := sha256.New()
for _, filePath := range configFiles {
data, err := os.ReadFile(filePath)
if err != nil {
return nil, "", fmt.Errorf("unable to read file %q: %w", filePath, err)
}
// Use length prefix to prevent hash collisions
dataLen := uint64(len(data))
if err := binary.Write(hasher, binary.BigEndian, dataLen); err != nil {
return nil, "", fmt.Errorf("error writing length prefix for file %q: %w", filePath, err)
}
hasher.Write(data)
config, err := decode(data)
if err != nil {
return nil, "", fmt.Errorf("error decoding config %q: %w", filePath, err)
}
configs = append(configs, config)
}
// Merge all configs into a single CredentialProviderConfig
mergedConfig := &kubeletconfig.CredentialProviderConfig{}
providerNames := sets.NewString()
for _, config := range configs {
for _, provider := range config.Providers {
if providerNames.Has(provider.Name) {
return nil, "", fmt.Errorf("duplicate provider name %q found in configuration file(s)", provider.Name)
}
providerNames.Insert(provider.Name)
mergedConfig.Providers = append(mergedConfig.Providers, provider)
}
}
configHash := fmt.Sprintf("sha256:%x", hasher.Sum(nil))
return mergedConfig, configHash, nil
}
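// A minimal single-provider config file accepted by this reader might look like
// the following (illustrative; the provider name and image pattern are assumptions):
//
//	apiVersion: kubelet.config.k8s.io/v1
//	kind: CredentialProviderConfig
//	providers:
//	  - name: example-provider
//	    apiVersion: credentialprovider.kubelet.k8s.io/v1
//	    matchImages:
//	      - "*.registry.example.com"
//	    defaultCacheDuration: "12h"
//
// The per-file length prefix fed into the hasher keeps the digest unambiguous:
// without it, directories whose files contain "ab","c" and "a","bc" would hash
// to the same value.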
// decode decodes data into the internal CredentialProviderConfig type.
func decode(data []byte) (*kubeletconfig.CredentialProviderConfig, error) {
obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
return nil, err
}
if gvk.Kind != "CredentialProviderConfig" {
return nil, fmt.Errorf("failed to decode %q (wrong Kind)", gvk.Kind)
}
if gvk.Group != kubeletconfig.GroupName {
return nil, fmt.Errorf("failed to decode CredentialProviderConfig, unexpected Group: %s", gvk.Group)
}
if internalConfig, ok := obj.(*kubeletconfig.CredentialProviderConfig); ok {
return internalConfig, nil
}
return nil, fmt.Errorf("unable to convert %T to *CredentialProviderConfig", obj)
}
// validateCredentialProviderConfig validates CredentialProviderConfig.
func validateCredentialProviderConfig(config *kubeletconfig.CredentialProviderConfig, saTokenForCredentialProviders bool) field.ErrorList {
allErrs := field.ErrorList{}
if len(config.Providers) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("providers"), "at least 1 item in plugins is required"))
}
fieldPath := field.NewPath("providers")
seenProviderNames := sets.NewString()
for _, provider := range config.Providers {
if strings.Contains(provider.Name, "/") {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("name"), provider.Name, "provider name cannot contain '/'"))
}
if strings.Contains(provider.Name, " ") {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("name"), provider.Name, "provider name cannot contain spaces"))
}
if provider.Name == "." {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("name"), provider.Name, "provider name cannot be '.'"))
}
if provider.Name == ".." {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("name"), provider.Name, "provider name cannot be '..'"))
}
if seenProviderNames.Has(provider.Name) {
allErrs = append(allErrs, field.Duplicate(fieldPath.Child("name"), provider.Name))
}
seenProviderNames.Insert(provider.Name)
if provider.APIVersion == "" {
allErrs = append(allErrs, field.Required(fieldPath.Child("apiVersion"), ""))
} else if _, ok := apiVersions[provider.APIVersion]; !ok {
validAPIVersions := sets.StringKeySet(apiVersions).List()
allErrs = append(allErrs, field.NotSupported(fieldPath.Child("apiVersion"), provider.APIVersion, validAPIVersions))
}
if len(provider.MatchImages) == 0 {
allErrs = append(allErrs, field.Required(fieldPath.Child("matchImages"), "at least 1 item in matchImages is required"))
}
for _, matchImage := range provider.MatchImages {
if _, err := credentialprovider.ParseSchemelessURL(matchImage); err != nil {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("matchImages"), matchImage, fmt.Sprintf("match image is invalid: %s", err.Error())))
}
}
if provider.DefaultCacheDuration == nil {
allErrs = append(allErrs, field.Required(fieldPath.Child("defaultCacheDuration"), ""))
}
if provider.DefaultCacheDuration != nil && provider.DefaultCacheDuration.Duration < 0 {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("defaultCacheDuration"), provider.DefaultCacheDuration, "must be greater than or equal to 0"))
}
if provider.TokenAttributes != nil {
fldPath := fieldPath.Child("tokenAttributes")
if !saTokenForCredentialProviders {
allErrs = append(allErrs, field.Forbidden(fldPath, "tokenAttributes is not supported when KubeletServiceAccountTokenForCredentialProviders feature gate is disabled"))
}
if len(provider.TokenAttributes.ServiceAccountTokenAudience) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("serviceAccountTokenAudience"), ""))
}
if provider.TokenAttributes.RequireServiceAccount == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("requireServiceAccount"), ""))
}
if provider.APIVersion != credentialproviderv1.SchemeGroupVersion.String() {
allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("tokenAttributes is only supported for %s API version", credentialproviderv1.SchemeGroupVersion.String())))
}
if provider.TokenAttributes.RequireServiceAccount != nil && !*provider.TokenAttributes.RequireServiceAccount && len(provider.TokenAttributes.RequiredServiceAccountAnnotationKeys) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("requiredServiceAccountAnnotationKeys"), "requireServiceAccount cannot be false when requiredServiceAccountAnnotationKeys is set"))
}
allErrs = append(allErrs, validateServiceAccountAnnotationKeys(fldPath.Child("requiredServiceAccountAnnotationKeys"), provider.TokenAttributes.RequiredServiceAccountAnnotationKeys)...)
allErrs = append(allErrs, validateServiceAccountAnnotationKeys(fldPath.Child("optionalServiceAccountAnnotationKeys"), provider.TokenAttributes.OptionalServiceAccountAnnotationKeys)...)
requiredServiceAccountAnnotationKeys := sets.New[string](provider.TokenAttributes.RequiredServiceAccountAnnotationKeys...)
optionalServiceAccountAnnotationKeys := sets.New[string](provider.TokenAttributes.OptionalServiceAccountAnnotationKeys...)
duplicateAnnotationKeys := requiredServiceAccountAnnotationKeys.Intersection(optionalServiceAccountAnnotationKeys)
if duplicateAnnotationKeys.Len() > 0 {
allErrs = append(allErrs, field.Invalid(fldPath, sets.List(duplicateAnnotationKeys), "annotation keys cannot be both required and optional"))
}
switch {
case len(provider.TokenAttributes.CacheType) == 0:
allErrs = append(allErrs, field.Required(fldPath.Child("cacheType"), fmt.Sprintf("cacheType is required to be set when tokenAttributes is specified. Supported values are: %s", strings.Join(sets.List(validCacheTypes), ", "))))
case validCacheTypes.Has(string(provider.TokenAttributes.CacheType)):
// ok
default:
allErrs = append(allErrs, field.NotSupported(fldPath.Child("cacheType"), provider.TokenAttributes.CacheType, sets.List(validCacheTypes)))
}
}
}
return allErrs
}
// validateServiceAccountAnnotationKeys validates the service account annotation keys.
func validateServiceAccountAnnotationKeys(fldPath *field.Path, keys []string) field.ErrorList {
allErrs := field.ErrorList{}
seenAnnotationKeys := sets.New[string]()
// Using the validation logic for keys from https://github.com/kubernetes/kubernetes/blob/69dbc74417304328a9fd3c161643dc4f0a057f41/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta.go#L46-L51
for _, k := range keys {
// The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking.
for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
}
if seenAnnotationKeys.Has(k) {
allErrs = append(allErrs, field.Duplicate(fldPath, k))
}
seenAnnotationKeys.Insert(k)
}
return allErrs
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"sync"
"k8s.io/apiserver/pkg/util/configmetrics"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const (
KubeletSubsystem = "kubelet"
)
var (
registerOnce sync.Once
kubeletCredentialProviderPluginErrors = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: "credential_provider_plugin_errors_total",
Help: "Number of errors from credential provider plugin",
StabilityLevel: metrics.ALPHA,
},
[]string{"plugin_name"},
)
kubeletCredentialProviderPluginDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: "credential_provider_plugin_duration",
Help: "Duration of execution in seconds for credential provider plugin",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"plugin_name"},
)
// kubeletCredentialProviderConfigInfo provides information about the credential provider configuration.
// The hash label is typically constant for the lifetime of the kubelet process, changing only when
// the configuration is updated and the kubelet is restarted.
kubeletCredentialProviderConfigInfo = metrics.NewDesc(
metrics.BuildFQName("", KubeletSubsystem, "credential_provider_config_info"),
"Information about the last applied credential provider configuration with hash as label",
[]string{"hash"},
nil,
metrics.ALPHA,
"",
)
)
var configHashProvider = configmetrics.NewAtomicHashProvider()
// registerMetrics registers credential provider metrics.
func registerMetrics() {
registerOnce.Do(func() {
legacyregistry.MustRegister(kubeletCredentialProviderPluginErrors)
legacyregistry.MustRegister(kubeletCredentialProviderPluginDuration)
legacyregistry.CustomMustRegister(configmetrics.NewConfigInfoCustomCollector(kubeletCredentialProviderConfigInfo, configHashProvider))
})
}
// recordCredentialProviderConfigHash records the hash of the credential provider configuration
func recordCredentialProviderConfigHash(configHash string) {
configHashProvider.SetHashes(configHash)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/sync/singleflight"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
"k8s.io/kubelet/pkg/apis/credentialprovider/install"
credentialproviderv1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1"
credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
credentialproviderv1beta1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/utils/clock"
)
const (
// globalCacheKey is the key used for caching credentials that are not specific to a registry or image.
// Angle brackets are used to avoid conflicts with actual image or registry names.
globalCacheKey = "<global>"
cachePurgeInterval = time.Minute * 15
)
var (
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme, serializer.EnableStrict)
apiVersions = map[string]schema.GroupVersion{
credentialproviderv1alpha1.SchemeGroupVersion.String(): credentialproviderv1alpha1.SchemeGroupVersion,
credentialproviderv1beta1.SchemeGroupVersion.String(): credentialproviderv1beta1.SchemeGroupVersion,
credentialproviderv1.SchemeGroupVersion.String(): credentialproviderv1.SchemeGroupVersion,
}
)
// GetServiceAccountFunc is a function type that returns a service account for the given namespace and name.
type GetServiceAccountFunc func(namespace, name string) (*v1.ServiceAccount, error)
// getServiceAccountTokenFunc is a function type that returns a service account token for the given namespace and name.
type getServiceAccountTokenFunc func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
type cacheKeyParams struct {
namespace string
serviceAccountName string
serviceAccountUID types.UID
saAnnotations map[string]string
podName string
podUID types.UID
saTokenHash string
cacheType kubeletconfig.ServiceAccountTokenCacheType
}
type cacheKeyBuilder struct {
builder *cryptobyte.Builder
}
func newCacheKeyBuilder() *cacheKeyBuilder {
return &cacheKeyBuilder{builder: cryptobyte.NewBuilder(nil)}
}
func (c *cacheKeyBuilder) addString(value string) *cacheKeyBuilder {
c.builder.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes([]byte(value))
})
return c
}
func (c *cacheKeyBuilder) addAnnotations(annotations map[string]string) *cacheKeyBuilder {
keys := sets.StringKeySet(annotations).List()
c.builder.AddUint32(uint32(len(keys)))
for _, k := range keys {
c.builder.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes([]byte(k))
})
c.builder.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes([]byte(annotations[k]))
})
}
return c
}
func (c *cacheKeyBuilder) build() (string, error) {
keyBytes, err := c.builder.Bytes()
if err != nil {
return "", err
}
return string(keyBytes), nil
}
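// Hedged example (editor's sketch, not part of the original source): the
// length-prefixed encoding makes key construction unambiguous, so the inputs
// ("ab", "c") and ("a", "bc") yield different keys, which naive string
// concatenation would not guarantee.
func exampleCacheKeyBuilder() {
	k1, _ := newCacheKeyBuilder().addString("ab").addString("c").build()
	k2, _ := newCacheKeyBuilder().addString("a").addString("bc").build()
	fmt.Printf("keys differ: %v\n", k1 != k2) // keys differ: true
}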
func init() {
install.Install(scheme)
kubeletconfig.AddToScheme(scheme)
kubeletconfigv1alpha1.AddToScheme(scheme)
kubeletconfigv1beta1.AddToScheme(scheme)
kubeletconfigv1.AddToScheme(scheme)
}
// RegisterCredentialProviderPlugins is called from kubelet to register external credential provider
// plugins according to the CredentialProviderConfig config file.
func RegisterCredentialProviderPlugins(pluginConfigPath, pluginBinDir string,
getServiceAccountToken getServiceAccountTokenFunc,
getServiceAccount GetServiceAccountFunc,
) error {
if _, err := os.Stat(pluginBinDir); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("plugin binary directory %s did not exist", pluginBinDir)
}
return fmt.Errorf("error inspecting binary directory %s: %w", pluginBinDir, err)
}
credentialProviderConfig, configHash, err := readCredentialProviderConfig(pluginConfigPath)
if err != nil {
return err
}
saTokenForCredentialProvidersFeatureEnabled := utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders)
if errs := validateCredentialProviderConfig(credentialProviderConfig, saTokenForCredentialProvidersFeatureEnabled); len(errs) > 0 {
return fmt.Errorf("failed to validate credential provider config: %v", errs.ToAggregate())
}
// Register metrics for credential providers
registerMetrics()
// Record the hash of the credential provider configuration.
// This hash is exposed via metrics as an external API to allow monitoring of configuration changes.
recordCredentialProviderConfigHash(configHash)
for _, provider := range credentialProviderConfig.Providers {
// LookPath() resolves the correct path even when the binary carries a platform-specific suffix, such as ".exe" on Windows.
// LookPath() also calls os.Stat().
pluginBin, err := exec.LookPath(filepath.Join(pluginBinDir, provider.Name))
if err != nil {
if errors.Is(err, os.ErrNotExist) || errors.Is(err, exec.ErrNotFound) {
return fmt.Errorf("plugin binary executable %s did not exist", pluginBin)
}
return fmt.Errorf("error inspecting binary executable %s: %w", pluginBin, err)
}
plugin, err := newPluginProvider(pluginBinDir, provider, getServiceAccountToken, getServiceAccount)
if err != nil {
return fmt.Errorf("error initializing plugin provider %s: %w", provider.Name, err)
}
registerCredentialProviderPlugin(provider.Name, plugin)
}
return nil
}
// newPluginProvider returns a new pluginProvider based on the credential provider config.
func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider,
getServiceAccountToken getServiceAccountTokenFunc,
getServiceAccount GetServiceAccountFunc,
) (*pluginProvider, error) {
mediaType := "application/json"
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("unsupported media type %q", mediaType)
}
gv, ok := apiVersions[provider.APIVersion]
if !ok {
return nil, fmt.Errorf("invalid apiVersion: %q", provider.APIVersion)
}
clock := clock.RealClock{}
return &pluginProvider{
name: provider.Name,
clock: clock,
matchImages: provider.MatchImages,
cache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),
defaultCacheDuration: provider.DefaultCacheDuration.Duration,
lastCachePurge: clock.Now(),
plugin: &execPlugin{
name: provider.Name,
apiVersion: provider.APIVersion,
encoder: codecs.EncoderForVersion(info.Serializer, gv),
pluginBinDir: pluginBinDir,
args: provider.Args,
envVars: provider.Env,
environ: os.Environ,
},
serviceAccountProvider: newServiceAccountProvider(provider, getServiceAccount, getServiceAccountToken),
}, nil
}
// pluginProvider is the plugin-based implementation of the DockerConfigProvider interface.
type pluginProvider struct {
name string
clock clock.Clock
sync.Mutex
group singleflight.Group
// matchImages defines the matching image URLs this plugin should operate against.
// The plugin provider will not return any credentials for images that do not match
// against this list of match URLs.
matchImages []string
// cache stores DockerConfig entries with an expiration time based on the cache duration
// returned from the credential provider plugin.
cache cache.Store
// defaultCacheDuration is the default duration credentials are cached in-memory if the auth plugin
// response did not provide a cache duration for credentials.
defaultCacheDuration time.Duration
// plugin is the exec implementation of the credential providing plugin.
plugin Plugin
// lastCachePurge is the last time cache is cleaned for expired entries.
lastCachePurge time.Time
// serviceAccountProvider holds the logic for handling service account tokens when needed.
serviceAccountProvider *serviceAccountProvider
}
type serviceAccountProvider struct {
audience string
requireServiceAccount bool
getServiceAccountFunc GetServiceAccountFunc
getServiceAccountTokenFunc getServiceAccountTokenFunc
requiredServiceAccountAnnotationKeys []string
optionalServiceAccountAnnotationKeys []string
cacheType kubeletconfig.ServiceAccountTokenCacheType
}
func newServiceAccountProvider(
provider kubeletconfig.CredentialProvider,
getServiceAccount GetServiceAccountFunc,
getServiceAccountToken getServiceAccountTokenFunc,
) *serviceAccountProvider {
featureGateEnabled := utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders)
serviceAccountTokenAudienceSet := provider.TokenAttributes != nil && len(provider.TokenAttributes.ServiceAccountTokenAudience) > 0
if !featureGateEnabled || !serviceAccountTokenAudienceSet {
return nil
}
return &serviceAccountProvider{
audience: provider.TokenAttributes.ServiceAccountTokenAudience,
requireServiceAccount: *provider.TokenAttributes.RequireServiceAccount,
getServiceAccountFunc: getServiceAccount,
getServiceAccountTokenFunc: getServiceAccountToken,
requiredServiceAccountAnnotationKeys: provider.TokenAttributes.RequiredServiceAccountAnnotationKeys,
optionalServiceAccountAnnotationKeys: provider.TokenAttributes.OptionalServiceAccountAnnotationKeys,
cacheType: provider.TokenAttributes.CacheType,
}
}
type requiredAnnotationNotFoundError string
func (e requiredAnnotationNotFoundError) Error() string {
return fmt.Sprintf("required annotation %s not found", string(e))
}
func isRequiredAnnotationNotFoundError(err error) bool {
var requiredAnnotationNotFoundErr requiredAnnotationNotFoundError
return errors.As(err, &requiredAnnotationNotFoundErr)
}
// getServiceAccountData returns the service account UID and required annotations for the service account.
// If the service account does not exist, an error is returned.
// saAnnotations is a map of annotation keys and values that the plugin requires to generate credentials,
// as defined in tokenAttributes in the credential provider config.
// requiredServiceAccountAnnotationKeys are the keys that are required to be present in the service account.
// If any of the keys defined in this list are not present in the service account, kubelet will not invoke the plugin
// and will return an error.
// optionalServiceAccountAnnotationKeys are the keys that may optionally be present in the service account.
// If present, they will be added to the saAnnotations map.
func (s *serviceAccountProvider) getServiceAccountData(namespace, name string) (types.UID, map[string]string, error) {
sa, err := s.getServiceAccountFunc(namespace, name)
if err != nil {
return "", nil, err
}
saAnnotations := make(map[string]string, len(s.requiredServiceAccountAnnotationKeys)+len(s.optionalServiceAccountAnnotationKeys))
for _, k := range s.requiredServiceAccountAnnotationKeys {
val, ok := sa.Annotations[k]
if !ok {
return "", nil, requiredAnnotationNotFoundError(k)
}
saAnnotations[k] = val
}
for _, k := range s.optionalServiceAccountAnnotationKeys {
if val, ok := sa.Annotations[k]; ok {
saAnnotations[k] = val
}
}
return sa.UID, saAnnotations, nil
}
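// Hedged example (editor's sketch, not part of the original source): with a
// stubbed lookup function, only the configured annotation keys are copied
// into the returned map; a missing required key would instead surface as a
// requiredAnnotationNotFoundError. All names below are illustrative.
func exampleGetServiceAccountData() {
	s := &serviceAccountProvider{
		getServiceAccountFunc: func(namespace, name string) (*v1.ServiceAccount, error) {
			return &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{
				UID:         types.UID("sa-uid"),
				Annotations: map[string]string{"example.com/required": "yes", "unrelated": "ignored"},
			}}, nil
		},
		requiredServiceAccountAnnotationKeys: []string{"example.com/required"},
		optionalServiceAccountAnnotationKeys: []string{"example.com/optional"},
	}
	uid, annotations, err := s.getServiceAccountData("ns", "default")
	fmt.Println(uid, annotations, err) // sa-uid map[example.com/required:yes] <nil>
}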
// getServiceAccountToken returns a service account token for the service account.
func (s *serviceAccountProvider) getServiceAccountToken(podNamespace, podName, serviceAccountName string, serviceAccountUID, podUID types.UID) (string, error) {
tr, err := s.getServiceAccountTokenFunc(podNamespace, serviceAccountName, &authenticationv1.TokenRequest{
ObjectMeta: metav1.ObjectMeta{
UID: serviceAccountUID,
},
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{s.audience},
// expirationSeconds is not set explicitly here; it defaults to the same value as "ExpirationSeconds" in the TokenRequestSpec.
BoundObjectRef: &authenticationv1.BoundObjectReference{
APIVersion: "v1",
Kind: "Pod",
Name: podName,
UID: podUID,
},
},
})
if err != nil {
return "", err
}
return tr.Status.Token, nil
}
// cacheEntry is the cache object that will be stored in cache.Store.
type cacheEntry struct {
key string
credentials credentialprovider.DockerConfig
expiresAt time.Time
}
// cacheKeyFunc returns cacheEntry.key as the cache key for the plugin provider.
func cacheKeyFunc(obj interface{}) (string, error) {
key := obj.(*cacheEntry).key
return key, nil
}
// cacheExpirationPolicy implements cache.ExpirationPolicy, determining expiration based on the expiresAt timestamp.
type cacheExpirationPolicy struct {
clock clock.Clock
}
// IsExpired returns true if the current time is after cacheEntry.expiresAt, which is determined by the
// cache duration returned from the credential provider plugin response.
func (c *cacheExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
return c.clock.Now().After(entry.Obj.(*cacheEntry).expiresAt)
}
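// Hedged example (editor's sketch, not part of the original source): an entry
// whose expiresAt lies in the past is reported as expired by the policy.
func exampleCacheExpirationPolicy() {
	realClock := clock.RealClock{}
	policy := &cacheExpirationPolicy{clock: realClock}
	entry := &cache.TimestampedEntry{Obj: &cacheEntry{expiresAt: realClock.Now().Add(-time.Minute)}}
	fmt.Println(policy.IsExpired(entry)) // true
}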
// perPodPluginProvider holds the shared pluginProvider and the per-request information
// like podName, podNamespace, podUID and serviceAccountName.
// This is used to provide the per-request information to the pluginProvider.provide method, so
// that the plugin can use this information to get the pod's service account and generate bound service account tokens
// for plugins running in service account token mode.
type perPodPluginProvider struct {
provider *pluginProvider
podNamespace string
podName string
podUID types.UID
serviceAccountName string
}
// provideWithCoordinates returns the DockerConfig and, if available, the ServiceAccountCoordinates
// used for credential resolution. If ServiceAccountCoordinates is nil, it means no service account
// context was used (e.g., the plugin is not operating in service account token mode or no service
// account was provided for the request).
func (p *perPodPluginProvider) provideWithCoordinates(image string) (credentialprovider.DockerConfig, *credentialprovider.ServiceAccountCoordinates) {
credentials, coordinates, err := p.provider.provide(image, p.podNamespace, p.podName, p.podUID, p.serviceAccountName)
if err == nil {
return credentials, coordinates
}
// If there was an error providing credentials, we log the error but do not return it.
if p.provider.serviceAccountProvider != nil {
klog.ErrorS(err, "Failed to provide credentials for image", "provider", p.provider.name, "image", image, "pod", klog.KRef(p.podNamespace, p.podName), "podUID", p.podUID, "serviceAccount", klog.KRef(p.podNamespace, p.serviceAccountName))
} else {
klog.ErrorS(err, "Failed to provide credentials for image", "provider", p.provider.name, "image", image)
}
return credentialprovider.DockerConfig{}, nil
}
// provide returns a credentialprovider.DockerConfig based on the credentials returned
// from cache or the exec plugin. The returned ServiceAccountCoordinates may be nil.
// If ServiceAccountCoordinates is nil, it means no service account context was used
// (e.g., the plugin is not operating in service account token mode or no service account
// was provided for the request).
func (p *pluginProvider) provide(image, podNamespace, podName string, podUID types.UID, serviceAccountName string) (credentialprovider.DockerConfig, *credentialprovider.ServiceAccountCoordinates, error) {
if !p.isImageAllowed(image) {
return credentialprovider.DockerConfig{}, nil, nil
}
var serviceAccountUID types.UID
var serviceAccountToken string
var saAnnotations map[string]string
var err error
var serviceAccountCacheKey string
var serviceAccountTokenHash string
var serviceAccountCoordinates *credentialprovider.ServiceAccountCoordinates
if p.serviceAccountProvider != nil {
if len(serviceAccountName) == 0 && p.serviceAccountProvider.requireServiceAccount {
klog.V(5).InfoS("Service account name is empty", "provider", p.name, "image", image, "pod", klog.KRef(podNamespace, podName), "podUID", podUID)
return credentialprovider.DockerConfig{}, nil, nil
}
// If the service account name is empty and the plugin has indicated that invoking the plugin
// without a service account is allowed, we will continue without generating a service account token.
// This is useful for plugins that are running in service account token mode and are also used
// to pull images for pods without service accounts (e.g., static pods).
if len(serviceAccountName) > 0 {
if serviceAccountUID, saAnnotations, err = p.serviceAccountProvider.getServiceAccountData(podNamespace, serviceAccountName); err != nil {
if isRequiredAnnotationNotFoundError(err) {
// The required annotations can act as a mechanism for individual workloads to opt in to using service account tokens
// for image pull. If any required annotation is missing, we will not invoke the plugin and we log the error
// at a higher verbosity level as it could be noisy.
klog.V(5).ErrorS(err, "Failed to get service account data", "provider", p.name, "image", image, "pod", klog.KRef(podNamespace, podName), "podUID", podUID, "serviceAccount", klog.KRef(podNamespace, serviceAccountName))
return credentialprovider.DockerConfig{}, nil, nil
}
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("failed to get service account: %w", err)
}
if serviceAccountToken, err = p.serviceAccountProvider.getServiceAccountToken(podNamespace, podName, serviceAccountName, serviceAccountUID, podUID); err != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("failed to get service account token: %w", err)
}
serviceAccountTokenHash = getHashIfNotEmpty(serviceAccountToken)
c := cacheKeyParams{
namespace: podNamespace,
serviceAccountName: serviceAccountName,
serviceAccountUID: serviceAccountUID,
saAnnotations: saAnnotations,
podName: podName,
podUID: podUID,
saTokenHash: serviceAccountTokenHash,
cacheType: p.serviceAccountProvider.cacheType,
}
serviceAccountCacheKey, err = generateServiceAccountCacheKey(c)
if err != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("error generating service account cache key: %w", err)
}
serviceAccountCoordinates = &credentialprovider.ServiceAccountCoordinates{
UID: string(serviceAccountUID),
Namespace: podNamespace,
Name: serviceAccountName,
}
}
}
// Check if the credentials are cached and return them if found.
cachedConfig, found, errCache := p.getCachedCredentials(image, serviceAccountCacheKey)
if errCache != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("failed to get cached docker config: %w", errCache)
}
if found {
return cachedConfig, serviceAccountCoordinates, nil
}
// ExecPlugin is wrapped in singleflight so the plugin is exec'd only once for concurrent requests for the same image.
// The caveat is that the cacheKeyType is not known until the plugin responds, so if the cacheKeyType is registry/global
// and credentials are saved in the cache on a per-registry/global basis, exec will still be called once per key for
// concurrent requests such as:
// foo.bar.registry
// foo.bar.registry/image1
// foo.bar.registry/image2
// When the plugin is operating in service account token mode, the singleflight key is the image plus the serviceAccountCacheKey,
// which is generated from the service account namespace, name, UID and the annotations passed to the plugin.
singleFlightKey := image
if p.serviceAccountProvider != nil && len(serviceAccountName) > 0 {
// When the plugin is operating in the service account token mode, the singleflight key is the
// image + sa annotations + sa token.
// This does mean the singleflight key is different for each image pull request (even if the image is the same)
// and the workload is using the same service account.
if singleFlightKey, err = generateSingleFlightKey(image, serviceAccountTokenHash, saAnnotations); err != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("error generating singleflight key: %w", err)
}
}
res, err, _ := p.group.Do(singleFlightKey, func() (interface{}, error) {
return p.plugin.ExecPlugin(context.Background(), image, serviceAccountToken, saAnnotations)
})
if err != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("failed to get credential from external registry credential provider: %w", err)
}
response, ok := res.(*credentialproviderapi.CredentialProviderResponse)
if !ok {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("invalid response type returned by external credential provider: %T", res)
}
if len(serviceAccountToken) > 0 && p.serviceAccountProvider.cacheType != kubeletconfig.TokenServiceAccountTokenCacheType {
// Validate that the response credentials do not echo the service account token back verbatim
// when the cache type is not "Token". Only the Token cache type is valid when the service
// account token is returned as the registry credentials.
for _, authConfig := range response.Auth {
if authConfig.Password == serviceAccountToken {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("credential provider plugin returned the service account token as the password which is not allowed when service account cache type is not set to 'Token'")
}
}
}
var cacheKey string
switch cacheKeyType := response.CacheKeyType; cacheKeyType {
case credentialproviderapi.ImagePluginCacheKeyType:
cacheKey = image
case credentialproviderapi.RegistryPluginCacheKeyType:
registry := parseRegistry(image)
cacheKey = registry
case credentialproviderapi.GlobalPluginCacheKeyType:
cacheKey = globalCacheKey
default:
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("credential provider plugin did not return a valid cacheKeyType: %s", cacheKeyType)
}
dockerConfig := make(credentialprovider.DockerConfig, len(response.Auth))
for matchImage, authConfig := range response.Auth {
dockerConfig[matchImage] = credentialprovider.DockerConfigEntry{
Username: authConfig.Username,
Password: authConfig.Password,
}
}
// A cache duration explicitly set to 0 means don't cache this response at all.
if response.CacheDuration != nil && response.CacheDuration.Duration == 0 {
return dockerConfig, serviceAccountCoordinates, nil
}
var expiresAt time.Time
// nil cache duration means use the default cache duration
if response.CacheDuration == nil {
if p.defaultCacheDuration == 0 {
return dockerConfig, serviceAccountCoordinates, nil
}
expiresAt = p.clock.Now().Add(p.defaultCacheDuration)
} else {
expiresAt = p.clock.Now().Add(response.CacheDuration.Duration)
}
cacheKey, err = generateCacheKey(cacheKey, serviceAccountCacheKey)
if err != nil {
return credentialprovider.DockerConfig{}, nil, fmt.Errorf("error generating cache key: %w", err)
}
cachedEntry := &cacheEntry{
key: cacheKey,
credentials: dockerConfig,
expiresAt: expiresAt,
}
if err := p.cache.Add(cachedEntry); err != nil {
// If we fail to add the credentials to the cache, we still return the credentials
// to the caller, but we log an error.
return dockerConfig, serviceAccountCoordinates, fmt.Errorf("failed to add credentials to cache: %w", err)
}
return dockerConfig, serviceAccountCoordinates, nil
}
// isImageAllowed returns true if the image matches against the list of allowed matches by the plugin.
func (p *pluginProvider) isImageAllowed(image string) bool {
for _, matchImage := range p.matchImages {
if matched, _ := credentialprovider.URLsMatchStr(matchImage, image); matched {
return true
}
}
return false
}
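// Hedged example (editor's sketch, not part of the original source):
// matchImages entries are evaluated with credentialprovider.URLsMatchStr,
// which supports wildcard subdomains, so "*.registry.example.com" would match
// "eu.registry.example.com/app" but not "other.example.com/app".
func exampleIsImageAllowed() {
	p := &pluginProvider{matchImages: []string{"*.registry.example.com"}}
	fmt.Println(p.isImageAllowed("eu.registry.example.com/app")) // true
	fmt.Println(p.isImageAllowed("other.example.com/app"))       // false
}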
// getCachedCredentials returns a credentialprovider.DockerConfig if cached from the plugin.
func (p *pluginProvider) getCachedCredentials(image, serviceAccountCacheKey string) (credentialprovider.DockerConfig, bool, error) {
p.Lock()
if p.clock.Now().After(p.lastCachePurge.Add(cachePurgeInterval)) {
// NewExpirationCache purges expired entries when List() is called.
// An expired entry in the cache is removed only when Get or List is called on it.
// List() is called on an interval to remove expired entries on which Get is never called.
_ = p.cache.List()
p.lastCachePurge = p.clock.Now()
}
p.Unlock()
baseCacheKeyCandidates := []string{
image,
parseRegistry(image),
globalCacheKey,
}
for _, baseCacheKey := range baseCacheKeyCandidates {
if config, found, err := p.lookupCredentialsInCache(baseCacheKey, serviceAccountCacheKey); err != nil {
return nil, false, err
} else if found {
return config, true, nil
}
}
return nil, false, nil
}
// lookupCredentialsInCache performs a single cache lookup for the given base key and service account cache key
func (p *pluginProvider) lookupCredentialsInCache(baseKey, serviceAccountCacheKey string) (credentialprovider.DockerConfig, bool, error) {
cacheKey, err := generateCacheKey(baseKey, serviceAccountCacheKey)
if err != nil {
return nil, false, fmt.Errorf("error generating cache key: %w", err)
}
obj, found, err := p.cache.GetByKey(cacheKey)
if err != nil {
return nil, false, err
}
if found {
return obj.(*cacheEntry).credentials, true, nil
}
return nil, false, nil
}
// Plugin is the interface that exposes ExecPlugin. It exists mainly for testability,
// so tests don't have to actually exec any processes.
type Plugin interface {
ExecPlugin(ctx context.Context, image, serviceAccountToken string, serviceAccountAnnotations map[string]string) (*credentialproviderapi.CredentialProviderResponse, error)
}
// execPlugin is the implementation of the Plugin interface that execs a credential provider plugin based
// on its name provided in CredentialProviderConfig. It is assumed that the executable is available in the
// plugin directory provided by the kubelet.
type execPlugin struct {
name string
apiVersion string
encoder runtime.Encoder
args []string
envVars []kubeletconfig.ExecEnvVar
pluginBinDir string
environ func() []string
}
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
//
// $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
//
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout.
func (e *execPlugin) ExecPlugin(ctx context.Context, image, serviceAccountToken string, serviceAccountAnnotations map[string]string) (*credentialproviderapi.CredentialProviderResponse, error) {
klog.V(5).InfoS("Getting image credentials from external exec plugin", "pluginName", e.name, "image", image)
authRequest := &credentialproviderapi.CredentialProviderRequest{Image: image, ServiceAccountToken: serviceAccountToken, ServiceAccountAnnotations: serviceAccountAnnotations}
data, err := e.encodeRequest(authRequest)
if err != nil {
return nil, fmt.Errorf("failed to encode auth request: %w", err)
}
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
stdin := bytes.NewBuffer(data)
// Use a catch-all timeout of 1 minute for all exec-based plugins. This should leave enough
// headroom in case a plugin needs to retry a failed request while ensuring an exec plugin
// does not run forever. In the future we may want this timeout to be tweakable from the plugin
// config file.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, filepath.Join(e.pluginBinDir, e.name), e.args...)
cmd.Stdout, cmd.Stderr, cmd.Stdin = stdout, stderr, stdin
var configEnvVars []string
for _, v := range e.envVars {
configEnvVars = append(configEnvVars, fmt.Sprintf("%s=%s", v.Name, v.Value))
}
// Append the env vars configured in the credential provider config to the current system
// environment variables. Failing to do so may result in unsuccessful execution
// of the provider binary; see https://github.com/kubernetes/kubernetes/issues/102750.
// This behaviour is also in line with the Credential Provider Config spec.
cmd.Env = mergeEnvVars(e.environ(), configEnvVars)
if err = e.runPlugin(ctx, cmd, image); err != nil {
return nil, fmt.Errorf("%w: %s", err, stderr.String())
}
data = stdout.Bytes()
// check that the response apiVersion matches what is expected
gvk, err := json.DefaultMetaFactory.Interpret(data)
if err != nil {
return nil, fmt.Errorf("error reading GVK from response: %w", err)
}
if gvk.GroupVersion().String() != e.apiVersion {
return nil, fmt.Errorf("apiVersion from credential plugin response did not match expected apiVersion:%s, actual apiVersion:%s", e.apiVersion, gvk.GroupVersion().String())
}
response, err := e.decodeResponse(data)
if err != nil {
// err is explicitly not wrapped since it may contain credentials in the response.
return nil, errors.New("error decoding credential provider plugin response from stdout")
}
return response, nil
}
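// Hedged example (editor's sketch, not part of the original source): the
// stdin/stdout exchange described above, shown for the
// credentialprovider.kubelet.k8s.io/v1 apiVersion. The registry name and
// credentials are illustrative only.
func exampleExecPluginExchange() {
	request := `{"kind":"CredentialProviderRequest","apiVersion":"credentialprovider.kubelet.k8s.io/v1","image":"registry.example.com/app:v1"}`
	response := `{"kind":"CredentialProviderResponse","apiVersion":"credentialprovider.kubelet.k8s.io/v1","cacheKeyType":"Registry","auth":{"registry.example.com":{"username":"user","password":"pass"}}}`
	fmt.Printf("kubelet -> plugin (stdin): %s\nplugin -> kubelet (stdout): %s\n", request, response)
}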
func (e *execPlugin) runPlugin(ctx context.Context, cmd *exec.Cmd, image string) error {
startTime := time.Now()
defer func() {
kubeletCredentialProviderPluginDuration.WithLabelValues(e.name).Observe(time.Since(startTime).Seconds())
}()
err := cmd.Run()
if ctx.Err() != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, ctx.Err())
}
if err != nil {
kubeletCredentialProviderPluginErrors.WithLabelValues(e.name).Inc()
return fmt.Errorf("error execing credential provider plugin %s for image %s: %w", e.name, image, err)
}
return nil
}
// encodeRequest encodes the internal CredentialProviderRequest type into JSON for the plugin's configured apiVersion
func (e *execPlugin) encodeRequest(request *credentialproviderapi.CredentialProviderRequest) ([]byte, error) {
data, err := runtime.Encode(e.encoder, request)
if err != nil {
return nil, fmt.Errorf("error encoding request: %w", err)
}
return data, nil
}
// decodeResponse decodes data into the internal CredentialProviderResponse type
func (e *execPlugin) decodeResponse(data []byte) (*credentialproviderapi.CredentialProviderResponse, error) {
obj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil)
if err != nil {
return nil, err
}
if gvk.Kind != "CredentialProviderResponse" {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Kind: %q", gvk.Kind)
}
if gvk.Group != credentialproviderapi.GroupName {
return nil, fmt.Errorf("failed to decode CredentialProviderResponse, unexpected Group: %s", gvk.Group)
}
if internalResponse, ok := obj.(*credentialproviderapi.CredentialProviderResponse); ok {
return internalResponse, nil
}
return nil, fmt.Errorf("unable to convert %T to *CredentialProviderResponse", obj)
}
// parseRegistry extracts the registry hostname of an image (including port if specified).
func parseRegistry(image string) string {
imageParts := strings.Split(image, "/")
return imageParts[0]
}
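// Hedged example (editor's sketch, not part of the original source):
// parseRegistry keeps everything before the first "/", including any port.
func exampleParseRegistry() {
	fmt.Println(parseRegistry("registry.example.com:5000/team/app:v1")) // registry.example.com:5000
	fmt.Println(parseRegistry("busybox"))                               // busybox
}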
// mergeEnvVars overlays system-defined env vars with credential provider env vars.
// It gives priority to the credential provider vars, allowing the user to override system
// env vars.
func mergeEnvVars(sysEnvVars, credProviderVars []string) []string {
mergedEnvVars := sysEnvVars
mergedEnvVars = append(mergedEnvVars, credProviderVars...)
return mergedEnvVars
}
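// Hedged example (editor's sketch, not part of the original source): because
// the provider vars are appended after the system vars and exec.Cmd.Env uses
// the last value for duplicate keys, provider-configured vars take effect.
func exampleMergeEnvVars() {
	merged := mergeEnvVars([]string{"HOME=/root", "REGION=us-east-1"}, []string{"REGION=eu-west-1"})
	fmt.Println(merged) // [HOME=/root REGION=us-east-1 REGION=eu-west-1]
}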
func generateServiceAccountCacheKey(params cacheKeyParams) (string, error) {
builder := newCacheKeyBuilder().
addString(params.namespace).
addString(params.serviceAccountName).
addString(string(params.serviceAccountUID)).
addAnnotations(params.saAnnotations)
if params.cacheType == kubeletconfig.TokenServiceAccountTokenCacheType {
builder = builder.addString("token").addString(params.saTokenHash)
}
return builder.build()
}
func generateCacheKey(baseKey, serviceAccountCacheKey string) (string, error) {
builder := newCacheKeyBuilder().
addString(baseKey).
addString(serviceAccountCacheKey)
return builder.build()
}
func generateSingleFlightKey(image, saTokenHash string, saAnnotations map[string]string) (string, error) {
builder := newCacheKeyBuilder().
addString(image).
addString(saTokenHash).
addAnnotations(saAnnotations)
return builder.build()
}
// getHashIfNotEmpty returns the sha256 hash of the data if it is not empty.
func getHashIfNotEmpty(data string) string {
if len(data) > 0 {
return fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(data)))
}
return ""
}
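// Hedged example (editor's sketch, not part of the original source): hashes
// carry a "sha256:" prefix and an empty input maps to the empty string, so an
// absent token never contributes a component to cache or singleflight keys.
func exampleGetHashIfNotEmpty() {
	fmt.Println(strings.HasPrefix(getHashIfNotEmpty("token"), "sha256:")) // true
	fmt.Println(getHashIfNotEmpty("") == "")                              // true
}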
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/features"
)
type dockerConfigProviderWithCoordinates interface {
// provideWithCoordinates returns the DockerConfig and service account coordinates for the given image.
provideWithCoordinates(image string) (credentialprovider.DockerConfig, *credentialprovider.ServiceAccountCoordinates)
}
type provider struct {
name string
impl *pluginProvider
}
var providersMutex sync.RWMutex
var providers = make([]provider, 0)
var seenProviderNames = sets.NewString()
func registerCredentialProviderPlugin(name string, p *pluginProvider) {
providersMutex.Lock()
defer providersMutex.Unlock()
if seenProviderNames.Has(name) {
panic(fmt.Sprintf("Credential provider %q was registered twice", name))
}
seenProviderNames.Insert(name)
providers = append(providers, provider{name, p})
klog.V(4).InfoS("Registered credential provider", "provider", name)
}
type externalCredentialProviderKeyring struct {
providers []dockerConfigProviderWithCoordinates
}
func NewExternalCredentialProviderDockerKeyring(podNamespace, podName, podUID, serviceAccountName string) credentialprovider.DockerKeyring {
providersMutex.RLock()
defer providersMutex.RUnlock()
keyring := &externalCredentialProviderKeyring{
providers: make([]dockerConfigProviderWithCoordinates, 0, len(providers)),
}
for _, p := range providers {
pp := &perPodPluginProvider{
provider: p.impl,
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) {
klog.V(4).InfoS("Generating per pod credential provider", "provider", p.name, "podName", podName, "podNamespace", podNamespace, "podUID", podUID, "serviceAccountName", serviceAccountName)
pp.podNamespace = podNamespace
pp.podName = podName
pp.podUID = types.UID(podUID)
pp.serviceAccountName = serviceAccountName
} else {
klog.V(4).InfoS("Generating credential provider", "provider", p.name)
}
keyring.providers = append(keyring.providers, pp)
}
return keyring
}
func (k *externalCredentialProviderKeyring) Lookup(image string) ([]credentialprovider.TrackedAuthConfig, bool) {
keyring := &credentialprovider.BasicDockerKeyring{}
for _, p := range k.providers {
dockerConfig, saCoords := p.provideWithCoordinates(image)
if saCoords != nil {
keyring.Add(&credentialprovider.CredentialSource{ServiceAccount: saCoords}, dockerConfig)
} else {
keyring.Add(nil, dockerConfig)
}
}
return keyring.Lookup(image)
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentialprovider
import (
"os"
"reflect"
"sync"
"time"
"k8s.io/klog/v2"
)
// DockerConfigProvider is the interface that registered extensions implement
// to materialize 'dockercfg' credentials.
type DockerConfigProvider interface {
// Enabled returns true if the config provider is enabled.
// Implementations can be blocking - e.g. metadata server unavailable.
Enabled() bool
// Provide returns docker configuration.
// Implementations can be blocking - e.g. metadata server unavailable.
// The image is passed in as context in the event that the
// implementation depends on information in the image name to return
// credentials; implementations are safe to ignore the image.
Provide(image string) DockerConfig
}
// A DockerConfigProvider that simply reads the .dockercfg file
type defaultDockerConfigProvider struct{}
// CachingDockerConfigProvider implements DockerConfigProvider by composing
// with another DockerConfigProvider and caching the DockerConfig it provides
// for a pre-specified lifetime.
type CachingDockerConfigProvider struct {
Provider DockerConfigProvider
Lifetime time.Duration
// ShouldCache is an optional function that returns true if the specific config should be cached.
// If nil, all configs are treated as cacheable.
ShouldCache func(DockerConfig) bool
// cache fields
cacheDockerConfig DockerConfig
expiration time.Time
mu sync.Mutex
}
// Enabled implements DockerConfigProvider
func (d *defaultDockerConfigProvider) Enabled() bool {
return true
}
// Provide implements DockerConfigProvider
func (d *defaultDockerConfigProvider) Provide(image string) DockerConfig {
// Read the standard Docker credentials from .dockercfg
if cfg, err := ReadDockerConfigFile(); err == nil {
return cfg
} else if !os.IsNotExist(err) {
klog.V(2).Infof("Docker config file not found: %v", err)
}
return DockerConfig{}
}
// Enabled implements DockerConfigProvider
func (d *CachingDockerConfigProvider) Enabled() bool {
return d.Provider.Enabled()
}
// Provide implements DockerConfigProvider
func (d *CachingDockerConfigProvider) Provide(image string) DockerConfig {
d.mu.Lock()
defer d.mu.Unlock()
// If the cache hasn't expired, return our cache
if time.Now().Before(d.expiration) {
return d.cacheDockerConfig
}
klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String())
config := d.Provider.Provide(image)
if d.ShouldCache == nil || d.ShouldCache(config) {
d.cacheDockerConfig = config
d.expiration = time.Now().Add(d.Lifetime)
}
return config
}
// NewDefaultDockerKeyring creates a DockerKeyring to use for resolving credentials,
// which returns the default credentials from the .dockercfg file.
func NewDefaultDockerKeyring() DockerKeyring {
return &providersDockerKeyring{
Providers: []DockerConfigProvider{
&CachingDockerConfigProvider{
Provider: &defaultDockerConfigProvider{},
Lifetime: 5 * time.Minute,
},
},
}
}
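// Hedged example (editor's sketch, not part of the original source): using
// ShouldCache so that an empty config is not cached, forcing a fresh lookup
// on the next call instead of caching a miss for the full Lifetime.
func exampleCachingDockerConfigProvider() DockerConfigProvider {
	return &CachingDockerConfigProvider{
		Provider:    &defaultDockerConfigProvider{},
		Lifetime:    5 * time.Minute,
		ShouldCache: func(cfg DockerConfig) bool { return len(cfg) > 0 },
	}
}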
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secrets
import (
"encoding/json"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
)
// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do,
// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring.
// If they do not, then the default keyring is returned.
func MakeDockerKeyring(passedSecrets []v1.Secret, defaultKeyring credentialprovider.DockerKeyring) (credentialprovider.DockerKeyring, error) {
providerFromSecrets, err := secretsToTrackedDockerConfigs(passedSecrets)
if err != nil {
return nil, err
}
if providerFromSecrets == nil {
return defaultKeyring, nil
}
return credentialprovider.UnionDockerKeyring{providerFromSecrets, defaultKeyring}, nil
}
func secretsToTrackedDockerConfigs(secrets []v1.Secret) (credentialprovider.DockerKeyring, error) {
provider := &credentialprovider.BasicDockerKeyring{}
validSecretsFound := 0
for _, passedSecret := range secrets {
if dockerConfigJSONBytes, dockerConfigJSONExists := passedSecret.Data[v1.DockerConfigJsonKey]; (passedSecret.Type == v1.SecretTypeDockerConfigJson) && dockerConfigJSONExists && (len(dockerConfigJSONBytes) > 0) {
dockerConfigJSON := credentialprovider.DockerConfigJSON{}
if err := json.Unmarshal(dockerConfigJSONBytes, &dockerConfigJSON); err != nil {
return nil, err
}
coords := &credentialprovider.SecretCoordinates{
UID: string(passedSecret.UID),
Namespace: passedSecret.Namespace,
Name: passedSecret.Name}
provider.Add(&credentialprovider.CredentialSource{Secret: coords}, dockerConfigJSON.Auths)
validSecretsFound++
} else if dockercfgBytes, dockercfgExists := passedSecret.Data[v1.DockerConfigKey]; (passedSecret.Type == v1.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) {
dockercfg := credentialprovider.DockerConfig{}
if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil {
return nil, err
}
coords := &credentialprovider.SecretCoordinates{
UID: string(passedSecret.UID),
Namespace: passedSecret.Namespace,
Name: passedSecret.Name}
provider.Add(&credentialprovider.CredentialSource{Secret: coords}, dockercfg)
validSecretsFound++
}
}
if validSecretsFound == 0 {
return nil, nil
}
return provider, nil
}
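// Hedged example (editor's sketch, not part of the original source): a
// kubernetes.io/dockerconfigjson secret whose payload has the
// {"auths": {...}} shape parsed above. Registry and credentials are
// illustrative only.
func exampleDockerConfigJSONSecret() v1.Secret {
	return v1.Secret{
		Type: v1.SecretTypeDockerConfigJson,
		Data: map[string][]byte{
			v1.DockerConfigJsonKey: []byte(`{"auths":{"registry.example.com":{"username":"user","password":"pass"}}}`),
		},
	}
}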
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"fmt"
clientfeatures "k8s.io/client-go/features"
"k8s.io/component-base/featuregate"
)
// clientAdapter adapts a k8s.io/component-base/featuregate.MutableFeatureGate to client-go's
// feature Gate and Registry interfaces. The component-base types Feature, FeatureSpec, and
// prerelease, and the component-base prerelease constants, are duplicated by parallel types and
// constants in client-go. The parallel types exist to allow the feature gate mechanism to be used
// for client-go features without introducing a circular dependency between component-base and
// client-go.
type clientAdapter struct {
mfg featuregate.MutableFeatureGate
}
var _ clientfeatures.Gates = &clientAdapter{}
func (a *clientAdapter) Enabled(name clientfeatures.Feature) bool {
return a.mfg.Enabled(featuregate.Feature(name))
}
var _ clientfeatures.Registry = &clientAdapter{}
func (a *clientAdapter) Add(in map[clientfeatures.Feature]clientfeatures.FeatureSpec) error {
out := map[featuregate.Feature]featuregate.FeatureSpec{}
for name, spec := range in {
converted := featuregate.FeatureSpec{
Default: spec.Default,
LockToDefault: spec.LockToDefault,
}
switch spec.PreRelease {
case clientfeatures.Alpha:
converted.PreRelease = featuregate.Alpha
case clientfeatures.Beta:
converted.PreRelease = featuregate.Beta
case clientfeatures.GA:
converted.PreRelease = featuregate.GA
case clientfeatures.Deprecated:
converted.PreRelease = featuregate.Deprecated
default:
// The default case implies programmer error. The same set of prerelease
// constants must exist in both component-base and client-go, and each one
// must have a case here.
panic(fmt.Sprintf("unrecognized prerelease %q of feature %q", spec.PreRelease, name))
}
out[featuregate.Feature(name)] = converted
}
return a.mfg.Add(out) //nolint:forbidigo // No need to support versioned feature gates in client adapter
}
// Set implements the unexported interface that client-go feature gate testing expects for
// k8s.io/client-go/features/testing.SetFeatureDuringTest. This is necessary for integration tests
// to set test overrides for client-go feature gates.
func (a *clientAdapter) Set(name clientfeatures.Feature, enabled bool) error {
return a.mfg.SetFromMap(map[string]bool{string(name): enabled})
}
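// Hedged example (editor's sketch, not part of the original source): wiring a
// component-base feature gate through the adapter and reading a client-go
// feature from it. The feature name is illustrative only.
func exampleClientAdapter() {
	adapter := &clientAdapter{mfg: featuregate.NewFeatureGate()}
	_ = adapter.Add(map[clientfeatures.Feature]clientfeatures.FeatureSpec{
		"ExampleClientFeature": {Default: true, PreRelease: clientfeatures.Beta},
	})
	fmt.Println(adapter.Enabled("ExampleClientFeature")) // true
}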
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/version"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientfeatures "k8s.io/client-go/features"
"k8s.io/component-base/featuregate"
zpagesfeatures "k8s.io/component-base/zpages/features"
kcmfeatures "k8s.io/controller-manager/pkg/features"
)
// Every feature gate should have an entry here following this template:
//
// // owner: @username
// // kep: https://kep.k8s.io/NNN
// MyFeature featuregate.Feature = "MyFeature"
//
// Feature gates should be listed in alphabetical, case-sensitive
// (upper before any lower case character) order. This reduces the risk
// of code conflicts because changes are more likely to be scattered
// across the file.
const (
// owner: @aojea
//
// Allow kubelet to request a certificate without any Node IP available, only
// with DNS names.
AllowDNSOnlyNodeCSR featuregate.Feature = "AllowDNSOnlyNodeCSR"
// owner: @micahhausler
//
// Setting AllowInsecureKubeletCertificateSigningRequests to true disables node admission validation of CSRs
// for kubelet signers where CN=system:node:$nodeName.
AllowInsecureKubeletCertificateSigningRequests featuregate.Feature = "AllowInsecureKubeletCertificateSigningRequests"
// owner: @HirazawaUi
//
// Allow spec.terminationGracePeriodSeconds to be overridden by MaxPodGracePeriodSeconds in soft evictions.
AllowOverwriteTerminationGracePeriodSeconds featuregate.Feature = "AllowOverwriteTerminationGracePeriodSeconds"
// owner: @thockin
//
// Enables Service.status.loadBalancer.ingress to be set on
// services of types other than LoadBalancer.
AllowServiceLBStatusOnNonLB featuregate.Feature = "AllowServiceLBStatusOnNonLB"
// owner: @bswartz
//
// Enables usage of any object for volume data source in PVCs
AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"
// owner: @liggitt
// kep: https://kep.k8s.io/4601
//
// Make the Node authorizer use fine-grained selector authorization.
// Requires AuthorizeWithSelectors to be enabled.
AuthorizeNodeWithSelectors featuregate.Feature = "AuthorizeNodeWithSelectors"
// owner: @szuecs
//
// Enable nodes to change CPUCFSQuotaPeriod
CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"
// owner: @fromanirh
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, alpha-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is alpha.
// - will never graduate to beta or stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyAlphaOptions featuregate.Feature = "CPUManagerPolicyAlphaOptions"
// owner: @fromanirh
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, beta-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyBetaOptions featuregate.Feature = "CPUManagerPolicyBetaOptions"
// owner: @ffromani
//
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"
// owner: @trierra
// kep: http://kep.k8s.io/2589
//
// Enables the Portworx in-tree driver to Portworx migration feature.
CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx"
// owner: @fengzixu
//
// Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it.
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth"
// owner: @sanposhiho @wojtek-t
// kep: https://kep.k8s.io/5278
//
// Clear pod.Status.NominatedNodeName when pod is bound to a node.
// This prevents stale information from affecting external scheduling components.
ClearingNominatedNodeNameAfterBinding featuregate.Feature = "ClearingNominatedNodeNameAfterBinding"
// owner: @ahmedtd
//
// Enable ClusterTrustBundle objects and Kubelet integration.
ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle"
// owner: @ahmedtd
//
// Enable ClusterTrustBundle Kubelet projected volumes. Depends on ClusterTrustBundle.
ClusterTrustBundleProjection featuregate.Feature = "ClusterTrustBundleProjection"
// owner: @adrianreber
// kep: https://kep.k8s.io/2008
//
// Enables container Checkpoint support in the kubelet
ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint"
// owner: @yuanwang04
// kep: https://kep.k8s.io/5307
//
// Supports container restart policy and container restart policy rules to override the pod restart policy.
// Enable a single container to restart even if the pod has restart policy "Never".
ContainerRestartRules featuregate.Feature = "ContainerRestartRules"
// owner: @sreeram-venkitesh
//
// Enables configuring custom stop signals for containers from container lifecycle
ContainerStopSignals featuregate.Feature = "ContainerStopSignals"
// owner: @jefftree
// kep: https://kep.k8s.io/4355
//
// Enables coordinated leader election in the API server
CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
// owner: @ttakahashi21 @mkimuram
// kep: https://kep.k8s.io/3294
//
// Enable usage of Provision of PVCs from snapshots in other namespaces
CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"
// owner: @ritazh
// kep: http://kep.k8s.io/5018
//
// Enables support for requesting admin access in a ResourceClaim.
// Admin access is granted even if a device is already in use and,
// depending on the DRA driver, may enable additional permissions
// when a container uses the allocated device.
DRAAdminAccess featuregate.Feature = "DRAAdminAccess"
// owner: @sunya-ch
// kep: https://kep.k8s.io/5075
//
// Enables support for consumable capacity in DRA, allowing a device's capacity to be shared across multiple allocations.
DRAConsumableCapacity featuregate.Feature = "DRAConsumableCapacity"
// owner: @KobayashiD27
// kep: http://kep.k8s.io/5007
// alpha: v1.34
//
// Enables support for delaying the binding of pods
// which depend on devices with binding conditions.
//
// DRAResourceClaimDeviceStatus also needs to be
// enabled.
DRADeviceBindingConditions featuregate.Feature = "DRADeviceBindingConditions"
// owner: @pohly
// kep: http://kep.k8s.io/5055
//
// Marking devices as tainted can prevent using them for new pods and/or
// cause pods using them to stop. Users can decide to tolerate taints.
DRADeviceTaints featuregate.Feature = "DRADeviceTaints"
// owner: @yliaog
// kep: http://kep.k8s.io/5004
//
// Enables support for providing extended resource requests backed by DRA.
DRAExtendedResource featuregate.Feature = "DRAExtendedResource"
// owner: @mortent, @cici37
// kep: http://kep.k8s.io/4815
//
// Enables support for dynamically partitioning devices based on
// which parts of them were allocated during scheduling.
//
DRAPartitionableDevices featuregate.Feature = "DRAPartitionableDevices"
// owner: @mortent
// kep: http://kep.k8s.io/4816
//
// Enables support for providing a prioritized list of requests
// for resources. The first entry that can be satisfied will
// be selected.
DRAPrioritizedList featuregate.Feature = "DRAPrioritizedList"
// owner: @LionelJouin
// kep: http://kep.k8s.io/4817
//
// Enables support the ResourceClaim.status.devices field and for setting this
// status from DRA drivers.
DRAResourceClaimDeviceStatus featuregate.Feature = "DRAResourceClaimDeviceStatus"
// owner: @pohly
// kep: http://kep.k8s.io/4381
//
// Enables aborting the per-node Filter operation in the scheduler after
// a certain time (10 seconds by default, configurable in the DynamicResources
// scheduler plugin configuration).
DRASchedulerFilterTimeout featuregate.Feature = "DRASchedulerFilterTimeout"
// owner: @jpbetz @aaron-prindle @yongruilin
// kep: http://kep.k8s.io/5073
// beta: v1.33
//
// Enables running declarative validation of APIs, where declared. When enabled, APIs with
// declarative validation rules will validate objects using the generated
// declarative validation code and compare the results to the regular imperative validation.
// See DeclarativeValidationTakeover for more.
DeclarativeValidation featuregate.Feature = "DeclarativeValidation"
// owner: @jpbetz @aaron-prindle @yongruilin
// kep: http://kep.k8s.io/5073
// beta: v1.33
//
// When enabled, declarative validation errors are returned directly to the caller,
// replacing hand-written validation errors for rules that have declarative implementations.
// When disabled, hand-written validation errors are always returned, effectively putting
// declarative validation in a "shadow mode" that monitors but does not affect API responses.
// Note: Although declarative validation aims for functional equivalence with hand-written validation,
// the exact number, format, and content of error messages may differ between the two approaches.
DeclarativeValidationTakeover featuregate.Feature = "DeclarativeValidationTakeover"
// owner: @atiratree
// kep: http://kep.k8s.io/3973
//
// Deployments and replica sets can now also track terminating pods via .status.terminatingReplicas.
DeploymentReplicaSetTerminatingReplicas featuregate.Feature = "DeploymentReplicaSetTerminatingReplicas"
// owner: @aojea
//
// API servers with the MultiCIDRServiceAllocator feature enabled perform a dual-write on both allocators, in order to
// support live migration from the old bitmap ClusterIP allocators to the new IPAddress allocators introduced by the
// MultiCIDRServiceAllocator feature. This feature gate disables the dual write on the new ClusterIP allocators.
DisableAllocatorDualWrite featuregate.Feature = "DisableAllocatorDualWrite"
// owner: @ffromani
// beta: v1.33
//
// Disables CPU Quota for containers which have exclusive CPUs allocated.
// Disables pod-Level CPU Quota for pods containing at least one container with exclusive CPUs allocated
// Exclusive CPUs for a container (init, application, sidecar) are allocated when:
// (1) cpumanager policy is static,
// (2) the pod has QoS Guaranteed,
// (3) the container has integer cpu request.
// The expected behavior is that CPU Quota for containers having exclusive CPUs allocated is disabled.
// Because this fix changes a long-established (but incorrect) behavior, users observing
// any regressions can use the DisableCPUQuotaWithExclusiveCPUs feature gate (default on) to
// restore the old behavior. Please file an issue if you hit problems and have to use this feature gate.
// The feature gate will be locked to true and then removed in +2 releases (1.35) if no bugs are reported.
DisableCPUQuotaWithExclusiveCPUs featuregate.Feature = "DisableCPUQuotaWithExclusiveCPUs"
// owner: @HirazawaUi
// kep: http://kep.k8s.io/4004
//
// DisableNodeKubeProxyVersion disables the status.nodeInfo.kubeProxyVersion field of v1.Node.
DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion"
// owner: @pohly
// kep: http://kep.k8s.io/4381
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod. Resource allocation is done by the scheduler
// based on "structured parameters".
DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"
// owner: @HirazawaUi
// kep: http://kep.k8s.io/3721
//
// Allow containers to read environment variables from a file.
// The environment variable file must be produced by an initContainer and located within an emptyDir volume.
// The kubelet will populate the environment variables in the container
// from the specified file in the emptyDir volume, without mounting the file.
EnvFiles featuregate.Feature = "EnvFiles"
// owner: @harche
// kep: http://kep.k8s.io/3386
//
// Allows using event-driven PLEG (pod lifecycle event generator) through the kubelet,
// which avoids frequent relisting of containers and helps optimize performance.
EventedPLEG featuregate.Feature = "EventedPLEG"
// owner: @andrewsykim @SergeyKanzhelev
//
// Ensure kubelet respects exec probe timeouts. This feature gate exists in case existing workloads
// depend on the old behavior where exec probe timeouts were ignored.
// Lock to default and remove after v1.22 based on user feedback that should be reflected in the KEP #1972 update.
ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout"
// owner: @HarshalNeelkamal
//
// Enables external service account JWT signing and key management.
// If enabled, it allows passing the --service-account-signing-endpoint flag to configure an external signer.
ExternalServiceAccountTokenSigner featuregate.Feature = "ExternalServiceAccountTokenSigner"
// owner: @vinayakankugoyal @thockin
//
// Controls whether the gitRepo volume plugin is supported.
// KEP #5040 disables the gitRepo volume plugin by default starting in v1.33;
// this gate provides a way for users to override that.
GitRepoVolumeDriver featuregate.Feature = "GitRepoVolumeDriver"
// owner: @bobbypage
// Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shut down.
GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown"
// owner: @wzshiming
// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"
// owner: @jm-franc
// kep: https://kep.k8s.io/4951
//
// Enables support of configurable HPA scale-up and scale-down tolerances.
HPAConfigurableTolerance featuregate.Feature = "HPAConfigurableTolerance"
// owner: @dxist
//
// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"
// owner: @deepakkinni @xing-yang
// kep: https://kep.k8s.io/2644
//
// Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC
// deletion ordering.
HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
// owner: @HirazawaUi
// kep: https://kep.k8s.io/4762
//
// Allows setting any FQDN as the pod's hostname
HostnameOverride featuregate.Feature = "HostnameOverride"
// owner: @haircommander
// kep: http://kep.k8s.io/4210
// ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin
// to specify the age after which an image will be garbage collected.
ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge"
// owner: @saschagrunert
// kep: https://kep.k8s.io/4639
//
// Enables the image volume source.
ImageVolume featuregate.Feature = "ImageVolume"
// owner: @vinaykul,@tallclair
// kep: http://kep.k8s.io/1287
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
// owner: @tallclair
// kep: http://kep.k8s.io/1287
//
// Deprecated: This feature gate is no longer used.
// Was: Enables the AllocatedResources field in container status. This feature requires
// InPlacePodVerticalScaling also be enabled.
InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
// owner: @tallclair @esotsal
//
// Allow resource resize for containers in Guaranteed pods with integer CPU requests (default false).
// Applies only in nodes with InPlacePodVerticalScaling and CPU Manager features enabled, and
// CPU Manager Static Policy option set.
InPlacePodVerticalScalingExclusiveCPUs featuregate.Feature = "InPlacePodVerticalScalingExclusiveCPUs"
// owner: @tallclair @pkrishn
//
// Allow memory resize for containers in Guaranteed pods (default false) when Memory Manager Policy is set to Static.
// Applies only in nodes with InPlacePodVerticalScaling and Memory Manager features enabled.
InPlacePodVerticalScalingExclusiveMemory featuregate.Feature = "InPlacePodVerticalScalingExclusiveMemory"
// owner: @trierra
//
// Disables the Portworx in-tree driver.
InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister"
// owner: @mimowo
// kep: https://kep.k8s.io/3850
//
// Allows users to specify counting of failed pods per index.
JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex"
// owner: @mimowo
// kep: https://kep.k8s.io/4368
//
// Allows to delegate reconciliation of a Job object to an external controller.
JobManagedBy featuregate.Feature = "JobManagedBy"
// owner: @kannon92
// kep : https://kep.k8s.io/3939
//
// Allow users to specify recreating pods of a job only when
// pods have fully terminated.
JobPodReplacementPolicy featuregate.Feature = "JobPodReplacementPolicy"
// owner: @tenzen-y
// kep: https://kep.k8s.io/3998
//
// Allow users to specify when a Job can be declared as succeeded
// based on the set of succeeded pods.
JobSuccessPolicy featuregate.Feature = "JobSuccessPolicy"
// owner: @marquiz
// kep: http://kep.k8s.io/4033
//
// Enable detection of the kubelet cgroup driver configuration option from
// the CRI. The CRI runtime also needs to support this feature, in which
// case the kubelet will ignore the cgroupDriver (--cgroup-driver)
// configuration option. If the runtime doesn't support it, the kubelet will
// fall back to using its cgroupDriver option.
KubeletCgroupDriverFromCRI featuregate.Feature = "KubeletCgroupDriverFromCRI"
// owner: @lauralorenz
// kep: https://kep.k8s.io/4603
//
// Enables support for configurable per-node backoff maximums for restarting
// containers (aka containers in CrashLoopBackOff)
KubeletCrashLoopBackOffMax featuregate.Feature = "KubeletCrashLoopBackOffMax"
// owner: @stlaz
// kep: https://kep.k8s.io/2535
//
// Enables tracking credentials for image pulls in order to authorize image
// access for different tenants.
KubeletEnsureSecretPulledImages featuregate.Feature = "KubeletEnsureSecretPulledImages"
// owner: @vinayakankugoyal
// kep: http://kep.k8s.io/2862
//
// Enable fine-grained kubelet API authorization for webhook based
// authorization.
KubeletFineGrainedAuthz featuregate.Feature = "KubeletFineGrainedAuthz"
// owner: @AkihiroSuda
//
// Enables support for running kubelet in a user namespace.
// The user namespace has to be created before running kubelet.
// All the node components such as CRI need to be running in the same user namespace.
KubeletInUserNamespace featuregate.Feature = "KubeletInUserNamespace"
// KubeletPSI enables Kubelet to surface PSI metrics
// owner: @roycaihw
// kep: https://kep.k8s.io/4205
KubeletPSI featuregate.Feature = "KubeletPSI"
// owner: @moshe010
//
// Enable the pod resources API to return resources allocated by Dynamic Resource Allocation.
KubeletPodResourcesDynamicResources featuregate.Feature = "KubeletPodResourcesDynamicResources"
// owner: @moshe010
//
// Enable the Get method in the pod resources API.
KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet"
// owner: @ffromani
// Deprecated: v1.34
//
// issue: https://github.com/kubernetes/kubernetes/issues/119423
// Disables restricted output for the podresources API list endpoint.
// "Restricted" output only includes the pods that are actually running and thus
// hold resources. This turns out to have been the originally intended behavior; see:
// https://github.com/kubernetes/kubernetes/pull/79409#issuecomment-505975671
// This behavior was lost over time, and the interaction with the memory manager creates
// an unfixable bug because the endpoint returns spurious stale information the clients
// cannot filter out, since the API doesn't provide enough context. See:
// https://github.com/kubernetes/kubernetes/issues/132020
// The endpoint has been returning this extra information for a long time, but that information
// is useless for the purposes of this API. Nevertheless, we are changing a long-established,
// albeit buggy, behavior, so users observing any regressions can use the
// KubeletPodResourcesListUseActivePods feature gate (default on) to restore the old behavior.
// Please file an issue if you have to use this feature gate.
// The feature gate will be locked to true in +4 releases (1.38) and then removed (1.39)
// if no bugs are reported.
KubeletPodResourcesListUseActivePods featuregate.Feature = "KubeletPodResourcesListUseActivePods"
// owner: @hoskeri
//
// Restores previous behavior where Kubelet fails self registration if node create returns 403 Forbidden.
KubeletRegistrationGetOnExistsOnly featuregate.Feature = "KubeletRegistrationGetOnExistsOnly"
// owner: @kannon92
// kep: https://kep.k8s.io/4191
//
// The split image filesystem feature enables kubelet to perform garbage collection
// of images (read-only layers) and/or containers (writeable layers) deployed on
// separate filesystems.
KubeletSeparateDiskGC featuregate.Feature = "KubeletSeparateDiskGC"
// owner: @aramase
// kep: http://kep.k8s.io/4412
//
// Enable kubelet to send the service account token bound to the pod for which the image
// is being pulled to the credential provider plugin.
KubeletServiceAccountTokenForCredentialProviders featuregate.Feature = "KubeletServiceAccountTokenForCredentialProviders"
// owner: @sallyom
// kep: https://kep.k8s.io/2832
//
// Add support for distributed tracing in the kubelet
KubeletTracing featuregate.Feature = "KubeletTracing"
// owner: @Sh4d1,@RyanAoh,@rikatz
// kep: http://kep.k8s.io/1860
// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode"
// owner: @RobertKrawitz
//
// Allow use of filesystems for ephemeral storage monitoring.
// Only applies if LocalStorageCapacityIsolation is set.
// Relies on UserNamespacesSupport feature, and thus should follow it when setting defaults.
LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
// owner: @damemi
//
// Enables scaling down replicas via logarithmic comparison of creation/ready timestamps
LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"
// owner: @sanposhiho
// kep: https://kep.k8s.io/3633
//
// Enables MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity.
MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity"
// owner: @denkensk
// kep: https://kep.k8s.io/3243
//
// Enable MatchLabelKeys in PodTopologySpread.
MatchLabelKeysInPodTopologySpread featuregate.Feature = "MatchLabelKeysInPodTopologySpread"
// owner: @mochizuki875
// kep: https://kep.k8s.io/3243
//
// Enable merging key-value labels into LabelSelector corresponding to MatchLabelKeys in PodTopologySpread.
MatchLabelKeysInPodTopologySpreadSelectorMerge featuregate.Feature = "MatchLabelKeysInPodTopologySpreadSelectorMerge"
// owner: @krmayankk
//
// Enables maxUnavailable for StatefulSet
MaxUnavailableStatefulSet featuregate.Feature = "MaxUnavailableStatefulSet"
// owner: @cynepco3hahue(alukiano) @cezaryzukowski @k-wiatrzyk, @Tal-or (only for GA graduation)
//
// Allows setting memory affinity for a container based on NUMA topology
MemoryManager featuregate.Feature = "MemoryManager"
// owner: @xiaoxubeii
// kep: https://kep.k8s.io/2570
//
// Enables kubelet to support memory QoS with cgroups v2.
MemoryQoS featuregate.Feature = "MemoryQoS"
// owner: @aojea
// kep: https://kep.k8s.io/1880
//
// Enables the dynamic configuration of Service IP ranges
MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"
// owner: @torredil
// kep: https://kep.k8s.io/4876
//
// Makes CSINode.Spec.Drivers[*].Allocatable.Count mutable, allowing CSI drivers to
// update the number of volumes that can be allocated on a node
MutableCSINodeAllocatableCount featuregate.Feature = "MutableCSINodeAllocatableCount"
// owner: @danwinship
// kep: https://kep.k8s.io/3866
//
// Allows running kube-proxy with `--mode nftables`.
NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode"
// owner: @kerthcet
// kep: https://kep.k8s.io/3094
//
// Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when
// calculating pod topology spread skew.
NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread"
// owner: @aravindhp @LorbusChris
// kep: http://kep.k8s.io/2271
//
// Enables querying logs of node services using the /logs endpoint. Enabling this feature has security implications.
// The recommendation is to enable it on an as-needed basis for debugging purposes and to disable it otherwise.
NodeLogQuery featuregate.Feature = "NodeLogQuery"
// owner: @iholder101 @kannon92
// kep: https://kep.k8s.io/2400
//
// Permits kubelet to run with swap enabled.
NodeSwap featuregate.Feature = "NodeSwap"
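
// Illustrative only, not part of the declarations above: a node opting in to
// swap is typically configured along these lines (KubeletConfiguration sketch;
// LimitedSwap restricts swap usage to Burstable pods on cgroup v2):
//
//	failSwapOn: false
//	featureGates:
//	  NodeSwap: true
//	memorySwap:
//	  swapBehavior: LimitedSwap
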
// owner: @sanposhiho, @wojtek-t
// kep: https://kep.k8s.io/5278
//
// Extends NominatedNodeName field to express expected pod placement, allowing
// both the scheduler and external components (e.g., Cluster Autoscaler, Karpenter, Kueue)
// to share pod placement intentions. This enables better coordination between
// components, prevents inappropriate node scale-downs, and helps the scheduler
// resume work after restarts.
NominatedNodeNameForExpectation featuregate.Feature = "NominatedNodeNameForExpectation"
// owner: @cici37
// kep: https://kep.k8s.io/5080
//
// Enables ordered namespace deletion.
OrderedNamespaceDeletion featuregate.Feature = "OrderedNamespaceDeletion"
// owner: @haircommander
// kep: https://kep.k8s.io/2364
//
// Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplementing them with stats from cAdvisor.
// Requires a CRI implementation that supports supplying the required stats.
PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI"
// owner: @ahmedtd
// kep: https://kep.k8s.io/4317
//
// Enable PodCertificateRequest objects and podCertificate projected volume sources.
PodCertificateRequest featuregate.Feature = "PodCertificateRequest"
// owner: @ahg-g
//
// Enables controlling pod ranking on replicaset scale-down.
PodDeletionCost featuregate.Feature = "PodDeletionCost"
// owner: @danielvegamyhre
// kep: https://kep.k8s.io/4017
//
// Set pod completion index as a pod label for Indexed Jobs.
PodIndexLabel featuregate.Feature = "PodIndexLabel"
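
// Illustrative only: with this gate enabled, a pod created by an Indexed Job
// carries its completion index as a label, e.g.
// batch.kubernetes.io/job-completion-index: "3".
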
// owner: @ndixita
// kep: https://kep.k8s.io/2837
//
// Enables specifying resources at pod-level.
PodLevelResources featuregate.Feature = "PodLevelResources"
// owner: @AxeZhan
// kep: http://kep.k8s.io/3960
//
// Enables SleepAction in container lifecycle hooks
PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction"
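
// Illustrative only: a container using SleepAction in a preStop hook
// (pod spec sketch):
//
//	lifecycle:
//	  preStop:
//	    sleep:
//	      seconds: 5
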
// owner: @sreeram-venkitesh
// kep: http://kep.k8s.io/4818
//
// Allows zero value for sleep duration in SleepAction in container lifecycle hooks
PodLifecycleSleepActionAllowZero featuregate.Feature = "PodLifecycleSleepActionAllowZero"
// owner: @knight42
// kep: https://kep.k8s.io/3288
//
// Enables only stdout or stderr of the container to be retrieved.
PodLogsQuerySplitStreams featuregate.Feature = "PodLogsQuerySplitStreams"
// owner: @natasha41575
// kep: http://kep.k8s.io/5067
//
// Enables the pod to report status.ObservedGeneration to reflect the generation of the last observed podspec.
PodObservedGenerationTracking featuregate.Feature = "PodObservedGenerationTracking"
// owner: @ddebroy, @kannon92
//
// Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod
// sandbox creation and network configuration completes successfully
PodReadyToStartContainersCondition featuregate.Feature = "PodReadyToStartContainersCondition"
// owner: @Huang-Wei
// kep: https://kep.k8s.io/3521
//
// Enable users to specify when a Pod is ready for scheduling.
PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"
// owner: @munnerz
// kep: https://kep.k8s.io/4742
// alpha: v1.33
//
// Enables the PodTopologyLabelsAdmission admission plugin that mutates `pod/binding`
// requests by copying the `topology.kubernetes.io/{zone,region}` labels from the assigned
// Node object (in the Binding being admitted) onto the Binding
// so that it can be persisted onto the Pod object when the Pod is being scheduled.
// This allows workloads running in pods to understand the topology information of their assigned node.
// Enabling this feature also permits external schedulers to set labels on pods in an atomic
// operation when scheduling a Pod by setting the `metadata.labels` field on the submitted Binding,
// similar to how `metadata.annotations` behaves.
PodTopologyLabelsAdmission featuregate.Feature = "PodTopologyLabelsAdmission"
// owner: @seans3
// kep: http://kep.k8s.io/4006
//
// Enables PortForward to be proxied with a websocket client
PortForwardWebsockets featuregate.Feature = "PortForwardWebsockets"
// owner: @danwinship
// kep: https://kep.k8s.io/3015
//
// Enables PreferSameZone and PreferSameNode values for trafficDistribution
PreferSameTrafficDistribution featuregate.Feature = "PreferSameTrafficDistribution"
// owner: @sreeram-venkitesh
//
// Denies pod admission if static pods reference other API objects.
PreventStaticPodAPIReferences featuregate.Feature = "PreventStaticPodAPIReferences"
// owner: @tssurya
// kep: https://kep.k8s.io/4559
//
// Enables probe host enforcement for Pod Security Standards.
ProbeHostPodSecurityStandards featuregate.Feature = "ProbeHostPodSecurityStandards"
// owner: @jessfraz
//
// Enables control over ProcMountType for containers.
ProcMountType featuregate.Feature = "ProcMountType"
// owner: @sjenning
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @gnufied
// kep: https://kep.k8s.io/1790
//
// Allow users to recover from volume expansion failure
RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure"
// owner: @AkihiroSuda
// kep: https://kep.k8s.io/3857
//
// Allows recursive read-only mounts.
RecursiveReadOnlyMounts featuregate.Feature = "RecursiveReadOnlyMounts"
// owner: @lauralorenz
// kep: https://kep.k8s.io/4603
//
// Enables support for a lower internal cluster-wide backoff maximum for restarting
// containers (aka containers in CrashLoopBackOff)
ReduceDefaultCrashLoopBackOffDecay featuregate.Feature = "ReduceDefaultCrashLoopBackOffDecay"
// owner: @adrianmoisey
// kep: https://kep.k8s.io/4427
//
// Relaxed DNS search string validation.
RelaxedDNSSearchValidation featuregate.Feature = "RelaxedDNSSearchValidation"
// owner: @HirazawaUi
// kep: https://kep.k8s.io/4369
//
// Allow almost all printable ASCII characters in environment variables
RelaxedEnvironmentVariableValidation featuregate.Feature = "RelaxedEnvironmentVariableValidation"
// owner: @adrianmoisey
// kep: https://kep.k8s.io/5311
//
// Relaxed Service name validation.
RelaxedServiceNameValidation featuregate.Feature = "RelaxedServiceNameValidation"
// owner: @zhangweikop
//
// Enable the kubelet TLS server to update its certificate if the specified certificate files change.
// This feature is useful when specifying tlsCertFile & tlsPrivateKeyFile in the kubelet configuration.
// It has no effect in other cases, such as when using serverTLSBootstrap.
ReloadKubeletServerCertificateFile featuregate.Feature = "ReloadKubeletServerCertificateFile"
// owner: @SergeyKanzhelev
// kep: https://kep.k8s.io/4680
//
// Adds the AllocatedResourcesStatus to the container status.
ResourceHealthStatus featuregate.Feature = "ResourceHealthStatus"
// owner: @mikedanese
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating a self-signed one, and auto-rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @kiashok
// kep: https://kep.k8s.io/4216
//
// Adds support to pull images based on the runtime class specified.
RuntimeClassInImageCriAPI featuregate.Feature = "RuntimeClassInImageCriApi"
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
//
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
// Enables the SELinuxChangePolicy field in PodSecurityContext before the SELinuxMount feature gate is enabled.
SELinuxChangePolicy featuregate.Feature = "SELinuxChangePolicy"
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
SELinuxMount featuregate.Feature = "SELinuxMount"
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
// Initial implementation focused on ReadWriteOncePod volumes.
SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
// owner: @macsko
// kep: http://kep.k8s.io/5229
//
// Makes all API calls during scheduling asynchronous, by introducing a new kube-scheduler-wide way of handling such calls.
SchedulerAsyncAPICalls featuregate.Feature = "SchedulerAsyncAPICalls"
// owner: @sanposhiho
// kep: http://kep.k8s.io/4832
//
// Runs some expensive operations within the scheduler's preemption asynchronously,
// which improves scheduling latency when preemption is involved.
SchedulerAsyncPreemption featuregate.Feature = "SchedulerAsyncPreemption"
// owner: @macsko
// kep: http://kep.k8s.io/5142
//
// Improves scheduling queue behavior by popping pods from the backoffQ when the activeQ is empty.
// This allows potentially schedulable pods to be processed ASAP, eliminating the penalty effect of the backoff queue.
SchedulerPopFromBackoffQ featuregate.Feature = "SchedulerPopFromBackoffQ"
// owner: @sanposhiho
// kep: http://kep.k8s.io/4247
//
// Enables the scheduler enhancement called QueueingHints,
// which helps reduce useless requeueing.
SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints"
// owner: @atosatto @yuanchen8911
// kep: http://kep.k8s.io/3902
//
// Decouples Taint Eviction Controller, performing taint-based Pod eviction, from Node Lifecycle Controller.
SeparateTaintEvictionController featuregate.Feature = "SeparateTaintEvictionController"
// owner: @aramase
// kep: https://kep.k8s.io/4412
//
// ServiceAccountNodeAudienceRestriction restricts the audiences for which the
// kubelet can request a service account token.
ServiceAccountNodeAudienceRestriction featuregate.Feature = "ServiceAccountNodeAudienceRestriction"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether JTIs (UUIDs) are embedded into generated service account tokens, and whether these JTIs are
// recorded into the audit log for future requests made by these tokens.
ServiceAccountTokenJTI featuregate.Feature = "ServiceAccountTokenJTI"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver supports binding service account tokens to Node objects.
ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver will validate Node claims in service account tokens.
ServiceAccountTokenNodeBindingValidation featuregate.Feature = "ServiceAccountTokenNodeBindingValidation"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
//
// Controls whether the apiserver embeds the node name and uid for the associated node when issuing
// service account tokens bound to Pod objects.
ServiceAccountTokenPodNodeInfo featuregate.Feature = "ServiceAccountTokenPodNodeInfo"
// owner: @gauravkghildiyal @robscott
// kep: https://kep.k8s.io/4444
//
// Enables trafficDistribution field on Services.
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution"
// owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal
// kep: http://kep.k8s.io/753
//
// Introduces sidecar containers, a new type of init container that starts
// before other containers but remains running for the full duration of the
// pod's lifecycle and will not block pod termination.
SidecarContainers featuregate.Feature = "SidecarContainers"
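
// Illustrative only: with this gate enabled, a sidecar is declared as an init
// container with restartPolicy: Always (name and image here are hypothetical):
//
//	initContainers:
//	- name: log-shipper
//	  image: registry.example/log-shipper:latest
//	  restartPolicy: Always
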
// owner: @cupnes
// kep: https://kep.k8s.io/4049
//
// Enables scoring nodes by available storage capacity.
StorageCapacityScoring featuregate.Feature = "StorageCapacityScoring"
// owner: @ahutsunshine
//
// Enables a namespace indexer for namespace-scoped resources in the apiserver cache to accelerate list operations.
// Superseded by BtreeWatchCache.
StorageNamespaceIndex featuregate.Feature = "StorageNamespaceIndex"
// owner: @nilekhc
// kep: https://kep.k8s.io/4192
//
// Enables support for the StorageVersionMigrator controller.
StorageVersionMigrator featuregate.Feature = "StorageVersionMigrator"
// owner: @serathius
// Allow API server JSON encoder to encode collections item by item, instead of all at once.
StreamingCollectionEncodingToJSON featuregate.Feature = "StreamingCollectionEncodingToJSON"
// owner: @serathius
// Allow API server Protobuf encoder to encode collections item by item, instead of all at once.
StreamingCollectionEncodingToProtobuf featuregate.Feature = "StreamingCollectionEncodingToProtobuf"
// owner: @danwinship
// kep: https://kep.k8s.io/4858
//
// Requires stricter validation of IP addresses and CIDR values in API objects.
StrictIPCIDRValidation featuregate.Feature = "StrictIPCIDRValidation"
// owner: @everpeace
// kep: https://kep.k8s.io/3619
//
// Enable SupplementalGroupsPolicy feature in PodSecurityContext
SupplementalGroupsPolicy featuregate.Feature = "SupplementalGroupsPolicy"
// owner: @zhifei92
//
// Enables the systemd watchdog for the kubelet. When enabled, the kubelet will
// periodically notify the systemd watchdog to indicate that it is still alive.
// This can help prevent the system from restarting the kubelet if it becomes
// unresponsive. The feature gate is enabled by default, but should only be used
// if the system supports the systemd watchdog feature and has it configured properly.
SystemdWatchdog featuregate.Feature = "SystemdWatchdog"
// owner: @robscott
// kep: https://kep.k8s.io/2433
//
// Enables topology aware hints for EndpointSlices
TopologyAwareHints featuregate.Feature = "TopologyAwareHints"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow fine-tuning of topology manager policies with alpha options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is alpha.
// - will never graduate to beta or stable.
TopologyManagerPolicyAlphaOptions featuregate.Feature = "TopologyManagerPolicyAlphaOptions"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow fine-tuning of topology manager policies with beta options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
TopologyManagerPolicyBetaOptions featuregate.Feature = "TopologyManagerPolicyBetaOptions"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
//
// Allow the usage of options to fine-tune the topology manager policies.
TopologyManagerPolicyOptions featuregate.Feature = "TopologyManagerPolicyOptions"
// owner: @seans3
// kep: http://kep.k8s.io/4006
//
// Enables StreamTranslator proxy to handle WebSockets upgrade requests for the
// version of the RemoteCommand subprotocol that supports the "close" signal.
TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests"
// owner: @richabanker
//
// Proxies the client to an apiserver capable of serving the request in the event of version skew.
UnknownVersionInteroperabilityProxy featuregate.Feature = "UnknownVersionInteroperabilityProxy"
// owner: @saschagrunert
//
// Enables user namespace support for Pod Security Standards. Enabling this
// feature will modify all Pod Security Standard rules to allow setting:
// spec[.*].securityContext.[runAsNonRoot,runAsUser]
// This feature gate should only be enabled if all nodes in the cluster
// support the user namespace feature and have it enabled. The feature gate
// will not graduate or be enabled by default in future Kubernetes
// releases.
UserNamespacesPodSecurityStandards featuregate.Feature = "UserNamespacesPodSecurityStandards"
// owner: @rata, @giuseppe
// kep: https://kep.k8s.io/127
//
// Enables user namespace support for stateless pods.
UserNamespacesSupport featuregate.Feature = "UserNamespacesSupport"
// owner: @mattcarry, @sunnylovestiramisu
// kep: https://kep.k8s.io/3751
//
// Enables user specified volume attributes for persistent volumes, like iops and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
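
// Illustrative only (the API group/version depends on the release, and the
// parameters are driver-specific and hypothetical here):
//
//	apiVersion: storage.k8s.io/v1beta1
//	kind: VolumeAttributesClass
//	metadata:
//	  name: gold
//	driverName: csi.example.com
//	parameters:
//	  iops: "500"
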
// owner: @ksubrmnn
//
// Allows kube-proxy to create DSR loadbalancers for Windows
WinDSR featuregate.Feature = "WinDSR"
// owner: @ksubrmnn
//
// Allows kube-proxy to run in Overlay mode for Windows
WinOverlay featuregate.Feature = "WinOverlay"
// owner: @jsturtevant
// kep: https://kep.k8s.io/4888
//
// Add CPU and Memory Affinity support to Windows nodes with CPUManager, MemoryManager and Topology manager
WindowsCPUAndMemoryAffinity featuregate.Feature = "WindowsCPUAndMemoryAffinity"
// owner: @zylxjtu
// kep: https://kep.k8s.io/4802
//
// Enables support for graceful shutdown of Windows nodes.
WindowsGracefulNodeShutdown featuregate.Feature = "WindowsGracefulNodeShutdown"
// owner: @marosset
// kep: https://kep.k8s.io/3503
//
// Enables support for joining Windows containers to a host's network namespace.
WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork"
)
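
// Illustrative only: components typically consult these gates through the shared
// gate instance, e.g. (a minimal sketch assuming the usual imports,
// utilfeature "k8s.io/apiserver/pkg/util/feature" and "k8s.io/kubernetes/pkg/features"):
//
//	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
//		// feature-gated code path
//	}
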
// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
// To add a new feature, define a key for it in pkg/features/kube_features.go and add it here. The features will be
// available throughout Kubernetes binaries.
// For features available via specific kubernetes components like the apiserver,
// cloud-controller-manager, etc., find the respective kube_features.go file
// (e.g. staging/src/apiserver/pkg/features/kube_features.go), define the versioned
// feature gate there, and reference it in this file.
// To support n-3 compatibility versions, features may only be removed 3 releases after graduation.
//
// Entries are alphabetized.
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
AllowDNSOnlyNodeCSR: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
},
AllowInsecureKubeletCertificateSigningRequests: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
},
AllowOverwriteTerminationGracePeriodSeconds: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.35"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.38
},
AllowServiceLBStatusOnNonLB: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.35
},
AnyVolumeDataSource: {
{Version: version.MustParse("1.18"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.33 -> remove in 1.36
},
AuthorizeNodeWithSelectors: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
CPUCFSQuotaPeriod: {
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
},
CPUManagerPolicyAlphaOptions: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
},
CPUManagerPolicyBetaOptions: {
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
},
CPUManagerPolicyOptions: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
CSIMigrationPortworx: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver)
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
CSIVolumeHealth: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
},
ClearingNominatedNodeNameAfterBinding: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
ClusterTrustBundle: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
ClusterTrustBundleProjection: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
ContainerCheckpoint: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
ContainerRestartRules: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
ContainerStopSignals: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
CrossNamespaceVolumeDataSource: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
DRAAdminAccess: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
DRAConsumableCapacity: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
DRADeviceBindingConditions: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
DRADeviceTaints: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
DRAExtendedResource: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
DRAPartitionableDevices: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
DRAPrioritizedList: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
DRAResourceClaimDeviceStatus: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
DRASchedulerFilterTimeout: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
DeclarativeValidation: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
DeclarativeValidationTakeover: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
DeploymentReplicaSetTerminatingReplicas: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
DisableAllocatorDualWrite: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.35"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove after MultiCIDRServiceAllocator is GA
},
DisableCPUQuotaWithExclusiveCPUs: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
DisableNodeKubeProxyVersion: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Deprecated}, // lock to default in 1.34 and remove in v1.37
},
DynamicResourceAllocation: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA}, // lock to default in 1.35
},
EnvFiles: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
EventedPLEG: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
ExecProbeTimeout: {
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
},
ExternalServiceAccountTokenSigner: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
GitRepoVolumeDriver: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Deprecated},
},
GracefulNodeShutdown: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
},
GracefulNodeShutdownBasedOnPodPriority: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
HPAConfigurableTolerance: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
HPAScaleToZero: {
{Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha},
},
HonorPVReclaimPolicy: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
HostnameOverride: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
ImageMaximumGCAge: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
ImageVolume: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
InPlacePodVerticalScaling: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
InPlacePodVerticalScalingAllocatedStatus: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Deprecated}, // remove in 1.36
},
InPlacePodVerticalScalingExclusiveCPUs: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
InPlacePodVerticalScalingExclusiveMemory: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
InTreePluginPortworxUnregister: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, // remove it along with CSIMigrationPortworx in 1.36
},
JobBackoffLimitPerIndex: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
JobManagedBy: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
JobPodReplacementPolicy: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
JobSuccessPolicy: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
KubeletCgroupDriverFromCRI: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
KubeletCrashLoopBackOffMax: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletEnsureSecretPulledImages: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletFineGrainedAuthz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
KubeletInUserNamespace: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
},
KubeletPSI: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
KubeletPodResourcesDynamicResources: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
KubeletPodResourcesGet: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
KubeletPodResourcesListUseActivePods: {
{Version: version.MustParse("1.0"), Default: false, PreRelease: featuregate.GA},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Deprecated}, // lock to default in 1.38, remove in 1.39
},
KubeletRegistrationGetOnExistsOnly: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
},
KubeletSeparateDiskGC: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
KubeletServiceAccountTokenForCredentialProviders: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
KubeletTracing: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
LoadBalancerIPMode: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
LocalStorageCapacityIsolationFSQuotaMonitoring: {
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
LogarithmicScaleDown: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
MatchLabelKeysInPodAffinity: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
MatchLabelKeysInPodTopologySpread: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
},
MatchLabelKeysInPodTopologySpreadSelectorMerge: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
MaxUnavailableStatefulSet: {
{Version: version.MustParse("1.24"), Default: false, PreRelease: featuregate.Alpha},
},
MemoryManager: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
MemoryQoS: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
},
MultiCIDRServiceAllocator: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37 (locked to default in 1.34)
},
MutableCSINodeAllocatableCount: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Beta},
},
NFTablesProxyMode: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
NodeInclusionPolicyInPodTopologySpread: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
NodeLogQuery: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
},
NodeSwap: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
NominatedNodeNameForExpectation: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
OrderedNamespaceDeletion: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
PodAndContainerStatsFromCRI: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
},
PodCertificateRequest: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
PodDeletionCost: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
},
PodIndexLabel: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
},
PodLevelResources: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
PodLifecycleSleepAction: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.34; remove in 1.37
},
PodLifecycleSleepActionAllowZero: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.34, remove in 1.37
},
PodLogsQuerySplitStreams: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
PodObservedGenerationTracking: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
PodReadyToStartContainersCondition: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
PodSchedulingReadiness: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32
},
PodTopologyLabelsAdmission: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
PortForwardWebsockets: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
PreferSameTrafficDistribution: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
PreventStaticPodAPIReferences: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
// Policy is GA in its first release; this gate only exists to disable the enforcement when emulating older minors
ProbeHostPodSecurityStandards: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
ProcMountType: {
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
QOSReserved: {
{Version: version.MustParse("1.11"), Default: false, PreRelease: featuregate.Alpha},
},
RecoverVolumeExpansionFailure: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.34; remove in 1.37
},
RecursiveReadOnlyMounts: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
},
ReduceDefaultCrashLoopBackOffDecay: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
RelaxedDNSSearchValidation: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
RelaxedEnvironmentVariableValidation: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
RelaxedServiceNameValidation: {
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
},
ReloadKubeletServerCertificateFile: {
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
},
ResourceHealthStatus: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
},
RotateKubeletServerCertificate: {
{Version: version.MustParse("1.7"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.12"), Default: true, PreRelease: featuregate.Beta},
},
RuntimeClassInImageCriAPI: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
},
SELinuxChangePolicy: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
SELinuxMount: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
SELinuxMountReadWriteOncePod: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
},
SchedulerAsyncAPICalls: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
SchedulerAsyncPreemption: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
SchedulerPopFromBackoffQ: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
SchedulerQueueingHints: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
SeparateTaintEvictionController: {
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37 (locked to default in 1.34)
},
ServiceAccountNodeAudienceRestriction: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
ServiceAccountTokenJTI: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
ServiceAccountTokenNodeBinding: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
ServiceAccountTokenNodeBindingValidation: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
ServiceAccountTokenPodNodeInfo: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
ServiceTrafficDistribution: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA and LockToDefault in 1.33, remove in 1.36
},
SidecarContainers: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, // GA in 1.33 remove in 1.36
},
StorageCapacityScoring: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
StorageNamespaceIndex: {
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Deprecated},
},
StorageVersionMigrator: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
StreamingCollectionEncodingToJSON: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
StreamingCollectionEncodingToProtobuf: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
StrictIPCIDRValidation: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
SupplementalGroupsPolicy: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
SystemdWatchdog: {
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
},
TopologyAwareHints: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
TopologyManagerPolicyAlphaOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
},
TopologyManagerPolicyBetaOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
},
TopologyManagerPolicyOptions: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA},
},
TranslateStreamCloseWebsocketRequests: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
},
UnknownVersionInteroperabilityProxy: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
},
UserNamespacesPodSecurityStandards: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
},
UserNamespacesSupport: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
VolumeAttributesClass: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA},
},
WinDSR: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
WinOverlay: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
WindowsCPUAndMemoryAffinity: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
WindowsGracefulNodeShutdown: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
WindowsHostNetwork: {
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Deprecated},
},
apiextensionsfeatures.CRDValidationRatcheting: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
apiextensionsfeatures.CustomResourceFieldSelectors: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
},
genericfeatures.APIResponseCompression: {
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIServerIdentity: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.APIServerTracing: {
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
genericfeatures.APIServingWithRoutine: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.AggregatedDiscoveryRemoveBetaType: {
{Version: version.MustParse("1.0"), Default: false, PreRelease: featuregate.GA},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Deprecated},
},
genericfeatures.AllowParsingUserUIDFromCertAuth: {
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.AllowUnsafeMalformedObjectDeletion: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.AnonymousAuthConfigurableEndpoints: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.AuthorizeWithSelectors: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.37
},
genericfeatures.BtreeWatchCache: {
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.CBORServingAndStorage: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.ConcurrentWatchObjectDecode: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.ConsistentListFromCache: {
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.CoordinatedLeaderElection: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.DetectCacheInconsistency: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.KMSv1: {
{Version: version.MustParse("1.0"), Default: true, PreRelease: featuregate.GA},
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated},
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
},
genericfeatures.ListFromCacheSnapshot: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.MutatingAdmissionPolicy: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.OpenAPIEnums: {
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.RemoteRequestHeaderUID: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.ResilientWatchCacheInitialization: {
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.RetryGenerateName: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
},
genericfeatures.SeparateCacheWatchRPC: {
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Deprecated},
},
genericfeatures.SizeBasedListCostEstimate: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StorageVersionAPI: {
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
},
genericfeatures.StorageVersionHash: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StrictCostEnforcementForVAP: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.StrictCostEnforcementForWebhooks: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.StructuredAuthenticationConfiguration: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA and LockToDefault in 1.34, remove in 1.37
},
genericfeatures.StructuredAuthenticationConfigurationEgressSelector: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.StructuredAuthorizationConfiguration: {
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
},
genericfeatures.TokenRequestServiceAccountUIDValidation: {
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.UnauthenticatedHTTP2DOSMitigation: {
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
},
genericfeatures.WatchCacheInitializationPostStartHook: {
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},
genericfeatures.WatchFromStorageWithoutResourceVersion: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true},
},
genericfeatures.WatchList: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
// switched back to false in 1.33 because the json and proto streaming encoders appear to work better.
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Beta},
{Version: version.MustParse("1.34"), Default: true, PreRelease: featuregate.Beta},
},
kcmfeatures.CloudControllerManagerWebhook: {
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
},
zpagesfeatures.ComponentFlagz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
zpagesfeatures.ComponentStatusz: {
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
},
}
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
runtime.Must(zpagesfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
// Register all client-go features with kube's feature gate instance and make all client-go
// feature checks use kube's instance. The effect is that for kube binaries, client-go
// features are wired to the existing --feature-gates flag just as all other features
// are. Further, client-go features automatically support the existing mechanisms for
// feature enablement metrics and test overrides.
ca := &clientAdapter{utilfeature.DefaultMutableFeatureGate}
runtime.Must(clientfeatures.AddFeaturesToExistingFeatureGates(ca))
clientfeatures.ReplaceFeatureGates(ca)
}
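// Illustrative sketch (not part of the upstream file): once the init above has
// wired everything into utilfeature.DefaultMutableFeatureGate, components read
// a gate through the read-only DefaultFeatureGate view. SidecarContainers is
// one of the gates declared in the versioned map above.
//
//	if utilfeature.DefaultFeatureGate.Enabled(SidecarContainers) {
//		// take the sidecar-aware code path
//	}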
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fieldpath
import (
"fmt"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/validation"
)
// FormatMap formats map[string]string to a string.
func FormatMap(m map[string]string) string {
// output with keys in sorted order to provide stable output
keys := make([]string, 0, len(m))
var grow int
for k, v := range m {
keys = append(keys, k)
// reserve 4 extra bytes per entry: '=' and '\n' separators plus the two
// quotes that strconv.AppendQuote adds around the value
grow += len(k) + len(v) + 4
}
sort.Strings(keys)
// allocate space to avoid expansion
dst := make([]byte, 0, grow)
for _, key := range keys {
if len(dst) > 0 {
dst = append(dst, '\n')
}
dst = append(dst, key...)
dst = append(dst, '=')
dst = strconv.AppendQuote(dst, m[key])
}
return string(dst)
}
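// Example (illustrative): keys are emitted in sorted order with values quoted
// by strconv.AppendQuote, one entry per line:
//
//	FormatMap(map[string]string{"app": "web", "zone": "us-east-1a"})
//	// returns:
//	// app="web"
//	// zone="us-east-1a"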
// ExtractFieldPathAsString extracts the field from the given object
// and returns it as a string. The object must be a pointer to an
// API type.
func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return "", err
}
if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok {
switch path {
case "metadata.annotations":
if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetAnnotations()[subscript], nil
case "metadata.labels":
if errs := validation.IsQualifiedName(subscript); len(errs) != 0 {
return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
}
return accessor.GetLabels()[subscript], nil
default:
return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath)
}
}
switch fieldPath {
case "metadata.annotations":
return FormatMap(accessor.GetAnnotations()), nil
case "metadata.labels":
return FormatMap(accessor.GetLabels()), nil
case "metadata.name":
return accessor.GetName(), nil
case "metadata.namespace":
return accessor.GetNamespace(), nil
case "metadata.uid":
return string(accessor.GetUID()), nil
}
return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
}
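// Example inputs and outputs (illustrative, assuming a pod named "mypod" with
// label app=web):
//
//	ExtractFieldPathAsString(pod, "metadata.name")          --> ("mypod", nil)
//	ExtractFieldPathAsString(pod, "metadata.labels['app']") --> ("web", nil)
//	ExtractFieldPathAsString(pod, "spec.nodeName")          --> ("", error "unsupported fieldPath: spec.nodeName")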
// SplitMaybeSubscriptedPath checks whether the specified fieldPath is
// subscripted, and
// - if yes, this function splits the fieldPath into path and subscript, and
// returns (path, subscript, true).
// - if no, this function returns (fieldPath, "", false).
//
// Example inputs and outputs:
//
// "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true)
// "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true)
// "metadata.labels['']" --> ("metadata.labels", "", true)
// "metadata.labels" --> ("metadata.labels", "", false)
func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) {
if !strings.HasSuffix(fieldPath, "']") {
return fieldPath, "", false
}
s := strings.TrimSuffix(fieldPath, "']")
parts := strings.SplitN(s, "['", 2)
if len(parts) < 2 {
return fieldPath, "", false
}
if len(parts[0]) == 0 {
return fieldPath, "", false
}
return parts[0], parts[1], true
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/utils/clock"
)
const (
reason = "DeadlineExceeded"
message = "Pod was active on the node longer than the specified deadline"
)
// activeDeadlineHandler knows how to enforce active deadlines on pods.
type activeDeadlineHandler struct {
// the clock to use for deadline enforcement
clock clock.Clock
// the provider of pod status
podStatusProvider status.PodStatusProvider
// the recorder to dispatch events when we identify a pod has exceeded active deadline
recorder record.EventRecorder
}
// newActiveDeadlineHandler returns an active deadline handler that can enforce pod active deadlines
func newActiveDeadlineHandler(
podStatusProvider status.PodStatusProvider,
recorder record.EventRecorder,
clock clock.Clock,
) (*activeDeadlineHandler, error) {
// check for all required fields
if clock == nil || podStatusProvider == nil || recorder == nil {
return nil, fmt.Errorf("required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder)
}
return &activeDeadlineHandler{
clock: clock,
podStatusProvider: podStatusProvider,
recorder: recorder,
}, nil
}
// ShouldSync returns true if the pod is past its active deadline.
func (m *activeDeadlineHandler) ShouldSync(pod *v1.Pod) bool {
return m.pastActiveDeadline(pod)
}
// ShouldEvict returns true if the pod is past its active deadline.
// It dispatches an event that the pod should be evicted if it is past its deadline.
func (m *activeDeadlineHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
if !m.pastActiveDeadline(pod) {
return lifecycle.ShouldEvictResponse{Evict: false}
}
m.recorder.Eventf(pod, v1.EventTypeNormal, reason, message)
return lifecycle.ShouldEvictResponse{Evict: true, Reason: reason, Message: message}
}
// pastActiveDeadline returns true if the pod has been active for more than its ActiveDeadlineSeconds
func (m *activeDeadlineHandler) pastActiveDeadline(pod *v1.Pod) bool {
// no active deadline was specified
if pod.Spec.ActiveDeadlineSeconds == nil {
return false
}
// get the latest status to determine if it was started
podStatus, ok := m.podStatusProvider.GetPodStatus(pod.UID)
if !ok {
podStatus = pod.Status
}
// we have no start time so just return
if podStatus.StartTime.IsZero() {
return false
}
// determine if the deadline was exceeded
start := podStatus.StartTime.Time
duration := m.clock.Since(start)
allowedDuration := time.Duration(*pod.Spec.ActiveDeadlineSeconds) * time.Second
return duration >= allowedDuration
}
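// Worked example (illustrative): for a pod with ActiveDeadlineSeconds set to
// 600 whose status start time is 11 minutes in the past,
//
//	duration        = 11m
//	allowedDuration = time.Duration(600) * time.Second = 10m
//
// so pastActiveDeadline returns true and ShouldEvict responds with
// {Evict: true, Reason: "DeadlineExceeded"}.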
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocation
import (
"context"
"fmt"
"path/filepath"
"slices"
"sync"
"time"
"encoding/json"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation/state"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
// allocatedPodsStateFile and actuatedPodsStateFile are the file names where the allocation
// manager checkpoints its state; the remaining constants control pending-resize retries.
const (
allocatedPodsStateFile = "allocated_pods_state"
actuatedPodsStateFile = "actuated_pods_state"
initialRetryDelay = 30 * time.Second
retryDelay = 3 * time.Minute
TriggerReasonPodResized = "pod_resized"
TriggerReasonPodUpdated = "pod_updated"
TriggerReasonPodsAdded = "pods_added"
TriggerReasonPodsRemoved = "pods_removed"
triggerReasonPeriodic = "periodic_retry"
)
// Manager tracks pod resource allocations.
type Manager interface {
// GetContainerResourceAllocation returns the AllocatedResources value for the container
GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
// UpdatePodFromAllocation overwrites the pod spec with the allocation.
// This function does a deep copy only if updates are needed.
// Returns the updated (or original) pod, and whether there was an allocation stored.
UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool)
// SetAllocatedResources checkpoints the resources allocated to a pod's containers.
SetAllocatedResources(allocatedPod *v1.Pod) error
// SetActuatedResources records the actuated resources of the given container (or the entire
// pod, if actuatedContainer is nil).
SetActuatedResources(allocatedPod *v1.Pod, actuatedContainer *v1.Container) error
// GetActuatedResources returns the stored actuated resources for the container, and whether they exist.
GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
// AddPodAdmitHandlers adds the admit handlers to the allocation manager.
// TODO: See if we can remove this and just add them in the allocation manager constructor.
AddPodAdmitHandlers(handlers lifecycle.PodAdmitHandlers)
// SetContainerRuntime sets the allocation manager's container runtime.
// TODO: See if we can remove this and just add it in the allocation manager constructor.
SetContainerRuntime(runtime kubecontainer.Runtime)
// AddPod checks if a pod can be admitted. If so, it admits the pod and updates the allocation.
// The function returns a boolean value indicating whether the pod
// can be admitted, a brief single-word reason and a message explaining why
// the pod cannot be admitted.
// activePods should represent the pods that have already been admitted, along with their
// admitted (allocated) resources.
AddPod(activePods []*v1.Pod, pod *v1.Pod) (ok bool, reason, message string)
// RemovePod removes any stored state for the given pod UID.
RemovePod(uid types.UID)
// RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
RemoveOrphanedPods(remainingPods sets.Set[types.UID])
// Run starts the allocation manager. This is currently only used to handle periodic retry of
// pending resizes.
Run(ctx context.Context)
// PushPendingResize queues a pod with a pending resize request for later reevaluation.
PushPendingResize(uid types.UID)
// HasPendingResizes returns whether there are currently any pending resizes.
HasPendingResizes() bool
// RetryPendingResizes retries all pending resizes.
RetryPendingResizes(trigger string)
// CheckPodResizeInProgress checks whether the actuated resizable resources differ from the allocated resources
// for any running containers.
CheckPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus)
}
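// Typical call sequence (an illustrative sketch, not a contract; rejectPod is
// a hypothetical caller-side helper): the kubelet admits pods via AddPod,
// records what the runtime applied via SetActuatedResources, and funnels
// resize requests through PushPendingResize for later reevaluation:
//
//	if ok, reason, msg := am.AddPod(activePods, pod); !ok {
//		rejectPod(pod, reason, msg)
//	}
//	...
//	am.PushPendingResize(pod.UID)
//	am.RetryPendingResizes(TriggerReasonPodResized)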
type manager struct {
allocated state.State
actuated state.State
admitHandlers lifecycle.PodAdmitHandlers
containerRuntime kubecontainer.Runtime
statusManager status.Manager
sourcesReady config.SourcesReady
nodeConfig cm.NodeConfig
nodeAllocatableAbsolute v1.ResourceList
ticker *time.Ticker
triggerPodSync func(pod *v1.Pod)
getActivePods func() []*v1.Pod
getPodByUID func(types.UID) (*v1.Pod, bool)
allocationMutex sync.Mutex
podsWithPendingResizes []types.UID
recorder record.EventRecorder
}
func NewManager(checkpointDirectory string,
nodeConfig cm.NodeConfig,
nodeAllocatableAbsolute v1.ResourceList,
statusManager status.Manager,
triggerPodSync func(pod *v1.Pod),
getActivePods func() []*v1.Pod,
getPodByUID func(types.UID) (*v1.Pod, bool),
sourcesReady config.SourcesReady,
recorder record.EventRecorder,
) Manager {
return &manager{
allocated: newStateImpl(checkpointDirectory, allocatedPodsStateFile),
actuated: newStateImpl(checkpointDirectory, actuatedPodsStateFile),
statusManager: statusManager,
admitHandlers: lifecycle.PodAdmitHandlers{},
sourcesReady: sourcesReady,
nodeConfig: nodeConfig,
nodeAllocatableAbsolute: nodeAllocatableAbsolute,
ticker: time.NewTicker(initialRetryDelay),
triggerPodSync: triggerPodSync,
getActivePods: getActivePods,
getPodByUID: getPodByUID,
recorder: recorder,
}
}
type containerAllocation struct {
Name string `json:"name"`
Resources v1.ResourceRequirements `json:"resources,omitempty"`
}
type podResourceSummary struct {
// TODO: add pod-level resources (resources v1.ResourceRequirements) here once resizing pod-level resources is supported
InitContainers []containerAllocation `json:"initContainers,omitempty"`
Containers []containerAllocation `json:"containers,omitempty"`
}
func newStateImpl(checkpointDirectory, checkpointName string) state.State {
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
return state.NewNoopStateCheckpoint()
}
stateImpl, err := state.NewStateCheckpoint(checkpointDirectory, checkpointName)
if err != nil {
// This is a critical, non-recoverable failure.
klog.ErrorS(err, "Failed to initialize allocation checkpoint manager",
"checkpointPath", filepath.Join(checkpointDirectory, checkpointName))
panic(err)
}
return stateImpl
}
// NewInMemoryManager returns an allocation manager that doesn't persist state.
// For testing purposes only!
func NewInMemoryManager(nodeConfig cm.NodeConfig,
nodeAllocatableAbsolute v1.ResourceList,
statusManager status.Manager,
triggerPodSync func(pod *v1.Pod),
getActivePods func() []*v1.Pod,
getPodByUID func(types.UID) (*v1.Pod, bool),
sourcesReady config.SourcesReady,
) Manager {
return &manager{
allocated: state.NewStateMemory(nil),
actuated: state.NewStateMemory(nil),
statusManager: statusManager,
admitHandlers: lifecycle.PodAdmitHandlers{},
sourcesReady: sourcesReady,
nodeConfig: nodeConfig,
nodeAllocatableAbsolute: nodeAllocatableAbsolute,
ticker: time.NewTicker(initialRetryDelay),
triggerPodSync: triggerPodSync,
getActivePods: getActivePods,
getPodByUID: getPodByUID,
}
}
func (m *manager) Run(ctx context.Context) {
// Start a goroutine to periodically check for pending resizes and process them if needed.
go func() {
for {
select {
case <-m.ticker.C:
successfulResizes := m.retryPendingResizes(triggerReasonPeriodic)
for _, pod := range successfulResizes {
klog.InfoS("Successfully retried resize after timeout", "pod", klog.KObj(pod))
}
case <-ctx.Done():
m.ticker.Stop()
return
}
}
}()
}
// podResizeCompletionMsg generates the pod resize completed event message
func (m *manager) podResizeCompletionMsg(allocatedPod *v1.Pod) string {
podResizeSource := &podResourceSummary{}
podutil.VisitContainers(&allocatedPod.Spec, podutil.InitContainers|podutil.Containers,
func(allocatedContainer *v1.Container, containerType podutil.ContainerType) bool {
allocation := containerAllocation{
Name: allocatedContainer.Name,
Resources: allocatedContainer.Resources,
}
switch containerType {
case podutil.InitContainers:
podResizeSource.InitContainers = append(podResizeSource.InitContainers, allocation)
case podutil.Containers:
podResizeSource.Containers = append(podResizeSource.Containers, allocation)
}
return true
})
podResizeMsgDetailsJSON, err := json.Marshal(podResizeSource)
if err != nil {
klog.ErrorS(err, "Failed to serialize resource summary", "pod", format.Pod(allocatedPod))
return "Pod resize completed"
}
podResizeCompletedMsg := fmt.Sprintf("Pod resize completed: %s", string(podResizeMsgDetailsJSON))
return podResizeCompletedMsg
}
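// Illustrative output (assuming a single container "app" whose allocation now
// requests 500m CPU and 256Mi memory):
//
//	Pod resize completed: {"containers":[{"name":"app","resources":{"requests":{"cpu":"500m","memory":"256Mi"}}}]}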
func (m *manager) RetryPendingResizes(trigger string) {
m.retryPendingResizes(trigger)
}
func (m *manager) retryPendingResizes(trigger string) []*v1.Pod {
m.allocationMutex.Lock()
defer m.allocationMutex.Unlock()
if !m.sourcesReady.AllReady() {
klog.V(4).InfoS("Skipping evaluation of pending resizes; sources are not ready")
m.ticker.Reset(initialRetryDelay)
return nil
}
m.ticker.Reset(retryDelay)
var newPendingResizes []types.UID
var successfulResizes []*v1.Pod
// Retry all pending resizes.
for _, uid := range m.podsWithPendingResizes {
pod, found := m.getPodByUID(uid)
if !found {
klog.V(4).InfoS("Pod not found; removing from pending resizes", "podUID", uid)
continue
}
oldResizeStatus := m.statusManager.GetPodResizeConditions(uid)
isDeferred := m.statusManager.IsPodResizeDeferred(uid)
resizeAllocated, err := m.handlePodResourcesResize(pod)
switch {
case err != nil:
klog.ErrorS(err, "Failed to handle pod resources resize", "pod", klog.KObj(pod))
newPendingResizes = append(newPendingResizes, uid)
case m.statusManager.IsPodResizeDeferred(uid):
klog.V(4).InfoS("Pod resize is deferred; will reevaluate later", "pod", klog.KObj(pod))
newPendingResizes = append(newPendingResizes, uid)
case m.statusManager.IsPodResizeInfeasible(uid):
klog.V(4).InfoS("Pod resize is infeasible", "pod", klog.KObj(pod))
default:
klog.V(4).InfoS("Pod resize successfully allocated", "pod", klog.KObj(pod))
successfulResizes = append(successfulResizes, pod)
if isDeferred {
metrics.PodDeferredAcceptedResizes.WithLabelValues(trigger).Inc()
}
}
// If the pod resize status has changed, we need to update the pod status.
newResizeStatus := m.statusManager.GetPodResizeConditions(uid)
if resizeAllocated || !apiequality.Semantic.DeepEqual(oldResizeStatus, newResizeStatus) {
m.triggerPodSync(pod)
}
}
m.podsWithPendingResizes = newPendingResizes
return successfulResizes
}
func (m *manager) PushPendingResize(uid types.UID) {
m.allocationMutex.Lock()
defer m.allocationMutex.Unlock()
for _, p := range m.podsWithPendingResizes {
if p == uid {
// Pod is already in the pending resizes queue.
return
}
}
// Add the pod to the pending resizes list and sort by priority.
m.podsWithPendingResizes = append(m.podsWithPendingResizes, uid)
m.sortPendingResizes()
}
// sortPendingResizes sorts the list of pending resizes:
// - First, prioritizing resizes that do not increase requests.
// - Second, based on the pod's PriorityClass.
// - Third, based on the pod's QoS class.
// - Last, prioritizing resizes that have been in the deferred state the longest.
func (m *manager) sortPendingResizes() {
var pendingPods []*v1.Pod
for _, uid := range m.podsWithPendingResizes {
pod, found := m.getPodByUID(uid)
if !found {
klog.V(4).InfoS("Pod not found; removing from pending resizes", "podUID", uid)
continue
}
pendingPods = append(pendingPods, pod)
}
slices.SortFunc(pendingPods, func(firstPod, secondPod *v1.Pod) int {
// First, resizes that don't increase requests will be prioritized.
// These resizes are expected to always succeed.
firstPodIncreasing := m.isResizeIncreasingRequests(firstPod)
secondPodIncreasing := m.isResizeIncreasingRequests(secondPod)
// Compare symmetrically so the comparator stays consistent when both
// resizes are non-increasing (or both increasing).
if firstPodIncreasing != secondPodIncreasing {
if !firstPodIncreasing {
return -1
}
return 1
}
// Second, pods with a higher PriorityClass will be prioritized.
firstPodPriority := int32(0)
if firstPod.Spec.Priority != nil {
firstPodPriority = *firstPod.Spec.Priority
}
secondPodPriority := int32(0)
if secondPod.Spec.Priority != nil {
secondPodPriority = *secondPod.Spec.Priority
}
if firstPodPriority > secondPodPriority {
return -1
}
if secondPodPriority > firstPodPriority {
return 1
}
// Third, pods with a higher QoS class will be prioritized, where guaranteed > burstable.
// Best effort pods don't have resource requests or limits, so we don't need to consider them here.
firstPodQOS := v1qos.GetPodQOS(firstPod)
secondPodQOS := v1qos.GetPodQOS(secondPod)
if firstPodQOS == v1.PodQOSGuaranteed && secondPodQOS != v1.PodQOSGuaranteed {
return -1
}
if secondPodQOS == v1.PodQOSGuaranteed && firstPodQOS != v1.PodQOSGuaranteed {
return 1
}
// If all else is the same, resize requests that have been pending longer will be
// evaluated first.
var firstPodLastTransitionTime *metav1.Time
firstPodResizeConditions := m.statusManager.GetPodResizeConditions(firstPod.UID)
for _, c := range firstPodResizeConditions {
if c.Type == v1.PodResizePending {
firstPodLastTransitionTime = &c.LastTransitionTime
}
}
var secondPodLastTransitionTime *metav1.Time
secondPodResizeConditions := m.statusManager.GetPodResizeConditions(secondPod.UID)
for _, c := range secondPodResizeConditions {
if c.Type == v1.PodResizePending {
secondPodLastTransitionTime = &c.LastTransitionTime
}
}
// Treat missing transition times consistently so the comparator remains
// well-defined when neither (or both) pods have a pending condition.
switch {
case firstPodLastTransitionTime == nil && secondPodLastTransitionTime == nil:
return 0
case firstPodLastTransitionTime == nil:
return 1
case secondPodLastTransitionTime == nil:
return -1
case firstPodLastTransitionTime.Before(secondPodLastTransitionTime):
return -1
case secondPodLastTransitionTime.Before(firstPodLastTransitionTime):
return 1
}
return 0
})
m.podsWithPendingResizes = make([]types.UID, len(pendingPods))
for i, pod := range pendingPods {
m.podsWithPendingResizes[i] = pod.UID
}
}
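// Illustrative ordering (hypothetical queue): a request-decreasing resize
// sorts first; among increasing resizes, priority 100 beats priority 0; among
// equal priorities, a Guaranteed pod beats a Burstable one; remaining ties go
// to the pod whose PodResizePending condition transitioned earliest.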
// isResizeIncreasingRequests returns true if any of the resource requests are increasing.
func (m *manager) isResizeIncreasingRequests(pod *v1.Pod) bool {
allocatedPod, updated := m.UpdatePodFromAllocation(pod)
if !updated {
return false
}
opts := resourcehelper.PodResourcesOptions{
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
oldRequest := resourcehelper.PodRequests(allocatedPod, opts)
newRequest := resourcehelper.PodRequests(pod, opts)
return newRequest.Memory().Cmp(*oldRequest.Memory()) > 0 ||
newRequest.Cpu().Cmp(*oldRequest.Cpu()) > 0
}
func (m *manager) HasPendingResizes() bool {
m.allocationMutex.Lock()
defer m.allocationMutex.Unlock()
return len(m.podsWithPendingResizes) > 0
}
// GetContainerResourceAllocation returns the last checkpointed AllocatedResources values.
// If the checkpoint manager has not been initialized, it returns an empty ResourceRequirements and false.
func (m *manager) GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
return m.allocated.GetContainerResources(podUID, containerName)
}
// UpdatePodFromAllocation overwrites the pod spec with the allocation.
// This function does a deep copy only if updates are needed.
func (m *manager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
if pod == nil {
return pod, false
}
allocated, ok := m.allocated.GetPodResourceInfo(pod.UID)
if !ok {
return pod, false
}
return updatePodFromAllocation(pod, allocated)
}
func updatePodFromAllocation(pod *v1.Pod, allocated state.PodResourceInfo) (*v1.Pod, bool) {
if pod == nil {
return pod, false
}
updated := false
containerAlloc := func(c v1.Container) (v1.ResourceRequirements, bool) {
if cAlloc, ok := allocated.ContainerResources[c.Name]; ok {
if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
// Allocation differs from pod spec, retrieve the allocation
if !updated {
// If this is the first update to be performed, copy the pod
pod = pod.DeepCopy()
updated = true
}
return cAlloc, true
}
}
return v1.ResourceRequirements{}, false
}
for i, c := range pod.Spec.Containers {
if cAlloc, found := containerAlloc(c); found {
// Allocation differs from pod spec, update
pod.Spec.Containers[i].Resources = cAlloc
}
}
for i, c := range pod.Spec.InitContainers {
if cAlloc, found := containerAlloc(c); found {
// Allocation differs from pod spec, update
pod.Spec.InitContainers[i].Resources = cAlloc
}
}
return pod, updated
}
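// Example (illustrative): if the pod spec requests 1 CPU for container "app"
// but the checkpointed allocation holds 500m, the result is a deep copy with
//
//	pod.Spec.Containers[i].Resources.Requests[v1.ResourceCPU] == 500m
//
// and updated == true; when spec and allocation already agree, the original
// pod is returned unchanged with updated == false.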
// SetAllocatedResources checkpoints the resources allocated to a pod's containers
func (m *manager) SetAllocatedResources(pod *v1.Pod) error {
return m.allocated.SetPodResourceInfo(pod.UID, allocationFromPod(pod))
}
func allocationFromPod(pod *v1.Pod) state.PodResourceInfo {
var podAlloc state.PodResourceInfo
podAlloc.ContainerResources = make(map[string]v1.ResourceRequirements)
for _, container := range pod.Spec.Containers {
alloc := *container.Resources.DeepCopy()
podAlloc.ContainerResources[container.Name] = alloc
}
for _, container := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&container) {
alloc := *container.Resources.DeepCopy()
podAlloc.ContainerResources[container.Name] = alloc
}
}
return podAlloc
}
func (m *manager) AddPodAdmitHandlers(handlers lifecycle.PodAdmitHandlers) {
for _, a := range handlers {
m.admitHandlers.AddPodAdmitHandler(a)
}
}
func (m *manager) SetContainerRuntime(runtime kubecontainer.Runtime) {
m.containerRuntime = runtime
}
func (m *manager) AddPod(activePods []*v1.Pod, pod *v1.Pod) (bool, string, string) {
m.allocationMutex.Lock()
defer m.allocationMutex.Unlock()
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// To handle kubelet restarts, test pod admissibility using AllocatedResources values
// (for cpu & memory) from checkpoint store. If found, that is the source of truth.
pod, _ = m.UpdatePodFromAllocation(pod)
}
// Check if we can admit the pod; if so, update the allocation.
allocatedPods := m.getAllocatedPods(activePods)
ok, reason, message := m.canAdmitPod(allocatedPods, pod)
if ok && utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// Checkpoint the resource values at which the Pod has been admitted or resized.
if err := m.SetAllocatedResources(pod); err != nil {
// TODO(vinaykul,InPlacePodVerticalScaling): Can we recover from this in some way? Investigate
klog.ErrorS(err, "SetPodAllocation failed", "pod", klog.KObj(pod))
}
}
return ok, reason, message
}
func (m *manager) RemovePod(uid types.UID) {
if err := m.allocated.RemovePod(uid); err != nil {
// If the deletion fails, it will be retried by RemoveOrphanedPods, so we can safely ignore the error.
klog.V(3).ErrorS(err, "Failed to delete pod allocation", "podUID", uid)
}
if err := m.actuated.RemovePod(uid); err != nil {
// If the deletion fails, it will be retried by RemoveOrphanedPods, so we can safely ignore the error.
klog.V(3).ErrorS(err, "Failed to delete pod allocation", "podUID", uid)
}
}
func (m *manager) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
m.allocated.RemoveOrphanedPods(remainingPods)
m.actuated.RemoveOrphanedPods(remainingPods)
}
func (m *manager) SetActuatedResources(allocatedPod *v1.Pod, actuatedContainer *v1.Container) error {
if actuatedContainer == nil {
alloc := allocationFromPod(allocatedPod)
return m.actuated.SetPodResourceInfo(allocatedPod.UID, alloc)
}
return m.actuated.SetContainerResources(allocatedPod.UID, actuatedContainer.Name, actuatedContainer.Resources)
}
func (m *manager) GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
return m.actuated.GetContainerResources(podUID, containerName)
}
func (m *manager) handlePodResourcesResize(pod *v1.Pod) (bool, error) {
allocatedPod, updated := m.UpdatePodFromAllocation(pod)
if !updated {
// Desired resources == allocated resources. Pod allocation does not need to be updated.
m.statusManager.ClearPodResizePendingCondition(pod.UID)
return false, nil
} else if resizable, msg, reason := IsInPlacePodVerticalScalingAllowed(pod); !resizable {
// If there is a pending resize but the resize is not allowed, always use the allocated resources.
metrics.PodInfeasibleResizes.WithLabelValues(reason).Inc()
m.statusManager.SetPodResizePendingCondition(pod.UID, v1.PodReasonInfeasible, msg, pod.Generation)
return false, nil
} else if resizeNotAllowed, msg := disallowResizeForSwappableContainers(m.containerRuntime, pod, allocatedPod); resizeNotAllowed {
// If this resize involves swap recalculation, mark it infeasible, since in-place pod vertical scaling with swap is not supported for beta.
metrics.PodInfeasibleResizes.WithLabelValues("swap_limitation").Inc()
m.statusManager.SetPodResizePendingCondition(pod.UID, v1.PodReasonInfeasible, msg, pod.Generation)
return false, nil
}
// Desired resources != allocated resources. Can we update the allocation to the desired resources?
fit, reason, message := m.canResizePod(m.getAllocatedPods(m.getActivePods()), pod)
if fit {
// Update pod resource allocation checkpoint
if err := m.SetAllocatedResources(pod); err != nil {
return false, err
}
m.statusManager.ClearPodResizePendingCondition(pod.UID)
// Clear any errors that may have been surfaced from a previous resize and update the
// generation of the resize in-progress condition.
m.statusManager.ClearPodResizeInProgressCondition(pod.UID)
m.statusManager.SetPodResizeInProgressCondition(pod.UID, "", "", pod.Generation)
return true, nil
}
if reason != "" {
m.statusManager.SetPodResizePendingCondition(pod.UID, reason, message, pod.Generation)
}
return false, nil
}
func disallowResizeForSwappableContainers(runtime kubecontainer.Runtime, desiredPod, allocatedPod *v1.Pod) (bool, string) {
if desiredPod == nil || allocatedPod == nil {
return false, ""
}
restartableMemoryResizePolicy := func(resizePolicies []v1.ContainerResizePolicy) bool {
for _, policy := range resizePolicies {
if policy.ResourceName == v1.ResourceMemory {
return policy.RestartPolicy == v1.RestartContainer
}
}
return false
}
allocatedContainers := make(map[string]v1.Container)
for _, container := range append(allocatedPod.Spec.Containers, allocatedPod.Spec.InitContainers...) {
allocatedContainers[container.Name] = container
}
for _, desiredContainer := range append(desiredPod.Spec.Containers, desiredPod.Spec.InitContainers...) {
allocatedContainer, ok := allocatedContainers[desiredContainer.Name]
if !ok {
continue
}
origMemRequest := desiredContainer.Resources.Requests[v1.ResourceMemory]
newMemRequest := allocatedContainer.Resources.Requests[v1.ResourceMemory]
if !origMemRequest.Equal(newMemRequest) && !restartableMemoryResizePolicy(allocatedContainer.ResizePolicy) {
aSwapBehavior := runtime.GetContainerSwapBehavior(desiredPod, &desiredContainer)
bSwapBehavior := runtime.GetContainerSwapBehavior(allocatedPod, &allocatedContainer)
if aSwapBehavior != kubetypes.NoSwap || bSwapBehavior != kubetypes.NoSwap {
return true, "In-place resize of containers with swap is not supported."
}
}
}
return false, ""
}
// canAdmitPod determines if a pod can be admitted, and gives a reason if it
// cannot. "pod" is the new pod to admit, while "allocatedPods" should
// represent the pods that have already been admitted, along with their
// admitted (allocated) resources.
// The function returns a boolean value indicating whether the pod
// can be admitted, a brief single-word reason and a message explaining why
// the pod cannot be admitted.
func (m *manager) canAdmitPod(allocatedPods []*v1.Pod, pod *v1.Pod) (bool, string, string) {
// Filter out the pod being evaluated.
allocatedPods = slices.DeleteFunc(allocatedPods, func(p *v1.Pod) bool { return p.UID == pod.UID })
// If any handler rejects, the pod is rejected.
attrs := &lifecycle.PodAdmitAttributes{Pod: pod, OtherPods: allocatedPods}
for _, podAdmitHandler := range m.admitHandlers {
if result := podAdmitHandler.Admit(attrs); !result.Admit {
klog.InfoS("Pod admission denied", "podUID", attrs.Pod.UID, "pod", klog.KObj(attrs.Pod), "reason", result.Reason, "message", result.Message)
return false, result.Reason, result.Message
}
}
return true, "", ""
}
// canResizePod determines if the requested resize is currently feasible.
// pod should hold the desired (pre-allocated) spec.
// Returns true if the resize can proceed; returns a reason and message
// otherwise.
func (m *manager) canResizePod(allocatedPods []*v1.Pod, pod *v1.Pod) (bool, string, string) {
// TODO: Move this logic into a PodAdmitHandler by introducing an operation field to
// lifecycle.PodAdmitAttributes, and combine canResizePod with canAdmitPod.
if v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed {
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingExclusiveCPUs) &&
m.nodeConfig.CPUManagerPolicy == string(cpumanager.PolicyStatic) &&
m.guaranteedPodResourceResizeRequired(pod, v1.ResourceCPU) {
msg := fmt.Sprintf("Resize is infeasible for Guaranteed Pods alongside CPU Manager policy \"%s\"", string(cpumanager.PolicyStatic))
klog.V(3).InfoS(msg, "pod", format.Pod(pod))
metrics.PodInfeasibleResizes.WithLabelValues("guaranteed_pod_cpu_manager_static_policy").Inc()
return false, v1.PodReasonInfeasible, msg
}
if utilfeature.DefaultFeatureGate.Enabled(features.MemoryManager) &&
!utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingExclusiveMemory) &&
m.nodeConfig.MemoryManagerPolicy == string(memorymanager.PolicyTypeStatic) &&
m.guaranteedPodResourceResizeRequired(pod, v1.ResourceMemory) {
msg := fmt.Sprintf("Resize is infeasible for Guaranteed Pods alongside Memory Manager policy \"%s\"", string(memorymanager.PolicyTypeStatic))
klog.V(3).InfoS(msg, "pod", format.Pod(pod))
metrics.PodInfeasibleResizes.WithLabelValues("guaranteed_pod_memory_manager_static_policy").Inc()
return false, v1.PodReasonInfeasible, msg
}
}
cpuAvailable := m.nodeAllocatableAbsolute.Cpu().MilliValue()
memAvailable := m.nodeAllocatableAbsolute.Memory().Value()
cpuRequests := resource.GetResourceRequest(pod, v1.ResourceCPU)
memRequests := resource.GetResourceRequest(pod, v1.ResourceMemory)
if cpuRequests > cpuAvailable || memRequests > memAvailable {
var msg string
if memRequests > memAvailable {
msg = fmt.Sprintf("memory, requested: %d, capacity: %d", memRequests, memAvailable)
} else {
msg = fmt.Sprintf("cpu, requested: %d, capacity: %d", cpuRequests, cpuAvailable)
}
msg = "Node didn't have enough capacity: " + msg
klog.V(3).InfoS(msg, "pod", klog.KObj(pod))
metrics.PodInfeasibleResizes.WithLabelValues("insufficient_node_allocatable").Inc()
return false, v1.PodReasonInfeasible, msg
}
if ok, failReason, failMessage := m.canAdmitPod(allocatedPods, pod); !ok {
// Log reason and return.
klog.V(3).InfoS("Resize cannot be accommodated", "pod", klog.KObj(pod), "reason", failReason, "message", failMessage)
return false, v1.PodReasonDeferred, failMessage
}
return true, "", ""
}
func (m *manager) CheckPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) {
// If a resize is in progress, make sure the cache has the correct state in case the Kubelet restarted.
if m.isPodResizeInProgress(allocatedPod, podStatus) {
// This is a no-op if the resize in progress condition is already set.
m.statusManager.SetPodResizeInProgressCondition(allocatedPod.UID, "", "", allocatedPod.Generation)
} else if m.statusManager.ClearPodResizeInProgressCondition(allocatedPod.UID) {
// (Allocated == Actual) => clear the resize in-progress status.
// Generate Pod resize completed event
podResizeCompletedEventMsg := m.podResizeCompletionMsg(allocatedPod)
if m.recorder != nil {
m.recorder.Eventf(allocatedPod, v1.EventTypeNormal, events.ResizeCompleted, podResizeCompletedEventMsg)
}
}
}
// isPodResizeInProgress checks whether the actuated resizable resources differ from the allocated resources
// for any running containers. Specifically, the following differences are ignored:
// - Non-resizable containers: non-restartable init containers, ephemeral containers
// - Non-resizable resources: only CPU & memory are resizable
// - Non-running containers: they will be sized correctly when (re)started
func (m *manager) isPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
return !podutil.VisitContainers(&allocatedPod.Spec, podutil.InitContainers|podutil.Containers,
func(allocatedContainer *v1.Container, containerType podutil.ContainerType) (shouldContinue bool) {
if !IsResizableContainer(allocatedContainer, containerType) {
return true
}
containerStatus := podStatus.FindContainerStatusByName(allocatedContainer.Name)
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
// If the container isn't running, it doesn't need to be resized.
return true
}
actuatedResources, _ := m.GetActuatedResources(allocatedPod.UID, allocatedContainer.Name)
allocatedResources := allocatedContainer.Resources
return allocatedResources.Requests[v1.ResourceCPU].Equal(actuatedResources.Requests[v1.ResourceCPU]) &&
allocatedResources.Limits[v1.ResourceCPU].Equal(actuatedResources.Limits[v1.ResourceCPU]) &&
allocatedResources.Requests[v1.ResourceMemory].Equal(actuatedResources.Requests[v1.ResourceMemory]) &&
allocatedResources.Limits[v1.ResourceMemory].Equal(actuatedResources.Limits[v1.ResourceMemory])
})
}
func (m *manager) guaranteedPodResourceResizeRequired(pod *v1.Pod, resourceName v1.ResourceName) bool {
for container, containerType := range podutil.ContainerIter(&pod.Spec, podutil.InitContainers|podutil.Containers) {
if !IsResizableContainer(container, containerType) {
continue
}
requestedResources := container.Resources
allocatedResources, _ := m.GetContainerResourceAllocation(pod.UID, container.Name)
// For Guaranteed pods, requests must equal limits, so checking requests is sufficient.
if !requestedResources.Requests[resourceName].Equal(allocatedResources.Requests[resourceName]) {
return true
}
}
return false
}
func (m *manager) getAllocatedPods(activePods []*v1.Pod) []*v1.Pod {
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
return activePods
}
allocatedPods := make([]*v1.Pod, len(activePods))
for i, pod := range activePods {
allocatedPods[i], _ = m.UpdatePodFromAllocation(pod)
}
return allocatedPods
}
func IsResizableContainer(container *v1.Container, containerType podutil.ContainerType) bool {
switch containerType {
case podutil.InitContainers:
return podutil.IsRestartableInitContainer(container)
case podutil.Containers:
return true
default:
return false
}
}
//go:build linux
// +build linux
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocation
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) (allowed bool, msg, reason string) {
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
return false, "InPlacePodVerticalScaling is disabled", "feature_gate_off"
}
if kubetypes.IsStaticPod(pod) {
return false, "In-place resize of static-pods is not supported", "static_pod"
}
return true, "", ""
}
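// Example results (illustrative):
//
//	feature gate disabled --> (false, "InPlacePodVerticalScaling is disabled", "feature_gate_off")
//	static pod            --> (false, "In-place resize of static-pods is not supported", "static_pod")
//	any other pod         --> (true, "", "")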
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"encoding/json"
"fmt"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)
var _ checkpointmanager.Checkpoint = &Checkpoint{}
type PodResourceCheckpointInfo struct {
Entries PodResourceInfoMap `json:"entries,omitempty"`
}
// Checkpoint represents a structure to store pod resource allocation checkpoint data
type Checkpoint struct {
// Data is a serialized PodResourceCheckpointInfo
Data string `json:"data"`
// Checksum is a checksum of Data
Checksum checksum.Checksum `json:"checksum"`
}
// NewCheckpoint creates a new checkpoint from the given pod resource allocation info
func NewCheckpoint(allocations *PodResourceCheckpointInfo) (*Checkpoint, error) {
serializedAllocations, err := json.Marshal(allocations)
if err != nil {
return nil, fmt.Errorf("failed to serialize allocations for checkpointing: %w", err)
}
cp := &Checkpoint{
Data: string(serializedAllocations),
}
cp.Checksum = checksum.New(cp.Data)
return cp, nil
}
func (cp *Checkpoint) MarshalCheckpoint() ([]byte, error) {
return json.Marshal(cp)
}
// UnmarshalCheckpoint unmarshals checkpoint from JSON
func (cp *Checkpoint) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, cp)
}
// VerifyChecksum verifies that the current checksum
// of the checkpointed Data is valid
func (cp *Checkpoint) VerifyChecksum() error {
return cp.Checksum.Verify(cp.Data)
}
// GetPodResourceCheckpointInfo returns the pod resource checkpoint info deserialized from the checkpoint's Data
func (cp *Checkpoint) GetPodResourceCheckpointInfo() (*PodResourceCheckpointInfo, error) {
var data PodResourceCheckpointInfo
if err := json.Unmarshal([]byte(cp.Data), &data); err != nil {
return nil, err
}
return &data, nil
}
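// Illustrative sketch (not part of the original source): a full round trip
// through the checkpoint, mirroring what the checkpoint manager does when it
// writes the state to disk and reads it back after a kubelet restart.
func exampleCheckpointRoundTrip(info *PodResourceCheckpointInfo) (*PodResourceCheckpointInfo, error) {
	cp, err := NewCheckpoint(info)
	if err != nil {
		return nil, err
	}
	blob, err := cp.MarshalCheckpoint() // the bytes that land on disk
	if err != nil {
		return nil, err
	}
	restored := &Checkpoint{}
	if err := restored.UnmarshalCheckpoint(blob); err != nil {
		return nil, err
	}
	// The checksum only covers Data, so corruption of the serialized
	// entries is detected before they are deserialized.
	if err := restored.VerifyChecksum(); err != nil {
		return nil, fmt.Errorf("checkpoint corrupted: %w", err)
	}
	return restored.GetPodResourceCheckpointInfo()
}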
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
// PodResourceInfo stores resource requirements for containers within a pod.
type PodResourceInfo struct {
// ContainerResources maps container names to their respective ResourceRequirements.
ContainerResources map[string]v1.ResourceRequirements
}
// PodResourceInfoMap maps pod UIDs to their corresponding PodResourceInfo,
// tracking resource requirements for all containers within each pod.
type PodResourceInfoMap map[types.UID]PodResourceInfo
// Clone returns a deep copy of the PodResourceInfoMap
func (pr PodResourceInfoMap) Clone() PodResourceInfoMap {
prCopy := make(PodResourceInfoMap)
for podUID, podInfo := range pr {
prCopy[podUID] = PodResourceInfo{
ContainerResources: make(map[string]v1.ResourceRequirements),
}
for containerName, containerInfo := range podInfo.ContainerResources {
prCopy[podUID].ContainerResources[containerName] = *containerInfo.DeepCopy()
}
}
return prCopy
}
// Reader is the interface used to read the current pod resource state
type Reader interface {
GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
GetPodResourceInfoMap() PodResourceInfoMap
GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool)
}
type writer interface {
SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error
SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error
RemovePod(podUID types.UID) error
// RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
RemoveOrphanedPods(remainingPods sets.Set[types.UID])
}
// State interface provides methods for tracking and setting pod resources
type State interface {
Reader
writer
}
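// Illustrative sketch (not part of the original source): because writer is
// unexported, code outside this package can only depend on Reader, which
// keeps all mutation confined to the kubelet's allocation machinery.
func podHasResourceInfo(r Reader, podUID types.UID) bool {
	_, ok := r.GetPodResourceInfo(podUID)
	return ok
}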
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"path"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)
var _ State = &stateCheckpoint{}
type stateCheckpoint struct {
mux sync.RWMutex
cache State
checkpointManager checkpointmanager.CheckpointManager
checkpointName string
lastChecksum checksum.Checksum
}
// NewStateCheckpoint creates a new State for tracking pod resource information, backed by a checkpoint
func NewStateCheckpoint(stateDir, checkpointName string) (State, error) {
checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager for pod resource information tracking: %w", err)
}
pra, checksum, err := restoreState(checkpointManager, checkpointName)
if err != nil {
//lint:ignore ST1005 user-facing error message
return nil, fmt.Errorf("could not restore state from checkpoint: %w, please drain this node and delete pod resource information checkpoint file %q before restarting Kubelet",
err, path.Join(stateDir, checkpointName))
}
stateCheckpoint := &stateCheckpoint{
cache: NewStateMemory(pra),
checkpointManager: checkpointManager,
checkpointName: checkpointName,
lastChecksum: checksum,
}
return stateCheckpoint, nil
}
// restoreState restores state from a checkpoint, returning an empty state if the checkpoint does not yet exist
func restoreState(checkpointManager checkpointmanager.CheckpointManager, checkpointName string) (PodResourceInfoMap, checksum.Checksum, error) {
checkpoint := &Checkpoint{}
if err := checkpointManager.GetCheckpoint(checkpointName, checkpoint); err != nil {
if err == errors.ErrCheckpointNotFound {
return nil, 0, nil
}
return nil, 0, err
}
praInfo, err := checkpoint.GetPodResourceCheckpointInfo()
if err != nil {
return nil, 0, fmt.Errorf("failed to get pod resource information: %w", err)
}
klog.V(2).InfoS("State checkpoint: restored pod resource state from checkpoint")
return praInfo.Entries, checkpoint.Checksum, nil
}
// storeState saves state to a checkpoint; the caller is responsible for locking
func (sc *stateCheckpoint) storeState() error {
resourceInfo := sc.cache.GetPodResourceInfoMap()
checkpoint, err := NewCheckpoint(&PodResourceCheckpointInfo{
Entries: resourceInfo,
})
if err != nil {
return fmt.Errorf("failed to create checkpoint: %w", err)
}
if checkpoint.Checksum == sc.lastChecksum {
// No changes to the checkpoint => no need to re-write it.
return nil
}
err = sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
if err != nil {
klog.ErrorS(err, "Failed to save pod resource information checkpoint")
return err
}
sc.lastChecksum = checkpoint.Checksum
return nil
}
// GetContainerResources returns the current resource information for a pod's container
func (sc *stateCheckpoint) GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetContainerResources(podUID, containerName)
}
// GetPodResourceInfoMap returns the current pod resource information map
func (sc *stateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetPodResourceInfoMap()
}
// GetPodResourceInfo returns the current resource information for a pod
func (sc *stateCheckpoint) GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool) {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetPodResourceInfo(podUID)
}
// SetContainerResources sets resource information for a pod's container
func (sc *stateCheckpoint) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error {
sc.mux.Lock()
defer sc.mux.Unlock()
err := sc.cache.SetContainerResources(podUID, containerName, resources)
if err != nil {
return err
}
return sc.storeState()
}
// SetPodResourceInfo sets pod resource information
func (sc *stateCheckpoint) SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error {
sc.mux.Lock()
defer sc.mux.Unlock()
err := sc.cache.SetPodResourceInfo(podUID, resourceInfo)
if err != nil {
return err
}
return sc.storeState()
}
// RemovePod deletes resource information for the specified pod
func (sc *stateCheckpoint) RemovePod(podUID types.UID) error {
sc.mux.Lock()
defer sc.mux.Unlock()
	// Skip writing the checkpoint on pod deletion, since there is no harm in
	// keeping the entry for a deleted pod. Deleted pods are eventually cleaned up
	// by RemoveOrphanedPods, and the deletion is persisted the next time a
	// non-delete update is made.
return sc.cache.RemovePod(podUID)
}
func (sc *stateCheckpoint) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
sc.cache.RemoveOrphanedPods(remainingPods)
// Don't bother updating the stored state. If Kubelet is restarted before the cache is written,
// the orphaned pods will be removed the next time this method is called.
}
type noopStateCheckpoint struct{}
// NewNoopStateCheckpoint creates a dummy state checkpoint manager
func NewNoopStateCheckpoint() State {
return &noopStateCheckpoint{}
}
func (sc *noopStateCheckpoint) GetContainerResources(_ types.UID, _ string) (v1.ResourceRequirements, bool) {
return v1.ResourceRequirements{}, false
}
func (sc *noopStateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap {
return nil
}
func (sc *noopStateCheckpoint) GetPodResourceInfo(_ types.UID) (PodResourceInfo, bool) {
return PodResourceInfo{}, false
}
func (sc *noopStateCheckpoint) SetContainerResources(_ types.UID, _ string, _ v1.ResourceRequirements) error {
return nil
}
func (sc *noopStateCheckpoint) SetPodResourceInfo(_ types.UID, _ PodResourceInfo) error {
return nil
}
func (sc *noopStateCheckpoint) RemovePod(_ types.UID) error {
return nil
}
func (sc *noopStateCheckpoint) RemoveOrphanedPods(_ sets.Set[types.UID]) {}
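// Illustrative sketch (not part of the original source): constructing a
// checkpoint-backed store. The directory and file name here are hypothetical;
// the kubelet supplies its own root directory and checkpoint name.
func exampleNewStore() (State, error) {
	st, err := NewStateCheckpoint("/var/lib/kubelet", "pod_resource_state")
	if err != nil {
		// Per the error message above, recovery requires draining the node
		// and deleting the corrupt checkpoint file before restarting kubelet.
		return nil, err
	}
	return st, nil
}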
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
)
type stateMemory struct {
sync.RWMutex
podResources PodResourceInfoMap
}
var _ State = &stateMemory{}
// NewStateMemory creates a new State to track resources allocated to pods
func NewStateMemory(resources PodResourceInfoMap) State {
if resources == nil {
resources = PodResourceInfoMap{}
}
klog.V(2).InfoS("Initialized new in-memory state store for pod resource information tracking")
return &stateMemory{
podResources: resources,
}
}
func (s *stateMemory) GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
s.RLock()
defer s.RUnlock()
resourceInfo, ok := s.podResources[podUID]
if !ok {
return v1.ResourceRequirements{}, ok
}
resources, ok := resourceInfo.ContainerResources[containerName]
if !ok {
return v1.ResourceRequirements{}, ok
}
return *resources.DeepCopy(), ok
}
func (s *stateMemory) GetPodResourceInfoMap() PodResourceInfoMap {
s.RLock()
defer s.RUnlock()
return s.podResources.Clone()
}
func (s *stateMemory) GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool) {
s.RLock()
defer s.RUnlock()
resourceInfo, ok := s.podResources[podUID]
return resourceInfo, ok
}
func (s *stateMemory) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error {
s.Lock()
defer s.Unlock()
if _, ok := s.podResources[podUID]; !ok {
s.podResources[podUID] = PodResourceInfo{
ContainerResources: make(map[string]v1.ResourceRequirements),
}
}
s.podResources[podUID].ContainerResources[containerName] = resources
klog.V(3).InfoS("Updated container resource information", "podUID", podUID, "containerName", containerName, "resources", resources)
return nil
}
func (s *stateMemory) SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error {
s.Lock()
defer s.Unlock()
s.podResources[podUID] = resourceInfo
klog.V(3).InfoS("Updated pod resource information", "podUID", podUID, "information", resourceInfo)
return nil
}
func (s *stateMemory) RemovePod(podUID types.UID) error {
s.Lock()
defer s.Unlock()
delete(s.podResources, podUID)
klog.V(3).InfoS("Deleted pod resource information", "podUID", podUID)
return nil
}
func (s *stateMemory) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
s.Lock()
defer s.Unlock()
for podUID := range s.podResources {
	if !remainingPods.Has(podUID) {
delete(s.podResources, podUID)
}
}
}
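// Illustrative sketch (not part of the original source): the in-memory store
// in isolation. GetContainerResources returns a deep copy, so mutating the
// returned value never affects the stored state.
func exampleInMemoryStore(podUID types.UID, resources v1.ResourceRequirements) (v1.ResourceRequirements, bool) {
	st := NewStateMemory(nil) // a nil map is replaced with an empty one
	if err := st.SetContainerResources(podUID, "app", resources); err != nil {
		return v1.ResourceRequirements{}, false
	}
	return st.GetContainerResources(podUID, "app")
}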
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"math/rand"
"time"
"sigs.k8s.io/randfill"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/pkg/cluster/ports"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/utils/ptr"
)
// Funcs returns the fuzzer functions for the kubeletconfig apis.
func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
// provide non-empty values for fields with defaults, so the defaulter doesn't change values during round-trip
func(obj *kubeletconfig.KubeletConfiguration, c randfill.Continue) {
c.FillNoCustom(obj)
obj.EnableServer = true
obj.Authentication.Anonymous.Enabled = true
obj.Authentication.Webhook.Enabled = false
obj.Authentication.Webhook.CacheTTL = metav1.Duration{Duration: 2 * time.Minute}
obj.Authorization.Mode = kubeletconfig.KubeletAuthorizationModeAlwaysAllow
obj.Authorization.Webhook.CacheAuthorizedTTL = metav1.Duration{Duration: 5 * time.Minute}
obj.Authorization.Webhook.CacheUnauthorizedTTL = metav1.Duration{Duration: 30 * time.Second}
obj.Address = "0.0.0.0"
obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute}
obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute}
obj.CPUCFSQuota = true
obj.EventBurst = 10
obj.EventRecordQPS = 5
obj.EnableControllerAttachDetach = true
obj.EnableDebuggingHandlers = true
obj.EnableSystemLogQuery = false
obj.FileCheckFrequency = metav1.Duration{Duration: 20 * time.Second}
obj.HealthzBindAddress = "127.0.0.1"
obj.HealthzPort = 10248
obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second}
obj.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute}
obj.ImageMaximumGCAge = metav1.Duration{}
obj.ImageGCHighThresholdPercent = 85
obj.ImageGCLowThresholdPercent = 80
obj.KernelMemcgNotification = false
obj.MaxOpenFiles = 1000000
obj.MaxPods = 110
obj.MemoryManagerPolicy = v1beta1.NoneMemoryManagerPolicy
obj.PodPidsLimit = -1
obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second}
obj.NodeStatusReportFrequency = metav1.Duration{Duration: time.Minute}
obj.NodeLeaseDurationSeconds = 40
obj.CPUManagerPolicy = "none"
obj.CPUManagerPolicyOptions = nil
obj.CPUManagerReconcilePeriod = obj.NodeStatusUpdateFrequency
obj.NodeStatusMaxImages = 50
obj.TopologyManagerPolicy = kubeletconfig.NoneTopologyManagerPolicy
obj.TopologyManagerScope = kubeletconfig.ContainerTopologyManagerScope
obj.TopologyManagerPolicyOptions = nil
obj.QOSReserved = map[string]string{
"memory": "50%",
}
obj.OOMScoreAdj = int32(qos.KubeletOOMScoreAdj)
obj.PodLogsDir = "/var/log/pods"
obj.Port = ports.KubeletPort
obj.ReadOnlyPort = ports.KubeletReadOnlyPort
obj.RegistryBurst = 10
obj.RegistryPullQPS = 5
obj.ResolverConfig = kubetypes.ResolvConfDefault
obj.SerializeImagePulls = true
obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour}
obj.SyncFrequency = metav1.Duration{Duration: 1 * time.Minute}
obj.ContentType = "application/vnd.kubernetes.protobuf"
obj.KubeAPIQPS = 50
obj.KubeAPIBurst = 100
obj.HairpinMode = v1beta1.PromiscuousBridge
obj.EvictionHard = eviction.DefaultEvictionHard
obj.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 5 * time.Minute}
obj.MergeDefaultEvictionSettings = false
obj.MakeIPTablesUtilChains = true
obj.IPTablesMasqueradeBit = kubeletconfigv1beta1.DefaultIPTablesMasqueradeBit
obj.IPTablesDropBit = kubeletconfigv1beta1.DefaultIPTablesDropBit
obj.CgroupsPerQOS = true
obj.CgroupDriver = "cgroupfs"
obj.EnforceNodeAllocatable = kubeletconfigv1beta1.DefaultNodeAllocatableEnforcement
obj.StaticPodURLHeader = nil
obj.SingleProcessOOMKill = ptr.To(false)
obj.ContainerLogMaxFiles = 5
obj.ContainerLogMaxSize = "10Mi"
obj.ContainerLogMaxWorkers = 1
obj.ContainerLogMonitorInterval = metav1.Duration{Duration: 10 * time.Second}
obj.ConfigMapAndSecretChangeDetectionStrategy = "Watch"
obj.AllowedUnsafeSysctls = nil
obj.VolumePluginDir = kubeletconfigv1beta1.DefaultVolumePluginDir
obj.ContainerRuntimeEndpoint = "unix:///run/containerd/containerd.sock"
if obj.Logging.Format == "" {
obj.Logging.Format = "text"
}
obj.EnableSystemLogHandler = true
obj.MemoryThrottlingFactor = ptr.To(rand.Float64())
obj.LocalStorageCapacityIsolation = true
obj.FeatureGates = map[string]bool{
"AllAlpha": false,
"AllBeta": true,
}
},
// tokenAttributes field is only supported in v1 CredentialProvider
func(obj *kubeletconfig.CredentialProvider, c randfill.Continue) {
c.FillNoCustom(obj)
obj.TokenAttributes = nil
},
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// KubeletConfigurationPathRefs returns pointers to all of the KubeletConfiguration fields that contain filepaths.
// You might use this, for example, to resolve all relative paths against some common root before
// passing the configuration to the application. This method must be kept up to date as new fields are added.
func KubeletConfigurationPathRefs(kc *KubeletConfiguration) []*string {
paths := []*string{}
paths = append(paths, &kc.StaticPodPath)
paths = append(paths, &kc.Authentication.X509.ClientCAFile)
paths = append(paths, &kc.TLSCertFile)
paths = append(paths, &kc.TLSPrivateKeyFile)
paths = append(paths, &kc.ResolverConfig)
paths = append(paths, &kc.VolumePluginDir)
paths = append(paths, &kc.PodLogsDir)
return paths
}
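// Illustrative sketch (not part of the original source): resolving every
// relative path in the configuration against a common root directory, as the
// comment above suggests. The rootDir parameter is hypothetical; real code
// would likely use filepath.IsAbs and filepath.Join instead of raw strings.
func resolveKubeletConfigPaths(kc *KubeletConfiguration, rootDir string) {
	for _, ref := range KubeletConfigurationPathRefs(kc) {
		// Leave empty paths and absolute paths untouched.
		if *ref == "" || (*ref)[0] == '/' {
			continue
		}
		*ref = rootDir + "/" + *ref
	}
}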
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubelet.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeletConfiguration{},
&SerializedNodeConfigSource{},
&CredentialProviderConfig{},
&ImagePullIntent{},
&ImagePulledRecord{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigv1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1"
kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
)
// Utility functions for the Kubelet's kubeletconfig API group
// NewSchemeAndCodecs is a utility function that returns a Scheme and CodecFactory
// that understand the types in the kubeletconfig API group. Passing mutators allows
// for adjusting the behavior of the CodecFactory, for example to enable strict decoding.
func NewSchemeAndCodecs(mutators ...serializer.CodecFactoryOptionsMutator) (*runtime.Scheme, *serializer.CodecFactory, error) {
scheme := runtime.NewScheme()
if err := kubeletconfig.AddToScheme(scheme); err != nil {
return nil, nil, err
}
if err := kubeletconfigv1beta1.AddToScheme(scheme); err != nil {
return nil, nil, err
}
if err := kubeletconfigv1.AddToScheme(scheme); err != nil {
return nil, nil, err
}
codecs := serializer.NewCodecFactory(scheme, mutators...)
return scheme, &codecs, nil
}
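// Illustrative sketch (not part of the original source): a round-trip test
// file that wires the scheme above to the fuzzer funcs earlier in this
// document. The roundtrip helper is assumed from k8s.io/apimachinery's
// apitesting package; it fuzzes internal objects, converts them through each
// external version, and verifies nothing is lost.
package scheme

import (
	"testing"

	"k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
	"k8s.io/kubernetes/pkg/kubelet/apis/config/fuzzer"
)

func TestRoundTripTypes(t *testing.T) {
	scheme, _, err := NewSchemeAndCodecs()
	if err != nil {
		t.Fatal(err)
	}
	roundtrip.RoundTripTestForScheme(t, scheme, fuzzer.Funcs)
}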
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1 "k8s.io/kubelet/config/v1"
config "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1.CredentialProvider)(nil), (*config.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CredentialProvider_To_config_CredentialProvider(a.(*configv1.CredentialProvider), b.(*config.CredentialProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CredentialProvider)(nil), (*configv1.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProvider_To_v1_CredentialProvider(a.(*config.CredentialProvider), b.(*configv1.CredentialProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.CredentialProviderConfig)(nil), (*config.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_CredentialProviderConfig_To_config_CredentialProviderConfig(a.(*configv1.CredentialProviderConfig), b.(*config.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CredentialProviderConfig)(nil), (*configv1.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProviderConfig_To_v1_CredentialProviderConfig(a.(*config.CredentialProviderConfig), b.(*configv1.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ExecEnvVar)(nil), (*config.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExecEnvVar_To_config_ExecEnvVar(a.(*configv1.ExecEnvVar), b.(*config.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ExecEnvVar)(nil), (*configv1.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ExecEnvVar_To_v1_ExecEnvVar(a.(*config.ExecEnvVar), b.(*configv1.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ServiceAccountTokenAttributes)(nil), (*config.ServiceAccountTokenAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ServiceAccountTokenAttributes_To_config_ServiceAccountTokenAttributes(a.(*configv1.ServiceAccountTokenAttributes), b.(*config.ServiceAccountTokenAttributes), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ServiceAccountTokenAttributes)(nil), (*configv1.ServiceAccountTokenAttributes)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ServiceAccountTokenAttributes_To_v1_ServiceAccountTokenAttributes(a.(*config.ServiceAccountTokenAttributes), b.(*configv1.ServiceAccountTokenAttributes), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_CredentialProvider_To_config_CredentialProvider(in *configv1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*metav1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]config.ExecEnvVar)(unsafe.Pointer(&in.Env))
out.TokenAttributes = (*config.ServiceAccountTokenAttributes)(unsafe.Pointer(in.TokenAttributes))
return nil
}
// Convert_v1_CredentialProvider_To_config_CredentialProvider is an autogenerated conversion function.
func Convert_v1_CredentialProvider_To_config_CredentialProvider(in *configv1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
return autoConvert_v1_CredentialProvider_To_config_CredentialProvider(in, out, s)
}
func autoConvert_config_CredentialProvider_To_v1_CredentialProvider(in *config.CredentialProvider, out *configv1.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*metav1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]configv1.ExecEnvVar)(unsafe.Pointer(&in.Env))
out.TokenAttributes = (*configv1.ServiceAccountTokenAttributes)(unsafe.Pointer(in.TokenAttributes))
return nil
}
// Convert_config_CredentialProvider_To_v1_CredentialProvider is an autogenerated conversion function.
func Convert_config_CredentialProvider_To_v1_CredentialProvider(in *config.CredentialProvider, out *configv1.CredentialProvider, s conversion.Scope) error {
return autoConvert_config_CredentialProvider_To_v1_CredentialProvider(in, out, s)
}
func autoConvert_v1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
out.Providers = *(*[]config.CredentialProvider)(unsafe.Pointer(&in.Providers))
return nil
}
// Convert_v1_CredentialProviderConfig_To_config_CredentialProviderConfig is an autogenerated conversion function.
func Convert_v1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_v1_CredentialProviderConfig_To_config_CredentialProviderConfig(in, out, s)
}
func autoConvert_config_CredentialProviderConfig_To_v1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1.CredentialProviderConfig, s conversion.Scope) error {
out.Providers = *(*[]configv1.CredentialProvider)(unsafe.Pointer(&in.Providers))
return nil
}
// Convert_config_CredentialProviderConfig_To_v1_CredentialProviderConfig is an autogenerated conversion function.
func Convert_config_CredentialProviderConfig_To_v1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_config_CredentialProviderConfig_To_v1_CredentialProviderConfig(in, out, s)
}
func autoConvert_v1_ExecEnvVar_To_config_ExecEnvVar(in *configv1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_v1_ExecEnvVar_To_config_ExecEnvVar is an autogenerated conversion function.
func Convert_v1_ExecEnvVar_To_config_ExecEnvVar(in *configv1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
return autoConvert_v1_ExecEnvVar_To_config_ExecEnvVar(in, out, s)
}
func autoConvert_config_ExecEnvVar_To_v1_ExecEnvVar(in *config.ExecEnvVar, out *configv1.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_config_ExecEnvVar_To_v1_ExecEnvVar is an autogenerated conversion function.
func Convert_config_ExecEnvVar_To_v1_ExecEnvVar(in *config.ExecEnvVar, out *configv1.ExecEnvVar, s conversion.Scope) error {
return autoConvert_config_ExecEnvVar_To_v1_ExecEnvVar(in, out, s)
}
func autoConvert_v1_ServiceAccountTokenAttributes_To_config_ServiceAccountTokenAttributes(in *configv1.ServiceAccountTokenAttributes, out *config.ServiceAccountTokenAttributes, s conversion.Scope) error {
out.ServiceAccountTokenAudience = in.ServiceAccountTokenAudience
out.CacheType = config.ServiceAccountTokenCacheType(in.CacheType)
out.RequireServiceAccount = (*bool)(unsafe.Pointer(in.RequireServiceAccount))
out.RequiredServiceAccountAnnotationKeys = *(*[]string)(unsafe.Pointer(&in.RequiredServiceAccountAnnotationKeys))
out.OptionalServiceAccountAnnotationKeys = *(*[]string)(unsafe.Pointer(&in.OptionalServiceAccountAnnotationKeys))
return nil
}
// Convert_v1_ServiceAccountTokenAttributes_To_config_ServiceAccountTokenAttributes is an autogenerated conversion function.
func Convert_v1_ServiceAccountTokenAttributes_To_config_ServiceAccountTokenAttributes(in *configv1.ServiceAccountTokenAttributes, out *config.ServiceAccountTokenAttributes, s conversion.Scope) error {
return autoConvert_v1_ServiceAccountTokenAttributes_To_config_ServiceAccountTokenAttributes(in, out, s)
}
func autoConvert_config_ServiceAccountTokenAttributes_To_v1_ServiceAccountTokenAttributes(in *config.ServiceAccountTokenAttributes, out *configv1.ServiceAccountTokenAttributes, s conversion.Scope) error {
out.ServiceAccountTokenAudience = in.ServiceAccountTokenAudience
out.CacheType = configv1.ServiceAccountTokenCacheType(in.CacheType)
out.RequireServiceAccount = (*bool)(unsafe.Pointer(in.RequireServiceAccount))
out.RequiredServiceAccountAnnotationKeys = *(*[]string)(unsafe.Pointer(&in.RequiredServiceAccountAnnotationKeys))
out.OptionalServiceAccountAnnotationKeys = *(*[]string)(unsafe.Pointer(&in.OptionalServiceAccountAnnotationKeys))
return nil
}
// Convert_config_ServiceAccountTokenAttributes_To_v1_ServiceAccountTokenAttributes is an autogenerated conversion function.
func Convert_config_ServiceAccountTokenAttributes_To_v1_ServiceAccountTokenAttributes(in *config.ServiceAccountTokenAttributes, out *configv1.ServiceAccountTokenAttributes, s conversion.Scope) error {
return autoConvert_config_ServiceAccountTokenAttributes_To_v1_ServiceAccountTokenAttributes(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/conversion"
configv1alpha1 "k8s.io/kubelet/config/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/apis/config"
)
func Convert_config_CredentialProvider_To_v1alpha1_CredentialProvider(in *config.CredentialProvider, out *configv1alpha1.CredentialProvider, s conversion.Scope) error {
// This conversion intentionally omits the tokenAttributes field which is only supported in v1 CredentialProvider.
return autoConvert_config_CredentialProvider_To_v1alpha1_CredentialProvider(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kubelet/config/v1alpha1"
config "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.CredentialProvider)(nil), (*config.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CredentialProvider_To_config_CredentialProvider(a.(*configv1alpha1.CredentialProvider), b.(*config.CredentialProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.CredentialProviderConfig)(nil), (*config.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_CredentialProviderConfig_To_config_CredentialProviderConfig(a.(*configv1alpha1.CredentialProviderConfig), b.(*config.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CredentialProviderConfig)(nil), (*configv1alpha1.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProviderConfig_To_v1alpha1_CredentialProviderConfig(a.(*config.CredentialProviderConfig), b.(*configv1alpha1.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ExecEnvVar)(nil), (*config.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ExecEnvVar_To_config_ExecEnvVar(a.(*configv1alpha1.ExecEnvVar), b.(*config.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ExecEnvVar)(nil), (*configv1alpha1.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ExecEnvVar_To_v1alpha1_ExecEnvVar(a.(*config.ExecEnvVar), b.(*configv1alpha1.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ImagePullCredentials)(nil), (*config.ImagePullCredentials)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImagePullCredentials_To_config_ImagePullCredentials(a.(*configv1alpha1.ImagePullCredentials), b.(*config.ImagePullCredentials), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ImagePullCredentials)(nil), (*configv1alpha1.ImagePullCredentials)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ImagePullCredentials_To_v1alpha1_ImagePullCredentials(a.(*config.ImagePullCredentials), b.(*configv1alpha1.ImagePullCredentials), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ImagePullIntent)(nil), (*config.ImagePullIntent)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImagePullIntent_To_config_ImagePullIntent(a.(*configv1alpha1.ImagePullIntent), b.(*config.ImagePullIntent), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ImagePullIntent)(nil), (*configv1alpha1.ImagePullIntent)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ImagePullIntent_To_v1alpha1_ImagePullIntent(a.(*config.ImagePullIntent), b.(*configv1alpha1.ImagePullIntent), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ImagePullSecret)(nil), (*config.ImagePullSecret)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImagePullSecret_To_config_ImagePullSecret(a.(*configv1alpha1.ImagePullSecret), b.(*config.ImagePullSecret), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ImagePullSecret)(nil), (*configv1alpha1.ImagePullSecret)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ImagePullSecret_To_v1alpha1_ImagePullSecret(a.(*config.ImagePullSecret), b.(*configv1alpha1.ImagePullSecret), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ImagePullServiceAccount)(nil), (*config.ImagePullServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImagePullServiceAccount_To_config_ImagePullServiceAccount(a.(*configv1alpha1.ImagePullServiceAccount), b.(*config.ImagePullServiceAccount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ImagePullServiceAccount)(nil), (*configv1alpha1.ImagePullServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ImagePullServiceAccount_To_v1alpha1_ImagePullServiceAccount(a.(*config.ImagePullServiceAccount), b.(*configv1alpha1.ImagePullServiceAccount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.ImagePulledRecord)(nil), (*config.ImagePulledRecord)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImagePulledRecord_To_config_ImagePulledRecord(a.(*configv1alpha1.ImagePulledRecord), b.(*config.ImagePulledRecord), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ImagePulledRecord)(nil), (*configv1alpha1.ImagePulledRecord)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ImagePulledRecord_To_v1alpha1_ImagePulledRecord(a.(*config.ImagePulledRecord), b.(*configv1alpha1.ImagePulledRecord), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.CredentialProvider)(nil), (*configv1alpha1.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProvider_To_v1alpha1_CredentialProvider(a.(*config.CredentialProvider), b.(*configv1alpha1.CredentialProvider), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_CredentialProvider_To_config_CredentialProvider(in *configv1alpha1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*v1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]config.ExecEnvVar)(unsafe.Pointer(&in.Env))
return nil
}
// Convert_v1alpha1_CredentialProvider_To_config_CredentialProvider is an autogenerated conversion function.
func Convert_v1alpha1_CredentialProvider_To_config_CredentialProvider(in *configv1alpha1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
return autoConvert_v1alpha1_CredentialProvider_To_config_CredentialProvider(in, out, s)
}
func autoConvert_config_CredentialProvider_To_v1alpha1_CredentialProvider(in *config.CredentialProvider, out *configv1alpha1.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*v1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]configv1alpha1.ExecEnvVar)(unsafe.Pointer(&in.Env))
// WARNING: in.TokenAttributes requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1alpha1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]config.CredentialProvider, len(*in))
for i := range *in {
if err := Convert_v1alpha1_CredentialProvider_To_config_CredentialProvider(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Providers = nil
}
return nil
}
// Convert_v1alpha1_CredentialProviderConfig_To_config_CredentialProviderConfig is an autogenerated conversion function.
func Convert_v1alpha1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1alpha1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_CredentialProviderConfig_To_config_CredentialProviderConfig(in, out, s)
}
func autoConvert_config_CredentialProviderConfig_To_v1alpha1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1alpha1.CredentialProviderConfig, s conversion.Scope) error {
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]configv1alpha1.CredentialProvider, len(*in))
for i := range *in {
if err := Convert_config_CredentialProvider_To_v1alpha1_CredentialProvider(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Providers = nil
}
return nil
}
// Convert_config_CredentialProviderConfig_To_v1alpha1_CredentialProviderConfig is an autogenerated conversion function.
func Convert_config_CredentialProviderConfig_To_v1alpha1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1alpha1.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_config_CredentialProviderConfig_To_v1alpha1_CredentialProviderConfig(in, out, s)
}
func autoConvert_v1alpha1_ExecEnvVar_To_config_ExecEnvVar(in *configv1alpha1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_v1alpha1_ExecEnvVar_To_config_ExecEnvVar is an autogenerated conversion function.
func Convert_v1alpha1_ExecEnvVar_To_config_ExecEnvVar(in *configv1alpha1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
return autoConvert_v1alpha1_ExecEnvVar_To_config_ExecEnvVar(in, out, s)
}
func autoConvert_config_ExecEnvVar_To_v1alpha1_ExecEnvVar(in *config.ExecEnvVar, out *configv1alpha1.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_config_ExecEnvVar_To_v1alpha1_ExecEnvVar is an autogenerated conversion function.
func Convert_config_ExecEnvVar_To_v1alpha1_ExecEnvVar(in *config.ExecEnvVar, out *configv1alpha1.ExecEnvVar, s conversion.Scope) error {
return autoConvert_config_ExecEnvVar_To_v1alpha1_ExecEnvVar(in, out, s)
}
func autoConvert_v1alpha1_ImagePullCredentials_To_config_ImagePullCredentials(in *configv1alpha1.ImagePullCredentials, out *config.ImagePullCredentials, s conversion.Scope) error {
out.KubernetesSecrets = *(*[]config.ImagePullSecret)(unsafe.Pointer(&in.KubernetesSecrets))
out.KubernetesServiceAccounts = *(*[]config.ImagePullServiceAccount)(unsafe.Pointer(&in.KubernetesServiceAccounts))
out.NodePodsAccessible = in.NodePodsAccessible
return nil
}
// Convert_v1alpha1_ImagePullCredentials_To_config_ImagePullCredentials is an autogenerated conversion function.
func Convert_v1alpha1_ImagePullCredentials_To_config_ImagePullCredentials(in *configv1alpha1.ImagePullCredentials, out *config.ImagePullCredentials, s conversion.Scope) error {
return autoConvert_v1alpha1_ImagePullCredentials_To_config_ImagePullCredentials(in, out, s)
}
func autoConvert_config_ImagePullCredentials_To_v1alpha1_ImagePullCredentials(in *config.ImagePullCredentials, out *configv1alpha1.ImagePullCredentials, s conversion.Scope) error {
out.KubernetesSecrets = *(*[]configv1alpha1.ImagePullSecret)(unsafe.Pointer(&in.KubernetesSecrets))
out.KubernetesServiceAccounts = *(*[]configv1alpha1.ImagePullServiceAccount)(unsafe.Pointer(&in.KubernetesServiceAccounts))
out.NodePodsAccessible = in.NodePodsAccessible
return nil
}
// Convert_config_ImagePullCredentials_To_v1alpha1_ImagePullCredentials is an autogenerated conversion function.
func Convert_config_ImagePullCredentials_To_v1alpha1_ImagePullCredentials(in *config.ImagePullCredentials, out *configv1alpha1.ImagePullCredentials, s conversion.Scope) error {
return autoConvert_config_ImagePullCredentials_To_v1alpha1_ImagePullCredentials(in, out, s)
}
func autoConvert_v1alpha1_ImagePullIntent_To_config_ImagePullIntent(in *configv1alpha1.ImagePullIntent, out *config.ImagePullIntent, s conversion.Scope) error {
out.Image = in.Image
return nil
}
// Convert_v1alpha1_ImagePullIntent_To_config_ImagePullIntent is an autogenerated conversion function.
func Convert_v1alpha1_ImagePullIntent_To_config_ImagePullIntent(in *configv1alpha1.ImagePullIntent, out *config.ImagePullIntent, s conversion.Scope) error {
return autoConvert_v1alpha1_ImagePullIntent_To_config_ImagePullIntent(in, out, s)
}
func autoConvert_config_ImagePullIntent_To_v1alpha1_ImagePullIntent(in *config.ImagePullIntent, out *configv1alpha1.ImagePullIntent, s conversion.Scope) error {
out.Image = in.Image
return nil
}
// Convert_config_ImagePullIntent_To_v1alpha1_ImagePullIntent is an autogenerated conversion function.
func Convert_config_ImagePullIntent_To_v1alpha1_ImagePullIntent(in *config.ImagePullIntent, out *configv1alpha1.ImagePullIntent, s conversion.Scope) error {
return autoConvert_config_ImagePullIntent_To_v1alpha1_ImagePullIntent(in, out, s)
}
func autoConvert_v1alpha1_ImagePullSecret_To_config_ImagePullSecret(in *configv1alpha1.ImagePullSecret, out *config.ImagePullSecret, s conversion.Scope) error {
out.UID = in.UID
out.Namespace = in.Namespace
out.Name = in.Name
out.CredentialHash = in.CredentialHash
return nil
}
// Convert_v1alpha1_ImagePullSecret_To_config_ImagePullSecret is an autogenerated conversion function.
func Convert_v1alpha1_ImagePullSecret_To_config_ImagePullSecret(in *configv1alpha1.ImagePullSecret, out *config.ImagePullSecret, s conversion.Scope) error {
return autoConvert_v1alpha1_ImagePullSecret_To_config_ImagePullSecret(in, out, s)
}
func autoConvert_config_ImagePullSecret_To_v1alpha1_ImagePullSecret(in *config.ImagePullSecret, out *configv1alpha1.ImagePullSecret, s conversion.Scope) error {
out.UID = in.UID
out.Namespace = in.Namespace
out.Name = in.Name
out.CredentialHash = in.CredentialHash
return nil
}
// Convert_config_ImagePullSecret_To_v1alpha1_ImagePullSecret is an autogenerated conversion function.
func Convert_config_ImagePullSecret_To_v1alpha1_ImagePullSecret(in *config.ImagePullSecret, out *configv1alpha1.ImagePullSecret, s conversion.Scope) error {
return autoConvert_config_ImagePullSecret_To_v1alpha1_ImagePullSecret(in, out, s)
}
func autoConvert_v1alpha1_ImagePullServiceAccount_To_config_ImagePullServiceAccount(in *configv1alpha1.ImagePullServiceAccount, out *config.ImagePullServiceAccount, s conversion.Scope) error {
out.UID = in.UID
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1alpha1_ImagePullServiceAccount_To_config_ImagePullServiceAccount is an autogenerated conversion function.
func Convert_v1alpha1_ImagePullServiceAccount_To_config_ImagePullServiceAccount(in *configv1alpha1.ImagePullServiceAccount, out *config.ImagePullServiceAccount, s conversion.Scope) error {
return autoConvert_v1alpha1_ImagePullServiceAccount_To_config_ImagePullServiceAccount(in, out, s)
}
func autoConvert_config_ImagePullServiceAccount_To_v1alpha1_ImagePullServiceAccount(in *config.ImagePullServiceAccount, out *configv1alpha1.ImagePullServiceAccount, s conversion.Scope) error {
out.UID = in.UID
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_config_ImagePullServiceAccount_To_v1alpha1_ImagePullServiceAccount is an autogenerated conversion function.
func Convert_config_ImagePullServiceAccount_To_v1alpha1_ImagePullServiceAccount(in *config.ImagePullServiceAccount, out *configv1alpha1.ImagePullServiceAccount, s conversion.Scope) error {
return autoConvert_config_ImagePullServiceAccount_To_v1alpha1_ImagePullServiceAccount(in, out, s)
}
func autoConvert_v1alpha1_ImagePulledRecord_To_config_ImagePulledRecord(in *configv1alpha1.ImagePulledRecord, out *config.ImagePulledRecord, s conversion.Scope) error {
out.LastUpdatedTime = in.LastUpdatedTime
out.ImageRef = in.ImageRef
out.CredentialMapping = *(*map[string]config.ImagePullCredentials)(unsafe.Pointer(&in.CredentialMapping))
return nil
}
// Convert_v1alpha1_ImagePulledRecord_To_config_ImagePulledRecord is an autogenerated conversion function.
func Convert_v1alpha1_ImagePulledRecord_To_config_ImagePulledRecord(in *configv1alpha1.ImagePulledRecord, out *config.ImagePulledRecord, s conversion.Scope) error {
return autoConvert_v1alpha1_ImagePulledRecord_To_config_ImagePulledRecord(in, out, s)
}
func autoConvert_config_ImagePulledRecord_To_v1alpha1_ImagePulledRecord(in *config.ImagePulledRecord, out *configv1alpha1.ImagePulledRecord, s conversion.Scope) error {
out.LastUpdatedTime = in.LastUpdatedTime
out.ImageRef = in.ImageRef
out.CredentialMapping = *(*map[string]configv1alpha1.ImagePullCredentials)(unsafe.Pointer(&in.CredentialMapping))
return nil
}
// Convert_config_ImagePulledRecord_To_v1alpha1_ImagePulledRecord is an autogenerated conversion function.
func Convert_config_ImagePulledRecord_To_v1alpha1_ImagePulledRecord(in *config.ImagePulledRecord, out *configv1alpha1.ImagePulledRecord, s conversion.Scope) error {
return autoConvert_config_ImagePulledRecord_To_v1alpha1_ImagePulledRecord(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/conversion"
configv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/apis/config"
)
func Convert_config_CredentialProvider_To_v1beta1_CredentialProvider(in *config.CredentialProvider, out *configv1beta1.CredentialProvider, s conversion.Scope) error {
// This conversion intentionally omits the tokenAttributes field which is only supported in v1 CredentialProvider.
return autoConvert_config_CredentialProvider_To_v1beta1_CredentialProvider(in, out, s)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
// TODO: Cut references to k8s.io/kubernetes, eventually there should be none from this package
utilfeature "k8s.io/apiserver/pkg/util/feature"
logsapi "k8s.io/component-base/logs/api/v1"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/utils/ptr"
)
const (
// TODO: Move these constants to k8s.io/kubelet/config/v1beta1 instead?
DefaultIPTablesMasqueradeBit = 14
DefaultIPTablesDropBit = 15
DefaultVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
DefaultPodLogsDir = "/var/log/pods"
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos
DefaultMemoryThrottlingFactor = 0.9
// MaxContainerBackOff is the max backoff period for container restarts, exported for the e2e test
MaxContainerBackOff = 300 * time.Second
)
var (
zeroDuration = metav1.Duration{}
// TODO: Move these constants to k8s.io/kubelet/config/v1beta1 instead?
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
DefaultNodeAllocatableEnforcement = []string{"pods"}
)
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfiguration) {
// TODO(lauralorenz): Reassess conditional feature gating on defaults. Here
// we (1) copy the gates to a local var and (2) merge in the gate config of
// the object being defaulted. Alternatively, we could unilaterally set the
// default value, then later check the gate and wipe the field if needed, as
// API strategy does for gate-disabled fields. Meanwhile, KubeletConfiguration
// is increasingly dynamic, and the configured gates may change depending on
// when this is called. See also validation.go.
localFeatureGate := utilfeature.DefaultMutableFeatureGate.DeepCopy()
if err := localFeatureGate.SetFromMap(obj.FeatureGates); err != nil {
panic(fmt.Sprintf("failed to merge global and in-flight KubeletConfiguration while setting defaults, error: %v", err))
}
if obj.EnableServer == nil {
obj.EnableServer = ptr.To(true)
}
if obj.SyncFrequency == zeroDuration {
obj.SyncFrequency = metav1.Duration{Duration: 1 * time.Minute}
}
if obj.FileCheckFrequency == zeroDuration {
obj.FileCheckFrequency = metav1.Duration{Duration: 20 * time.Second}
}
if obj.HTTPCheckFrequency == zeroDuration {
obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second}
}
if obj.Address == "" {
obj.Address = "0.0.0.0"
}
if obj.Port == 0 {
obj.Port = ports.KubeletPort
}
if obj.Authentication.Anonymous.Enabled == nil {
obj.Authentication.Anonymous.Enabled = ptr.To(false)
}
if obj.Authentication.Webhook.Enabled == nil {
obj.Authentication.Webhook.Enabled = ptr.To(true)
}
if obj.Authentication.Webhook.CacheTTL == zeroDuration {
obj.Authentication.Webhook.CacheTTL = metav1.Duration{Duration: 2 * time.Minute}
}
if obj.Authorization.Mode == "" {
obj.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook
}
if obj.Authorization.Webhook.CacheAuthorizedTTL == zeroDuration {
obj.Authorization.Webhook.CacheAuthorizedTTL = metav1.Duration{Duration: 5 * time.Minute}
}
if obj.Authorization.Webhook.CacheUnauthorizedTTL == zeroDuration {
obj.Authorization.Webhook.CacheUnauthorizedTTL = metav1.Duration{Duration: 30 * time.Second}
}
if obj.RegistryPullQPS == nil {
obj.RegistryPullQPS = ptr.To[int32](5)
}
if obj.RegistryBurst == 0 {
obj.RegistryBurst = 10
}
if obj.EventRecordQPS == nil {
obj.EventRecordQPS = ptr.To[int32](50)
}
if obj.EventBurst == 0 {
obj.EventBurst = 100
}
if obj.EnableDebuggingHandlers == nil {
obj.EnableDebuggingHandlers = ptr.To(true)
}
if obj.HealthzPort == nil {
obj.HealthzPort = ptr.To[int32](10248)
}
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = "127.0.0.1"
}
if obj.OOMScoreAdj == nil {
obj.OOMScoreAdj = ptr.To(int32(qos.KubeletOOMScoreAdj))
}
if obj.StreamingConnectionIdleTimeout == zeroDuration {
obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour}
}
if obj.NodeStatusReportFrequency == zeroDuration {
// For backward compatibility, NodeStatusReportFrequency's default value is
// set to NodeStatusUpdateFrequency if NodeStatusUpdateFrequency is set
// explicitly.
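// For example, if NodeStatusUpdateFrequency is explicitly set to 20s and this
// field is left unset, it defaults to 20s as well; if neither is set, it
// defaults to 5m.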
if obj.NodeStatusUpdateFrequency == zeroDuration {
obj.NodeStatusReportFrequency = metav1.Duration{Duration: 5 * time.Minute}
} else {
obj.NodeStatusReportFrequency = obj.NodeStatusUpdateFrequency
}
}
if obj.NodeStatusUpdateFrequency == zeroDuration {
obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second}
}
if obj.NodeLeaseDurationSeconds == 0 {
obj.NodeLeaseDurationSeconds = 40
}
if obj.ImageMinimumGCAge == zeroDuration {
obj.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute}
}
if obj.ImageGCHighThresholdPercent == nil {
// the default is kept below Docker's default dm.min_free_space of 90%
obj.ImageGCHighThresholdPercent = ptr.To[int32](85)
}
if obj.ImageGCLowThresholdPercent == nil {
obj.ImageGCLowThresholdPercent = ptr.To[int32](80)
}
if obj.VolumeStatsAggPeriod == zeroDuration {
obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute}
}
if obj.CgroupsPerQOS == nil {
obj.CgroupsPerQOS = ptr.To(true)
}
if obj.CgroupDriver == "" {
obj.CgroupDriver = "cgroupfs"
}
if obj.CPUManagerPolicy == "" {
obj.CPUManagerPolicy = "none"
}
if obj.CPUManagerReconcilePeriod == zeroDuration {
// Keep this the same as the default NodeStatusUpdateFrequency
obj.CPUManagerReconcilePeriod = metav1.Duration{Duration: 10 * time.Second}
}
if obj.MemoryManagerPolicy == "" {
obj.MemoryManagerPolicy = kubeletconfigv1beta1.NoneMemoryManagerPolicy
}
if obj.TopologyManagerPolicy == "" {
obj.TopologyManagerPolicy = kubeletconfigv1beta1.NoneTopologyManagerPolicy
}
if obj.TopologyManagerScope == "" {
obj.TopologyManagerScope = kubeletconfigv1beta1.ContainerTopologyManagerScope
}
if obj.RuntimeRequestTimeout == zeroDuration {
obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute}
}
if obj.HairpinMode == "" {
obj.HairpinMode = kubeletconfigv1beta1.PromiscuousBridge
}
if obj.MaxPods == 0 {
obj.MaxPods = 110
}
// Default a nil or negative value to -1 (which implies the node-allocatable PID limit).
if obj.PodPidsLimit == nil || *obj.PodPidsLimit < int64(0) {
obj.PodPidsLimit = ptr.To[int64](-1)
}
if obj.ResolverConfig == nil {
obj.ResolverConfig = ptr.To(kubetypes.ResolvConfDefault)
}
if obj.CPUCFSQuota == nil {
obj.CPUCFSQuota = ptr.To(true)
}
if obj.CPUCFSQuotaPeriod == nil {
obj.CPUCFSQuotaPeriod = &metav1.Duration{Duration: 100 * time.Millisecond}
}
if obj.NodeStatusMaxImages == nil {
obj.NodeStatusMaxImages = ptr.To[int32](50)
}
if obj.MaxOpenFiles == 0 {
obj.MaxOpenFiles = 1000000
}
if obj.ContentType == "" {
obj.ContentType = "application/vnd.kubernetes.protobuf"
}
if obj.KubeAPIQPS == nil {
obj.KubeAPIQPS = ptr.To[int32](50)
}
if obj.KubeAPIBurst == 0 {
obj.KubeAPIBurst = 100
}
if obj.SerializeImagePulls == nil {
// SerializeImagePulls defaults to true when MaxParallelImagePulls is unset
// (or less than 2), and to false when MaxParallelImagePulls is set.
// This saves users from having to set both fields.
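// For example, setting maxParallelImagePulls: 5 alone defaults
// serializeImagePulls to false.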
if obj.MaxParallelImagePulls == nil || *obj.MaxParallelImagePulls < 2 {
obj.SerializeImagePulls = ptr.To(true)
} else {
obj.SerializeImagePulls = ptr.To(false)
}
}
if obj.EvictionPressureTransitionPeriod == zeroDuration {
obj.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 5 * time.Minute}
}
if obj.MergeDefaultEvictionSettings == nil {
obj.MergeDefaultEvictionSettings = ptr.To(false)
}
if obj.EnableControllerAttachDetach == nil {
obj.EnableControllerAttachDetach = ptr.To(true)
}
if obj.MakeIPTablesUtilChains == nil {
obj.MakeIPTablesUtilChains = ptr.To(true)
}
if obj.IPTablesMasqueradeBit == nil {
obj.IPTablesMasqueradeBit = ptr.To[int32](DefaultIPTablesMasqueradeBit)
}
if obj.IPTablesDropBit == nil {
obj.IPTablesDropBit = ptr.To[int32](DefaultIPTablesDropBit)
}
if obj.FailSwapOn == nil {
obj.FailSwapOn = ptr.To(true)
}
if obj.ContainerLogMaxSize == "" {
obj.ContainerLogMaxSize = "10Mi"
}
if obj.ContainerLogMaxFiles == nil {
obj.ContainerLogMaxFiles = ptr.To[int32](5)
}
if obj.ContainerLogMaxWorkers == nil {
obj.ContainerLogMaxWorkers = ptr.To[int32](1)
}
if obj.ContainerLogMonitorInterval == nil {
obj.ContainerLogMonitorInterval = &metav1.Duration{Duration: 10 * time.Second}
}
if obj.ConfigMapAndSecretChangeDetectionStrategy == "" {
obj.ConfigMapAndSecretChangeDetectionStrategy = kubeletconfigv1beta1.WatchChangeDetectionStrategy
}
if obj.EnforceNodeAllocatable == nil {
obj.EnforceNodeAllocatable = DefaultNodeAllocatableEnforcement
}
if obj.VolumePluginDir == "" {
obj.VolumePluginDir = DefaultVolumePluginDir
}
// Apply the recommended LoggingConfiguration defaults.
logsapi.SetRecommendedLoggingConfiguration(&obj.Logging)
if obj.EnableSystemLogHandler == nil {
obj.EnableSystemLogHandler = ptr.To(true)
}
if obj.EnableProfilingHandler == nil {
obj.EnableProfilingHandler = ptr.To(true)
}
if obj.EnableDebugFlagsHandler == nil {
obj.EnableDebugFlagsHandler = ptr.To(true)
}
if obj.SeccompDefault == nil {
obj.SeccompDefault = ptr.To(false)
}
if obj.FailCgroupV1 == nil {
obj.FailCgroupV1 = ptr.To(false)
}
if obj.MemoryThrottlingFactor == nil {
obj.MemoryThrottlingFactor = ptr.To(DefaultMemoryThrottlingFactor)
}
if obj.RegisterNode == nil {
obj.RegisterNode = ptr.To(true)
}
if obj.LocalStorageCapacityIsolation == nil {
obj.LocalStorageCapacityIsolation = ptr.To(true)
}
if obj.ContainerRuntimeEndpoint == "" {
obj.ContainerRuntimeEndpoint = "unix:///run/containerd/containerd.sock"
}
if obj.PodLogsDir == "" {
obj.PodLogsDir = DefaultPodLogsDir
}
if localFeatureGate.Enabled(features.KubeletCrashLoopBackOffMax) {
if obj.CrashLoopBackOff.MaxContainerRestartPeriod == nil {
obj.CrashLoopBackOff.MaxContainerRestartPeriod = &metav1.Duration{Duration: MaxContainerBackOff}
}
}
if localFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
if obj.ImagePullCredentialsVerificationPolicy == "" {
obj.ImagePullCredentialsVerificationPolicy = kubeletconfigv1beta1.NeverVerifyPreloadedImages
}
}
}
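// The sketch below is illustrative only and not part of the upstream file; it
// shows the effect of the defaulters above on an empty configuration. The
// expected values mirror the constants used in SetDefaults_KubeletConfiguration.
func exampleSetDefaults() {
cfg := &kubeletconfigv1beta1.KubeletConfiguration{}
SetDefaults_KubeletConfiguration(cfg)
fmt.Printf("enableServer=%v port=%d maxPods=%d syncFrequency=%v cgroupDriver=%q\n",
*cfg.EnableServer, // true
cfg.Port, // ports.KubeletPort (10250)
cfg.MaxPods, // 110
cfg.SyncFrequency.Duration, // 1m0s
cfg.CgroupDriver) // "cgroupfs"
}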
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)
// GroupName is the group name used in this package
const GroupName = "kubelet.config.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
var (
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &kubeletconfigv1beta1.SchemeBuilder
// AddToScheme is a global function that registers this API group and version with a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
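// Illustrative usage (sketch, not part of the upstream file; assumes an
// additional import of k8s.io/apimachinery/pkg/runtime):
//
//  scheme := runtime.NewScheme()
//  if err := AddToScheme(scheme); err != nil {
//      panic(err)
//  }
//  cfg := &kubeletconfigv1beta1.KubeletConfiguration{}
//  scheme.Default(cfg) // applies SetDefaults_KubeletConfiguration via RegisterDefaults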
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
apiv1 "k8s.io/component-base/tracing/api/v1"
configv1beta1 "k8s.io/kubelet/config/v1beta1"
config "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1beta1.CrashLoopBackOffConfig)(nil), (*config.CrashLoopBackOffConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig(a.(*configv1beta1.CrashLoopBackOffConfig), b.(*config.CrashLoopBackOffConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CrashLoopBackOffConfig)(nil), (*configv1beta1.CrashLoopBackOffConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig(a.(*config.CrashLoopBackOffConfig), b.(*configv1beta1.CrashLoopBackOffConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.CredentialProvider)(nil), (*config.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CredentialProvider_To_config_CredentialProvider(a.(*configv1beta1.CredentialProvider), b.(*config.CredentialProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.CredentialProviderConfig)(nil), (*config.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_CredentialProviderConfig_To_config_CredentialProviderConfig(a.(*configv1beta1.CredentialProviderConfig), b.(*config.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.CredentialProviderConfig)(nil), (*configv1beta1.CredentialProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProviderConfig_To_v1beta1_CredentialProviderConfig(a.(*config.CredentialProviderConfig), b.(*configv1beta1.CredentialProviderConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.ExecEnvVar)(nil), (*config.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ExecEnvVar_To_config_ExecEnvVar(a.(*configv1beta1.ExecEnvVar), b.(*config.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ExecEnvVar)(nil), (*configv1beta1.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ExecEnvVar_To_v1beta1_ExecEnvVar(a.(*config.ExecEnvVar), b.(*configv1beta1.ExecEnvVar), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletAnonymousAuthentication)(nil), (*config.KubeletAnonymousAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(a.(*configv1beta1.KubeletAnonymousAuthentication), b.(*config.KubeletAnonymousAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletAnonymousAuthentication)(nil), (*configv1beta1.KubeletAnonymousAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(a.(*config.KubeletAnonymousAuthentication), b.(*configv1beta1.KubeletAnonymousAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletAuthentication)(nil), (*config.KubeletAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(a.(*configv1beta1.KubeletAuthentication), b.(*config.KubeletAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletAuthentication)(nil), (*configv1beta1.KubeletAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(a.(*config.KubeletAuthentication), b.(*configv1beta1.KubeletAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletAuthorization)(nil), (*config.KubeletAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(a.(*configv1beta1.KubeletAuthorization), b.(*config.KubeletAuthorization), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletAuthorization)(nil), (*configv1beta1.KubeletAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(a.(*config.KubeletAuthorization), b.(*configv1beta1.KubeletAuthorization), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletConfiguration)(nil), (*config.KubeletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(a.(*configv1beta1.KubeletConfiguration), b.(*config.KubeletConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletConfiguration)(nil), (*configv1beta1.KubeletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(a.(*config.KubeletConfiguration), b.(*configv1beta1.KubeletConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletWebhookAuthentication)(nil), (*config.KubeletWebhookAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(a.(*configv1beta1.KubeletWebhookAuthentication), b.(*config.KubeletWebhookAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletWebhookAuthentication)(nil), (*configv1beta1.KubeletWebhookAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(a.(*config.KubeletWebhookAuthentication), b.(*configv1beta1.KubeletWebhookAuthentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletWebhookAuthorization)(nil), (*config.KubeletWebhookAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(a.(*configv1beta1.KubeletWebhookAuthorization), b.(*config.KubeletWebhookAuthorization), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletWebhookAuthorization)(nil), (*configv1beta1.KubeletWebhookAuthorization)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(a.(*config.KubeletWebhookAuthorization), b.(*configv1beta1.KubeletWebhookAuthorization), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.KubeletX509Authentication)(nil), (*config.KubeletX509Authentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(a.(*configv1beta1.KubeletX509Authentication), b.(*config.KubeletX509Authentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeletX509Authentication)(nil), (*configv1beta1.KubeletX509Authentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(a.(*config.KubeletX509Authentication), b.(*configv1beta1.KubeletX509Authentication), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.MemoryReservation)(nil), (*config.MemoryReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MemoryReservation_To_config_MemoryReservation(a.(*configv1beta1.MemoryReservation), b.(*config.MemoryReservation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.MemoryReservation)(nil), (*configv1beta1.MemoryReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_MemoryReservation_To_v1beta1_MemoryReservation(a.(*config.MemoryReservation), b.(*configv1beta1.MemoryReservation), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.MemorySwapConfiguration)(nil), (*config.MemorySwapConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration(a.(*configv1beta1.MemorySwapConfiguration), b.(*config.MemorySwapConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.MemorySwapConfiguration)(nil), (*configv1beta1.MemorySwapConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration(a.(*config.MemorySwapConfiguration), b.(*configv1beta1.MemorySwapConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.SerializedNodeConfigSource)(nil), (*config.SerializedNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(a.(*configv1beta1.SerializedNodeConfigSource), b.(*config.SerializedNodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.SerializedNodeConfigSource)(nil), (*configv1beta1.SerializedNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(a.(*config.SerializedNodeConfigSource), b.(*configv1beta1.SerializedNodeConfigSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.ShutdownGracePeriodByPodPriority)(nil), (*config.ShutdownGracePeriodByPodPriority)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(a.(*configv1beta1.ShutdownGracePeriodByPodPriority), b.(*config.ShutdownGracePeriodByPodPriority), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ShutdownGracePeriodByPodPriority)(nil), (*configv1beta1.ShutdownGracePeriodByPodPriority)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(a.(*config.ShutdownGracePeriodByPodPriority), b.(*configv1beta1.ShutdownGracePeriodByPodPriority), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1beta1.UserNamespaces)(nil), (*config.UserNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_UserNamespaces_To_config_UserNamespaces(a.(*configv1beta1.UserNamespaces), b.(*config.UserNamespaces), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.UserNamespaces)(nil), (*configv1beta1.UserNamespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_UserNamespaces_To_v1beta1_UserNamespaces(a.(*config.UserNamespaces), b.(*configv1beta1.UserNamespaces), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.CredentialProvider)(nil), (*configv1beta1.CredentialProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_CredentialProvider_To_v1beta1_CredentialProvider(a.(*config.CredentialProvider), b.(*configv1beta1.CredentialProvider), scope)
}); err != nil {
return err
}
return nil
}
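// Illustrative usage (sketch, not part of the upstream file): once these
// conversions are registered (RegisterConversions is wired into AddToScheme by
// the init above), a scheme built that way can convert between the external
// and internal types:
//
//  external := &configv1beta1.KubeletConfiguration{}
//  internal := &config.KubeletConfiguration{}
//  if err := scheme.Convert(external, internal, nil); err != nil {
//      // handle the conversion error
//  }
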
func autoConvert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig(in *configv1beta1.CrashLoopBackOffConfig, out *config.CrashLoopBackOffConfig, s conversion.Scope) error {
out.MaxContainerRestartPeriod = (*v1.Duration)(unsafe.Pointer(in.MaxContainerRestartPeriod))
return nil
}
// Convert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig is an autogenerated conversion function.
func Convert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig(in *configv1beta1.CrashLoopBackOffConfig, out *config.CrashLoopBackOffConfig, s conversion.Scope) error {
return autoConvert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig(in, out, s)
}
func autoConvert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig(in *config.CrashLoopBackOffConfig, out *configv1beta1.CrashLoopBackOffConfig, s conversion.Scope) error {
out.MaxContainerRestartPeriod = (*v1.Duration)(unsafe.Pointer(in.MaxContainerRestartPeriod))
return nil
}
// Convert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig is an autogenerated conversion function.
func Convert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig(in *config.CrashLoopBackOffConfig, out *configv1beta1.CrashLoopBackOffConfig, s conversion.Scope) error {
return autoConvert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig(in, out, s)
}
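// Note: where the source and destination types share an identical memory
// layout, the generated conversions in this file cast through unsafe.Pointer
// (e.g. *(*[]string)(unsafe.Pointer(&in.MatchImages))) to reinterpret the
// value in place rather than copying element by element.
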
func autoConvert_v1beta1_CredentialProvider_To_config_CredentialProvider(in *configv1beta1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*v1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]config.ExecEnvVar)(unsafe.Pointer(&in.Env))
return nil
}
// Convert_v1beta1_CredentialProvider_To_config_CredentialProvider is an autogenerated conversion function.
func Convert_v1beta1_CredentialProvider_To_config_CredentialProvider(in *configv1beta1.CredentialProvider, out *config.CredentialProvider, s conversion.Scope) error {
return autoConvert_v1beta1_CredentialProvider_To_config_CredentialProvider(in, out, s)
}
func autoConvert_config_CredentialProvider_To_v1beta1_CredentialProvider(in *config.CredentialProvider, out *configv1beta1.CredentialProvider, s conversion.Scope) error {
out.Name = in.Name
out.MatchImages = *(*[]string)(unsafe.Pointer(&in.MatchImages))
out.DefaultCacheDuration = (*v1.Duration)(unsafe.Pointer(in.DefaultCacheDuration))
out.APIVersion = in.APIVersion
out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
out.Env = *(*[]configv1beta1.ExecEnvVar)(unsafe.Pointer(&in.Env))
// WARNING: in.TokenAttributes requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1beta1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1beta1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]config.CredentialProvider, len(*in))
for i := range *in {
if err := Convert_v1beta1_CredentialProvider_To_config_CredentialProvider(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Providers = nil
}
return nil
}
// Convert_v1beta1_CredentialProviderConfig_To_config_CredentialProviderConfig is an autogenerated conversion function.
func Convert_v1beta1_CredentialProviderConfig_To_config_CredentialProviderConfig(in *configv1beta1.CredentialProviderConfig, out *config.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_v1beta1_CredentialProviderConfig_To_config_CredentialProviderConfig(in, out, s)
}
func autoConvert_config_CredentialProviderConfig_To_v1beta1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1beta1.CredentialProviderConfig, s conversion.Scope) error {
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]configv1beta1.CredentialProvider, len(*in))
for i := range *in {
if err := Convert_config_CredentialProvider_To_v1beta1_CredentialProvider(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Providers = nil
}
return nil
}
// Convert_config_CredentialProviderConfig_To_v1beta1_CredentialProviderConfig is an autogenerated conversion function.
func Convert_config_CredentialProviderConfig_To_v1beta1_CredentialProviderConfig(in *config.CredentialProviderConfig, out *configv1beta1.CredentialProviderConfig, s conversion.Scope) error {
return autoConvert_config_CredentialProviderConfig_To_v1beta1_CredentialProviderConfig(in, out, s)
}
func autoConvert_v1beta1_ExecEnvVar_To_config_ExecEnvVar(in *configv1beta1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_v1beta1_ExecEnvVar_To_config_ExecEnvVar is an autogenerated conversion function.
func Convert_v1beta1_ExecEnvVar_To_config_ExecEnvVar(in *configv1beta1.ExecEnvVar, out *config.ExecEnvVar, s conversion.Scope) error {
return autoConvert_v1beta1_ExecEnvVar_To_config_ExecEnvVar(in, out, s)
}
func autoConvert_config_ExecEnvVar_To_v1beta1_ExecEnvVar(in *config.ExecEnvVar, out *configv1beta1.ExecEnvVar, s conversion.Scope) error {
out.Name = in.Name
out.Value = in.Value
return nil
}
// Convert_config_ExecEnvVar_To_v1beta1_ExecEnvVar is an autogenerated conversion function.
func Convert_config_ExecEnvVar_To_v1beta1_ExecEnvVar(in *config.ExecEnvVar, out *configv1beta1.ExecEnvVar, s conversion.Scope) error {
return autoConvert_config_ExecEnvVar_To_v1beta1_ExecEnvVar(in, out, s)
}
func autoConvert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in *configv1beta1.KubeletAnonymousAuthentication, out *config.KubeletAnonymousAuthentication, s conversion.Scope) error {
if err := v1.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication is an autogenerated conversion function.
func Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in *configv1beta1.KubeletAnonymousAuthentication, out *config.KubeletAnonymousAuthentication, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(in, out, s)
}
func autoConvert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in *config.KubeletAnonymousAuthentication, out *configv1beta1.KubeletAnonymousAuthentication, s conversion.Scope) error {
if err := v1.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil {
return err
}
return nil
}
// Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication is an autogenerated conversion function.
func Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in *config.KubeletAnonymousAuthentication, out *configv1beta1.KubeletAnonymousAuthentication, s conversion.Scope) error {
return autoConvert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(in, out, s)
}
func autoConvert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in *configv1beta1.KubeletAuthentication, out *config.KubeletAuthentication, s conversion.Scope) error {
if err := Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil {
return err
}
if err := Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil {
return err
}
if err := Convert_v1beta1_KubeletAnonymousAuthentication_To_config_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication is an autogenerated conversion function.
func Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in *configv1beta1.KubeletAuthentication, out *config.KubeletAuthentication, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(in, out, s)
}
func autoConvert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in *config.KubeletAuthentication, out *configv1beta1.KubeletAuthentication, s conversion.Scope) error {
if err := Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(&in.X509, &out.X509, s); err != nil {
return err
}
if err := Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(&in.Webhook, &out.Webhook, s); err != nil {
return err
}
if err := Convert_config_KubeletAnonymousAuthentication_To_v1beta1_KubeletAnonymousAuthentication(&in.Anonymous, &out.Anonymous, s); err != nil {
return err
}
return nil
}
// Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication is an autogenerated conversion function.
func Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in *config.KubeletAuthentication, out *configv1beta1.KubeletAuthentication, s conversion.Scope) error {
return autoConvert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(in, out, s)
}
func autoConvert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in *configv1beta1.KubeletAuthorization, out *config.KubeletAuthorization, s conversion.Scope) error {
out.Mode = config.KubeletAuthorizationMode(in.Mode)
if err := Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil {
return err
}
return nil
}
// Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization is an autogenerated conversion function.
func Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in *configv1beta1.KubeletAuthorization, out *config.KubeletAuthorization, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(in, out, s)
}
func autoConvert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in *config.KubeletAuthorization, out *configv1beta1.KubeletAuthorization, s conversion.Scope) error {
out.Mode = configv1beta1.KubeletAuthorizationMode(in.Mode)
if err := Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(&in.Webhook, &out.Webhook, s); err != nil {
return err
}
return nil
}
// Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization is an autogenerated conversion function.
func Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in *config.KubeletAuthorization, out *configv1beta1.KubeletAuthorization, s conversion.Scope) error {
return autoConvert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(in, out, s)
}
func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in *configv1beta1.KubeletConfiguration, out *config.KubeletConfiguration, s conversion.Scope) error {
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableServer, &out.EnableServer, s); err != nil {
return err
}
out.StaticPodPath = in.StaticPodPath
out.PodLogsDir = in.PodLogsDir
out.SyncFrequency = in.SyncFrequency
out.FileCheckFrequency = in.FileCheckFrequency
out.HTTPCheckFrequency = in.HTTPCheckFrequency
out.StaticPodURL = in.StaticPodURL
out.StaticPodURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.StaticPodURLHeader))
out.Address = in.Address
out.Port = in.Port
out.ReadOnlyPort = in.ReadOnlyPort
out.TLSCertFile = in.TLSCertFile
out.TLSPrivateKeyFile = in.TLSPrivateKeyFile
out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites))
out.TLSMinVersion = in.TLSMinVersion
out.RotateCertificates = in.RotateCertificates
out.ServerTLSBootstrap = in.ServerTLSBootstrap
if err := Convert_v1beta1_KubeletAuthentication_To_config_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil {
return err
}
if err := Convert_v1beta1_KubeletAuthorization_To_config_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil {
return err
}
out.RegistryBurst = in.RegistryBurst
out.ImagePullCredentialsVerificationPolicy = string(in.ImagePullCredentialsVerificationPolicy)
out.PreloadedImagesVerificationAllowlist = *(*[]string)(unsafe.Pointer(&in.PreloadedImagesVerificationAllowlist))
if err := v1.Convert_Pointer_int32_To_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil {
return err
}
out.EventBurst = in.EventBurst
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil {
return err
}
out.EnableContentionProfiling = in.EnableContentionProfiling
if err := v1.Convert_Pointer_int32_To_int32(&in.HealthzPort, &out.HealthzPort, s); err != nil {
return err
}
out.HealthzBindAddress = in.HealthzBindAddress
if err := v1.Convert_Pointer_int32_To_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil {
return err
}
out.ClusterDomain = in.ClusterDomain
out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS))
out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
out.NodeStatusReportFrequency = in.NodeStatusReportFrequency
out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds
out.ImageMinimumGCAge = in.ImageMinimumGCAge
out.ImageMaximumGCAge = in.ImageMaximumGCAge
if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil {
return err
}
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
out.KubeletCgroups = in.KubeletCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
if err := v1.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
return err
}
out.CgroupDriver = in.CgroupDriver
out.CPUManagerPolicy = in.CPUManagerPolicy
out.SingleProcessOOMKill = (*bool)(unsafe.Pointer(in.SingleProcessOOMKill))
out.CPUManagerPolicyOptions = *(*map[string]string)(unsafe.Pointer(&in.CPUManagerPolicyOptions))
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
out.MemoryManagerPolicy = in.MemoryManagerPolicy
out.TopologyManagerPolicy = in.TopologyManagerPolicy
out.TopologyManagerScope = in.TopologyManagerScope
out.TopologyManagerPolicyOptions = *(*map[string]string)(unsafe.Pointer(&in.TopologyManagerPolicyOptions))
out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved))
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.HairpinMode = in.HairpinMode
out.MaxPods = in.MaxPods
out.PodCIDR = in.PodCIDR
if err := v1.Convert_Pointer_int64_To_int64(&in.PodPidsLimit, &out.PodPidsLimit, s); err != nil {
return err
}
if err := v1.Convert_Pointer_string_To_string(&in.ResolverConfig, &out.ResolverConfig, s); err != nil {
return err
}
out.RunOnce = in.RunOnce
if err := v1.Convert_Pointer_bool_To_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil {
return err
}
if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.NodeStatusMaxImages, &out.NodeStatusMaxImages, s); err != nil {
return err
}
out.MaxOpenFiles = in.MaxOpenFiles
out.ContentType = in.ContentType
if err := v1.Convert_Pointer_int32_To_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil {
return err
}
out.KubeAPIBurst = in.KubeAPIBurst
if err := v1.Convert_Pointer_bool_To_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil {
return err
}
out.MaxParallelImagePulls = (*int32)(unsafe.Pointer(in.MaxParallelImagePulls))
out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard))
out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft))
out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod))
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim))
if err := v1.Convert_Pointer_bool_To_bool(&in.MergeDefaultEvictionSettings, &out.MergeDefaultEvictionSettings, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err
}
out.ProtectKernelDefaults = in.ProtectKernelDefaults
if err := v1.Convert_Pointer_bool_To_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil {
return err
}
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
if err := v1.Convert_Pointer_bool_To_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil {
return err
}
if err := Convert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration(&in.MemorySwap, &out.MemorySwap, s); err != nil {
return err
}
out.ContainerLogMaxSize = in.ContainerLogMaxSize
if err := v1.Convert_Pointer_int32_To_int32(&in.ContainerLogMaxFiles, &out.ContainerLogMaxFiles, s); err != nil {
return err
}
if err := v1.Convert_Pointer_int32_To_int32(&in.ContainerLogMaxWorkers, &out.ContainerLogMaxWorkers, s); err != nil {
return err
}
if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.ContainerLogMonitorInterval, &out.ContainerLogMonitorInterval, s); err != nil {
return err
}
out.ConfigMapAndSecretChangeDetectionStrategy = config.ResourceChangeDetectionStrategy(in.ConfigMapAndSecretChangeDetectionStrategy)
out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved))
out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved))
out.ReservedSystemCPUs = in.ReservedSystemCPUs
out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion
out.SystemReservedCgroup = in.SystemReservedCgroup
out.KubeReservedCgroup = in.KubeReservedCgroup
out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable))
out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls))
out.VolumePluginDir = in.VolumePluginDir
out.ProviderID = in.ProviderID
out.KernelMemcgNotification = in.KernelMemcgNotification
out.Logging = in.Logging
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableSystemLogHandler, &out.EnableSystemLogHandler, s); err != nil {
return err
}
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableSystemLogQuery, &out.EnableSystemLogQuery, s); err != nil {
return err
}
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
out.ShutdownGracePeriodByPodPriority = *(*[]config.ShutdownGracePeriodByPodPriority)(unsafe.Pointer(&in.ShutdownGracePeriodByPodPriority))
if err := Convert_v1beta1_CrashLoopBackOffConfig_To_config_CrashLoopBackOffConfig(&in.CrashLoopBackOff, &out.CrashLoopBackOff, s); err != nil {
return err
}
out.ReservedMemory = *(*[]config.MemoryReservation)(unsafe.Pointer(&in.ReservedMemory))
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableProfilingHandler, &out.EnableProfilingHandler, s); err != nil {
return err
}
if err := v1.Convert_Pointer_bool_To_bool(&in.EnableDebugFlagsHandler, &out.EnableDebugFlagsHandler, s); err != nil {
return err
}
if err := v1.Convert_Pointer_bool_To_bool(&in.SeccompDefault, &out.SeccompDefault, s); err != nil {
return err
}
out.MemoryThrottlingFactor = (*float64)(unsafe.Pointer(in.MemoryThrottlingFactor))
out.RegisterWithTaints = *(*[]corev1.Taint)(unsafe.Pointer(&in.RegisterWithTaints))
if err := v1.Convert_Pointer_bool_To_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil {
return err
}
out.Tracing = (*apiv1.TracingConfiguration)(unsafe.Pointer(in.Tracing))
if err := v1.Convert_Pointer_bool_To_bool(&in.LocalStorageCapacityIsolation, &out.LocalStorageCapacityIsolation, s); err != nil {
return err
}
out.ContainerRuntimeEndpoint = in.ContainerRuntimeEndpoint
out.ImageServiceEndpoint = in.ImageServiceEndpoint
if err := v1.Convert_Pointer_bool_To_bool(&in.FailCgroupV1, &out.FailCgroupV1, s); err != nil {
return err
}
out.UserNamespaces = (*config.UserNamespaces)(unsafe.Pointer(in.UserNamespaces))
return nil
}
// Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration is an autogenerated conversion function.
func Convert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in *configv1beta1.KubeletConfiguration, out *config.KubeletConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in, out, s)
}
func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in *config.KubeletConfiguration, out *configv1beta1.KubeletConfiguration, s conversion.Scope) error {
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableServer, &out.EnableServer, s); err != nil {
return err
}
out.StaticPodPath = in.StaticPodPath
out.PodLogsDir = in.PodLogsDir
out.SyncFrequency = in.SyncFrequency
out.FileCheckFrequency = in.FileCheckFrequency
out.HTTPCheckFrequency = in.HTTPCheckFrequency
out.StaticPodURL = in.StaticPodURL
out.StaticPodURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.StaticPodURLHeader))
out.Address = in.Address
out.Port = in.Port
out.ReadOnlyPort = in.ReadOnlyPort
out.VolumePluginDir = in.VolumePluginDir
out.ProviderID = in.ProviderID
out.TLSCertFile = in.TLSCertFile
out.TLSPrivateKeyFile = in.TLSPrivateKeyFile
out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites))
out.TLSMinVersion = in.TLSMinVersion
out.RotateCertificates = in.RotateCertificates
out.ServerTLSBootstrap = in.ServerTLSBootstrap
if err := Convert_config_KubeletAuthentication_To_v1beta1_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil {
return err
}
if err := Convert_config_KubeletAuthorization_To_v1beta1_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil {
return err
}
if err := v1.Convert_int32_To_Pointer_int32(&in.RegistryPullQPS, &out.RegistryPullQPS, s); err != nil {
return err
}
out.RegistryBurst = in.RegistryBurst
out.ImagePullCredentialsVerificationPolicy = configv1beta1.ImagePullCredentialsVerificationPolicy(in.ImagePullCredentialsVerificationPolicy)
out.PreloadedImagesVerificationAllowlist = *(*[]string)(unsafe.Pointer(&in.PreloadedImagesVerificationAllowlist))
if err := v1.Convert_int32_To_Pointer_int32(&in.EventRecordQPS, &out.EventRecordQPS, s); err != nil {
return err
}
out.EventBurst = in.EventBurst
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers, s); err != nil {
return err
}
out.EnableContentionProfiling = in.EnableContentionProfiling
if err := v1.Convert_int32_To_Pointer_int32(&in.HealthzPort, &out.HealthzPort, s); err != nil {
return err
}
out.HealthzBindAddress = in.HealthzBindAddress
if err := v1.Convert_int32_To_Pointer_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil {
return err
}
out.ClusterDomain = in.ClusterDomain
out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS))
out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
out.NodeStatusReportFrequency = in.NodeStatusReportFrequency
out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds
out.ImageMinimumGCAge = in.ImageMinimumGCAge
out.ImageMaximumGCAge = in.ImageMaximumGCAge
if err := v1.Convert_int32_To_Pointer_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil {
return err
}
if err := v1.Convert_int32_To_Pointer_int32(&in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent, s); err != nil {
return err
}
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
out.KubeletCgroups = in.KubeletCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
if err := v1.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
return err
}
out.CgroupDriver = in.CgroupDriver
out.SingleProcessOOMKill = (*bool)(unsafe.Pointer(in.SingleProcessOOMKill))
out.CPUManagerPolicy = in.CPUManagerPolicy
out.CPUManagerPolicyOptions = *(*map[string]string)(unsafe.Pointer(&in.CPUManagerPolicyOptions))
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
out.MemoryManagerPolicy = in.MemoryManagerPolicy
out.TopologyManagerPolicy = in.TopologyManagerPolicy
out.TopologyManagerScope = in.TopologyManagerScope
out.TopologyManagerPolicyOptions = *(*map[string]string)(unsafe.Pointer(&in.TopologyManagerPolicyOptions))
out.QOSReserved = *(*map[string]string)(unsafe.Pointer(&in.QOSReserved))
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.HairpinMode = in.HairpinMode
out.MaxPods = in.MaxPods
out.PodCIDR = in.PodCIDR
if err := v1.Convert_int64_To_Pointer_int64(&in.PodPidsLimit, &out.PodPidsLimit, s); err != nil {
return err
}
if err := v1.Convert_string_To_Pointer_string(&in.ResolverConfig, &out.ResolverConfig, s); err != nil {
return err
}
out.RunOnce = in.RunOnce
if err := v1.Convert_bool_To_Pointer_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil {
return err
}
if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil {
return err
}
out.MaxOpenFiles = in.MaxOpenFiles
if err := v1.Convert_int32_To_Pointer_int32(&in.NodeStatusMaxImages, &out.NodeStatusMaxImages, s); err != nil {
return err
}
out.ContentType = in.ContentType
if err := v1.Convert_int32_To_Pointer_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil {
return err
}
out.KubeAPIBurst = in.KubeAPIBurst
if err := v1.Convert_bool_To_Pointer_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil {
return err
}
out.MaxParallelImagePulls = (*int32)(unsafe.Pointer(in.MaxParallelImagePulls))
out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard))
out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft))
out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod))
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim))
if err := v1.Convert_bool_To_Pointer_bool(&in.MergeDefaultEvictionSettings, &out.MergeDefaultEvictionSettings, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err
}
out.ProtectKernelDefaults = in.ProtectKernelDefaults
if err := v1.Convert_bool_To_Pointer_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil {
return err
}
if err := v1.Convert_int32_To_Pointer_int32(&in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit, s); err != nil {
return err
}
if err := v1.Convert_int32_To_Pointer_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil {
return err
}
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
if err := v1.Convert_bool_To_Pointer_bool(&in.FailSwapOn, &out.FailSwapOn, s); err != nil {
return err
}
if err := Convert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration(&in.MemorySwap, &out.MemorySwap, s); err != nil {
return err
}
out.ContainerLogMaxSize = in.ContainerLogMaxSize
if err := v1.Convert_int32_To_Pointer_int32(&in.ContainerLogMaxFiles, &out.ContainerLogMaxFiles, s); err != nil {
return err
}
if err := v1.Convert_int32_To_Pointer_int32(&in.ContainerLogMaxWorkers, &out.ContainerLogMaxWorkers, s); err != nil {
return err
}
if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.ContainerLogMonitorInterval, &out.ContainerLogMonitorInterval, s); err != nil {
return err
}
out.ConfigMapAndSecretChangeDetectionStrategy = configv1beta1.ResourceChangeDetectionStrategy(in.ConfigMapAndSecretChangeDetectionStrategy)
out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls))
out.KernelMemcgNotification = in.KernelMemcgNotification
out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved))
out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved))
out.SystemReservedCgroup = in.SystemReservedCgroup
out.KubeReservedCgroup = in.KubeReservedCgroup
out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable))
out.ReservedSystemCPUs = in.ReservedSystemCPUs
out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion
out.Logging = in.Logging
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableSystemLogHandler, &out.EnableSystemLogHandler, s); err != nil {
return err
}
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableSystemLogQuery, &out.EnableSystemLogQuery, s); err != nil {
return err
}
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
out.ShutdownGracePeriodByPodPriority = *(*[]configv1beta1.ShutdownGracePeriodByPodPriority)(unsafe.Pointer(&in.ShutdownGracePeriodByPodPriority))
out.ReservedMemory = *(*[]configv1beta1.MemoryReservation)(unsafe.Pointer(&in.ReservedMemory))
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableProfilingHandler, &out.EnableProfilingHandler, s); err != nil {
return err
}
if err := v1.Convert_bool_To_Pointer_bool(&in.EnableDebugFlagsHandler, &out.EnableDebugFlagsHandler, s); err != nil {
return err
}
if err := v1.Convert_bool_To_Pointer_bool(&in.SeccompDefault, &out.SeccompDefault, s); err != nil {
return err
}
out.MemoryThrottlingFactor = (*float64)(unsafe.Pointer(in.MemoryThrottlingFactor))
out.RegisterWithTaints = *(*[]corev1.Taint)(unsafe.Pointer(&in.RegisterWithTaints))
if err := v1.Convert_bool_To_Pointer_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil {
return err
}
out.Tracing = (*apiv1.TracingConfiguration)(unsafe.Pointer(in.Tracing))
if err := v1.Convert_bool_To_Pointer_bool(&in.LocalStorageCapacityIsolation, &out.LocalStorageCapacityIsolation, s); err != nil {
return err
}
out.ContainerRuntimeEndpoint = in.ContainerRuntimeEndpoint
out.ImageServiceEndpoint = in.ImageServiceEndpoint
if err := v1.Convert_bool_To_Pointer_bool(&in.FailCgroupV1, &out.FailCgroupV1, s); err != nil {
return err
}
if err := Convert_config_CrashLoopBackOffConfig_To_v1beta1_CrashLoopBackOffConfig(&in.CrashLoopBackOff, &out.CrashLoopBackOff, s); err != nil {
return err
}
out.UserNamespaces = (*configv1beta1.UserNamespaces)(unsafe.Pointer(in.UserNamespaces))
return nil
}
// Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration is an autogenerated conversion function.
func Convert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in *config.KubeletConfiguration, out *configv1beta1.KubeletConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in, out, s)
}
func autoConvert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in *configv1beta1.KubeletWebhookAuthentication, out *config.KubeletWebhookAuthentication, s conversion.Scope) error {
if err := v1.Convert_Pointer_bool_To_bool(&in.Enabled, &out.Enabled, s); err != nil {
return err
}
out.CacheTTL = in.CacheTTL
return nil
}
// Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication is an autogenerated conversion function.
func Convert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in *configv1beta1.KubeletWebhookAuthentication, out *config.KubeletWebhookAuthentication, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletWebhookAuthentication_To_config_KubeletWebhookAuthentication(in, out, s)
}
func autoConvert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in *config.KubeletWebhookAuthentication, out *configv1beta1.KubeletWebhookAuthentication, s conversion.Scope) error {
if err := v1.Convert_bool_To_Pointer_bool(&in.Enabled, &out.Enabled, s); err != nil {
return err
}
out.CacheTTL = in.CacheTTL
return nil
}
// Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication is an autogenerated conversion function.
func Convert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in *config.KubeletWebhookAuthentication, out *configv1beta1.KubeletWebhookAuthentication, s conversion.Scope) error {
return autoConvert_config_KubeletWebhookAuthentication_To_v1beta1_KubeletWebhookAuthentication(in, out, s)
}
func autoConvert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in *configv1beta1.KubeletWebhookAuthorization, out *config.KubeletWebhookAuthorization, s conversion.Scope) error {
out.CacheAuthorizedTTL = in.CacheAuthorizedTTL
out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL
return nil
}
// Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization is an autogenerated conversion function.
func Convert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in *configv1beta1.KubeletWebhookAuthorization, out *config.KubeletWebhookAuthorization, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletWebhookAuthorization_To_config_KubeletWebhookAuthorization(in, out, s)
}
func autoConvert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in *config.KubeletWebhookAuthorization, out *configv1beta1.KubeletWebhookAuthorization, s conversion.Scope) error {
out.CacheAuthorizedTTL = in.CacheAuthorizedTTL
out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL
return nil
}
// Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization is an autogenerated conversion function.
func Convert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in *config.KubeletWebhookAuthorization, out *configv1beta1.KubeletWebhookAuthorization, s conversion.Scope) error {
return autoConvert_config_KubeletWebhookAuthorization_To_v1beta1_KubeletWebhookAuthorization(in, out, s)
}
func autoConvert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in *configv1beta1.KubeletX509Authentication, out *config.KubeletX509Authentication, s conversion.Scope) error {
out.ClientCAFile = in.ClientCAFile
return nil
}
// Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication is an autogenerated conversion function.
func Convert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in *configv1beta1.KubeletX509Authentication, out *config.KubeletX509Authentication, s conversion.Scope) error {
return autoConvert_v1beta1_KubeletX509Authentication_To_config_KubeletX509Authentication(in, out, s)
}
func autoConvert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in *config.KubeletX509Authentication, out *configv1beta1.KubeletX509Authentication, s conversion.Scope) error {
out.ClientCAFile = in.ClientCAFile
return nil
}
// Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication is an autogenerated conversion function.
func Convert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in *config.KubeletX509Authentication, out *configv1beta1.KubeletX509Authentication, s conversion.Scope) error {
return autoConvert_config_KubeletX509Authentication_To_v1beta1_KubeletX509Authentication(in, out, s)
}
func autoConvert_v1beta1_MemoryReservation_To_config_MemoryReservation(in *configv1beta1.MemoryReservation, out *config.MemoryReservation, s conversion.Scope) error {
out.NumaNode = in.NumaNode
out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits))
return nil
}
// Convert_v1beta1_MemoryReservation_To_config_MemoryReservation is an autogenerated conversion function.
func Convert_v1beta1_MemoryReservation_To_config_MemoryReservation(in *configv1beta1.MemoryReservation, out *config.MemoryReservation, s conversion.Scope) error {
return autoConvert_v1beta1_MemoryReservation_To_config_MemoryReservation(in, out, s)
}
func autoConvert_config_MemoryReservation_To_v1beta1_MemoryReservation(in *config.MemoryReservation, out *configv1beta1.MemoryReservation, s conversion.Scope) error {
out.NumaNode = in.NumaNode
out.Limits = *(*corev1.ResourceList)(unsafe.Pointer(&in.Limits))
return nil
}
// Convert_config_MemoryReservation_To_v1beta1_MemoryReservation is an autogenerated conversion function.
func Convert_config_MemoryReservation_To_v1beta1_MemoryReservation(in *config.MemoryReservation, out *configv1beta1.MemoryReservation, s conversion.Scope) error {
return autoConvert_config_MemoryReservation_To_v1beta1_MemoryReservation(in, out, s)
}
func autoConvert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration(in *configv1beta1.MemorySwapConfiguration, out *config.MemorySwapConfiguration, s conversion.Scope) error {
out.SwapBehavior = in.SwapBehavior
return nil
}
// Convert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration is an autogenerated conversion function.
func Convert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration(in *configv1beta1.MemorySwapConfiguration, out *config.MemorySwapConfiguration, s conversion.Scope) error {
return autoConvert_v1beta1_MemorySwapConfiguration_To_config_MemorySwapConfiguration(in, out, s)
}
func autoConvert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration(in *config.MemorySwapConfiguration, out *configv1beta1.MemorySwapConfiguration, s conversion.Scope) error {
out.SwapBehavior = in.SwapBehavior
return nil
}
// Convert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration is an autogenerated conversion function.
func Convert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration(in *config.MemorySwapConfiguration, out *configv1beta1.MemorySwapConfiguration, s conversion.Scope) error {
return autoConvert_config_MemorySwapConfiguration_To_v1beta1_MemorySwapConfiguration(in, out, s)
}
func autoConvert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in *configv1beta1.SerializedNodeConfigSource, out *config.SerializedNodeConfigSource, s conversion.Scope) error {
out.Source = in.Source
return nil
}
// Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource is an autogenerated conversion function.
func Convert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in *configv1beta1.SerializedNodeConfigSource, out *config.SerializedNodeConfigSource, s conversion.Scope) error {
return autoConvert_v1beta1_SerializedNodeConfigSource_To_config_SerializedNodeConfigSource(in, out, s)
}
func autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in *config.SerializedNodeConfigSource, out *configv1beta1.SerializedNodeConfigSource, s conversion.Scope) error {
out.Source = in.Source
return nil
}
// Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource is an autogenerated conversion function.
func Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in *config.SerializedNodeConfigSource, out *configv1beta1.SerializedNodeConfigSource, s conversion.Scope) error {
return autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in, out, s)
}
func autoConvert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in *configv1beta1.ShutdownGracePeriodByPodPriority, out *config.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
out.Priority = in.Priority
out.ShutdownGracePeriodSeconds = in.ShutdownGracePeriodSeconds
return nil
}
// Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority is an autogenerated conversion function.
func Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in *configv1beta1.ShutdownGracePeriodByPodPriority, out *config.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
return autoConvert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in, out, s)
}
func autoConvert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in *config.ShutdownGracePeriodByPodPriority, out *configv1beta1.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
out.Priority = in.Priority
out.ShutdownGracePeriodSeconds = in.ShutdownGracePeriodSeconds
return nil
}
// Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority is an autogenerated conversion function.
func Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in *config.ShutdownGracePeriodByPodPriority, out *configv1beta1.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
return autoConvert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in, out, s)
}
func autoConvert_v1beta1_UserNamespaces_To_config_UserNamespaces(in *configv1beta1.UserNamespaces, out *config.UserNamespaces, s conversion.Scope) error {
out.IDsPerPod = (*int64)(unsafe.Pointer(in.IDsPerPod))
return nil
}
// Convert_v1beta1_UserNamespaces_To_config_UserNamespaces is an autogenerated conversion function.
func Convert_v1beta1_UserNamespaces_To_config_UserNamespaces(in *configv1beta1.UserNamespaces, out *config.UserNamespaces, s conversion.Scope) error {
return autoConvert_v1beta1_UserNamespaces_To_config_UserNamespaces(in, out, s)
}
func autoConvert_config_UserNamespaces_To_v1beta1_UserNamespaces(in *config.UserNamespaces, out *configv1beta1.UserNamespaces, s conversion.Scope) error {
out.IDsPerPod = (*int64)(unsafe.Pointer(in.IDsPerPod))
return nil
}
// Convert_config_UserNamespaces_To_v1beta1_UserNamespaces is an autogenerated conversion function.
func Convert_config_UserNamespaces_To_v1beta1_UserNamespaces(in *config.UserNamespaces, out *configv1beta1.UserNamespaces, s conversion.Scope) error {
return autoConvert_config_UserNamespaces_To_v1beta1_UserNamespaces(in, out, s)
}
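// Example (illustrative sketch, not part of the generated code): once these
// conversion functions are registered in a runtime.Scheme, callers typically
// convert between the internal and versioned types via Scheme.Convert. The
// scheme variable below is hypothetical.
//
//	internal := &config.KubeletConfiguration{}
//	versioned := &configv1beta1.KubeletConfiguration{}
//	if err := scheme.Convert(internal, versioned, nil); err != nil {
//		// handle conversion error
//	}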
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
configv1beta1 "k8s.io/kubelet/config/v1beta1"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&configv1beta1.KubeletConfiguration{}, func(obj interface{}) {
SetObjectDefaults_KubeletConfiguration(obj.(*configv1beta1.KubeletConfiguration))
})
return nil
}
func SetObjectDefaults_KubeletConfiguration(in *configv1beta1.KubeletConfiguration) {
SetDefaults_KubeletConfiguration(in)
for i := range in.ReservedMemory {
a := &in.ReservedMemory[i]
v1.SetDefaults_ResourceList(&a.Limits)
}
}
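// Example (illustrative sketch): RegisterDefaults is normally called while
// assembling a scheme; Scheme.Default then applies
// SetObjectDefaults_KubeletConfiguration to matching objects.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterDefaults(scheme); err != nil {
//		// handle registration error
//	}
//	cfg := &configv1beta1.KubeletConfiguration{}
//	scheme.Default(cfg) // fills in defaulted fields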
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiv1 "k8s.io/component-base/tracing/api/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrashLoopBackOffConfig) DeepCopyInto(out *CrashLoopBackOffConfig) {
*out = *in
if in.MaxContainerRestartPeriod != nil {
in, out := &in.MaxContainerRestartPeriod, &out.MaxContainerRestartPeriod
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrashLoopBackOffConfig.
func (in *CrashLoopBackOffConfig) DeepCopy() *CrashLoopBackOffConfig {
if in == nil {
return nil
}
out := new(CrashLoopBackOffConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProvider) DeepCopyInto(out *CredentialProvider) {
*out = *in
if in.MatchImages != nil {
in, out := &in.MatchImages, &out.MatchImages
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DefaultCacheDuration != nil {
in, out := &in.DefaultCacheDuration, &out.DefaultCacheDuration
*out = new(v1.Duration)
**out = **in
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]ExecEnvVar, len(*in))
copy(*out, *in)
}
if in.TokenAttributes != nil {
in, out := &in.TokenAttributes, &out.TokenAttributes
*out = new(ServiceAccountTokenAttributes)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProvider.
func (in *CredentialProvider) DeepCopy() *CredentialProvider {
if in == nil {
return nil
}
out := new(CredentialProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProviderConfig) DeepCopyInto(out *CredentialProviderConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]CredentialProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProviderConfig.
func (in *CredentialProviderConfig) DeepCopy() *CredentialProviderConfig {
if in == nil {
return nil
}
out := new(CredentialProviderConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CredentialProviderConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
if in == nil {
return nil
}
out := new(ExecEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullCredentials) DeepCopyInto(out *ImagePullCredentials) {
*out = *in
if in.KubernetesSecrets != nil {
in, out := &in.KubernetesSecrets, &out.KubernetesSecrets
*out = make([]ImagePullSecret, len(*in))
copy(*out, *in)
}
if in.KubernetesServiceAccounts != nil {
in, out := &in.KubernetesServiceAccounts, &out.KubernetesServiceAccounts
*out = make([]ImagePullServiceAccount, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullCredentials.
func (in *ImagePullCredentials) DeepCopy() *ImagePullCredentials {
if in == nil {
return nil
}
out := new(ImagePullCredentials)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullIntent) DeepCopyInto(out *ImagePullIntent) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullIntent.
func (in *ImagePullIntent) DeepCopy() *ImagePullIntent {
if in == nil {
return nil
}
out := new(ImagePullIntent)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImagePullIntent) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullSecret) DeepCopyInto(out *ImagePullSecret) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullSecret.
func (in *ImagePullSecret) DeepCopy() *ImagePullSecret {
if in == nil {
return nil
}
out := new(ImagePullSecret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullServiceAccount) DeepCopyInto(out *ImagePullServiceAccount) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullServiceAccount.
func (in *ImagePullServiceAccount) DeepCopy() *ImagePullServiceAccount {
if in == nil {
return nil
}
out := new(ImagePullServiceAccount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePulledRecord) DeepCopyInto(out *ImagePulledRecord) {
*out = *in
out.TypeMeta = in.TypeMeta
in.LastUpdatedTime.DeepCopyInto(&out.LastUpdatedTime)
if in.CredentialMapping != nil {
in, out := &in.CredentialMapping, &out.CredentialMapping
*out = make(map[string]ImagePullCredentials, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePulledRecord.
func (in *ImagePulledRecord) DeepCopy() *ImagePulledRecord {
if in == nil {
return nil
}
out := new(ImagePulledRecord)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImagePulledRecord) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAnonymousAuthentication.
func (in *KubeletAnonymousAuthentication) DeepCopy() *KubeletAnonymousAuthentication {
if in == nil {
return nil
}
out := new(KubeletAnonymousAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthentication) DeepCopyInto(out *KubeletAuthentication) {
*out = *in
out.X509 = in.X509
out.Webhook = in.Webhook
out.Anonymous = in.Anonymous
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthentication.
func (in *KubeletAuthentication) DeepCopy() *KubeletAuthentication {
if in == nil {
return nil
}
out := new(KubeletAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthorization) DeepCopyInto(out *KubeletAuthorization) {
*out = *in
out.Webhook = in.Webhook
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthorization.
func (in *KubeletAuthorization) DeepCopy() *KubeletAuthorization {
if in == nil {
return nil
}
out := new(KubeletAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.SyncFrequency = in.SyncFrequency
out.FileCheckFrequency = in.FileCheckFrequency
out.HTTPCheckFrequency = in.HTTPCheckFrequency
if in.StaticPodURLHeader != nil {
in, out := &in.StaticPodURLHeader, &out.StaticPodURLHeader
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.TLSCipherSuites != nil {
in, out := &in.TLSCipherSuites, &out.TLSCipherSuites
*out = make([]string, len(*in))
copy(*out, *in)
}
out.Authentication = in.Authentication
out.Authorization = in.Authorization
if in.PreloadedImagesVerificationAllowlist != nil {
in, out := &in.PreloadedImagesVerificationAllowlist, &out.PreloadedImagesVerificationAllowlist
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ClusterDNS != nil {
in, out := &in.ClusterDNS, &out.ClusterDNS
*out = make([]string, len(*in))
copy(*out, *in)
}
out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
out.NodeStatusReportFrequency = in.NodeStatusReportFrequency
out.ImageMinimumGCAge = in.ImageMinimumGCAge
out.ImageMaximumGCAge = in.ImageMaximumGCAge
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
if in.SingleProcessOOMKill != nil {
in, out := &in.SingleProcessOOMKill, &out.SingleProcessOOMKill
*out = new(bool)
**out = **in
}
if in.CPUManagerPolicyOptions != nil {
in, out := &in.CPUManagerPolicyOptions, &out.CPUManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
if in.TopologyManagerPolicyOptions != nil {
in, out := &in.TopologyManagerPolicyOptions, &out.TopologyManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.QOSReserved != nil {
in, out := &in.QOSReserved, &out.QOSReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod
if in.MaxParallelImagePulls != nil {
in, out := &in.MaxParallelImagePulls, &out.MaxParallelImagePulls
*out = new(int32)
**out = **in
}
if in.EvictionHard != nil {
in, out := &in.EvictionHard, &out.EvictionHard
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoft != nil {
in, out := &in.EvictionSoft, &out.EvictionSoft
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoftGracePeriod != nil {
in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
if in.EvictionMinimumReclaim != nil {
in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.MemorySwap = in.MemorySwap
out.ContainerLogMonitorInterval = in.ContainerLogMonitorInterval
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SystemReserved != nil {
in, out := &in.SystemReserved, &out.SystemReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.KubeReserved != nil {
in, out := &in.KubeReserved, &out.KubeReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EnforceNodeAllocatable != nil {
in, out := &in.EnforceNodeAllocatable, &out.EnforceNodeAllocatable
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Logging.DeepCopyInto(&out.Logging)
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
if in.ShutdownGracePeriodByPodPriority != nil {
in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
copy(*out, *in)
}
if in.ReservedMemory != nil {
in, out := &in.ReservedMemory, &out.ReservedMemory
*out = make([]MemoryReservation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MemoryThrottlingFactor != nil {
in, out := &in.MemoryThrottlingFactor, &out.MemoryThrottlingFactor
*out = new(float64)
**out = **in
}
if in.RegisterWithTaints != nil {
in, out := &in.RegisterWithTaints, &out.RegisterWithTaints
*out = make([]corev1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Tracing != nil {
in, out := &in.Tracing, &out.Tracing
*out = new(apiv1.TracingConfiguration)
(*in).DeepCopyInto(*out)
}
in.CrashLoopBackOff.DeepCopyInto(&out.CrashLoopBackOff)
if in.UserNamespaces != nil {
in, out := &in.UserNamespaces, &out.UserNamespaces
*out = new(UserNamespaces)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration.
func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration {
if in == nil {
return nil
}
out := new(KubeletConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeletConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthentication) DeepCopyInto(out *KubeletWebhookAuthentication) {
*out = *in
out.CacheTTL = in.CacheTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthentication.
func (in *KubeletWebhookAuthentication) DeepCopy() *KubeletWebhookAuthentication {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthorization) DeepCopyInto(out *KubeletWebhookAuthorization) {
*out = *in
out.CacheAuthorizedTTL = in.CacheAuthorizedTTL
out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthorization.
func (in *KubeletWebhookAuthorization) DeepCopy() *KubeletWebhookAuthorization {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletX509Authentication) DeepCopyInto(out *KubeletX509Authentication) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletX509Authentication.
func (in *KubeletX509Authentication) DeepCopy() *KubeletX509Authentication {
if in == nil {
return nil
}
out := new(KubeletX509Authentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryReservation) DeepCopyInto(out *MemoryReservation) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryReservation.
func (in *MemoryReservation) DeepCopy() *MemoryReservation {
if in == nil {
return nil
}
out := new(MemoryReservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemorySwapConfiguration) DeepCopyInto(out *MemorySwapConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemorySwapConfiguration.
func (in *MemorySwapConfiguration) DeepCopy() *MemorySwapConfiguration {
if in == nil {
return nil
}
out := new(MemorySwapConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerializedNodeConfigSource) DeepCopyInto(out *SerializedNodeConfigSource) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedNodeConfigSource.
func (in *SerializedNodeConfigSource) DeepCopy() *SerializedNodeConfigSource {
if in == nil {
return nil
}
out := new(SerializedNodeConfigSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAccountTokenAttributes) DeepCopyInto(out *ServiceAccountTokenAttributes) {
*out = *in
if in.RequireServiceAccount != nil {
in, out := &in.RequireServiceAccount, &out.RequireServiceAccount
*out = new(bool)
**out = **in
}
if in.RequiredServiceAccountAnnotationKeys != nil {
in, out := &in.RequiredServiceAccountAnnotationKeys, &out.RequiredServiceAccountAnnotationKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.OptionalServiceAccountAnnotationKeys != nil {
in, out := &in.OptionalServiceAccountAnnotationKeys, &out.OptionalServiceAccountAnnotationKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenAttributes.
func (in *ServiceAccountTokenAttributes) DeepCopy() *ServiceAccountTokenAttributes {
if in == nil {
return nil
}
out := new(ServiceAccountTokenAttributes)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
if in == nil {
return nil
}
out := new(ShutdownGracePeriodByPodPriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserNamespaces) DeepCopyInto(out *UserNamespaces) {
*out = *in
if in.IDsPerPod != nil {
in, out := &in.IDsPerPod, &out.IDsPerPod
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserNamespaces.
func (in *UserNamespaces) DeepCopy() *UserNamespaces {
if in == nil {
return nil
}
out := new(UserNamespaces)
in.DeepCopyInto(out)
return out
}
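// Example (illustrative sketch): DeepCopy yields a fully independent copy, so
// mutating a pointer field on the clone leaves the original untouched.
//
//	orig := &UserNamespaces{IDsPerPod: new(int64)}
//	*orig.IDsPerPod = 65536
//	clone := orig.DeepCopy()
//	*clone.IDsPerPod = 1 // *orig.IDsPerPod is still 65536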
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package grpc
import (
"context"
gotimerate "golang.org/x/time/rate"
"k8s.io/klog/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
ErrorLimitExceeded = status.Error(codes.ResourceExhausted, "rejected by rate limit")
)
// Limiter defines the interface to perform request rate limiting,
// based on the interface exposed by https://pkg.go.dev/golang.org/x/time/rate#Limiter
type Limiter interface {
// Allow reports whether an event may happen now.
Allow() bool
}
// LimiterUnaryServerInterceptor returns a new unary server interceptors that performs request rate limiting.
func LimiterUnaryServerInterceptor(limiter Limiter) grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
if !limiter.Allow() {
return nil, ErrorLimitExceeded
}
return handler(ctx, req)
}
}
// WithRateLimiter creates a new rate limiter and installs it as a unary server interceptor.
func WithRateLimiter(ctx context.Context, serviceName string, qps, burstTokens int32) grpc.ServerOption {
logger := klog.FromContext(ctx)
qpsVal := gotimerate.Limit(qps)
burstVal := int(burstTokens)
logger.Info("Setting rate limiting for endpoint", "service", serviceName, "qps", qpsVal, "burstTokens", burstVal)
return grpc.UnaryInterceptor(LimiterUnaryServerInterceptor(gotimerate.NewLimiter(qpsVal, burstVal)))
}
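// Example (illustrative sketch): wiring the limiter into a gRPC server; the
// service name and limits are hypothetical values.
//
//	srv := grpc.NewServer(WithRateLimiter(ctx, "podresources", 100, 10))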
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/cri-client/pkg/util"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
)
// Note: Consumers of the pod resources API should not import this package.
// They should instead copy these functions into their own projects.
// GetV1alpha1Client returns a client for the PodResourcesLister grpc service.
// Note: This is deprecated; prefer GetV1Client.
func GetV1alpha1Client(socket string, connectionTimeout time.Duration, maxMsgSize int) (v1alpha1.PodResourcesListerClient, *grpc.ClientConn, error) {
addr, dialer, err := util.GetAddressAndDialer(socket)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithTimeout(context.TODO(), connectionTimeout)
defer cancel()
conn, err := grpc.DialContext(ctx, addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
if err != nil {
return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err)
}
return v1alpha1.NewPodResourcesListerClient(conn), conn, nil
}
// GetV1Client returns a client for the PodResourcesLister grpc service
func GetV1Client(socket string, connectionTimeout time.Duration, maxMsgSize int) (v1.PodResourcesListerClient, *grpc.ClientConn, error) {
addr, dialer, err := util.GetAddressAndDialer(socket)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithTimeout(context.TODO(), connectionTimeout)
defer cancel()
conn, err := grpc.DialContext(ctx, addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)))
if err != nil {
return nil, nil, fmt.Errorf("error dialing socket %s: %v", socket, err)
}
return v1.NewPodResourcesListerClient(conn), conn, nil
}
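// Example (illustrative sketch): dialing the kubelet pod resources socket and
// listing pod resources; the socket path and message size are hypothetical.
//
//	client, conn, err := GetV1Client("unix:///var/lib/kubelet/pod-resources/kubelet.sock", 10*time.Second, 16*1024*1024)
//	if err != nil {
//		// handle dial error
//	}
//	defer conn.Close()
//	resp, err := client.List(context.TODO(), &v1.ListPodResourcesRequest{})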
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/metrics"
podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
)
// v1PodResourcesServer implements PodResourcesListerServer
type v1PodResourcesServer struct {
podsProvider PodsProvider
devicesProvider DevicesProvider
cpusProvider CPUsProvider
memoryProvider MemoryProvider
dynamicResourcesProvider DynamicResourcesProvider
useActivePods bool
podresourcesv1.UnsafePodResourcesListerServer
}
// NewV1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
func NewV1PodResourcesServer(ctx context.Context, providers PodResourcesProviders) podresourcesv1.PodResourcesListerServer {
logger := klog.FromContext(ctx)
useActivePods := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesListUseActivePods)
logger.Info("podresources", "method", "list", "useActivePods", useActivePods)
return &v1PodResourcesServer{
podsProvider: providers.Pods,
devicesProvider: providers.Devices,
cpusProvider: providers.Cpus,
memoryProvider: providers.Memory,
dynamicResourcesProvider: providers.DynamicResources,
useActivePods: useActivePods,
}
}
// List returns information about the resources assigned to pods on the node
func (p *v1PodResourcesServer) List(ctx context.Context, req *podresourcesv1.ListPodResourcesRequest) (*podresourcesv1.ListPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsListCount.WithLabelValues("v1").Inc()
var pods []*v1.Pod
if p.useActivePods {
pods = p.podsProvider.GetActivePods()
} else {
pods = p.podsProvider.GetPods()
}
podResources := make([]*podresourcesv1.PodResources, len(pods))
p.devicesProvider.UpdateAllocatedDevices()
for i, pod := range pods {
pRes := podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers)),
}
for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
}
for _, container := range pod.Spec.Containers {
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
}
podResources[i] = &pRes
}
response := &podresourcesv1.ListPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}
// GetAllocatableResources returns information about all the resources known by the server - this is closer to the node's capacity than to the amount of resources currently free.
func (p *v1PodResourcesServer) GetAllocatableResources(ctx context.Context, req *podresourcesv1.AllocatableResourcesRequest) (*podresourcesv1.AllocatableResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetAllocatableCount.WithLabelValues("v1").Inc()
response := &podresourcesv1.AllocatableResourcesResponse{
Devices: p.devicesProvider.GetAllocatableDevices(),
CpuIds: p.cpusProvider.GetAllocatableCPUs(),
Memory: p.memoryProvider.GetAllocatableMemory(),
}
return response, nil
}
// Get returns information about the resources assigned to a specific pod
func (p *v1PodResourcesServer) Get(ctx context.Context, req *podresourcesv1.GetPodResourcesRequest) (*podresourcesv1.GetPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1").Inc()
metrics.PodResourcesEndpointRequestsGetCount.WithLabelValues("v1").Inc()
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesGet) {
metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc()
return nil, fmt.Errorf("PodResources API Get method disabled")
}
pod, exist := p.podsProvider.GetPodByName(req.PodNamespace, req.PodName)
if !exist {
metrics.PodResourcesEndpointErrorsGetCount.WithLabelValues("v1").Inc()
return nil, fmt.Errorf("pod %s in namespace %s not found", req.PodName, req.PodNamespace)
}
podResources := &podresourcesv1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers)),
}
for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
}
for _, container := range pod.Spec.Containers {
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
}
response := &podresourcesv1.GetPodResourcesResponse{
PodResources: podResources,
}
return response, nil
}
func (p *v1PodResourcesServer) getContainerResources(pod *v1.Pod, container *v1.Container) *podresourcesv1.ContainerResources {
containerResources := &podresourcesv1.ContainerResources{
Name: container.Name,
Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name),
CpuIds: p.cpusProvider.GetCPUs(string(pod.UID), container.Name),
Memory: p.memoryProvider.GetMemory(string(pod.UID), container.Name),
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletPodResourcesDynamicResources) {
containerResources.DynamicResources = p.dynamicResourcesProvider.GetDynamicResources(pod, container)
}
return containerResources
}
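// Example (illustrative sketch): registering the v1 server on a gRPC server;
// providers is a hypothetical PodResourcesProviders value.
//
//	srv := grpc.NewServer()
//	podresourcesv1.RegisterPodResourcesListerServer(srv, NewV1PodResourcesServer(ctx, providers))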
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podresources
import (
"context"
"k8s.io/kubernetes/pkg/kubelet/metrics"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
)
// v1alpha1PodResourcesServer implements PodResourcesListerServer
type v1alpha1PodResourcesServer struct {
podsProvider PodsProvider
devicesProvider DevicesProvider
v1alpha1.UnsafePodResourcesListerServer
}
// NewV1alpha1PodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider
// with device information provided by the DevicesProvider
func NewV1alpha1PodResourcesServer(providers PodResourcesProviders) v1alpha1.PodResourcesListerServer {
return &v1alpha1PodResourcesServer{
podsProvider: providers.Pods,
devicesProvider: providers.Devices,
}
}
// v1DevicesToAlphaV1 converts v1 container devices to their v1alpha1 equivalents.
func v1DevicesToAlphaV1(v1Devs []*v1.ContainerDevices) []*v1alpha1.ContainerDevices {
var devs []*v1alpha1.ContainerDevices
for _, v1Dev := range v1Devs {
dev := v1alpha1.ContainerDevices{
ResourceName: v1Dev.ResourceName,
DeviceIds: v1Dev.DeviceIds,
}
devs = append(devs, &dev)
}
return devs
}
// List returns information about the resources assigned to pods on the node
func (p *v1alpha1PodResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) {
metrics.PodResourcesEndpointRequestsTotalCount.WithLabelValues("v1alpha1").Inc()
pods := p.podsProvider.GetPods()
podResources := make([]*v1alpha1.PodResources, len(pods))
p.devicesProvider.UpdateAllocatedDevices()
for i, pod := range pods {
pRes := v1alpha1.PodResources{
Name: pod.Name,
Namespace: pod.Namespace,
Containers: make([]*v1alpha1.ContainerResources, len(pod.Spec.Containers)),
}
for j, container := range pod.Spec.Containers {
pRes.Containers[j] = &v1alpha1.ContainerResources{
Name: container.Name,
Devices: v1DevicesToAlphaV1(p.devicesProvider.GetDevices(string(pod.UID), container.Name)),
}
}
podResources[i] = &pRes
}
return &v1alpha1.ListPodResourcesResponse{
PodResources: podResources,
}, nil
}
//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"path"
"time"
// Register supported container handlers.
_ "github.com/google/cadvisor/container/containerd/install"
_ "github.com/google/cadvisor/container/crio/install"
_ "github.com/google/cadvisor/container/systemd/install"
"github.com/google/cadvisor/cache/memory"
cadvisormetrics "github.com/google/cadvisor/container"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/manager"
"github.com/google/cadvisor/utils/sysfs"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
type cadvisorClient struct {
imageFsInfoProvider ImageFsInfoProvider
rootPath string
manager.Manager
}
var _ Interface = new(cadvisorClient)
// TODO(vmarmol): Make configurable.
// The amount of time for which to keep stats in memory.
const statsCacheDuration = 2 * time.Minute
const maxHousekeepingInterval = 15 * time.Second
const defaultHousekeepingInterval = 10 * time.Second
const allowDynamicHousekeeping = true
func init() {
// Override cAdvisor flag defaults.
flagOverrides := map[string]string{
// Override the default cAdvisor housekeeping interval.
"housekeeping_interval": defaultHousekeepingInterval.String(),
// Disable event storage by default.
"event_storage_event_limit": "default=0",
"event_storage_age_limit": "default=0",
}
for name, defaultValue := range flagOverrides {
if f := flag.Lookup(name); f != nil {
f.DefValue = defaultValue
f.Value.Set(defaultValue)
} else {
ctx := context.Background()
klog.FromContext(ctx).Error(nil, "Expected cAdvisor flag not found", "flag", name)
}
}
}
// New creates a new cAdvisor Interface for linux systems.
func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots []string, usingLegacyStats, localStorageCapacityIsolation bool) (Interface, error) {
sysFs := sysfs.NewRealSysFs()
includedMetrics := cadvisormetrics.MetricSet{
cadvisormetrics.CpuUsageMetrics: struct{}{},
cadvisormetrics.MemoryUsageMetrics: struct{}{},
cadvisormetrics.CpuLoadMetrics: struct{}{},
cadvisormetrics.DiskIOMetrics: struct{}{},
cadvisormetrics.NetworkUsageMetrics: struct{}{},
cadvisormetrics.AppMetrics: struct{}{},
cadvisormetrics.ProcessMetrics: struct{}{},
cadvisormetrics.OOMMetrics: struct{}{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
includedMetrics[cadvisormetrics.PressureMetrics] = struct{}{}
}
if usingLegacyStats || localStorageCapacityIsolation {
includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{}
}
duration := maxHousekeepingInterval
housekeepingConfig := manager.HousekeepingConfig{
Interval: &duration,
AllowDynamic: ptr.To(allowDynamicHousekeeping),
}
// Create the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, housekeepingConfig, includedMetrics, http.DefaultClient, cgroupRoots, nil /* containerEnvMetadataWhiteList */, "" /* perfEventsFile */, time.Duration(0) /*resctrlInterval*/)
if err != nil {
return nil, err
}
if _, err := os.Stat(rootPath); err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(path.Clean(rootPath), 0750); err != nil {
return nil, fmt.Errorf("error creating root directory %q: %v", rootPath, err)
}
} else {
return nil, fmt.Errorf("failed to Stat %q: %v", rootPath, err)
}
}
return &cadvisorClient{
imageFsInfoProvider: imageFsInfoProvider,
rootPath: rootPath,
Manager: m,
}, nil
}
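// Example (illustrative sketch): constructing the cAdvisor client; the
// endpoint, root path, and cgroup roots below are hypothetical.
//
//	c, err := New(NewImageFsInfoProvider("unix:///var/run/crio/crio.sock"),
//		"/var/lib/kubelet", []string{"/kubepods"}, false, true)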
func (cc *cadvisorClient) Start() error {
return cc.Manager.Start()
}
func (cc *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
return cc.GetContainerInfoV2(name, options)
}
func (cc *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return cc.GetVersionInfo()
}
func (cc *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
return cc.GetMachineInfo()
}
func (cc *cadvisorClient) ImagesFsInfo(ctx context.Context) (cadvisorapiv2.FsInfo, error) {
label, err := cc.imageFsInfoProvider.ImageFsInfoLabel()
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
return cc.getFsInfo(ctx, label)
}
func (cc *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return cc.GetDirFsInfo(cc.rootPath)
}
func (cc *cadvisorClient) getFsInfo(ctx context.Context, label string) (cadvisorapiv2.FsInfo, error) {
res, err := cc.GetFsInfo(label)
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
if len(res) == 0 {
return cadvisorapiv2.FsInfo{}, fmt.Errorf("failed to find information for the filesystem labeled %q", label)
}
// TODO(vmarmol): Handle this better when a label has more than one image filesystem.
if len(res) > 1 {
klog.FromContext(ctx).Info("More than one filesystem labeled. Only using the first one", "label", label, "fileSystem", res)
}
return res[0], nil
}
func (cc *cadvisorClient) ContainerFsInfo(ctx context.Context) (cadvisorapiv2.FsInfo, error) {
label, err := cc.imageFsInfoProvider.ContainerFsInfoLabel()
if err != nil {
return cadvisorapiv2.FsInfo{}, err
}
return cc.getFsInfo(ctx, label)
}
//go:build linux
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"fmt"
"strings"
cadvisorfs "github.com/google/cadvisor/fs"
)
// imageFsInfoProvider knows how to translate the configured runtime
// to its file system label for images.
type imageFsInfoProvider struct {
runtimeEndpoint string
}
// ImageFsInfoLabel returns the image fs label for the configured runtime.
// For remote runtimes, it handles additional runtimes natively understood by cAdvisor.
func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) {
if detectCrioWorkaround(i) {
return cadvisorfs.LabelCrioImages, nil
}
return "", fmt.Errorf("no imagefs label for configured runtime")
}
// ContainerFsInfoLabel returns the container fs label for the configured runtime.
// For remote runtimes, it handles additional runtimes natively understood by cAdvisor.
func (i *imageFsInfoProvider) ContainerFsInfoLabel() (string, error) {
if detectCrioWorkaround(i) {
return cadvisorfs.LabelCrioContainers, nil
}
return "", fmt.Errorf("no containerfs label for configured runtime")
}
// This is a temporary workaround to get stats for cri-o from cadvisor
// and should be removed.
// Related to https://github.com/kubernetes/kubernetes/issues/51798
func detectCrioWorkaround(i *imageFsInfoProvider) bool {
return strings.HasSuffix(i.runtimeEndpoint, CrioSocketSuffix)
}
// NewImageFsInfoProvider returns a provider for the specified runtime configuration.
func NewImageFsInfoProvider(runtimeEndpoint string) ImageFsInfoProvider {
return &imageFsInfoProvider{runtimeEndpoint: runtimeEndpoint}
}
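// Example (illustrative sketch): label selection depends only on the endpoint
// suffix, so currently only cri-o endpoints resolve to cAdvisor fs labels.
//
//	p := NewImageFsInfoProvider("unix:///var/run/crio/crio.sock")
//	label, err := p.ImageFsInfoLabel() // cadvisorfs.LabelCrioImages, nil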
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
)
// Fake cadvisor.Interface implementation.
type Fake struct {
NodeName string
}
const (
// FakeKernelVersion is a fake kernel version for testing.
FakeKernelVersion = "3.16.0-0.bpo.4-amd64"
// FakeContainerOSVersion is a fake OS version for testing.
FakeContainerOSVersion = "Debian GNU/Linux 7 (wheezy)"
fakeNumCores = 1
fakeMemoryCapacity = 4026531840
fakeDockerVersion = "1.13.1"
)
var _ cadvisor.Interface = new(Fake)
// Start is a fake implementation of Interface.Start.
func (c *Fake) Start() error {
return nil
}
// ContainerInfoV2 is a fake implementation of Interface.ContainerInfoV2.
func (c *Fake) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
return map[string]cadvisorapiv2.ContainerInfo{}, nil
}
// GetRequestedContainersInfo is a fake implementation of Interface.GetRequestedContainersInfo.
func (c *Fake) GetRequestedContainersInfo(containerName string, options cadvisorapiv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error) {
return map[string]*cadvisorapi.ContainerInfo{}, nil
}
// MachineInfo is a fake implementation of Interface.MachineInfo.
func (c *Fake) MachineInfo() (*cadvisorapi.MachineInfo, error) {
// Simulate a machine with 1 core and 3.75GB of memory.
// We set it to non-zero values to make non-zero-capacity machines in Kubemark.
return &cadvisorapi.MachineInfo{
NumCores: fakeNumCores,
InstanceID: cadvisorapi.InstanceID(c.NodeName),
MemoryCapacity: fakeMemoryCapacity,
}, nil
}
// VersionInfo is a fake implementation of Interface.VersionInfo.
func (c *Fake) VersionInfo() (*cadvisorapi.VersionInfo, error) {
return &cadvisorapi.VersionInfo{
KernelVersion: FakeKernelVersion,
ContainerOsVersion: FakeContainerOSVersion,
DockerVersion: fakeDockerVersion,
}, nil
}
// ImagesFsInfo is a fake implementation of Interface.ImagesFsInfo.
func (c *Fake) ImagesFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
// RootFsInfo is a fake implementation of Interface.RootFsInfo.
func (c *Fake) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
// ContainerFsInfo is a fake implementation of Interface.ContainerFsInfo.
func (c *Fake) ContainerFsInfo(context.Context) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
// GetDirFsInfo is a fake implementation of Interface.GetDirFsInfo.
func (c *Fake) GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
return cadvisorapiv2.FsInfo{}, nil
}
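// Example (illustrative sketch): the Fake stands in for cadvisor.Interface in
// tests that only need a machine with non-zero capacity.
//
//	var c cadvisor.Interface = &Fake{NodeName: "test-node"}
//	info, _ := c.MachineInfo() // NumCores == 1, MemoryCapacity == 4026531840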
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
"context"
"github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/info/v2"
mock "github.com/stretchr/testify/mock"
)
// NewMockInterface creates a new instance of MockInterface. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewMockInterface(t interface {
mock.TestingT
Cleanup(func())
}) *MockInterface {
mock := &MockInterface{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
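// A minimal usage sketch (hypothetical test): the Cleanup hook registered in
// NewMockInterface asserts all expectations when the test finishes.
//
//	func TestUsesCadvisor(t *testing.T) {
//		m := NewMockInterface(t)
//		m.EXPECT().MachineInfo().Return(&v1.MachineInfo{NumCores: 2}, nil)
//		info, err := m.MachineInfo()
//		if err != nil || info.NumCores != 2 {
//			t.Fatal("unexpected result")
//		}
//	}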
// MockInterface is an autogenerated mock type for the Interface type
type MockInterface struct {
mock.Mock
}
type MockInterface_Expecter struct {
mock *mock.Mock
}
func (_m *MockInterface) EXPECT() *MockInterface_Expecter {
return &MockInterface_Expecter{mock: &_m.Mock}
}
// ContainerFsInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) ContainerFsInfo(context1 context.Context) (v2.FsInfo, error) {
ret := _mock.Called(context1)
if len(ret) == 0 {
panic("no return value specified for ContainerFsInfo")
}
var r0 v2.FsInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (v2.FsInfo, error)); ok {
return returnFunc(context1)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) v2.FsInfo); ok {
r0 = returnFunc(context1)
} else {
r0 = ret.Get(0).(v2.FsInfo)
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(context1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_ContainerFsInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerFsInfo'
type MockInterface_ContainerFsInfo_Call struct {
*mock.Call
}
// ContainerFsInfo is a helper method to define mock.On call
// - context1 context.Context
func (_e *MockInterface_Expecter) ContainerFsInfo(context1 interface{}) *MockInterface_ContainerFsInfo_Call {
return &MockInterface_ContainerFsInfo_Call{Call: _e.mock.On("ContainerFsInfo", context1)}
}
func (_c *MockInterface_ContainerFsInfo_Call) Run(run func(context1 context.Context)) *MockInterface_ContainerFsInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockInterface_ContainerFsInfo_Call) Return(fsInfo v2.FsInfo, err error) *MockInterface_ContainerFsInfo_Call {
_c.Call.Return(fsInfo, err)
return _c
}
func (_c *MockInterface_ContainerFsInfo_Call) RunAndReturn(run func(context1 context.Context) (v2.FsInfo, error)) *MockInterface_ContainerFsInfo_Call {
_c.Call.Return(run)
return _c
}
// ContainerInfoV2 provides a mock function for the type MockInterface
func (_mock *MockInterface) ContainerInfoV2(name string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
ret := _mock.Called(name, options)
if len(ret) == 0 {
panic("no return value specified for ContainerInfoV2")
}
var r0 map[string]v2.ContainerInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func(string, v2.RequestOptions) (map[string]v2.ContainerInfo, error)); ok {
return returnFunc(name, options)
}
if returnFunc, ok := ret.Get(0).(func(string, v2.RequestOptions) map[string]v2.ContainerInfo); ok {
r0 = returnFunc(name, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]v2.ContainerInfo)
}
}
if returnFunc, ok := ret.Get(1).(func(string, v2.RequestOptions) error); ok {
r1 = returnFunc(name, options)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_ContainerInfoV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerInfoV2'
type MockInterface_ContainerInfoV2_Call struct {
*mock.Call
}
// ContainerInfoV2 is a helper method to define mock.On call
// - name string
// - options v2.RequestOptions
func (_e *MockInterface_Expecter) ContainerInfoV2(name interface{}, options interface{}) *MockInterface_ContainerInfoV2_Call {
return &MockInterface_ContainerInfoV2_Call{Call: _e.mock.On("ContainerInfoV2", name, options)}
}
func (_c *MockInterface_ContainerInfoV2_Call) Run(run func(name string, options v2.RequestOptions)) *MockInterface_ContainerInfoV2_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 v2.RequestOptions
if args[1] != nil {
arg1 = args[1].(v2.RequestOptions)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockInterface_ContainerInfoV2_Call) Return(stringToContainerInfo map[string]v2.ContainerInfo, err error) *MockInterface_ContainerInfoV2_Call {
_c.Call.Return(stringToContainerInfo, err)
return _c
}
func (_c *MockInterface_ContainerInfoV2_Call) RunAndReturn(run func(name string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error)) *MockInterface_ContainerInfoV2_Call {
_c.Call.Return(run)
return _c
}
// GetDirFsInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) GetDirFsInfo(path string) (v2.FsInfo, error) {
ret := _mock.Called(path)
if len(ret) == 0 {
panic("no return value specified for GetDirFsInfo")
}
var r0 v2.FsInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func(string) (v2.FsInfo, error)); ok {
return returnFunc(path)
}
if returnFunc, ok := ret.Get(0).(func(string) v2.FsInfo); ok {
r0 = returnFunc(path)
} else {
r0 = ret.Get(0).(v2.FsInfo)
}
if returnFunc, ok := ret.Get(1).(func(string) error); ok {
r1 = returnFunc(path)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_GetDirFsInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDirFsInfo'
type MockInterface_GetDirFsInfo_Call struct {
*mock.Call
}
// GetDirFsInfo is a helper method to define mock.On call
// - path string
func (_e *MockInterface_Expecter) GetDirFsInfo(path interface{}) *MockInterface_GetDirFsInfo_Call {
return &MockInterface_GetDirFsInfo_Call{Call: _e.mock.On("GetDirFsInfo", path)}
}
func (_c *MockInterface_GetDirFsInfo_Call) Run(run func(path string)) *MockInterface_GetDirFsInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
run(
arg0,
)
})
return _c
}
func (_c *MockInterface_GetDirFsInfo_Call) Return(fsInfo v2.FsInfo, err error) *MockInterface_GetDirFsInfo_Call {
_c.Call.Return(fsInfo, err)
return _c
}
func (_c *MockInterface_GetDirFsInfo_Call) RunAndReturn(run func(path string) (v2.FsInfo, error)) *MockInterface_GetDirFsInfo_Call {
_c.Call.Return(run)
return _c
}
// GetRequestedContainersInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*v1.ContainerInfo, error) {
ret := _mock.Called(containerName, options)
if len(ret) == 0 {
panic("no return value specified for GetRequestedContainersInfo")
}
var r0 map[string]*v1.ContainerInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func(string, v2.RequestOptions) (map[string]*v1.ContainerInfo, error)); ok {
return returnFunc(containerName, options)
}
if returnFunc, ok := ret.Get(0).(func(string, v2.RequestOptions) map[string]*v1.ContainerInfo); ok {
r0 = returnFunc(containerName, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]*v1.ContainerInfo)
}
}
if returnFunc, ok := ret.Get(1).(func(string, v2.RequestOptions) error); ok {
r1 = returnFunc(containerName, options)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_GetRequestedContainersInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRequestedContainersInfo'
type MockInterface_GetRequestedContainersInfo_Call struct {
*mock.Call
}
// GetRequestedContainersInfo is a helper method to define mock.On call
// - containerName string
// - options v2.RequestOptions
func (_e *MockInterface_Expecter) GetRequestedContainersInfo(containerName interface{}, options interface{}) *MockInterface_GetRequestedContainersInfo_Call {
return &MockInterface_GetRequestedContainersInfo_Call{Call: _e.mock.On("GetRequestedContainersInfo", containerName, options)}
}
func (_c *MockInterface_GetRequestedContainersInfo_Call) Run(run func(containerName string, options v2.RequestOptions)) *MockInterface_GetRequestedContainersInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 v2.RequestOptions
if args[1] != nil {
arg1 = args[1].(v2.RequestOptions)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockInterface_GetRequestedContainersInfo_Call) Return(stringToContainerInfo map[string]*v1.ContainerInfo, err error) *MockInterface_GetRequestedContainersInfo_Call {
_c.Call.Return(stringToContainerInfo, err)
return _c
}
func (_c *MockInterface_GetRequestedContainersInfo_Call) RunAndReturn(run func(containerName string, options v2.RequestOptions) (map[string]*v1.ContainerInfo, error)) *MockInterface_GetRequestedContainersInfo_Call {
_c.Call.Return(run)
return _c
}
// ImagesFsInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) ImagesFsInfo(context1 context.Context) (v2.FsInfo, error) {
ret := _mock.Called(context1)
if len(ret) == 0 {
panic("no return value specified for ImagesFsInfo")
}
var r0 v2.FsInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (v2.FsInfo, error)); ok {
return returnFunc(context1)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) v2.FsInfo); ok {
r0 = returnFunc(context1)
} else {
r0 = ret.Get(0).(v2.FsInfo)
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(context1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_ImagesFsInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImagesFsInfo'
type MockInterface_ImagesFsInfo_Call struct {
*mock.Call
}
// ImagesFsInfo is a helper method to define mock.On call
// - context1 context.Context
func (_e *MockInterface_Expecter) ImagesFsInfo(context1 interface{}) *MockInterface_ImagesFsInfo_Call {
return &MockInterface_ImagesFsInfo_Call{Call: _e.mock.On("ImagesFsInfo", context1)}
}
func (_c *MockInterface_ImagesFsInfo_Call) Run(run func(context1 context.Context)) *MockInterface_ImagesFsInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockInterface_ImagesFsInfo_Call) Return(fsInfo v2.FsInfo, err error) *MockInterface_ImagesFsInfo_Call {
_c.Call.Return(fsInfo, err)
return _c
}
func (_c *MockInterface_ImagesFsInfo_Call) RunAndReturn(run func(context1 context.Context) (v2.FsInfo, error)) *MockInterface_ImagesFsInfo_Call {
_c.Call.Return(run)
return _c
}
// MachineInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) MachineInfo() (*v1.MachineInfo, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for MachineInfo")
}
var r0 *v1.MachineInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func() (*v1.MachineInfo, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() *v1.MachineInfo); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.MachineInfo)
}
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_MachineInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MachineInfo'
type MockInterface_MachineInfo_Call struct {
*mock.Call
}
// MachineInfo is a helper method to define mock.On call
func (_e *MockInterface_Expecter) MachineInfo() *MockInterface_MachineInfo_Call {
return &MockInterface_MachineInfo_Call{Call: _e.mock.On("MachineInfo")}
}
func (_c *MockInterface_MachineInfo_Call) Run(run func()) *MockInterface_MachineInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockInterface_MachineInfo_Call) Return(machineInfo *v1.MachineInfo, err error) *MockInterface_MachineInfo_Call {
_c.Call.Return(machineInfo, err)
return _c
}
func (_c *MockInterface_MachineInfo_Call) RunAndReturn(run func() (*v1.MachineInfo, error)) *MockInterface_MachineInfo_Call {
_c.Call.Return(run)
return _c
}
// RootFsInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) RootFsInfo() (v2.FsInfo, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for RootFsInfo")
}
var r0 v2.FsInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func() (v2.FsInfo, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() v2.FsInfo); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(v2.FsInfo)
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_RootFsInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RootFsInfo'
type MockInterface_RootFsInfo_Call struct {
*mock.Call
}
// RootFsInfo is a helper method to define mock.On call
func (_e *MockInterface_Expecter) RootFsInfo() *MockInterface_RootFsInfo_Call {
return &MockInterface_RootFsInfo_Call{Call: _e.mock.On("RootFsInfo")}
}
func (_c *MockInterface_RootFsInfo_Call) Run(run func()) *MockInterface_RootFsInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockInterface_RootFsInfo_Call) Return(fsInfo v2.FsInfo, err error) *MockInterface_RootFsInfo_Call {
_c.Call.Return(fsInfo, err)
return _c
}
func (_c *MockInterface_RootFsInfo_Call) RunAndReturn(run func() (v2.FsInfo, error)) *MockInterface_RootFsInfo_Call {
_c.Call.Return(run)
return _c
}
// Start provides a mock function for the type MockInterface
func (_mock *MockInterface) Start() error {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func() error); ok {
r0 = returnFunc()
} else {
r0 = ret.Error(0)
}
return r0
}
// MockInterface_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
type MockInterface_Start_Call struct {
*mock.Call
}
// Start is a helper method to define mock.On call
func (_e *MockInterface_Expecter) Start() *MockInterface_Start_Call {
return &MockInterface_Start_Call{Call: _e.mock.On("Start")}
}
func (_c *MockInterface_Start_Call) Run(run func()) *MockInterface_Start_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockInterface_Start_Call) Return(err error) *MockInterface_Start_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockInterface_Start_Call) RunAndReturn(run func() error) *MockInterface_Start_Call {
_c.Call.Return(run)
return _c
}
// VersionInfo provides a mock function for the type MockInterface
func (_mock *MockInterface) VersionInfo() (*v1.VersionInfo, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for VersionInfo")
}
var r0 *v1.VersionInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func() (*v1.VersionInfo, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() *v1.VersionInfo); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.VersionInfo)
}
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockInterface_VersionInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VersionInfo'
type MockInterface_VersionInfo_Call struct {
*mock.Call
}
// VersionInfo is a helper method to define mock.On call
func (_e *MockInterface_Expecter) VersionInfo() *MockInterface_VersionInfo_Call {
return &MockInterface_VersionInfo_Call{Call: _e.mock.On("VersionInfo")}
}
func (_c *MockInterface_VersionInfo_Call) Run(run func()) *MockInterface_VersionInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockInterface_VersionInfo_Call) Return(versionInfo *v1.VersionInfo, err error) *MockInterface_VersionInfo_Call {
_c.Call.Return(versionInfo, err)
return _c
}
func (_c *MockInterface_VersionInfo_Call) RunAndReturn(run func() (*v1.VersionInfo, error)) *MockInterface_VersionInfo_Call {
_c.Call.Return(run)
return _c
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cadvisor
import (
"strings"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapi2 "github.com/google/cadvisor/info/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
)
const (
// CrioSocketSuffix is the suffix of the path to the CRI-O socket.
// Please keep this in sync with the one in:
// github.com/google/cadvisor/tree/master/container/crio/client.go
// Note, however, that we only match on the suffix: /var/run is often a
// symlink to /run, so the user can specify either path.
CrioSocketSuffix = "run/crio/crio.sock"
)
// CapacityFromMachineInfo returns the capacity of the resources from the machine info.
func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList {
c := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
int64(info.NumCores*1000),
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(info.MemoryCapacity),
resource.BinarySI),
}
// if huge pages are enabled, we report them as a schedulable resource on the node
for _, hugepagesInfo := range info.HugePages {
pageSizeBytes := int64(hugepagesInfo.PageSize * 1024)
hugePagesBytes := pageSizeBytes * int64(hugepagesInfo.NumPages)
pageSizeQuantity := resource.NewQuantity(pageSizeBytes, resource.BinarySI)
c[v1helper.HugePageResourceName(*pageSizeQuantity)] = *resource.NewQuantity(hugePagesBytes, resource.BinarySI)
}
return c
}
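// A minimal sketch of the conversion (values are illustrative):
//
//	info := &cadvisorapi.MachineInfo{NumCores: 2, MemoryCapacity: 8 << 30}
//	capacity := CapacityFromMachineInfo(info)
//	// capacity[v1.ResourceCPU] renders as "2" (i.e. 2000m),
//	// capacity[v1.ResourceMemory] renders as "8Gi"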
// EphemeralStorageCapacityFromFsInfo returns the capacity of the ephemeral storage from the FsInfo.
func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceList {
c := v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewQuantity(
int64(info.Capacity),
resource.BinarySI),
}
return c
}
// UsingLegacyCadvisorStats returns true if container stats are provided by cadvisor instead of through the CRI.
// CRI integrations should get container metrics via CRI.
// TODO: cri-o relies on cadvisor as a temporary workaround. The code should
// be removed. Related issue:
// https://github.com/kubernetes/kubernetes/issues/51798
func UsingLegacyCadvisorStats(runtimeEndpoint string) bool {
// If PodAndContainerStatsFromCRI feature is enabled, then assume the user
// wants to use CRI stats, as the aforementioned workaround isn't needed
// when this feature is enabled.
if utilfeature.DefaultFeatureGate.Enabled(features.PodAndContainerStatsFromCRI) {
return false
}
return strings.HasSuffix(runtimeEndpoint, CrioSocketSuffix)
}
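// A minimal sketch of the expected behavior (endpoints are illustrative, and
// both calls return false while the PodAndContainerStatsFromCRI gate is on):
//
//	UsingLegacyCadvisorStats("unix:///var/run/crio/crio.sock")         // true
//	UsingLegacyCadvisorStats("unix:///run/containerd/containerd.sock") // false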
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"context"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"math"
"net"
"sort"
"sync/atomic"
"time"
certificates "k8s.io/api/certificates/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/certificate"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/metrics"
netutils "k8s.io/utils/net"
)
func newGetTemplateFn(nodeName types.NodeName, getAddresses func() []v1.NodeAddress) func() *x509.CertificateRequest {
return func() *x509.CertificateRequest {
hostnames, ips := addressesToHostnamesAndIPs(getAddresses())
// by default, require at least one IP before requesting a serving certificate
hasRequiredAddresses := len(ips) > 0
// optionally allow requesting a serving certificate with just a DNS name
if utilfeature.DefaultFeatureGate.Enabled(features.AllowDNSOnlyNodeCSR) {
hasRequiredAddresses = hasRequiredAddresses || len(hostnames) > 0
}
// don't return a template if we have no addresses to request for
if !hasRequiredAddresses {
return nil
}
return &x509.CertificateRequest{
Subject: pkix.Name{
CommonName: fmt.Sprintf("system:node:%s", nodeName),
Organization: []string{"system:nodes"},
},
DNSNames: hostnames,
IPAddresses: ips,
}
}
}
// NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate
// or returns an error.
func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *kubeletconfig.KubeletConfiguration, nodeName types.NodeName, getAddresses func() []v1.NodeAddress, certDirectory string) (certificate.Manager, error) {
var clientsetFn certificate.ClientsetFunc
if kubeClient != nil {
clientsetFn = func(current *tls.Certificate) (clientset.Interface, error) {
return kubeClient, nil
}
}
certificateStore, err := certificate.NewFileStore(
"kubelet-server",
certDirectory,
certDirectory,
kubeCfg.TLSCertFile,
kubeCfg.TLSPrivateKeyFile)
if err != nil {
return nil, fmt.Errorf("failed to initialize server certificate store: %v", err)
}
var certificateRenewFailure = compbasemetrics.NewCounter(
&compbasemetrics.CounterOpts{
Subsystem: metrics.KubeletSubsystem,
Name: "server_expiration_renew_errors",
Help: "Counter of certificate renewal errors.",
StabilityLevel: compbasemetrics.ALPHA,
},
)
legacyregistry.MustRegister(certificateRenewFailure)
certificateRotationAge := compbasemetrics.NewHistogram(
&compbasemetrics.HistogramOpts{
Subsystem: metrics.KubeletSubsystem,
Name: "certificate_manager_server_rotation_seconds",
Help: "Histogram of the number of seconds the previous certificate lived before being rotated.",
Buckets: []float64{
60, // 1 minute
3600, // 1 hour
14400, // 4 hours
86400, // 1 day
604800, // 1 week
2592000, // 1 month
7776000, // 3 months
15552000, // 6 months
31104000, // 1 year
124416000, // 4 years
},
StabilityLevel: compbasemetrics.ALPHA,
},
)
legacyregistry.MustRegister(certificateRotationAge)
getTemplate := newGetTemplateFn(nodeName, getAddresses)
m, err := certificate.NewManager(&certificate.Config{
ClientsetFn: clientsetFn,
GetTemplate: getTemplate,
SignerName: certificates.KubeletServingSignerName,
GetUsages: certificate.DefaultKubeletServingGetUsages,
CertificateStore: certificateStore,
CertificateRotation: certificateRotationAge,
CertificateRenewFailure: certificateRenewFailure,
})
if err != nil {
return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err)
}
legacyregistry.RawMustRegister(compbasemetrics.NewGaugeFunc(
&compbasemetrics.GaugeOpts{
Subsystem: metrics.KubeletSubsystem,
Name: "certificate_manager_server_ttl_seconds",
Help: "Gauge of the shortest TTL (time-to-live) of " +
"the Kubelet's serving certificate. The value is in seconds " +
"until certificate expiry (negative if already expired). If " +
"serving certificate is invalid or unused, the value will " +
"be +INF.",
StabilityLevel: compbasemetrics.ALPHA,
},
func() float64 {
if c := m.Current(); c != nil && c.Leaf != nil {
return math.Trunc(time.Until(c.Leaf.NotAfter).Seconds())
}
return math.Inf(1)
},
))
return m, nil
}
func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string, ips []net.IP) {
seenDNSNames := map[string]bool{}
seenIPs := map[string]bool{}
for _, address := range addresses {
if len(address.Address) == 0 {
continue
}
switch address.Type {
case v1.NodeHostName:
if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true
} else {
seenDNSNames[address.Address] = true
}
case v1.NodeExternalIP, v1.NodeInternalIP:
if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true
}
case v1.NodeExternalDNS, v1.NodeInternalDNS:
seenDNSNames[address.Address] = true
}
}
for dnsName := range seenDNSNames {
dnsNames = append(dnsNames, dnsName)
}
for ip := range seenIPs {
ips = append(ips, netutils.ParseIPSloppy(ip))
}
// return in stable order
sort.Strings(dnsNames)
sort.Slice(ips, func(i, j int) bool { return ips[i].String() < ips[j].String() })
return dnsNames, ips
}
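// A minimal sketch of the dedup and stable-ordering behavior (addresses are
// illustrative):
//
//	dnsNames, ips := addressesToHostnamesAndIPs([]v1.NodeAddress{
//		{Type: v1.NodeHostName, Address: "node-1"},
//		{Type: v1.NodeInternalIP, Address: "10.0.0.2"},
//		{Type: v1.NodeExternalIP, Address: "10.0.0.2"}, // duplicate IP, kept once
//	})
//	// dnsNames == ["node-1"], ips == [10.0.0.2]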
// NewKubeletClientCertificateManager sets up a certificate manager that may
// initially lack a client with which to request new certificates (or rotate
// them). If a CSR client is set later, it may begin rotating/renewing the
// client cert.
func NewKubeletClientCertificateManager(
certDirectory string,
nodeName types.NodeName,
bootstrapCertData []byte,
bootstrapKeyData []byte,
certFile string,
keyFile string,
clientsetFn certificate.ClientsetFunc,
) (certificate.Manager, error) {
certificateStore, err := certificate.NewFileStore(
"kubelet-client",
certDirectory,
certDirectory,
certFile,
keyFile)
if err != nil {
return nil, fmt.Errorf("failed to initialize client certificate store: %v", err)
}
var certificateRenewFailure = compbasemetrics.NewCounter(
&compbasemetrics.CounterOpts{
Namespace: metrics.KubeletSubsystem,
Subsystem: "certificate_manager",
Name: "client_expiration_renew_errors",
Help: "Counter of certificate renewal errors.",
StabilityLevel: compbasemetrics.ALPHA,
},
)
legacyregistry.Register(certificateRenewFailure)
m, err := certificate.NewManager(&certificate.Config{
ClientsetFn: clientsetFn,
Template: &x509.CertificateRequest{
Subject: pkix.Name{
CommonName: fmt.Sprintf("system:node:%s", nodeName),
Organization: []string{"system:nodes"},
},
},
SignerName: certificates.KubeAPIServerClientKubeletSignerName,
GetUsages: certificate.DefaultKubeletClientGetUsages,
// For backwards compatibility, the kubelet supports the ability to
// provide a higher privileged certificate as initial data that will
// then be rotated immediately. This code path is used by kubeadm on
// the masters.
BootstrapCertificatePEM: bootstrapCertData,
BootstrapKeyPEM: bootstrapKeyData,
CertificateStore: certificateStore,
CertificateRenewFailure: certificateRenewFailure,
})
if err != nil {
return nil, fmt.Errorf("failed to initialize client certificate manager: %v", err)
}
return m, nil
}
// NewKubeletServerCertificateDynamicFileManager creates a certificate manager based on reading and watching certificate and key files.
// The returned struct implements the certificate.Manager interface, so it can be used like the other certificate managers in this package,
// but it never communicates with the API server to request a certificate.
func NewKubeletServerCertificateDynamicFileManager(certFile, keyFile string) (certificate.Manager, error) {
c, err := dynamiccertificates.NewDynamicServingContentFromFiles("kubelet-server-cert-files", certFile, keyFile)
if err != nil {
return nil, fmt.Errorf("unable to set up dynamic certificate manager for kubelet server cert files: %w", err)
}
m := &kubeletServerCertificateDynamicFileManager{
dynamicCertificateContent: c,
certFile: certFile,
keyFile: keyFile,
}
m.Enqueue()
c.AddListener(m)
return m, nil
}
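// A minimal usage sketch (file paths are illustrative):
//
//	m, err := NewKubeletServerCertificateDynamicFileManager(
//		"/var/lib/kubelet/pki/kubelet.crt", "/var/lib/kubelet/pki/kubelet.key")
//	if err != nil {
//		return err
//	}
//	m.Start()           // begin watching the files for changes
//	cert := m.Current() // the most recently loaded key pair
//	defer m.Stop()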
// kubeletServerCertificateDynamicFileManager uses a dynamic CertKeyContentProvider based on cert and key files.
type kubeletServerCertificateDynamicFileManager struct {
cancelFn context.CancelFunc
certFile string
keyFile string
dynamicCertificateContent *dynamiccertificates.DynamicCertKeyPairContent
currentTLSCertificate atomic.Pointer[tls.Certificate]
}
// Enqueue implements the functions to be notified when the serving cert content changes.
func (m *kubeletServerCertificateDynamicFileManager) Enqueue() {
certContent, keyContent := m.dynamicCertificateContent.CurrentCertKeyContent()
cert, err := tls.X509KeyPair(certContent, keyContent)
if err != nil {
klog.ErrorS(err, "invalid certificate and key pair from file", "certFile", m.certFile, "keyFile", m.keyFile)
return
}
m.currentTLSCertificate.Store(&cert)
klog.V(4).InfoS("loaded certificate and key pair in kubelet server certificate manager", "certFile", m.certFile, "keyFile", m.keyFile)
}
// Current returns the last valid certificate key pair loaded from files.
func (m *kubeletServerCertificateDynamicFileManager) Current() *tls.Certificate {
return m.currentTLSCertificate.Load()
}
// Start starts watching the certificate and key files
func (m *kubeletServerCertificateDynamicFileManager) Start() {
var ctx context.Context
ctx, m.cancelFn = context.WithCancel(context.Background())
go m.dynamicCertificateContent.Run(ctx, 1)
}
// Stop stops watching the certificate and key files
func (m *kubeletServerCertificateDynamicFileManager) Stop() {
if m.cancelFn != nil {
m.cancelFn()
}
}
// ServerHealthy always returns true since the file manager doesn't communicate with any server
func (m *kubeletServerCertificateDynamicFileManager) ServerHealthy() bool {
return true
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"sync"
"sync/atomic"
"time"
"k8s.io/klog/v2"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/connrotation"
)
// UpdateTransport instruments a restconfig with a transport that dynamically uses
// certificates provided by the manager for TLS client auth.
//
// The config must not already provide an explicit transport.
//
// The returned function allows forcefully closing all active connections.
//
// The returned transport periodically checks the manager to determine if the
// certificate has changed. If it has, the transport shuts down all existing client
// connections, forcing the client to re-handshake with the server and use the
// new certificate.
//
// The exitAfter duration, if set, will terminate the current process if a certificate
// is not available from the store (because it has been deleted on disk or is corrupt)
// or if the certificate has expired and the server is responsive. This allows the
// process parent or the bootstrap credentials an opportunity to retrieve a new initial
// certificate.
//
// stopCh should be used to indicate when the transport is unused and doesn't need
// to continue checking the manager.
func UpdateTransport(stopCh <-chan struct{}, clientConfig *restclient.Config, clientCertificateManager certificate.Manager, exitAfter time.Duration) (func(), error) {
return updateTransport(stopCh, 10*time.Second, clientConfig, clientCertificateManager, exitAfter)
}
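// A minimal wiring sketch (values are illustrative; clientCertificateManager
// would typically come from NewKubeletClientCertificateManager):
//
//	closeAll, err := UpdateTransport(stopCh, clientConfig, clientCertificateManager, 5*time.Minute)
//	if err != nil {
//		return err
//	}
//	// closeAll() can later be invoked to forcefully drop all active connections.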
// updateTransport is an internal method that exposes how often this method checks that the
// client cert has changed.
func updateTransport(stopCh <-chan struct{}, period time.Duration, clientConfig *restclient.Config, clientCertificateManager certificate.Manager, exitAfter time.Duration) (func(), error) {
if clientConfig.Transport != nil || clientConfig.Dial != nil {
return nil, fmt.Errorf("there is already a transport or dialer configured")
}
d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
if clientCertificateManager != nil {
if err := addCertRotation(stopCh, period, clientConfig, clientCertificateManager, exitAfter, d); err != nil {
return nil, err
}
} else {
clientConfig.Dial = d.DialContext
}
return d.CloseAll, nil
}
func addCertRotation(stopCh <-chan struct{}, period time.Duration, clientConfig *restclient.Config, clientCertificateManager certificate.Manager, exitAfter time.Duration, d *connrotation.Dialer) error {
tlsConfig, err := restclient.TLSConfigFor(clientConfig)
if err != nil {
return fmt.Errorf("unable to configure TLS for the rest client: %v", err)
}
if tlsConfig == nil {
tlsConfig = &tls.Config{}
}
tlsConfig.Certificates = nil
tlsConfig.GetClientCertificate = func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert := clientCertificateManager.Current()
if cert == nil {
return &tls.Certificate{Certificate: nil}, nil
}
return cert, nil
}
lastCertAvailable := time.Now()
lastCert := clientCertificateManager.Current()
var hasCert atomic.Bool
hasCert.Store(lastCert != nil)
checkLock := &sync.Mutex{}
checkNewCertificateAndRotate := func() {
// don't run concurrently
checkLock.Lock()
defer checkLock.Unlock()
curr := clientCertificateManager.Current()
if exitAfter > 0 {
now := time.Now()
if curr == nil {
// the certificate has been deleted from disk or is otherwise corrupt
if now.After(lastCertAvailable.Add(exitAfter)) {
if clientCertificateManager.ServerHealthy() {
klog.ErrorS(nil, "No valid client certificate is found and the server is responsive, exiting.", "lastCertificateAvailabilityTime", lastCertAvailable, "shutdownThreshold", exitAfter)
os.Exit(1)
} else {
klog.ErrorS(nil, "No valid client certificate is found but the server is not responsive. A restart may be necessary to retrieve new initial credentials.", "lastCertificateAvailabilityTime", lastCertAvailable, "shutdownThreshold", exitAfter)
}
}
} else {
// the certificate is expired
if now.After(curr.Leaf.NotAfter) {
if clientCertificateManager.ServerHealthy() {
klog.ErrorS(nil, "The currently active client certificate has expired and the server is responsive, exiting.")
os.Exit(1)
} else {
klog.ErrorS(nil, "The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.")
}
}
lastCertAvailable = now
}
}
if curr == nil || lastCert == curr {
// Cert hasn't been rotated.
return
}
lastCert = curr
hasCert.Store(lastCert != nil)
klog.InfoS("Certificate rotation detected, shutting down client connections to start using new credentials")
// The cert has been rotated. Close all existing connections to force the client
// to reperform its TLS handshake with new cert.
//
// See: https://github.com/kubernetes-incubator/bootkube/pull/663#issuecomment-318506493
d.CloseAll()
}
// start long-term check
go wait.Until(checkNewCertificateAndRotate, period, stopCh)
if !hasCert.Load() {
// start a faster check until we get the initial certificate
go wait.PollUntil(time.Second, func() (bool, error) {
checkNewCertificateAndRotate()
return hasCert.Load(), nil
}, stopCh)
}
clientConfig.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: 25,
DialContext: d.DialContext,
})
// Zero out all existing TLS options since our new transport enforces them.
clientConfig.CertData = nil
clientConfig.KeyData = nil
clientConfig.CertFile = ""
clientConfig.KeyFile = ""
clientConfig.CAData = nil
clientConfig.CAFile = ""
clientConfig.Insecure = false
clientConfig.NextProtos = nil
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package checkpointmanager
import (
"fmt"
"sync"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
utilstore "k8s.io/kubernetes/pkg/kubelet/util/store"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
// Checkpoint provides the process checkpoint data
type Checkpoint interface {
MarshalCheckpoint() ([]byte, error)
UnmarshalCheckpoint(blob []byte) error
VerifyChecksum() error
}
// CheckpointManager provides the interface to manage checkpoint
type CheckpointManager interface {
// CreateCheckpoint persists checkpoint in CheckpointStore. checkpointKey is the key for utilstore to locate checkpoint.
// For file backed utilstore, checkpointKey is the file name to write the checkpoint data.
CreateCheckpoint(checkpointKey string, checkpoint Checkpoint) error
// GetCheckpoint retrieves checkpoint from CheckpointStore.
GetCheckpoint(checkpointKey string, checkpoint Checkpoint) error
// WARNING: RemoveCheckpoint will not return error if checkpoint does not exist.
RemoveCheckpoint(checkpointKey string) error
// ListCheckpoints returns the list of existing checkpoints.
ListCheckpoints() ([]string, error)
}
// impl is an implementation of CheckpointManager. It persists checkpoint in CheckpointStore
type impl struct {
path string
store utilstore.Store
mutex sync.Mutex
}
// NewCheckpointManager returns a new instance of a checkpoint manager
func NewCheckpointManager(checkpointDir string) (CheckpointManager, error) {
fstore, err := utilstore.NewFileStore(checkpointDir, &utilfs.DefaultFs{})
if err != nil {
return nil, err
}
return &impl{path: checkpointDir, store: fstore}, nil
}
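// A minimal usage sketch (the directory and the Checkpoint implementation cp
// are illustrative):
//
//	manager, err := NewCheckpointManager("/var/lib/kubelet/checkpoints")
//	if err != nil {
//		return err
//	}
//	if err := manager.CreateCheckpoint("pod-state", cp); err != nil {
//		return err
//	}
//	err = manager.GetCheckpoint("pod-state", cp)
//	// err == errors.ErrCheckpointNotFound if the key was never written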
// CreateCheckpoint persists checkpoint in CheckpointStore.
func (manager *impl) CreateCheckpoint(checkpointKey string, checkpoint Checkpoint) error {
manager.mutex.Lock()
defer manager.mutex.Unlock()
blob, err := checkpoint.MarshalCheckpoint()
if err != nil {
return err
}
return manager.store.Write(checkpointKey, blob)
}
// GetCheckpoint retrieves checkpoint from CheckpointStore.
func (manager *impl) GetCheckpoint(checkpointKey string, checkpoint Checkpoint) error {
manager.mutex.Lock()
defer manager.mutex.Unlock()
blob, err := manager.store.Read(checkpointKey)
if err != nil {
if err == utilstore.ErrKeyNotFound {
return errors.ErrCheckpointNotFound
}
return err
}
err = checkpoint.UnmarshalCheckpoint(blob)
if err == nil {
err = checkpoint.VerifyChecksum()
}
return err
}
// RemoveCheckpoint will not return error if checkpoint does not exist.
func (manager *impl) RemoveCheckpoint(checkpointKey string) error {
manager.mutex.Lock()
defer manager.mutex.Unlock()
return manager.store.Delete(checkpointKey)
}
// ListCheckpoints returns the list of existing checkpoints.
func (manager *impl) ListCheckpoints() ([]string, error) {
manager.mutex.Lock()
defer manager.mutex.Unlock()
keys, err := manager.store.List()
if err != nil {
return []string{}, fmt.Errorf("failed to list checkpoint store: %v", err)
}
return keys, nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package checksum
import (
"hash/fnv"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
// Checksum is the data to be stored as checkpoint
type Checksum uint64
// Verify verifies that passed checksum is same as calculated checksum
func (cs Checksum) Verify(data interface{}) error {
actualCS := New(data)
if cs != actualCS {
return &errors.CorruptCheckpointError{ActualCS: uint64(actualCS), ExpectedCS: uint64(cs)}
}
return nil
}
// New returns the Checksum of checkpoint data
func New(data interface{}) Checksum {
return Checksum(getChecksum(data))
}
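// A minimal sketch of how a Checkpoint implementation might use this package
// from the outside (the data value is illustrative):
//
//	cs := checksum.New(data) // computed at save time and stored with the data
//	if err := cs.Verify(data); err != nil {
//		// err is a *errors.CorruptCheckpointError: data changed since New.
//	}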
// getChecksum returns the calculated checksum of the checkpoint data.
func getChecksum(data interface{}) uint64 {
hash := fnv.New32a()
hashutil.DeepHashObject(hash, data)
return uint64(hash.Sum32())
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import "fmt"
// CorruptCheckpointError is reported when the checksum does not match.
// Check for it with:
//
// var csErr *CorruptCheckpointError
// if errors.As(err, &csErr) { ... }
// if errors.Is(err, CorruptCheckpointError{}) { ... }
type CorruptCheckpointError struct {
ActualCS, ExpectedCS uint64
}
func (err CorruptCheckpointError) Error() string {
return "checkpoint is corrupted"
}
func (err CorruptCheckpointError) Is(target error) bool {
switch target.(type) {
case *CorruptCheckpointError, CorruptCheckpointError:
return true
default:
return false
}
}
// ErrCheckpointNotFound is reported when checkpoint is not found for a given key
var ErrCheckpointNotFound = fmt.Errorf("checkpoint is not found")
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package clustertrustbundle abstracts access to ClusterTrustBundles so that
// projected volumes can use them.
package clustertrustbundle
import (
"context"
"encoding/pem"
"fmt"
"math/rand"
"sync"
"time"
"github.com/go-logr/logr"
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
lrucache "k8s.io/apimachinery/pkg/util/cache"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
const (
maxLabelSelectorLength = 100 * 1024
)
// clusterTrustBundle is a type constraint for version-independent ClusterTrustBundle API
type clusterTrustBundle interface {
certificatesv1alpha1.ClusterTrustBundle | certificatesv1beta1.ClusterTrustBundle
}
// clusterTrustBundlesLister is an API-version independent ClusterTrustBundles lister
type clusterTrustBundlesLister[T clusterTrustBundle] interface {
Get(string) (*T, error)
List(labels.Selector) ([]*T, error)
}
type clusterTrustBundleHandlers[T clusterTrustBundle] interface {
GetName(*T) string
GetSignerName(*T) string
GetTrustBundle(*T) string
}
type alphaClusterTrustBundleHandlers struct{}
type betaClusterTrustBundleHandlers struct{}
func (b *alphaClusterTrustBundleHandlers) GetName(ctb *certificatesv1alpha1.ClusterTrustBundle) string {
return ctb.Name
}
func (b *alphaClusterTrustBundleHandlers) GetSignerName(ctb *certificatesv1alpha1.ClusterTrustBundle) string {
return ctb.Spec.SignerName
}
func (b *alphaClusterTrustBundleHandlers) GetTrustBundle(ctb *certificatesv1alpha1.ClusterTrustBundle) string {
return ctb.Spec.TrustBundle
}
func (b *betaClusterTrustBundleHandlers) GetName(ctb *certificatesv1beta1.ClusterTrustBundle) string {
return ctb.Name
}
func (b *betaClusterTrustBundleHandlers) GetSignerName(ctb *certificatesv1beta1.ClusterTrustBundle) string {
return ctb.Spec.SignerName
}
func (b *betaClusterTrustBundleHandlers) GetTrustBundle(ctb *certificatesv1beta1.ClusterTrustBundle) string {
return ctb.Spec.TrustBundle
}
// Manager abstracts over the ability to get trust anchors.
type Manager interface {
GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error)
GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error)
}
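// A minimal usage sketch against any Manager implementation (signer name and
// selector are illustrative; per GetTrustAnchorsBySigner below, a nil selector
// matches nothing while a non-nil but empty selector matches everything):
//
//	pemBytes, err := mgr.GetTrustAnchorsBySigner(
//		"example.com/my-signer", &metav1.LabelSelector{}, false)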
// InformerManager is the "real" manager. It uses informers to track
// ClusterTrustBundle objects.
type InformerManager[T clusterTrustBundle] struct {
ctbInformer cache.SharedIndexInformer
ctbLister clusterTrustBundlesLister[T]
ctbHandlers clusterTrustBundleHandlers[T]
normalizationCache *lrucache.LRUExpireCache
cacheTTL time.Duration
}
var _ Manager = (*InformerManager[certificatesv1beta1.ClusterTrustBundle])(nil)
func NewAlphaInformerManager(
ctx context.Context, informerFactory informers.SharedInformerFactory, cacheSize int, cacheTTL time.Duration,
) (Manager, error) {
bundlesInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
return newInformerManager(
ctx, &alphaClusterTrustBundleHandlers{}, bundlesInformer.Informer(), bundlesInformer.Lister(), cacheSize, cacheTTL,
)
}
func NewBetaInformerManager(
ctx context.Context, informerFactory informers.SharedInformerFactory, cacheSize int, cacheTTL time.Duration,
) (Manager, error) {
bundlesInformer := informerFactory.Certificates().V1beta1().ClusterTrustBundles()
return newInformerManager(
ctx, &betaClusterTrustBundleHandlers{}, bundlesInformer.Informer(), bundlesInformer.Lister(), cacheSize, cacheTTL,
)
}
// newInformerManager returns an initialized InformerManager.
func newInformerManager[T clusterTrustBundle](ctx context.Context, handlers clusterTrustBundleHandlers[T], informer cache.SharedIndexInformer, lister clusterTrustBundlesLister[T], cacheSize int, cacheTTL time.Duration) (Manager, error) {
// We need to call Informer() before calling start on the shared informer
// factory, or the informer won't be registered to be started.
m := &InformerManager[T]{
ctbInformer: informer,
ctbLister: lister,
ctbHandlers: handlers,
normalizationCache: lrucache.NewLRUExpireCache(cacheSize),
cacheTTL: cacheTTL,
}
logger := klog.FromContext(ctx)
// Have the informer bust cache entries when it sees updates that could
// apply to them.
_, err := m.ctbInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
ctb, ok := obj.(*T)
if !ok {
return
}
logger.Info("Dropping all cache entries for signer", "signerName", m.ctbHandlers.GetSignerName(ctb))
m.dropCacheFor(ctb)
},
UpdateFunc: func(old, new any) {
ctb, ok := new.(*T)
if !ok {
return
}
logger.Info("Dropping cache for ClusterTrustBundle", "signerName", m.ctbHandlers.GetSignerName(ctb))
m.dropCacheFor(ctb)
},
DeleteFunc: func(obj any) {
ctb, ok := obj.(*T)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
return
}
ctb, ok = tombstone.Obj.(*T)
if !ok {
return
}
}
logger.Info("Dropping cache for ClusterTrustBundle", "signerName", m.ctbHandlers.GetSignerName(ctb))
m.dropCacheFor(ctb)
},
})
if err != nil {
return nil, fmt.Errorf("while registering event handler on informer: %w", err)
}
return m, nil
}
func (m *InformerManager[T]) dropCacheFor(ctb *T) {
if ctbSignerName := m.ctbHandlers.GetSignerName(ctb); ctbSignerName != "" {
m.normalizationCache.RemoveAll(func(key any) bool {
return key.(cacheKeyType).signerName == ctbSignerName
})
} else {
m.normalizationCache.RemoveAll(func(key any) bool {
return key.(cacheKeyType).ctbName == m.ctbHandlers.GetName(ctb)
})
}
}
// GetTrustAnchorsByName returns normalized and deduplicated trust anchors from
// a single named ClusterTrustBundle.
func (m *InformerManager[T]) GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) {
if !m.ctbInformer.HasSynced() {
return nil, fmt.Errorf("ClusterTrustBundle informer has not yet synced")
}
cacheKey := cacheKeyType{ctbName: name}
if cachedAnchors, ok := m.normalizationCache.Get(cacheKey); ok {
return cachedAnchors.([]byte), nil
}
ctb, err := m.ctbLister.Get(name)
if k8serrors.IsNotFound(err) && allowMissing {
return []byte{}, nil
}
if err != nil {
return nil, fmt.Errorf("while getting ClusterTrustBundle: %w", err)
}
pemTrustAnchors, err := m.normalizeTrustAnchors([]*T{ctb})
if err != nil {
return nil, fmt.Errorf("while normalizing trust anchors: %w", err)
}
m.normalizationCache.Add(cacheKey, pemTrustAnchors, m.cacheTTL)
return pemTrustAnchors, nil
}
// GetTrustAnchorsBySigner returns normalized and deduplicated trust anchors
// from a set of selected ClusterTrustBundles.
func (m *InformerManager[T]) GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) {
if !m.ctbInformer.HasSynced() {
return nil, fmt.Errorf("ClusterTrustBundle informer has not yet synced")
}
// Note that this function treats nil as "match nothing", and non-nil but
// empty as "match everything".
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
return nil, fmt.Errorf("while parsing label selector: %w", err)
}
cacheKey := cacheKeyType{signerName: signerName, labelSelector: selector.String()}
if lsLen := len(cacheKey.labelSelector); lsLen > maxLabelSelectorLength {
return nil, fmt.Errorf("label selector length (%d) is larger than %d", lsLen, maxLabelSelectorLength)
}
if cachedAnchors, ok := m.normalizationCache.Get(cacheKey); ok {
return cachedAnchors.([]byte), nil
}
rawCTBList, err := m.ctbLister.List(selector)
if err != nil {
return nil, fmt.Errorf("while listing ClusterTrustBundles matching label selector %v: %w", labelSelector, err)
}
ctbList := []*T{}
for _, ctb := range rawCTBList {
if m.ctbHandlers.GetSignerName(ctb) == signerName {
ctbList = append(ctbList, ctb)
}
}
if len(ctbList) == 0 {
if allowMissing {
return []byte{}, nil
}
return nil, fmt.Errorf("combination of signerName and labelSelector matched zero ClusterTrustBundles")
}
pemTrustAnchors, err := m.normalizeTrustAnchors(ctbList)
if err != nil {
return nil, fmt.Errorf("while normalizing trust anchors: %w", err)
}
m.normalizationCache.Add(cacheKey, pemTrustAnchors, m.cacheTTL)
return pemTrustAnchors, nil
}
func (m *InformerManager[T]) normalizeTrustAnchors(ctbList []*T) ([]byte, error) {
// Deduplicate trust anchors from all ClusterTrustBundles.
trustAnchorSet := sets.Set[string]{}
for _, ctb := range ctbList {
rest := []byte(m.ctbHandlers.GetTrustBundle(ctb))
var b *pem.Block
for {
b, rest = pem.Decode(rest)
if b == nil {
break
}
trustAnchorSet = trustAnchorSet.Insert(string(b.Bytes))
}
}
// Give the list a stable ordering that changes each time Kubelet restarts.
trustAnchorList := sets.List(trustAnchorSet)
rand.Shuffle(len(trustAnchorList), func(i, j int) {
trustAnchorList[i], trustAnchorList[j] = trustAnchorList[j], trustAnchorList[i]
})
pemTrustAnchors := []byte{}
for _, ta := range trustAnchorList {
b := &pem.Block{
Type: "CERTIFICATE",
Bytes: []byte(ta),
}
pemTrustAnchors = append(pemTrustAnchors, pem.EncodeToMemory(b)...)
}
return pemTrustAnchors, nil
}
type cacheKeyType struct {
ctbName string
signerName string
labelSelector string
}
// NoopManager always returns an error, for use in static kubelet mode.
type NoopManager struct{}
var _ Manager = (*NoopManager)(nil)
// GetTrustAnchorsByName implements Manager.
func (m *NoopManager) GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) {
return nil, fmt.Errorf("ClusterTrustBundle projection is not supported in static kubelet mode")
}
// GetTrustAnchorsBySigner implements Manager.
func (m *NoopManager) GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) {
return nil, fmt.Errorf("ClusterTrustBundle projection is not supported in static kubelet mode")
}
// LazyInformerManager decides whether to use the noop or the actual manager on a call to
// the manager's methods.
// We cannot determine this at startup because some deployments rely on the kubelet being
// fully running in order to set up their kube-apiserver.
type LazyInformerManager struct {
manager Manager
managerLock sync.RWMutex
client clientset.Interface
cacheSize int
contextWithLogger context.Context
logger logr.Logger
}
func NewLazyInformerManager(ctx context.Context, kubeClient clientset.Interface, cacheSize int) Manager {
return &LazyInformerManager{
client: kubeClient,
cacheSize: cacheSize,
contextWithLogger: ctx,
logger: klog.FromContext(ctx),
managerLock: sync.RWMutex{},
}
}
func (m *LazyInformerManager) GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) {
if err := m.ensureManagerSet(); err != nil {
return nil, fmt.Errorf("failed to ensure informer manager for ClusterTrustBundles: %w", err)
}
return m.manager.GetTrustAnchorsByName(name, allowMissing)
}
func (m *LazyInformerManager) GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) {
if err := m.ensureManagerSet(); err != nil {
return nil, fmt.Errorf("failed to ensure informer manager for ClusterTrustBundles: %w", err)
}
return m.manager.GetTrustAnchorsBySigner(signerName, labelSelector, allowMissing)
}
func (m *LazyInformerManager) isManagerSet() bool {
m.managerLock.RLock()
defer m.managerLock.RUnlock()
return m.manager != nil
}
type managerConstructor func(ctx context.Context, informerFactory informers.SharedInformerFactory, cacheSize int, cacheTTL time.Duration) (Manager, error)
func (m *LazyInformerManager) ensureManagerSet() error {
if m.isManagerSet() {
return nil
}
m.managerLock.Lock()
defer m.managerLock.Unlock()
// check again in case the manager was set while we were waiting for the lock
if m.manager != nil {
return nil
}
managerSchema := map[schema.GroupVersion]managerConstructor{
certificatesv1alpha1.SchemeGroupVersion: NewAlphaInformerManager,
certificatesv1beta1.SchemeGroupVersion: NewBetaInformerManager,
}
kubeInformers := informers.NewSharedInformerFactoryWithOptions(m.client, 0)
var clusterTrustBundleManager Manager
var foundGV string
for _, gv := range []schema.GroupVersion{certificatesv1beta1.SchemeGroupVersion, certificatesv1alpha1.SchemeGroupVersion} {
ctbAPIAvailable, err := clusterTrustBundlesAvailable(m.client, gv)
if err != nil {
return fmt.Errorf("failed to determine which informer manager to choose: %w", err)
}
if !ctbAPIAvailable {
continue
}
clusterTrustBundleManager, err = managerSchema[gv](m.contextWithLogger, kubeInformers, m.cacheSize, 5*time.Minute)
if err != nil {
return fmt.Errorf("error starting informer-based ClusterTrustBundle manager: %w", err)
}
foundGV = gv.String()
break
}
if clusterTrustBundleManager == nil {
m.manager = &NoopManager{}
m.logger.Info("No version of the ClusterTrustBundle API was found, the ClusterTrustBundle informer won't be started")
return nil
}
m.manager = clusterTrustBundleManager
kubeInformers.Start(m.contextWithLogger.Done())
m.logger.Info("Started ClusterTrustBundle informer", "apiGroup", foundGV)
// a cache fetch will likely follow right after, wait for the freshly started
// informers to sync
synced := true
timeoutContext, cancel := context.WithTimeout(m.contextWithLogger, 10*time.Second)
defer cancel()
m.logger.Info("Waiting for ClusterTrustBundle informer to sync")
for _, ok := range kubeInformers.WaitForCacheSync(timeoutContext.Done()) {
synced = synced && ok
}
if synced {
m.logger.Info("ClusterTrustBundle informer synced")
} else {
m.logger.Info("ClusterTrustBundle informer not synced, continuing to attempt in background")
}
return nil
}
func clusterTrustBundlesAvailable(client clientset.Interface, gv schema.GroupVersion) (bool, error) {
resList, err := client.Discovery().ServerResourcesForGroupVersion(gv.String())
if k8serrors.IsNotFound(err) {
return false, nil
}
if resList != nil {
// even in case of an error above there might be a partial list for APIs that
// were already successfully discovered
for _, r := range resList.APIResources {
if r.Name == "clustertrustbundles" {
return true, nil
}
}
}
return false, err
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"errors"
"fmt"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
const (
ErrorReasonUnexpected = "UnexpectedAdmissionError"
)
type Error interface {
Error() string
Type() string
}
type unexpectedAdmissionError struct{ Err error }
var _ Error = (*unexpectedAdmissionError)(nil)
func (e *unexpectedAdmissionError) Error() string {
return fmt.Sprintf("Allocate failed due to %v, which is unexpected", e.Err)
}
func (e *unexpectedAdmissionError) Type() string {
return ErrorReasonUnexpected
}
func GetPodAdmitResult(err error) lifecycle.PodAdmitResult {
if err == nil {
return lifecycle.PodAdmitResult{Admit: true}
}
var admissionErr Error
if !errors.As(err, &admissionErr) {
admissionErr = &unexpectedAdmissionError{err}
}
return lifecycle.PodAdmitResult{
Message: admissionErr.Error(),
Reason: admissionErr.Type(),
Admit: false,
}
}
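// Illustrative use of GetPodAdmitResult (the error value is hypothetical):
// any error that does not implement the Error interface above is wrapped,
// so the result carries the UnexpectedAdmissionError reason.
//
//	res := GetPodAdmitResult(fmt.Errorf("not enough devices"))
//	// res.Admit   == false
//	// res.Reason  == ErrorReasonUnexpected
//	// res.Message == "Allocate failed due to not enough devices, which is unexpected"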
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
libcontainercgroups "github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
libcontainercgroupmanager "github.com/opencontainers/cgroups/manager"
cgroupsystemd "github.com/opencontainers/cgroups/systemd"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
const (
// systemdSuffix is the cgroup name suffix for systemd
systemdSuffix string = ".slice"
// Cgroup2MemoryMin is memory.min for cgroup v2
Cgroup2MemoryMin string = "memory.min"
// Cgroup2MemoryHigh is memory.high for cgroup v2
Cgroup2MemoryHigh string = "memory.high"
// Cgroup2MaxCpuLimit is the cgroup v2 value ("max") meaning no CPU limit
Cgroup2MaxCpuLimit string = "max"
// Cgroup2MaxSwapFilename is memory.swap.max for cgroup v2
Cgroup2MaxSwapFilename string = "memory.swap.max"
)
var RootCgroupName = CgroupName([]string{})
// NewCgroupName composes a new cgroup name.
// Use RootCgroupName as base to start at the root.
// This function does some basic checks for invalid characters in the name.
func NewCgroupName(base CgroupName, components ...string) CgroupName {
for _, component := range components {
// Forbid using "_" in internal names. When remapping internal
// names to the systemd cgroup driver, we want to remap "-" => "_",
// so we forbid "_" so that we can always reverse the mapping.
if strings.Contains(component, "/") || strings.Contains(component, "_") {
panic(fmt.Errorf("invalid character in component [%q] of CgroupName", component))
}
}
return CgroupName(append(append([]string{}, base...), components...))
}
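// Illustrative usage of NewCgroupName (component values are hypothetical):
// names are built up from the root, and any component containing "/" or "_"
// panics.
//
//	kubepods := NewCgroupName(RootCgroupName, "kubepods")       // {"kubepods"}
//	pod := NewCgroupName(kubepods, "burstable", "pod1234-abcd") // {"kubepods", "burstable", "pod1234-abcd"}
//	NewCgroupName(kubepods, "bad_name")                         // panics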
func escapeSystemdCgroupName(part string) string {
return strings.Replace(part, "-", "_", -1)
}
func unescapeSystemdCgroupName(part string) string {
return strings.Replace(part, "_", "-", -1)
}
// cgroupName.ToSystemd converts the internal cgroup name to a systemd name.
// For example, the name {"kubepods", "burstable", "pod1234-abcd-5678-efgh"} becomes
// "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1234_abcd_5678_efgh.slice"
// This function always expands the systemd name into the cgroupfs form. If only
// the last part is needed, use path.Base(...) on it to discard the rest.
func (cgroupName CgroupName) ToSystemd() string {
if len(cgroupName) == 0 || (len(cgroupName) == 1 && cgroupName[0] == "") {
return "/"
}
newparts := []string{}
for _, part := range cgroupName {
part = escapeSystemdCgroupName(part)
newparts = append(newparts, part)
}
result, err := cgroupsystemd.ExpandSlice(strings.Join(newparts, "-") + systemdSuffix)
if err != nil {
// Should never happen...
panic(fmt.Errorf("error converting cgroup name [%v] to systemd format: %v", cgroupName, err))
}
return result
}
func ParseSystemdToCgroupName(name string) CgroupName {
driverName := path.Base(name)
driverName = strings.TrimSuffix(driverName, systemdSuffix)
parts := strings.Split(driverName, "-")
result := []string{}
for _, part := range parts {
result = append(result, unescapeSystemdCgroupName(part))
}
return CgroupName(result)
}
func (cgroupName CgroupName) ToCgroupfs() string {
return "/" + path.Join(cgroupName...)
}
func ParseCgroupfsToCgroupName(name string) CgroupName {
components := strings.Split(strings.TrimPrefix(name, "/"), "/")
if len(components) == 1 && components[0] == "" {
components = []string{}
}
return CgroupName(components)
}
func IsSystemdStyleName(name string) bool {
return strings.HasSuffix(name, systemdSuffix)
}
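// A sketch of the two naming conventions side by side (values illustrative):
//
//	name := CgroupName{"kubepods", "burstable"}
//	name.ToCgroupfs() // "/kubepods/burstable"
//	name.ToSystemd()  // "/kubepods.slice/kubepods-burstable.slice"
//
//	ParseCgroupfsToCgroupName("/kubepods/burstable")     // {"kubepods", "burstable"}
//	ParseSystemdToCgroupName("kubepods-burstable.slice") // {"kubepods", "burstable"}
//	IsSystemdStyleName("kubepods-burstable.slice")       // true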
// CgroupSubsystems holds information about the mounted cgroup subsystems
type CgroupSubsystems struct {
// Cgroup subsystem mounts.
// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
Mounts []libcontainercgroups.Mount
// Cgroup subsystem to their mount location.
// e.g.: "cpu" -> "/sys/fs/cgroup/cpu"
MountPoints map[string]string
}
// cgroupCommon implements common tasks
// that are valid for both cgroup v1 and v2.
// This prevents duplicating the code between
// v1 and v2 specific implementations.
type cgroupCommon struct {
// subsystems holds information about all the
// mounted cgroup subsystems on the node
subsystems *CgroupSubsystems
// useSystemd tells if systemd cgroup manager should be used.
useSystemd bool
}
// Make sure that cgroupV1impl and cgroupV2impl implement the CgroupManager interface
var _ CgroupManager = &cgroupV1impl{}
var _ CgroupManager = &cgroupV2impl{}
// NewCgroupManager is a factory method that returns a CgroupManager
func NewCgroupManager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
if libcontainercgroups.IsCgroup2UnifiedMode() {
return NewCgroupV2Manager(cs, cgroupDriver)
}
return NewCgroupV1Manager(cs, cgroupDriver)
}
func newCgroupCommon(cs *CgroupSubsystems, cgroupDriver string) cgroupCommon {
return cgroupCommon{
subsystems: cs,
useSystemd: cgroupDriver == "systemd",
}
}
// Name converts the cgroup to the driver specific value in cgroupfs form.
// This always returns a valid cgroupfs path even when the systemd driver is in use!
func (m *cgroupCommon) Name(name CgroupName) string {
if m.useSystemd {
return name.ToSystemd()
}
return name.ToCgroupfs()
}
// CgroupName converts the literal cgroupfs name on the host to an internal identifier.
func (m *cgroupCommon) CgroupName(name string) CgroupName {
if m.useSystemd {
return ParseSystemdToCgroupName(name)
}
return ParseCgroupfsToCgroupName(name)
}
// buildCgroupPaths builds a path to each cgroup subsystem for the specified name.
func (m *cgroupCommon) buildCgroupPaths(name CgroupName) map[string]string {
cgroupFsAdaptedName := m.Name(name)
cgroupPaths := make(map[string]string, len(m.subsystems.MountPoints))
for key, val := range m.subsystems.MountPoints {
cgroupPaths[key] = path.Join(val, cgroupFsAdaptedName)
}
return cgroupPaths
}
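// Sketch of the result on a cgroup v1 host (mount points illustrative):
//
//	m.buildCgroupPaths(CgroupName{"kubepods"})
//	// -> {"cpu": "/sys/fs/cgroup/cpu/kubepods",
//	//     "memory": "/sys/fs/cgroup/memory/kubepods", ...}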
// libctCgroupConfig converts CgroupConfig to libcontainer's Cgroup config.
func (m *cgroupCommon) libctCgroupConfig(in *CgroupConfig, needResources bool) *libcontainercgroups.Cgroup {
config := &libcontainercgroups.Cgroup{
Systemd: m.useSystemd,
}
if needResources {
config.Resources = m.toResources(in.ResourceParameters)
} else {
config.Resources = &libcontainercgroups.Resources{}
}
if !config.Systemd {
// For fs cgroup manager, we can either set Path or Name and Parent.
// Setting Path is easier.
config.Path = in.Name.ToCgroupfs()
return config
}
// For systemd, we have to set Name and Parent, as they are needed to talk to systemd.
// Setting Path is optional as it can be deduced from Name and Parent.
// TODO(filbranden): This logic belongs in libcontainer/cgroup/systemd instead.
// It should take a libcontainerconfigs.Cgroup.Path field (rather than Name and Parent)
// and split it appropriately, using essentially the logic below.
// This was done for cgroupfs in opencontainers/runc#497 but a counterpart
// for systemd was never introduced.
dir, base := path.Split(in.Name.ToSystemd())
if dir == "/" {
dir = "-.slice"
} else {
dir = path.Base(dir)
}
config.Parent = dir
config.Name = base
return config
}
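// Worked example of the systemd split above (values illustrative): for the
// expanded name "/kubepods.slice/kubepods-burstable.slice", path.Split yields
// dir="/kubepods.slice/" and base="kubepods-burstable.slice", so the config
// becomes Parent="kubepods.slice" and Name="kubepods-burstable.slice". For a
// top-level slice the dir is "/", which maps to Parent="-.slice", systemd's
// name for the root slice.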
// Destroy destroys the specified cgroup
func (m *cgroupCommon) Destroy(cgroupConfig *CgroupConfig) error {
start := time.Now()
defer func() {
metrics.CgroupManagerDuration.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
}()
libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, false)
manager, err := libcontainercgroupmanager.New(libcontainerCgroupConfig)
if err != nil {
return err
}
// Delete cgroups using the libcontainer Manager's Destroy() method
if err = manager.Destroy(); err != nil {
return fmt.Errorf("unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, err)
}
return nil
}
func (m *cgroupCommon) SetCgroupConfig(name CgroupName, resourceConfig *ResourceConfig) error {
containerConfig := &CgroupConfig{
Name: name,
ResourceParameters: resourceConfig,
}
return m.Update(containerConfig)
}
// getCPUWeight converts from the range [2, 262144] to [1, 10000]
func getCPUWeight(cpuShares *uint64) uint64 {
if cpuShares == nil {
return 0
}
if *cpuShares >= 262144 {
return 10000
}
return 1 + ((*cpuShares-2)*9999)/262142
}
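// Spot checks of the mapping, worked from the formula above (values chosen
// for illustration):
//
//	shares := uint64(2)
//	getCPUWeight(&shares) // 1     (the v1 minimum maps to the v2 minimum)
//	shares = 1024
//	getCPUWeight(&shares) // 39    (1 + (1022*9999)/262142)
//	shares = 262144
//	getCPUWeight(&shares) // 10000 (the v1 maximum maps to the v2 maximum)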
var (
availableRootControllersOnce sync.Once
availableRootControllers sets.Set[string]
)
func (m *cgroupCommon) toResources(resourceConfig *ResourceConfig) *libcontainercgroups.Resources {
resources := &libcontainercgroups.Resources{
SkipDevices: true,
SkipFreezeOnSet: true,
}
if resourceConfig == nil {
return resources
}
if resourceConfig.Memory != nil {
resources.Memory = *resourceConfig.Memory
}
if resourceConfig.CPUShares != nil {
if libcontainercgroups.IsCgroup2UnifiedMode() {
resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares)
} else {
resources.CpuShares = *resourceConfig.CPUShares
}
}
if resourceConfig.CPUQuota != nil {
resources.CpuQuota = *resourceConfig.CPUQuota
}
if resourceConfig.CPUPeriod != nil {
resources.CpuPeriod = *resourceConfig.CPUPeriod
}
if resourceConfig.PidsLimit != nil {
resources.PidsLimit = *resourceConfig.PidsLimit
}
if !resourceConfig.CPUSet.IsEmpty() {
resources.CpusetCpus = resourceConfig.CPUSet.String()
}
m.maybeSetHugetlb(resourceConfig, resources)
// Ideally, unified is used for all the resources when running on cgroup v2.
// It doesn't make a difference for the memory.max limit, but for e.g. the cpu controller
// you can specify the correct setting without relying on the conversions performed by the OCI runtime.
if resourceConfig.Unified != nil && libcontainercgroups.IsCgroup2UnifiedMode() {
resources.Unified = make(map[string]string)
for k, v := range resourceConfig.Unified {
resources.Unified[k] = v
}
}
return resources
}
func (m *cgroupCommon) maybeSetHugetlb(resourceConfig *ResourceConfig, resources *libcontainercgroups.Resources) {
// Check if hugetlb is supported.
if libcontainercgroups.IsCgroup2UnifiedMode() {
if !getSupportedUnifiedControllers().Has("hugetlb") {
klog.V(6).InfoS("Optional subsystem not supported: hugetlb")
return
}
} else if _, ok := m.subsystems.MountPoints["hugetlb"]; !ok {
klog.V(6).InfoS("Optional subsystem not supported: hugetlb")
return
}
// For each page size enumerated, set that value.
pageSizes := sets.New[string]()
for pageSize, limit := range resourceConfig.HugePageLimit {
sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize)
if err != nil {
klog.InfoS("Invalid pageSize", "err", err)
continue
}
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
Pagesize: sizeString,
Limit: uint64(limit),
})
pageSizes.Insert(sizeString)
}
// For each page size omitted, set the limit to 0.
for _, pageSize := range libcontainercgroups.HugePageSizes() {
if pageSizes.Has(pageSize) {
continue
}
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
Pagesize: pageSize,
Limit: uint64(0),
})
}
}
// Update updates the cgroup with the specified Cgroup Configuration
func (m *cgroupCommon) Update(cgroupConfig *CgroupConfig) error {
start := time.Now()
defer func() {
metrics.CgroupManagerDuration.WithLabelValues("update").Observe(metrics.SinceInSeconds(start))
}()
libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, true)
manager, err := libcontainercgroupmanager.New(libcontainerCgroupConfig)
if err != nil {
return fmt.Errorf("failed to create cgroup manager: %v", err)
}
return manager.Set(libcontainerCgroupConfig.Resources)
}
// Create creates the specified cgroup
func (m *cgroupCommon) Create(cgroupConfig *CgroupConfig) error {
start := time.Now()
defer func() {
metrics.CgroupManagerDuration.WithLabelValues("create").Observe(metrics.SinceInSeconds(start))
}()
libcontainerCgroupConfig := m.libctCgroupConfig(cgroupConfig, true)
manager, err := libcontainercgroupmanager.New(libcontainerCgroupConfig)
if err != nil {
return err
}
// Apply(-1) is a hack to create the cgroup directories for each resource
// subsystem. The function [cgroups.Manager.apply()] applies cgroup
// configuration to the process with the specified pid.
// It creates cgroup files for each subsystem and writes the pid
// in the tasks file. We use the function to create all the required
// cgroup files without attaching any "real" pid to the cgroup.
if err := manager.Apply(-1); err != nil {
return err
}
// It may be confusing that we call Set after Apply, but runc follows the
// same pattern; it's needed to ensure the cpu quota is set properly.
if err := manager.Set(libcontainerCgroupConfig.Resources); err != nil {
utilruntime.HandleError(fmt.Errorf("cgroup manager.Set failed: %w", err))
}
return nil
}
// Scans through all subsystems to find pids associated with the specified cgroup.
func (m *cgroupCommon) Pids(name CgroupName) []int {
// we need the driver specific name
cgroupFsName := m.Name(name)
// Get a list of processes that we need to kill
pidsToKill := sets.New[int]()
var pids []int
for _, val := range m.subsystems.MountPoints {
dir := path.Join(val, cgroupFsName)
_, err := os.Stat(dir)
if os.IsNotExist(err) {
// The pod cgroup for this subsystem has already been deleted;
// do nothing and continue.
continue
}
// Get a list of pids that are still charged to the pod's cgroup
pids, err = getCgroupProcs(dir)
if err != nil {
continue
}
pidsToKill.Insert(pids...)
// WalkFunc which is called for each file and directory in the pod cgroup dir
visitor := func(path string, info os.FileInfo, err error) error {
if err != nil {
klog.V(4).InfoS("Cgroup manager encountered error scanning cgroup path", "path", path, "err", err)
return filepath.SkipDir
}
if !info.IsDir() {
return nil
}
pids, err = getCgroupProcs(path)
if err != nil {
klog.V(4).InfoS("Cgroup manager encountered error getting procs for cgroup path", "path", path, "err", err)
return filepath.SkipDir
}
pidsToKill.Insert(pids...)
return nil
}
// Walk through the pod cgroup directory to find container cgroups
// that haven't been GCed yet, and collect the processes attached to
// all such unwanted containers under the pod cgroup.
if err = filepath.Walk(dir, visitor); err != nil {
klog.V(4).InfoS("Cgroup manager encountered error scanning pids for directory", "path", dir, "err", err)
}
}
return sets.List(pidsToKill)
}
// ReduceCPULimits reduces the cgroup's cpu shares to the lowest possible value
func (m *cgroupCommon) ReduceCPULimits(cgroupName CgroupName) error {
// Set lowest possible CpuShares value for the cgroup
minimumCPUShares := uint64(MinShares)
resources := &ResourceConfig{
CPUShares: &minimumCPUShares,
}
containerConfig := &CgroupConfig{
Name: cgroupName,
ResourceParameters: resources,
}
return m.Update(containerConfig)
}
func readCgroupMemoryConfig(cgroupPath string, memLimitFile string) (*ResourceConfig, error) {
memLimit, err := fscommon.GetCgroupParamUint(cgroupPath, memLimitFile)
if err != nil {
return nil, fmt.Errorf("failed to read %s for cgroup %v: %v", memLimitFile, cgroupPath, err)
}
mLim := int64(memLimit)
//TODO(vinaykul,InPlacePodVerticalScaling): Add memory request support
return &ResourceConfig{Memory: &mLim}, nil
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"errors"
"fmt"
"strconv"
"strings"
libcontainercgroups "github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
const cgroupv1MemLimitFile string = "memory.limit_in_bytes"
// cgroupV1impl implements the CgroupManager interface
// for cgroup v1.
// It's a stateless object which can be used to
// update, create or delete any number of cgroups.
// It relies on runc/libcontainer cgroup managers.
type cgroupV1impl struct {
cgroupCommon
}
func NewCgroupV1Manager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
return &cgroupV1impl{
cgroupCommon: newCgroupCommon(cs, cgroupDriver),
}
}
// Version of the cgroup implementation on the host
func (c *cgroupV1impl) Version() int {
return 1
}
// Validate checks if all subsystem cgroups are valid
func (c *cgroupV1impl) Validate(name CgroupName) error {
// Get map of all cgroup paths on the system for the particular cgroup
cgroupPaths := c.buildCgroupPaths(name)
// the presence of alternative control groups not known to runc confuses
// the kubelet existence checks.
// ideally, we would have a mechanism in runc to support Exists() logic
// scoped to the set of control groups it understands. this is being discussed
// in https://github.com/opencontainers/runc/issues/1440
// once resolved, we can remove this code.
allowlistControllers := sets.New[string]("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
if _, ok := c.subsystems.MountPoints["hugetlb"]; ok {
allowlistControllers.Insert("hugetlb")
}
var missingPaths []string
// If even one cgroup path doesn't exist, then the cgroup doesn't exist.
for controller, path := range cgroupPaths {
// ignore mounts we don't care about
if !allowlistControllers.Has(controller) {
continue
}
if !libcontainercgroups.PathExists(path) {
missingPaths = append(missingPaths, path)
}
}
if len(missingPaths) > 0 {
return fmt.Errorf("cgroup %q has some missing paths: %v", name, strings.Join(missingPaths, ", "))
}
return nil
}
// Exists checks if all subsystem cgroups already exist
func (c *cgroupV1impl) Exists(name CgroupName) bool {
return c.Validate(name) == nil
}
// MemoryUsage returns the current memory usage of the specified cgroup,
// as read from cgroupfs.
func (c *cgroupV1impl) MemoryUsage(name CgroupName) (int64, error) {
var path, file string
mp, ok := c.subsystems.MountPoints["memory"]
if !ok { // should not happen
return -1, errors.New("no cgroup v1 mountpoint for memory controller found")
}
path = mp + "/" + c.Name(name)
file = "memory.usage_in_bytes"
val, err := fscommon.GetCgroupParamUint(path, file)
return int64(val), err
}
// Get the resource config values applied to the cgroup for the specified resource type
func (c *cgroupV1impl) GetCgroupConfig(name CgroupName, resource v1.ResourceName) (*ResourceConfig, error) {
cgroupPaths := c.buildCgroupPaths(name)
cgroupResourcePath, found := cgroupPaths[string(resource)]
if !found {
return nil, fmt.Errorf("failed to build %v cgroup fs path for cgroup %v", resource, name)
}
switch resource {
case v1.ResourceCPU:
return c.getCgroupCPUConfig(cgroupResourcePath)
case v1.ResourceMemory:
return c.getCgroupMemoryConfig(cgroupResourcePath)
}
return nil, fmt.Errorf("unsupported resource %v for cgroup %v", resource, name)
}
func (c *cgroupV1impl) getCgroupCPUConfig(cgroupPath string) (*ResourceConfig, error) {
cpuQuotaStr, errQ := fscommon.GetCgroupParamString(cgroupPath, "cpu.cfs_quota_us")
if errQ != nil {
return nil, fmt.Errorf("failed to read CPU quota for cgroup %v: %w", cgroupPath, errQ)
}
cpuQuota, errInt := strconv.ParseInt(cpuQuotaStr, 10, 64)
if errInt != nil {
return nil, fmt.Errorf("failed to convert CPU quota as integer for cgroup %v: %w", cgroupPath, errInt)
}
cpuPeriod, errP := fscommon.GetCgroupParamUint(cgroupPath, "cpu.cfs_period_us")
if errP != nil {
return nil, fmt.Errorf("failed to read CPU period for cgroup %v: %w", cgroupPath, errP)
}
cpuShares, errS := fscommon.GetCgroupParamUint(cgroupPath, "cpu.shares")
if errS != nil {
return nil, fmt.Errorf("failed to read CPU shares for cgroup %v: %w", cgroupPath, errS)
}
return &ResourceConfig{CPUShares: &cpuShares, CPUQuota: &cpuQuota, CPUPeriod: &cpuPeriod}, nil
}
func (c *cgroupV1impl) getCgroupMemoryConfig(cgroupPath string) (*ResourceConfig, error) {
return readCgroupMemoryConfig(cgroupPath, cgroupv1MemLimitFile)
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/cgroups/fscommon"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
)
const (
cgroupv2MemLimitFile = "memory.max"
cgroupv2CpuMaxFile = "cpu.max"
cgroupv2CpuWeightFile = "cpu.weight"
)
// cgroupV2impl implements the CgroupManager interface
// for cgroup v2.
// It's a stateless object which can be used to
// update, create or delete any number of cgroups.
// It relies on runc/libcontainer cgroup managers.
type cgroupV2impl struct {
cgroupCommon
}
func NewCgroupV2Manager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
return &cgroupV2impl{
cgroupCommon: newCgroupCommon(cs, cgroupDriver),
}
}
// Version of the cgroup implementation on the host
func (c *cgroupV2impl) Version() int {
return 2
}
// Validate checks if all subsystem cgroups are valid
func (c *cgroupV2impl) Validate(name CgroupName) error {
cgroupPath := c.buildCgroupUnifiedPath(name)
neededControllers := getSupportedUnifiedControllers()
enabledControllers, err := readUnifiedControllers(cgroupPath)
if err != nil {
return fmt.Errorf("could not read controllers for cgroup %q: %w", name, err)
}
difference := neededControllers.Difference(enabledControllers)
if difference.Len() > 0 {
return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(sets.List(difference), ", "))
}
return nil
}
// Exists checks if all subsystem cgroups already exist
func (c *cgroupV2impl) Exists(name CgroupName) bool {
return c.Validate(name) == nil
}
// MemoryUsage returns the current memory usage of the specified cgroup,
// as read from cgroupfs.
func (c *cgroupV2impl) MemoryUsage(name CgroupName) (int64, error) {
var path, file string
path = c.buildCgroupUnifiedPath(name)
file = "memory.current"
val, err := fscommon.GetCgroupParamUint(path, file)
return int64(val), err
}
// Get the resource config values applied to the cgroup for the specified resource type
func (c *cgroupV2impl) GetCgroupConfig(name CgroupName, resource v1.ResourceName) (*ResourceConfig, error) {
cgroupPaths := c.buildCgroupPaths(name)
cgroupResourcePath, found := cgroupPaths[string(resource)]
if !found {
return nil, fmt.Errorf("failed to build %v cgroup fs path for cgroup %v", resource, name)
}
switch resource {
case v1.ResourceCPU:
return c.getCgroupCPUConfig(cgroupResourcePath)
case v1.ResourceMemory:
return c.getCgroupMemoryConfig(cgroupResourcePath)
}
return nil, fmt.Errorf("unsupported resource %v for cgroup %v", resource, name)
}
func (c *cgroupV2impl) getCgroupCPUConfig(cgroupPath string) (*ResourceConfig, error) {
var cpuLimitStr, cpuPeriodStr string
cpuLimitAndPeriod, err := fscommon.GetCgroupParamString(cgroupPath, cgroupv2CpuMaxFile)
if err != nil {
return nil, fmt.Errorf("failed to read %s file for cgroup %v: %w", cgroupv2CpuMaxFile, cgroupPath, err)
}
numItems, errScan := fmt.Sscanf(cpuLimitAndPeriod, "%s %s", &cpuLimitStr, &cpuPeriodStr)
if errScan != nil || numItems != 2 {
return nil, fmt.Errorf("failed to correctly parse content of %s file ('%s') for cgroup %v: %w",
cgroupv2CpuMaxFile, cpuLimitAndPeriod, cgroupPath, errScan)
}
cpuLimit := int64(-1)
if cpuLimitStr != Cgroup2MaxCpuLimit {
cpuLimit, err = strconv.ParseInt(cpuLimitStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert CPU limit as integer for cgroup %v: %w", cgroupPath, err)
}
}
cpuPeriod, errPeriod := strconv.ParseUint(cpuPeriodStr, 10, 64)
if errPeriod != nil {
return nil, fmt.Errorf("failed to convert CPU period as integer for cgroup %v: %w", cgroupPath, errPeriod)
}
cpuWeight, errWeight := fscommon.GetCgroupParamUint(cgroupPath, cgroupv2CpuWeightFile)
if errWeight != nil {
return nil, fmt.Errorf("failed to read CPU weight for cgroup %v: %w", cgroupPath, errWeight)
}
cpuShares := cpuWeightToCPUShares(cpuWeight)
return &ResourceConfig{CPUShares: &cpuShares, CPUQuota: &cpuLimit, CPUPeriod: &cpuPeriod}, nil
}
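// The two shapes of the cpu.max file handled above (contents illustrative):
//
//	"max 100000"   -> CPUQuota=-1 (unlimited), CPUPeriod=100000
//	"50000 100000" -> CPUQuota=50000, CPUPeriod=100000
//
// CPUShares is then derived from cpu.weight via cpuWeightToCPUShares below.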
func (c *cgroupV2impl) getCgroupMemoryConfig(cgroupPath string) (*ResourceConfig, error) {
return readCgroupMemoryConfig(cgroupPath, cgroupv2MemLimitFile)
}
// getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
func getSupportedUnifiedControllers() sets.Set[string] {
// This is the set of controllers used by the Kubelet
supportedControllers := sets.New("cpu", "cpuset", "memory", "hugetlb", "pids")
// Memoize the set of controllers that are present in the root cgroup
availableRootControllersOnce.Do(func() {
var err error
availableRootControllers, err = readUnifiedControllers(cmutil.CgroupRoot)
if err != nil {
panic(fmt.Errorf("cannot read cgroup controllers at %s", cmutil.CgroupRoot))
}
})
// Return the set of controllers that are supported both by the Kubelet and by the kernel
return supportedControllers.Intersection(availableRootControllers)
}
// readUnifiedControllers reads the controllers available at the specified cgroup
func readUnifiedControllers(path string) (sets.Set[string], error) {
controllersFileContent, err := os.ReadFile(filepath.Join(path, "cgroup.controllers"))
if err != nil {
return nil, err
}
controllers := strings.Fields(string(controllersFileContent))
return sets.New(controllers...), nil
}
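// For reference, cgroup.controllers is a single space-separated line; a call
// such as the following (path and contents illustrative) yields a set:
//
//	readUnifiedControllers("/sys/fs/cgroup")
//	// with cgroup.controllers containing "cpuset cpu io memory hugetlb pids"
//	// -> {"cpuset", "cpu", "io", "memory", "hugetlb", "pids"}, nil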
// buildCgroupUnifiedPath builds the unified (cgroup v2) path for the specified cgroup name.
func (c *cgroupV2impl) buildCgroupUnifiedPath(name CgroupName) string {
cgroupFsAdaptedName := c.Name(name)
return path.Join(cmutil.CgroupRoot, cgroupFsAdaptedName)
}
// Convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
func cpuSharesToCPUWeight(cpuShares uint64) uint64 {
return uint64((((cpuShares - 2) * 9999) / 262142) + 1)
}
// Convert cgroup v2 cpu.weight value to cgroup v1 cpu.shares
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
func cpuWeightToCPUShares(cpuWeight uint64) uint64 {
return uint64((((cpuWeight - 1) * 262142) / 9999) + 2)
}
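// Worked example of the two conversions above: cpuSharesToCPUWeight(1024)
// == 1 + (1022*9999)/262142 == 39, while cpuWeightToCPUShares(39)
// == (38*262142)/9999 + 2 == 998. Integer division makes the round trip
// lossy, so the pair are inverses only approximately.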
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package cm
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
// TODO: Migrate kubelet to either use its own internal objects or client library.
v1 "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/server/healthz"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/klog/v2"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/utils/cpuset"
)
const (
// Warning message for the users still using cgroup v1
CgroupV1MaintenanceModeWarning = "cgroup v1 support is in maintenance mode, please migrate to cgroup v2"
// Warning message for the users using cgroup v2 on a kernel that doesn't support root `cpu.stat`.
// `cpu.stat` was added to root cgroup in kernel 5.8.
// (ref: https://github.com/torvalds/linux/commit/936f2a70f2077f64fab1dcb3eca71879e82ecd3f)
CgroupV2KernelWarning = "cgroup v2 is being used on a kernel, which doesn't support root `cpu.stat`." +
"Kubelet will continue, but may experience instability or wrong behavior"
)
type ActivePodsFunc func() []*v1.Pod
type GetNodeFunc func() (*v1.Node, error)
// Manages the containers running on a machine.
type ContainerManager interface {
// Runs the container manager's housekeeping.
// - Ensures that the Docker daemon is in a container.
// - Creates the system container where all non-containerized processes run.
Start(context.Context, *v1.Node, ActivePodsFunc, GetNodeFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService, bool) error
// SystemCgroupsLimit returns resources allocated to system cgroups in the machine.
// These cgroups include the system and Kubernetes services.
SystemCgroupsLimit() v1.ResourceList
// GetNodeConfig returns a NodeConfig that is being used by the container manager.
GetNodeConfig() NodeConfig
// Status returns internal Status.
Status() Status
// NewPodContainerManager is a factory method which returns a podContainerManager object.
// Returns a noop implementation if the qos cgroup hierarchy is not enabled.
NewPodContainerManager() PodContainerManager
// GetMountedSubsystems returns the mounted cgroup subsystems on the node
GetMountedSubsystems() *CgroupSubsystems
// GetQOSContainersInfo returns the names of top level QoS containers
GetQOSContainersInfo() QOSContainersInfo
// GetNodeAllocatableReservation returns the amount of compute resources that have to be reserved from scheduling.
GetNodeAllocatableReservation() v1.ResourceList
// GetCapacity returns the amount of compute resources tracked by container manager available on the node.
GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList
// GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources),
// node allocatable (amount of total healthy resources reported by device plugin),
// and inactive device plugin resources previously registered on the node.
GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string)
// UpdateQOSCgroups performs housekeeping updates to ensure that the top
// level QoS containers have their desired state in a thread-safe way
UpdateQOSCgroups() error
// GetResources returns RunContainerOptions with devices, mounts, and env fields populated for
// extended resources required by container.
GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error)
// UpdatePluginResources calls Allocate of device plugin handler for potential
// requests for device plugin resources, and returns an error if it fails.
// Otherwise, it updates allocatableResource in nodeInfo if necessary,
// to make sure it is at least equal to the pod's requested capacity for
// any registered device plugin resource
UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error
InternalContainerLifecycle() InternalContainerLifecycle
// GetPodCgroupRoot returns the cgroup which contains all pods.
GetPodCgroupRoot() string
// GetPluginRegistrationHandlers returns a set of plugin registration handlers
// The pluginwatcher's Handlers allow a single module to handle
// registration.
GetPluginRegistrationHandlers() map[string]cache.PluginHandler
// GetHealthCheckers returns a set of health checkers for all plugins.
// These checkers are integrated into the systemd watchdog to monitor the service's health.
GetHealthCheckers() []healthz.HealthChecker
// ShouldResetExtendedResourceCapacity returns whether or not the extended resources should be zeroed,
// due to node recreation.
ShouldResetExtendedResourceCapacity() bool
// GetAllocateResourcesPodAdmitHandler returns an instance of a PodAdmitHandler responsible for allocating pod resources.
GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler
// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement.
GetNodeAllocatableAbsolute() v1.ResourceList
// PrepareDynamicResources prepares dynamic pod resources
PrepareDynamicResources(context.Context, *v1.Pod) error
// UnprepareDynamicResources unprepares dynamic pod resources
UnprepareDynamicResources(context.Context, *v1.Pod) error
// PodMightNeedToUnprepareResources returns true if the pod with the given UID
// might need to unprepare resources.
PodMightNeedToUnprepareResources(UID types.UID) bool
// UpdateAllocatedResourcesStatus updates the status of allocated resources for the pod.
UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus)
// Updates returns a channel that receives an Update when a device changes its status.
Updates() <-chan resourceupdates.Update
// PodHasExclusiveCPUs returns true if the provided pod has containers with exclusive CPUs.
// This means that at least one sidecar container or one app container has exclusive CPUs allocated.
PodHasExclusiveCPUs(pod *v1.Pod) bool
// ContainerHasExclusiveCPUs returns true if the provided container in the pod has exclusive CPUs allocated
ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool
// Implements the PodResources Provider API
podresources.CPUsProvider
podresources.DevicesProvider
podresources.MemoryProvider
podresources.DynamicResourcesProvider
}
type cpuAllocationReader interface {
GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet
}
type NodeConfig struct {
NodeName types.NodeName
RuntimeCgroupsName string
SystemCgroupsName string
KubeletCgroupsName string
KubeletOOMScoreAdj int32
ContainerRuntime string
CgroupsPerQOS bool
CgroupRoot string
CgroupDriver string
KubeletRootDir string
ProtectKernelDefaults bool
NodeAllocatableConfig
QOSReserved map[v1.ResourceName]int64
CPUManagerPolicy string
CPUManagerPolicyOptions map[string]string
TopologyManagerScope string
CPUManagerReconcilePeriod time.Duration
MemoryManagerPolicy string
MemoryManagerReservedMemory []kubeletconfig.MemoryReservation
PodPidsLimit int64
EnforceCPULimits bool
CPUCFSQuotaPeriod time.Duration
TopologyManagerPolicy string
TopologyManagerPolicyOptions map[string]string
CgroupVersion int
}
type NodeAllocatableConfig struct {
KubeReservedCgroupName string
SystemReservedCgroupName string
ReservedSystemCPUs cpuset.CPUSet
EnforceNodeAllocatable sets.Set[string]
KubeReserved v1.ResourceList
SystemReserved v1.ResourceList
HardEvictionThresholds []evictionapi.Threshold
}
type Status struct {
// Any soft requirements that were unsatisfied.
SoftRequirements error
}
func int64Slice(in []int) []int64 {
out := make([]int64, len(in))
for i := range in {
out[i] = int64(in[i])
}
return out
}
func podHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod) bool {
for _, container := range pod.Spec.InitContainers {
if containerHasExclusiveCPUs(cr, pod, &container) {
return true
}
}
for _, container := range pod.Spec.Containers {
if containerHasExclusiveCPUs(cr, pod, &container) {
return true
}
}
klog.V(4).InfoS("Pod contains no container with pinned cpus", "podName", pod.Name)
return false
}
func containerHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod, container *v1.Container) bool {
exclusiveCPUs := cr.GetExclusiveCPUs(string(pod.UID), container.Name)
if !exclusiveCPUs.IsEmpty() {
klog.V(4).InfoS("Container has pinned cpus", "podName", pod.Name, "containerName", container.Name)
return true
}
return false
}
// parsePercentage parses the percentage string to a numeric value.
func parsePercentage(v string) (int64, error) {
if !strings.HasSuffix(v, "%") {
return 0, fmt.Errorf("percentage expected, got '%s'", v)
}
percentage, err := strconv.ParseInt(strings.TrimRight(v, "%"), 10, 0)
if err != nil {
return 0, fmt.Errorf("invalid number in percentage '%s'", v)
}
if percentage < 0 || percentage > 100 {
return 0, fmt.Errorf("percentage must be between 0 and 100")
}
return percentage, nil
}
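// Illustrative calls (inputs hypothetical):
//
//	parsePercentage("50%")  // 50, nil
//	parsePercentage("50")   // 0, error: percentage expected
//	parsePercentage("150%") // 0, error: percentage must be between 0 and 100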
// ParseQOSReserved parses the --qos-reserved option
func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) {
reservations := make(map[v1.ResourceName]int64)
for k, v := range m {
switch v1.ResourceName(k) {
// Only memory resources are supported.
case v1.ResourceMemory:
q, err := parsePercentage(v)
if err != nil {
return nil, fmt.Errorf("failed to parse percentage %q for %q resource: %w", v, k, err)
}
reservations[v1.ResourceName(k)] = q
default:
return nil, fmt.Errorf("cannot reserve %q resource", k)
}
}
return &reservations, nil
}
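// Example: a --qos-reserved value of memory=50% parses to a single
// reservation (input hypothetical); any resource other than memory is
// rejected:
//
//	ParseQOSReserved(map[string]string{"memory": "50%"})
//	// -> &map[v1.ResourceName]int64{v1.ResourceMemory: 50}, nil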
func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*podresourcesapi.ContainerDevices {
var respDevs []*podresourcesapi.ContainerDevices
for resourceName, resourceDevs := range devs {
for devID, dev := range resourceDevs {
topo := dev.GetTopology()
if topo == nil {
// Some device plugins do not report topology information.
// This is legal, so we report the devices anyway and
// let the client decide what to do.
respDevs = append(respDevs, &podresourcesapi.ContainerDevices{
ResourceName: resourceName,
DeviceIds: []string{devID},
})
continue
}
for _, node := range topo.GetNodes() {
respDevs = append(respDevs, &podresourcesapi.ContainerDevices{
ResourceName: resourceName,
DeviceIds: []string{devID},
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{
{
ID: node.GetID(),
},
},
},
})
}
}
}
return respDevs
}
//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
"fmt"
"os"
"path"
"sync"
"time"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/manager"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilpath "k8s.io/utils/path"
inuserns "github.com/moby/sys/userns"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
internalapi "k8s.io/cri-api/pkg/apis"
pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
"k8s.io/kubernetes/pkg/kubelet/cm/dra"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
memorymanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/swap"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/util/oom"
)
// A non-user container tracked by the Kubelet.
type systemContainer struct {
// Absolute name of the container.
name string
// CPU limit in millicores.
cpuMillicores int64
// Function that ensures the state of the container.
// m is the cgroup manager for the specified container.
ensureStateFunc func(m cgroups.Manager) error
// Manager for the cgroups of the external container.
manager cgroups.Manager
}
func newSystemCgroups(containerName string) (*systemContainer, error) {
manager, err := createManager(containerName)
if err != nil {
return nil, err
}
return &systemContainer{
name: containerName,
manager: manager,
}, nil
}
type containerManagerImpl struct {
sync.RWMutex
cadvisorInterface cadvisor.Interface
mountUtil mount.Interface
NodeConfig
status Status
// External containers being managed.
systemContainers []*systemContainer
// Tasks that are run periodically
periodicTasks []func()
// Holds all the mounted cgroup subsystems
subsystems *CgroupSubsystems
nodeInfo *v1.Node
// Interface for cgroup management
cgroupManager CgroupManager
// Capacity of this node.
capacity v1.ResourceList
// Capacity of this node, including internal resources.
internalCapacity v1.ResourceList
// Absolute cgroupfs path to a cgroup that Kubelet needs to place all pods under.
// This path includes a top level container for enforcing Node Allocatable.
cgroupRoot CgroupName
// Event recorder interface.
recorder record.EventRecorder
// Interface for QoS cgroup management
qosContainerManager QOSContainerManager
// Interface for exporting and allocating devices reported by device plugins.
deviceManager devicemanager.Manager
// Interface for CPU affinity management.
cpuManager cpumanager.Manager
// Interface for memory affinity management.
memoryManager memorymanager.Manager
// Interface for Topology resource co-ordination
topologyManager topologymanager.Manager
// Implementation of Dynamic Resource Allocation (DRA).
draManager *dra.Manager
// kubeClient is the interface to the Kubernetes API server. May be nil if the kubelet is running in standalone mode.
kubeClient clientset.Interface
// resourceUpdates is a channel that provides resource updates.
resourceUpdates chan resourceupdates.Update
}
type features struct {
cpuHardcapping bool
}
var _ ContainerManager = &containerManagerImpl{}
// checks if the required cgroup subsystems are mounted.
// As of now, only 'cpu' and 'memory' are required.
// cpu quota is a soft requirement.
func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
const (
cgroupMountType = "cgroup"
localErr = "system validation failed"
)
var (
cpuMountPoint string
f features
)
mountPoints, err := mountUtil.List()
if err != nil {
return f, fmt.Errorf("%s - %v", localErr, err)
}
if cgroups.IsCgroup2UnifiedMode() {
f.cpuHardcapping = true
return f, nil
}
expectedCgroups := sets.New("cpu", "cpuacct", "cpuset", "memory")
for _, mountPoint := range mountPoints {
if mountPoint.Type == cgroupMountType {
for _, opt := range mountPoint.Opts {
if expectedCgroups.Has(opt) {
expectedCgroups.Delete(opt)
}
if opt == "cpu" {
cpuMountPoint = mountPoint.Path
}
}
}
}
if expectedCgroups.Len() > 0 {
return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, sets.List(expectedCgroups))
}
// Check if cpu quota is available.
// The CPU cgroup is required and so it is expected to be mounted at this point.
periodExists, err := utilpath.Exists(utilpath.CheckFollowSymlink, path.Join(cpuMountPoint, "cpu.cfs_period_us"))
if err != nil {
klog.ErrorS(err, "Failed to detect if CPU cgroup cpu.cfs_period_us is available")
}
quotaExists, err := utilpath.Exists(utilpath.CheckFollowSymlink, path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
if err != nil {
klog.ErrorS(err, "Failed to detect if CPU cgroup cpu.cfs_quota_us is available")
}
if quotaExists && periodExists {
f.cpuHardcapping = true
}
return f, nil
}
// TODO(vmarmol): Add limits to the system containers.
// Takes the absolute name of the specified containers.
// Empty container name disables use of the specified container.
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, recorder record.EventRecorder, kubeClient clientset.Interface) (ContainerManager, error) {
subsystems, err := GetCgroupSubsystems()
if err != nil {
return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err)
}
isSwapOn, err := swap.IsSwapOn()
if err != nil {
return nil, fmt.Errorf("failed to determine if swap is on: %w", err)
}
if isSwapOn {
if failSwapOn {
return nil, fmt.Errorf("running with swap on is not supported, please disable swap or set --fail-swap-on flag to false")
}
if !swap.IsTmpfsNoswapOptionSupported(mountUtil, nodeConfig.KubeletRootDir) {
nodeRef := nodeRefFromNode(string(nodeConfig.NodeName))
recorder.Event(nodeRef, v1.EventTypeWarning, events.PossibleMemoryBackedVolumesOnDisk,
"The tmpfs noswap option is not supported. Memory-backed volumes (e.g. secrets, emptyDirs, etc.) "+
"might be swapped to disk and should no longer be considered secure.",
)
}
}
var internalCapacity = v1.ResourceList{}
// It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because
// machine info is computed and cached once as part of cAdvisor object creation.
// But `RootFsInfo` and `ImagesFsInfo` are not available at this moment, so they will be called later, when the manager starts.
machineInfo, err := cadvisorInterface.MachineInfo()
if err != nil {
return nil, err
}
capacity := cadvisor.CapacityFromMachineInfo(machineInfo)
for k, v := range capacity {
internalCapacity[k] = v
}
pidlimits, err := pidlimit.Stats()
if err == nil && pidlimits != nil && pidlimits.MaxPID != nil {
internalCapacity[pidlimit.PIDs] = *resource.NewQuantity(
int64(*pidlimits.MaxPID),
resource.DecimalSI)
}
// Turn CgroupRoot from a string (in cgroupfs path format) to internal CgroupName
cgroupRoot := ParseCgroupfsToCgroupName(nodeConfig.CgroupRoot)
cgroupManager := NewCgroupManager(subsystems, nodeConfig.CgroupDriver)
nodeConfig.CgroupVersion = cgroupManager.Version()
// Check if Cgroup-root actually exists on the node
if nodeConfig.CgroupsPerQOS {
// this defaults to / when enabled, but this check guards against regressions.
if nodeConfig.CgroupRoot == "" {
return nil, fmt.Errorf("invalid configuration: cgroups-per-qos was specified and cgroup-root was not specified. To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root")
}
// we need to check that the cgroup root actually exists for each subsystem
// of note, we always use the cgroupfs driver when performing this check since
// the input is provided in that format.
// this is important because we do not want any name conversion to occur.
if err := cgroupManager.Validate(cgroupRoot); err != nil {
return nil, fmt.Errorf("invalid configuration: %w", err)
}
klog.InfoS("Container manager verified user specified cgroup-root exists", "cgroupRoot", cgroupRoot)
// Include the top level cgroup for enforcing node allocatable into cgroup-root.
// This way, all sub modules can avoid having to understand the concept of node allocatable.
cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName)
}
klog.InfoS("Creating Container Manager object based on Node Config", "nodeConfig", nodeConfig)
qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager)
if err != nil {
return nil, err
}
cm := &containerManagerImpl{
cadvisorInterface: cadvisorInterface,
mountUtil: mountUtil,
NodeConfig: nodeConfig,
subsystems: subsystems,
cgroupManager: cgroupManager,
capacity: capacity,
internalCapacity: internalCapacity,
cgroupRoot: cgroupRoot,
recorder: recorder,
qosContainerManager: qosContainerManager,
}
cm.topologyManager, err = topologymanager.NewManager(
machineInfo.Topology,
nodeConfig.TopologyManagerPolicy,
nodeConfig.TopologyManagerScope,
nodeConfig.TopologyManagerPolicyOptions,
)
if err != nil {
return nil, err
}
klog.InfoS("Creating device plugin manager")
cm.deviceManager, err = devicemanager.NewManagerImpl(machineInfo.Topology, cm.topologyManager)
if err != nil {
return nil, err
}
cm.topologyManager.AddHintProvider(cm.deviceManager)
// Initialize DRA manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
klog.InfoS("Creating Dynamic Resource Allocation (DRA) manager")
cm.draManager, err = dra.NewManager(klog.TODO(), kubeClient, nodeConfig.KubeletRootDir)
if err != nil {
return nil, err
}
metrics.RegisterCollectors(cm.draManager.NewMetricsCollector())
}
cm.kubeClient = kubeClient
// Initialize CPU manager
cm.cpuManager, err = cpumanager.NewManager(
nodeConfig.CPUManagerPolicy,
nodeConfig.CPUManagerPolicyOptions,
nodeConfig.CPUManagerReconcilePeriod,
machineInfo,
nodeConfig.NodeAllocatableConfig.ReservedSystemCPUs,
cm.GetNodeAllocatableReservation(),
nodeConfig.KubeletRootDir,
cm.topologyManager,
)
if err != nil {
klog.ErrorS(err, "Failed to initialize cpu manager")
return nil, err
}
cm.topologyManager.AddHintProvider(cm.cpuManager)
cm.memoryManager, err = memorymanager.NewManager(
context.TODO(),
nodeConfig.MemoryManagerPolicy,
machineInfo,
cm.GetNodeAllocatableReservation(),
nodeConfig.MemoryManagerReservedMemory,
nodeConfig.KubeletRootDir,
cm.topologyManager,
)
if err != nil {
klog.ErrorS(err, "Failed to initialize memory manager")
return nil, err
}
cm.topologyManager.AddHintProvider(cm.memoryManager)
// Create a single channel for all resource updates. This channel is consumed
// by the Kubelet's main sync loop.
cm.resourceUpdates = make(chan resourceupdates.Update, 10)
// Start goroutines to fan-in updates from the various sub-managers
// (e.g., device manager, DRA manager) into the single updates channel.
var wg sync.WaitGroup
sources := map[string]<-chan resourceupdates.Update{}
if cm.deviceManager != nil {
sources["deviceManager"] = cm.deviceManager.Updates()
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) && cm.draManager != nil {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ResourceHealthStatus) {
sources["draManager"] = cm.draManager.Updates()
}
}
for name, ch := range sources {
wg.Add(1)
go func(name string, c <-chan resourceupdates.Update) {
defer wg.Done()
for v := range c {
klog.V(4).InfoS("Container Manager: forwarding resource update", "source", name, "pods", v.PodUIDs)
cm.resourceUpdates <- v
}
}(name, ch)
}
go func() {
wg.Wait()
close(cm.resourceUpdates)
}()
return cm, nil
}
// NewPodContainerManager is a factory method that returns a PodContainerManager object.
// If qosCgroups are enabled then it returns the general pod container manager,
// otherwise it returns a no-op manager which essentially does nothing.
func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
if cm.NodeConfig.CgroupsPerQOS {
return &podContainerManagerImpl{
qosContainersInfo: cm.GetQOSContainersInfo(),
subsystems: cm.subsystems,
cgroupManager: cm.cgroupManager,
podPidsLimit: cm.PodPidsLimit,
enforceCPULimits: cm.EnforceCPULimits,
// cpuCFSQuotaPeriod is in microseconds. NodeConfig.CPUCFSQuotaPeriod is time.Duration (measured in nano seconds).
// Convert (cm.CPUCFSQuotaPeriod) [nanoseconds] / time.Microsecond (1000) to get cpuCFSQuotaPeriod in microseconds.
cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
podContainerManager: cm,
}
}
return &podContainerManagerNoop{
cgroupRoot: cm.cgroupRoot,
}
}
func (cm *containerManagerImpl) PodHasExclusiveCPUs(pod *v1.Pod) bool {
return podHasExclusiveCPUs(cm.cpuManager, pod)
}
func (cm *containerManagerImpl) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
return containerHasExclusiveCPUs(cm.cpuManager, pod, container)
}
func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
return &internalContainerLifecycleImpl{cm.cpuManager, cm.memoryManager, cm.topologyManager}
}
// Create a cgroup container manager.
func createManager(containerName string) (cgroups.Manager, error) {
cg := &cgroups.Cgroup{
Parent: "/",
Name: containerName,
Resources: &cgroups.Resources{
SkipDevices: true,
},
Systemd: false,
}
return manager.New(cg)
}
type KernelTunableBehavior string
const (
KernelTunableWarn KernelTunableBehavior = "warn"
KernelTunableError KernelTunableBehavior = "error"
KernelTunableModify KernelTunableBehavior = "modify"
)
// setupKernelTunables validates that kernel tunable flags are set as expected.
// Depending upon the specified option, it will either warn, error, or modify the kernel tunable flags.
func setupKernelTunables(option KernelTunableBehavior) error {
desiredState := map[string]int{
utilsysctl.VMOvercommitMemory: utilsysctl.VMOvercommitMemoryAlways,
utilsysctl.VMPanicOnOOM: utilsysctl.VMPanicOnOOMInvokeOOMKiller,
utilsysctl.KernelPanic: utilsysctl.KernelPanicRebootTimeout,
utilsysctl.KernelPanicOnOops: utilsysctl.KernelPanicOnOopsAlways,
utilsysctl.RootMaxKeys: utilsysctl.RootMaxKeysSetting,
utilsysctl.RootMaxBytes: utilsysctl.RootMaxBytesSetting,
}
sysctl := utilsysctl.New()
errList := []error{}
for flag, expectedValue := range desiredState {
val, err := sysctl.GetSysctl(flag)
if err != nil {
errList = append(errList, err)
continue
}
if val == expectedValue {
continue
}
switch option {
case KernelTunableError:
errList = append(errList, fmt.Errorf("invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
case KernelTunableWarn:
klog.V(2).InfoS("Invalid kernel flag", "flag", flag, "expectedValue", expectedValue, "actualValue", val)
case KernelTunableModify:
klog.V(2).InfoS("Updating kernel flag", "flag", flag, "expectedValue", expectedValue, "actualValue", val)
err = sysctl.SetSysctl(flag, expectedValue)
if err != nil {
if inuserns.RunningInUserNS() {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletInUserNamespace) {
klog.V(2).InfoS("Updating kernel flag failed (running in UserNS, ignoring)", "flag", flag, "err", err)
continue
}
klog.ErrorS(err, "Updating kernel flag failed (Hint: enable KubeletInUserNamespace feature flag to ignore the error)", "flag", flag)
}
errList = append(errList, err)
}
}
}
return utilerrors.NewAggregate(errList)
}
func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
f, err := validateSystemRequirements(cm.mountUtil)
if err != nil {
return err
}
if !f.cpuHardcapping {
cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
}
b := KernelTunableModify
if cm.GetNodeConfig().ProtectKernelDefaults {
b = KernelTunableError
}
if err := setupKernelTunables(b); err != nil {
return err
}
// Set up top level QoS containers only if the CgroupsPerQOS flag is specified as true
if cm.NodeConfig.CgroupsPerQOS {
if err := cm.createNodeAllocatableCgroups(); err != nil {
return err
}
err = cm.qosContainerManager.Start(cm.GetNodeAllocatableAbsolute, activePods)
if err != nil {
return fmt.Errorf("failed to initialize top level QOS containers: %v", err)
}
}
// Enforce Node Allocatable (if required)
if err := cm.enforceNodeAllocatableCgroups(); err != nil {
return err
}
systemContainers := []*systemContainer{}
if cm.SystemCgroupsName != "" {
if cm.SystemCgroupsName == "/" {
return fmt.Errorf("system container cannot be root (\"/\")")
}
cont, err := newSystemCgroups(cm.SystemCgroupsName)
if err != nil {
return err
}
cont.ensureStateFunc = func(manager cgroups.Manager) error {
return ensureSystemCgroups("/", manager)
}
systemContainers = append(systemContainers, cont)
}
if cm.KubeletCgroupsName != "" {
cont, err := newSystemCgroups(cm.KubeletCgroupsName)
if err != nil {
return err
}
cont.ensureStateFunc = func(_ cgroups.Manager) error {
return ensureProcessInContainerWithOOMScore(os.Getpid(), int(cm.KubeletOOMScoreAdj), cont.manager)
}
systemContainers = append(systemContainers, cont)
} else {
cm.periodicTasks = append(cm.periodicTasks, func() {
if err := ensureProcessInContainerWithOOMScore(os.Getpid(), int(cm.KubeletOOMScoreAdj), nil); err != nil {
klog.ErrorS(err, "Failed to ensure process in container with oom score")
return
}
cont, err := getContainer(os.Getpid())
if err != nil {
klog.ErrorS(err, "Failed to find cgroups of kubelet")
return
}
cm.Lock()
defer cm.Unlock()
cm.KubeletCgroupsName = cont
})
}
cm.systemContainers = systemContainers
return nil
}
func (cm *containerManagerImpl) GetNodeConfig() NodeConfig {
cm.RLock()
defer cm.RUnlock()
return cm.NodeConfig
}
// GetPodCgroupRoot returns the literal cgroupfs value for the cgroup containing all pods.
func (cm *containerManagerImpl) GetPodCgroupRoot() string {
return cm.cgroupManager.Name(cm.cgroupRoot)
}
func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems {
return cm.subsystems
}
func (cm *containerManagerImpl) GetQOSContainersInfo() QOSContainersInfo {
return cm.qosContainerManager.GetQOSContainersInfo()
}
func (cm *containerManagerImpl) UpdateQOSCgroups() error {
return cm.qosContainerManager.UpdateCgroups()
}
func (cm *containerManagerImpl) Status() Status {
cm.RLock()
defer cm.RUnlock()
return cm.status
}
func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
activePods ActivePodsFunc,
getNode GetNodeFunc,
sourcesReady config.SourcesReady,
podStatusProvider status.PodStatusProvider,
runtimeService internalapi.RuntimeService,
localStorageCapacityIsolation bool) error {
containerMap, containerRunningSet := buildContainerMapAndRunningSetFromRuntime(ctx, runtimeService)
// Initialize DRA manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
err := cm.draManager.Start(ctx, dra.ActivePodsFunc(activePods), dra.GetNodeFunc(getNode), sourcesReady)
if err != nil {
return fmt.Errorf("start dra manager error: %w", err)
}
}
// Initialize CPU manager
err := cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap.Clone())
if err != nil {
return fmt.Errorf("start cpu manager error: %w", err)
}
// Initialize memory manager
err = cm.memoryManager.Start(ctx, memorymanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap.Clone())
if err != nil {
return fmt.Errorf("start memory manager error: %w", err)
}
// Cache the node info, including the node's resource
// capacity and allocatable.
cm.nodeInfo = node
if localStorageCapacityIsolation {
rootfs, err := cm.cadvisorInterface.RootFsInfo()
if err != nil {
return fmt.Errorf("failed to get rootfs info: %v", err)
}
for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) {
cm.capacity[rName] = rCap
}
}
// Ensure that node allocatable configuration is valid.
if err := cm.validateNodeAllocatable(); err != nil {
return err
}
// Setup the node
if err := cm.setupNode(activePods); err != nil {
return err
}
// Don't run a background thread if there are no ensureStateFuncs.
hasEnsureStateFuncs := false
for _, cont := range cm.systemContainers {
if cont.ensureStateFunc != nil {
hasEnsureStateFuncs = true
break
}
}
if hasEnsureStateFuncs {
// Run ensure state functions every minute.
go wait.Until(func() {
for _, cont := range cm.systemContainers {
if cont.ensureStateFunc != nil {
if err := cont.ensureStateFunc(cont.manager); err != nil {
klog.InfoS("Failed to ensure state", "containerName", cont.name, "err", err)
}
}
}
}, time.Minute, wait.NeverStop)
}
if len(cm.periodicTasks) > 0 {
go wait.Until(func() {
for _, task := range cm.periodicTasks {
if task != nil {
task()
}
}
}, 5*time.Minute, wait.NeverStop)
}
// Starts device manager.
if err := cm.deviceManager.Start(devicemanager.ActivePodsFunc(activePods), sourcesReady, containerMap.Clone(), containerRunningSet); err != nil {
return err
}
return nil
}
func (cm *containerManagerImpl) GetPluginRegistrationHandlers() map[string]cache.PluginHandler {
res := map[string]cache.PluginHandler{
pluginwatcherapi.DevicePlugin: cm.deviceManager.GetWatcherHandler(),
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
res[pluginwatcherapi.DRAPlugin] = cm.draManager.GetWatcherHandler()
}
return res
}
func (cm *containerManagerImpl) GetHealthCheckers() []healthz.HealthChecker {
return []healthz.HealthChecker{cm.deviceManager.GetHealthChecker()}
}
// TODO: move the GetResources logic to PodContainerManager.
func (cm *containerManagerImpl) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
logger := klog.FromContext(ctx)
opts := &kubecontainer.RunContainerOptions{}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
resOpts, err := cm.draManager.GetResources(pod, container)
if err != nil {
return nil, err
}
logger.V(5).Info("Determined CDI devices for pod", "pod", klog.KObj(pod), "cdiDevices", resOpts.CDIDevices)
opts.CDIDevices = append(opts.CDIDevices, resOpts.CDIDevices...)
}
// Allocate should already have been called during predicateAdmitHandler.Admit();
// here we just try to fetch device runtime information from the cached state.
devOpts, err := cm.deviceManager.GetDeviceRunContainerOptions(pod, container)
if err != nil {
return nil, err
} else if devOpts == nil {
return opts, nil
}
opts.Devices = append(opts.Devices, devOpts.Devices...)
opts.Mounts = append(opts.Mounts, devOpts.Mounts...)
opts.Envs = append(opts.Envs, devOpts.Envs...)
opts.Annotations = append(opts.Annotations, devOpts.Annotations...)
opts.CDIDevices = append(opts.CDIDevices, devOpts.CDIDevices...)
return opts, nil
}
func (cm *containerManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return cm.deviceManager.UpdatePluginResources(node, attrs)
}
func (cm *containerManagerImpl) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
return cm.topologyManager
}
func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
cpuLimit := int64(0)
// Sum up resources of all external containers.
for _, cont := range cm.systemContainers {
cpuLimit += cont.cpuMillicores
}
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpuLimit,
resource.DecimalSI),
}
}
func isProcessRunningInHost(pid int) (bool, error) {
// Get init pid namespace.
initPidNs, err := os.Readlink("/proc/1/ns/pid")
if err != nil {
return false, fmt.Errorf("failed to find pid namespace of init process")
}
klog.V(10).InfoS("Found init PID namespace", "namespace", initPidNs)
processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid))
if err != nil {
return false, fmt.Errorf("failed to find pid namespace of process %q", pid)
}
klog.V(10).InfoS("Process info", "pid", pid, "namespace", processPidNs)
return initPidNs == processPidNs, nil
}
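// For reference, the readlink targets compared above have the form
// "pid:[4026531836]" (a namespace-inode identifier), so two processes share
// a PID namespace exactly when the strings are equal. Illustrative check
// with a hypothetical PID:
//
//	if inHost, err := isProcessRunningInHost(1234); err == nil && !inHost {
//	    // 1234 lives in a different (container) PID namespace
//	}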
func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager cgroups.Manager) error {
if runningInHost, err := isProcessRunningInHost(pid); err != nil {
// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
return err
} else if !runningInHost {
// Process is running inside a container. Don't touch that.
klog.V(2).InfoS("PID is not running in the host namespace", "pid", pid)
return nil
}
var errs []error
if manager != nil {
cont, err := getContainer(pid)
if err != nil {
errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
}
name := ""
cgroups, err := manager.GetCgroups()
if err != nil {
errs = append(errs, fmt.Errorf("failed to get cgroups for %d: %v", pid, err))
} else {
name = cgroups.Name
}
if cont != name {
err = manager.Apply(pid)
if err != nil {
errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q: %v", pid, cont, name, err))
}
}
}
// Also apply oom-score-adj to processes
oomAdjuster := oom.NewOOMAdjuster()
klog.V(5).InfoS("Attempting to apply oom_score_adj to process", "oomScoreAdj", oomScoreAdj, "pid", pid)
if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
klog.V(3).InfoS("Failed to apply oom_score_adj to process", "oomScoreAdj", oomScoreAdj, "pid", pid, "err", err)
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err))
}
return utilerrors.NewAggregate(errs)
}
// getContainer returns the cgroup associated with the specified pid.
// It enforces a unified hierarchy for memory and cpu cgroups.
// On systemd environments, it uses the name=systemd cgroup for the specified pid.
func getContainer(pid int) (string, error) {
cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
if cgroups.IsCgroup2UnifiedMode() {
c, found := cgs[""]
if !found {
return "", cgroups.NewNotFoundError("unified")
}
return c, nil
}
cpu, found := cgs["cpu"]
if !found {
return "", cgroups.NewNotFoundError("cpu")
}
memory, found := cgs["memory"]
if !found {
return "", cgroups.NewNotFoundError("memory")
}
// since we use this container for accounting, we need to ensure it's a unified hierarchy.
if cpu != memory {
return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
}
// on systemd, every pid is in a unified cgroup hierarchy (name=systemd as seen in systemd-cgls)
// cpu and memory accounting is off by default, users may choose to enable it per unit or globally.
// users could enable CPU and memory accounting globally via /etc/systemd/system.conf (DefaultCPUAccounting=true DefaultMemoryAccounting=true).
// users could also enable CPU and memory accounting per unit via CPUAccounting=true and MemoryAccounting=true
// we only warn if accounting is not enabled for CPU or memory so as to not break local development flows where kubelet is launched in a terminal.
// for example, the cgroup for the user session will be something like /user.slice/user-X.slice/session-X.scope, but the cpu and memory
// cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers.
// as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet.
// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
if systemd, found := cgs["name=systemd"]; found {
if systemd != cpu {
klog.InfoS("CPUAccounting not enabled for process", "pid", pid)
}
if systemd != memory {
klog.InfoS("MemoryAccounting not enabled for process", "pid", pid)
}
return systemd, nil
}
return cpu, nil
}
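// Illustrative /proc/<pid>/cgroup contents (hypothetical paths). On cgroup v1
// with systemd:
//
//	4:memory:/system.slice/kubelet.service
//	3:cpu,cpuacct:/system.slice/kubelet.service
//	1:name=systemd:/system.slice/kubelet.service
//
// Here cpu and memory agree, so getContainer returns the name=systemd path
// "/system.slice/kubelet.service". On cgroup v2 the file has a single line,
// "0::/system.slice/kubelet.service", which is parsed under the empty ("") key.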
// Ensures the system container is created, and that all non-kernel threads and process 1
// that are not already in a container are moved to it.
//
// The reason for leaving kernel threads in the root cgroup is that we don't want to tie the
// execution of these threads to the to-be-defined /system quota, which could create priority inversions.
func ensureSystemCgroups(rootCgroupPath string, manager cgroups.Manager) error {
// Move non-kernel PIDs to the system container.
// Only errors from the latest attempt are kept.
var finalErr error
for i := 0; i <= 10; i++ {
allPids, err := cmutil.GetPids(rootCgroupPath)
if err != nil {
finalErr = fmt.Errorf("failed to list PIDs for root: %v", err)
continue
}
// Remove kernel pids and other protected PIDs (pid 1, PIDs already in system & kubelet containers)
pids := make([]int, 0, len(allPids))
for _, pid := range allPids {
if pid == 1 || isKernelPid(pid) {
continue
}
pids = append(pids, pid)
}
// Check if we have moved all the non-kernel PIDs.
if len(pids) == 0 {
return nil
}
klog.V(3).InfoS("Moving non-kernel processes", "pids", pids)
for _, pid := range pids {
err := manager.Apply(pid)
if err != nil {
name := ""
cgroups, err := manager.GetCgroups()
if err == nil {
name = cgroups.Name
}
finalErr = fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, name, err)
}
}
}
return finalErr
}
// Determines whether the specified PID is a kernel PID.
func isKernelPid(pid int) bool {
// Kernel threads have no associated executable.
_, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
return err != nil && os.IsNotExist(err)
}
// GetCapacity returns node capacity data for "cpu", "memory", "ephemeral-storage", and "huge-pages*"
// At present this method is only invoked when introspecting ephemeral storage
func (cm *containerManagerImpl) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
if localStorageCapacityIsolation {
// We store allocatable ephemeral-storage in the capacity property once we Start() the container manager
if _, ok := cm.capacity[v1.ResourceEphemeralStorage]; !ok {
// If we haven't yet stored the capacity for ephemeral-storage, we can try to fetch it directly from cAdvisor.
if cm.cadvisorInterface != nil {
rootfs, err := cm.cadvisorInterface.RootFsInfo()
if err != nil {
klog.ErrorS(err, "Unable to get rootfs data from cAdvisor interface")
// If the rootfs info retrieval from cAdvisor fails for any reason, fall back to returning the capacity property with no ephemeral storage data
return cm.capacity
}
// We don't want to mutate cm.capacity here so we'll manually construct a v1.ResourceList from it,
// and add ephemeral-storage
capacityWithEphemeralStorage := v1.ResourceList{}
for rName, rQuant := range cm.capacity {
capacityWithEphemeralStorage[rName] = rQuant
}
capacityWithEphemeralStorage[v1.ResourceEphemeralStorage] = cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs)[v1.ResourceEphemeralStorage]
return capacityWithEphemeralStorage
}
}
}
return cm.capacity
}
func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
return cm.deviceManager.GetCapacity()
}
func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices {
return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetDevices(podUID, containerName))
}
func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetAllocatableDevices())
}
func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
if cm.cpuManager != nil {
return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).UnsortedList())
}
return []int64{}
}
func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
if cm.cpuManager != nil {
return int64Slice(cm.cpuManager.GetAllocatableCPUs().UnsortedList())
}
return []int64{}
}
func (cm *containerManagerImpl) GetMemory(podUID, containerName string) []*podresourcesapi.ContainerMemory {
if cm.memoryManager == nil {
return []*podresourcesapi.ContainerMemory{}
}
// This is temporary, as part of the migration of the memory manager to contextual logging.
// A direct context will be passed once the container manager is migrated.
return containerMemoryFromBlock(cm.memoryManager.GetMemory(context.TODO(), podUID, containerName))
}
func (cm *containerManagerImpl) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
if cm.memoryManager == nil {
return []*podresourcesapi.ContainerMemory{}
}
// This is temporary, as part of the migration of the memory manager to contextual logging.
// A direct context will be passed once the container manager is migrated.
return containerMemoryFromBlock(cm.memoryManager.GetAllocatableMemory(context.TODO()))
}
func (cm *containerManagerImpl) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource {
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
return []*podresourcesapi.DynamicResource{}
}
var containerDynamicResources []*podresourcesapi.DynamicResource
containerClaimInfos, err := cm.draManager.GetContainerClaimInfos(pod, container)
if err != nil {
klog.ErrorS(err, "Unable to get container claim info state")
return []*podresourcesapi.DynamicResource{}
}
for _, containerClaimInfo := range containerClaimInfos {
var claimResources []*podresourcesapi.ClaimResource
for driverName, driverState := range containerClaimInfo.DriverState {
var cdiDevices []*podresourcesapi.CDIDevice
for _, device := range driverState.Devices {
for _, cdiDeviceID := range device.CDIDeviceIDs {
cdiDevices = append(cdiDevices, &podresourcesapi.CDIDevice{Name: cdiDeviceID})
}
resources := &podresourcesapi.ClaimResource{
CdiDevices: cdiDevices,
DriverName: driverName,
PoolName: device.PoolName,
DeviceName: device.DeviceName,
}
claimResources = append(claimResources, resources)
}
}
containerDynamicResource := podresourcesapi.DynamicResource{
ClaimName: containerClaimInfo.ClaimName,
ClaimNamespace: containerClaimInfo.Namespace,
ClaimResources: claimResources,
}
containerDynamicResources = append(containerDynamicResources, &containerDynamicResource)
}
return containerDynamicResources
}
func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
return cm.deviceManager.ShouldResetExtendedResourceCapacity()
}
func (cm *containerManagerImpl) UpdateAllocatedDevices() {
cm.deviceManager.UpdateAllocatedDevices()
}
func containerMemoryFromBlock(blocks []memorymanagerstate.Block) []*podresourcesapi.ContainerMemory {
var containerMemories []*podresourcesapi.ContainerMemory
for _, b := range blocks {
containerMemory := podresourcesapi.ContainerMemory{
MemoryType: string(b.Type),
Size: b.Size,
Topology: &podresourcesapi.TopologyInfo{
Nodes: []*podresourcesapi.NUMANode{},
},
}
for _, numaNodeID := range b.NUMAAffinity {
containerMemory.Topology.Nodes = append(containerMemory.Topology.Nodes, &podresourcesapi.NUMANode{ID: int64(numaNodeID)})
}
containerMemories = append(containerMemories, &containerMemory)
}
return containerMemories
}
func (cm *containerManagerImpl) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return cm.draManager.PrepareResources(ctx, pod)
}
func (cm *containerManagerImpl) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return cm.draManager.UnprepareResources(ctx, pod)
}
func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
return cm.draManager.PodMightNeedToUnprepareResources(UID)
}
func (cm *containerManagerImpl) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) {
// For now we only support Device Plugin
cm.deviceManager.UpdateAllocatedResourcesStatus(pod, status)
// Update DRA resources if the feature is enabled and the manager exists
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) && cm.draManager != nil {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ResourceHealthStatus) {
cm.draManager.UpdateAllocatedResourcesStatus(pod, status)
}
}
}
func (cm *containerManagerImpl) Updates() <-chan resourceupdates.Update {
return cm.resourceUpdates
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/server/healthz"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
type containerManagerStub struct {
shouldResetExtendedResourceCapacity bool
extendedPluginResources v1.ResourceList
memoryManager memorymanager.Manager
}
var _ ContainerManager = &containerManagerStub{}
func (cm *containerManagerStub) Start(ctx context.Context, _ *v1.Node, _ ActivePodsFunc, _ GetNodeFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
klog.V(2).InfoS("Starting stub container manager")
cm.memoryManager = memorymanager.NewFakeManager(ctx)
return nil
}
func (cm *containerManagerStub) SystemCgroupsLimit() v1.ResourceList {
return v1.ResourceList{}
}
func (cm *containerManagerStub) GetNodeConfig() NodeConfig {
return NodeConfig{}
}
func (cm *containerManagerStub) GetMountedSubsystems() *CgroupSubsystems {
return &CgroupSubsystems{}
}
func (cm *containerManagerStub) GetQOSContainersInfo() QOSContainersInfo {
return QOSContainersInfo{}
}
func (cm *containerManagerStub) UpdateQOSCgroups() error {
return nil
}
func (cm *containerManagerStub) Status() Status {
return Status{}
}
func (cm *containerManagerStub) GetNodeAllocatableReservation() v1.ResourceList {
return nil
}
func (cm *containerManagerStub) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
if !localStorageCapacityIsolation {
return v1.ResourceList{}
}
c := v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewQuantity(
int64(0),
resource.BinarySI),
}
return c
}
func (cm *containerManagerStub) GetPluginRegistrationHandlers() map[string]cache.PluginHandler {
return nil
}
func (cm *containerManagerStub) GetHealthCheckers() []healthz.HealthChecker {
return []healthz.HealthChecker{}
}
func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
return cm.extendedPluginResources, cm.extendedPluginResources, []string{}
}
func (m *podContainerManagerStub) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) {
return nil, fmt.Errorf("not implemented")
}
func (m *podContainerManagerStub) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error {
return fmt.Errorf("not implemented")
}
func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager {
return &podContainerManagerStub{}
}
func (cm *containerManagerStub) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
return &kubecontainer.RunContainerOptions{}, nil
}
func (cm *containerManagerStub) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
return nil
}
func (cm *containerManagerStub) InternalContainerLifecycle() InternalContainerLifecycle {
return &internalContainerLifecycleImpl{cpumanager.NewFakeManager(), cm.memoryManager, topologymanager.NewFakeManager()}
}
func (cm *containerManagerStub) GetPodCgroupRoot() string {
return ""
}
func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
return nil
}
func (cm *containerManagerStub) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
return nil
}
func (cm *containerManagerStub) ShouldResetExtendedResourceCapacity() bool {
return cm.shouldResetExtendedResourceCapacity
}
func (cm *containerManagerStub) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
return topologymanager.NewFakeManager()
}
func (cm *containerManagerStub) UpdateAllocatedDevices() {
return
}
func (cm *containerManagerStub) GetCPUs(_, _ string) []int64 {
return nil
}
func (cm *containerManagerStub) GetAllocatableCPUs() []int64 {
return nil
}
func (cm *containerManagerStub) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
return nil
}
func (cm *containerManagerStub) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
return nil
}
func (cm *containerManagerStub) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource {
return nil
}
func (cm *containerManagerStub) GetNodeAllocatableAbsolute() v1.ResourceList {
return nil
}
func (cm *containerManagerStub) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return nil
}
func (cm *containerManagerStub) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return nil
}
func (cm *containerManagerStub) PodMightNeedToUnprepareResources(UID types.UID) bool {
return false
}
func (cm *containerManagerStub) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) {
}
func (cm *containerManagerStub) Updates() <-chan resourceupdates.Update {
return nil
}
func (cm *containerManagerStub) PodHasExclusiveCPUs(pod *v1.Pod) bool {
return false
}
func (cm *containerManagerStub) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
return false
}
func NewStubContainerManager() ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
}
func NewStubContainerManagerWithExtendedResource(shouldResetExtendedResourceCapacity bool) ContainerManager {
return &containerManagerStub{shouldResetExtendedResourceCapacity: shouldResetExtendedResourceCapacity}
}
func NewStubContainerManagerWithDevicePluginResource(extendedPluginResources v1.ResourceList) ContainerManager {
return &containerManagerStub{
shouldResetExtendedResourceCapacity: false,
extendedPluginResources: extendedPluginResources,
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containermap
import (
"fmt"
"maps"
)
// cmItem (ContainerMap item) is a (podUID, containerName) pair
type cmItem struct {
podUID string
containerName string
}
// ContainerMap maps (containerID)->(podUID, containerName)
type ContainerMap map[string]cmItem
// NewContainerMap creates a new ContainerMap struct
func NewContainerMap() ContainerMap {
return make(ContainerMap)
}
// Clone creates a deep copy of the ContainerMap
func (cm ContainerMap) Clone() ContainerMap {
return maps.Clone(cm)
}
// Add adds a mapping of (containerID)->(podUID, containerName) to the ContainerMap
func (cm ContainerMap) Add(podUID, containerName, containerID string) {
cm[containerID] = cmItem{
podUID: podUID,
containerName: containerName,
}
}
// RemoveByContainerID removes a mapping of (containerID)->(podUID, containerName) from the ContainerMap
func (cm ContainerMap) RemoveByContainerID(containerID string) {
delete(cm, containerID)
}
// RemoveByContainerRef removes a mapping of (containerID)->(podUID, containerName) from the ContainerMap
func (cm ContainerMap) RemoveByContainerRef(podUID, containerName string) {
containerID, err := cm.GetContainerID(podUID, containerName)
if err == nil {
cm.RemoveByContainerID(containerID)
}
}
// GetContainerID retrieves a ContainerID from the ContainerMap
func (cm ContainerMap) GetContainerID(podUID, containerName string) (string, error) {
for key, val := range cm {
if val.podUID == podUID && val.containerName == containerName {
return key, nil
}
}
return "", fmt.Errorf("container %s not in ContainerMap for pod %s", containerName, podUID)
}
// GetContainerRef retrieves a (podUID, containerName) pair from the ContainerMap
func (cm ContainerMap) GetContainerRef(containerID string) (string, string, error) {
if _, exists := cm[containerID]; !exists {
return "", "", fmt.Errorf("containerID %s not in ContainerMap", containerID)
}
return cm[containerID].podUID, cm[containerID].containerName, nil
}
// Visit walks all entries in the ContainerMap, invoking the visitor function on each
func (cm ContainerMap) Visit(visitor func(podUID, containerName, containerID string)) {
for k, v := range cm {
visitor(v.podUID, v.containerName, k)
}
}
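// Illustrative usage of ContainerMap (all IDs are hypothetical):
//
//	cm := NewContainerMap()
//	cm.Add("pod-uid-1", "app", "cid-abc123")
//	id, err := cm.GetContainerID("pod-uid-1", "app") // "cid-abc123", nil
//	if err == nil {
//	    cm.RemoveByContainerID(id)
//	}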
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"maps"
"math"
"sort"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/utils/cpuset"
)
// LoopControl controls the behavior of the cpu accumulator loop logic
type LoopControl int
// Possible loop control outcomes
const (
Continue LoopControl = iota
Break
)
type mapIntInt map[int]int
func (m mapIntInt) Clone() mapIntInt {
return maps.Clone(m)
}
func (m mapIntInt) Keys() []int {
var keys []int
for k := range m {
keys = append(keys, k)
}
return keys
}
func (m mapIntInt) Values(keys ...int) []int {
if keys == nil {
keys = m.Keys()
}
var values []int
for _, k := range keys {
values = append(values, m[k])
}
return values
}
func sum(xs []int) int {
var s int
for _, x := range xs {
s += x
}
return s
}
func mean(xs []int) float64 {
var sum float64
for _, x := range xs {
sum += float64(x)
}
m := sum / float64(len(xs))
return math.Round(m*1000) / 1000
}
func standardDeviation(xs []int) float64 {
m := mean(xs)
var sum float64
for _, x := range xs {
sum += (float64(x) - m) * (float64(x) - m)
}
s := math.Sqrt(sum / float64(len(xs)))
return math.Round(s*1000) / 1000
}
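// For example, mean([]int{1, 2, 3}) returns 2, and
// standardDeviation([]int{1, 2, 3}) returns 0.816: the population standard
// deviation sqrt(2.0/3.0) ≈ 0.8165, rounded to three decimal places.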
type numaOrSocketsFirstFuncs interface {
takeFullFirstLevel()
takeFullSecondLevel()
sortAvailableNUMANodes() []int
sortAvailableSockets() []int
sortAvailableCores() []int
}
type numaFirst struct{ acc *cpuAccumulator }
type socketsFirst struct{ acc *cpuAccumulator }
var _ numaOrSocketsFirstFuncs = (*numaFirst)(nil)
var _ numaOrSocketsFirstFuncs = (*socketsFirst)(nil)
// If NUMA nodes are higher in the memory hierarchy than sockets, then we take
// from the set of NUMA Nodes as the first level.
func (n *numaFirst) takeFullFirstLevel() {
n.acc.takeFullNUMANodes()
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then we take
// from the set of sockets as the second level.
func (n *numaFirst) takeFullSecondLevel() {
n.acc.takeFullSockets()
}
// Sort the UncoreCaches within the NUMA nodes.
func (a *cpuAccumulator) sortAvailableUncoreCaches() []int {
var result []int
for _, numa := range a.sortAvailableNUMANodes() {
uncore := a.details.UncoreInNUMANodes(numa).UnsortedList()
a.sort(uncore, a.details.CPUsInUncoreCaches)
result = append(result, uncore...)
}
return result
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then just
// sort the NUMA nodes directly, and return them.
func (n *numaFirst) sortAvailableNUMANodes() []int {
numas := n.acc.details.NUMANodes().UnsortedList()
n.acc.sort(numas, n.acc.details.CPUsInNUMANodes)
return numas
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then we need
// to pull the set of sockets out of each sorted NUMA node, and accumulate the
// partial order across them.
func (n *numaFirst) sortAvailableSockets() []int {
var result []int
for _, numa := range n.sortAvailableNUMANodes() {
sockets := n.acc.details.SocketsInNUMANodes(numa).UnsortedList()
n.acc.sort(sockets, n.acc.details.CPUsInSockets)
result = append(result, sockets...)
}
return result
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then
// cores sit directly below sockets in the memory hierarchy.
func (n *numaFirst) sortAvailableCores() []int {
var result []int
for _, socket := range n.acc.sortAvailableSockets() {
cores := n.acc.details.CoresInSockets(socket).UnsortedList()
n.acc.sort(cores, n.acc.details.CPUsInCores)
result = append(result, cores...)
}
return result
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we take
// from the set of sockets as the first level.
func (s *socketsFirst) takeFullFirstLevel() {
s.acc.takeFullSockets()
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we take
// from the set of NUMA Nodes as the second level.
func (s *socketsFirst) takeFullSecondLevel() {
s.acc.takeFullNUMANodes()
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we need
// to pull the set of NUMA nodes out of each sorted Socket, and accumulate the
// partial order across them.
func (s *socketsFirst) sortAvailableNUMANodes() []int {
var result []int
for _, socket := range s.sortAvailableSockets() {
numas := s.acc.details.NUMANodesInSockets(socket).UnsortedList()
s.acc.sort(numas, s.acc.details.CPUsInNUMANodes)
result = append(result, numas...)
}
return result
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then just
// sort the sockets directly, and return them.
func (s *socketsFirst) sortAvailableSockets() []int {
sockets := s.acc.details.Sockets().UnsortedList()
s.acc.sort(sockets, s.acc.details.CPUsInSockets)
return sockets
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then cores
// sit directly below NUMA Nodes in the memory hierarchy.
func (s *socketsFirst) sortAvailableCores() []int {
var result []int
for _, numa := range s.acc.sortAvailableNUMANodes() {
cores := s.acc.details.CoresInNUMANodes(numa).UnsortedList()
s.acc.sort(cores, s.acc.details.CPUsInCores)
result = append(result, cores...)
}
return result
}
type availableCPUSorter interface {
sort() []int
}
type sortCPUsPacked struct{ acc *cpuAccumulator }
type sortCPUsSpread struct{ acc *cpuAccumulator }
var _ availableCPUSorter = (*sortCPUsPacked)(nil)
var _ availableCPUSorter = (*sortCPUsSpread)(nil)
func (s sortCPUsPacked) sort() []int {
return s.acc.sortAvailableCPUsPacked()
}
func (s sortCPUsSpread) sort() []int {
return s.acc.sortAvailableCPUsSpread()
}
// CPUSortingStrategy describes the CPU sorting solution within the socket scope.
// Using topoDualSocketHT (12 CPUs, 2 sockets, 6 cores) as an example:
//
// CPUDetails: map[int]topology.CPUInfo{
// 0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
// 1: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
// 2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
// 3: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
// 4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
// 5: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
// 6: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
// 7: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
// 8: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
// 9: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
// 10: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
// 11: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
// },
//
// - CPUSortingOptionPacked sorts CPUs in a packed manner, where CPUs are grouped by core
// before moving to the next core, resulting in packed cores, like:
// 0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11
// - CPUSortingOptionSpread sorts CPUs in a spread manner, where CPUs are spread across cores
// before moving to the next CPU, resulting in spread-out cores, like:
// 0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11
//
// By default, CPUSortingOptionPacked will be used, and CPUSortingOptionSpread will only be activated
// when the user specifies the `DistributeCPUsAcrossCoresOption` static policy option.
type CPUSortingStrategy string
const (
CPUSortingStrategyPacked CPUSortingStrategy = "packed"
CPUSortingStrategySpread CPUSortingStrategy = "spread"
)
type cpuAccumulator struct {
// `topo` describes the layout of CPUs (i.e. hyper-threads if hyperthreading is on) between
// cores (i.e. physical CPUs if hyper-threading is on), NUMA nodes, and sockets on the K8s
// cluster node. `topo` is never mutated, meaning that as the cpuAccumulator claims CPUs topo is
// not modified. Its primary purpose is to serve as a reference to the original topology (i.e. as
// it was when the cpuAccumulator was created), for learning things such as how many CPUs are on
// each socket, NUMA node, etc.
topo *topology.CPUTopology
// `details` is the set of free CPUs that the cpuAccumulator can claim to accumulate the desired
// number of CPUs. When a CPU is claimed, it's removed from `details`.
details topology.CPUDetails
// `numCPUsNeeded` is the number of CPUs that the accumulator still needs to accumulate to reach
// the desired number of CPUs. When the cpuAccumulator is created, `numCPUsNeeded` is set to the
// total number of CPUs to accumulate. Every time a CPU is claimed, `numCPUsNeeded` is decreased
// by 1 until it has value 0, meaning that all the needed CPUs have been accumulated
// (success), or a situation where it's bigger than 0 but no more CPUs are available is reached
// (failure).
numCPUsNeeded int
// `result` is the set of CPUs that have been accumulated so far. When a CPU is claimed, it's
// added to `result`. The cpuAccumulator completed its duty successfully when `result` has
// cardinality equal to the total number of CPUs to accumulate.
result cpuset.CPUSet
numaOrSocketsFirst numaOrSocketsFirstFuncs
// availableCPUSorter is used to control the cpu sorting result.
// The sequence of returned CPU IDs depends on the policy.
// By default, CPUs are sorted by sortAvailableCPUsPacked().
// If the spread strategy is selected, CPUs are sorted by sortAvailableCPUsSpread().
availableCPUSorter availableCPUSorter
}
func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy) *cpuAccumulator {
acc := &cpuAccumulator{
topo: topo,
details: topo.CPUDetails.KeepOnly(availableCPUs),
numCPUsNeeded: numCPUs,
result: cpuset.New(),
}
if topo.NumSockets >= topo.NumNUMANodes {
acc.numaOrSocketsFirst = &numaFirst{acc}
} else {
acc.numaOrSocketsFirst = &socketsFirst{acc}
}
if cpuSortingStrategy == CPUSortingStrategyPacked {
acc.availableCPUSorter = &sortCPUsPacked{acc}
} else {
acc.availableCPUSorter = &sortCPUsSpread{acc}
}
return acc
}
// Returns true if the supplied NUMANode is fully available in `a.details`.
// "fully available" means that all the CPUs in it are free.
func (a *cpuAccumulator) isNUMANodeFree(numaID int) bool {
return a.details.CPUsInNUMANodes(numaID).Size() == a.topo.CPUDetails.CPUsInNUMANodes(numaID).Size()
}
// Returns true if the supplied socket is fully available in `a.details`.
// "fully available" means that all the CPUs in it are free.
func (a *cpuAccumulator) isSocketFree(socketID int) bool {
return a.details.CPUsInSockets(socketID).Size() == a.topo.CPUsPerSocket()
}
// Returns true if the supplied UnCoreCache is fully available,
// "fully available" means that all the CPUs in it are free.
func (a *cpuAccumulator) isUncoreCacheFree(uncoreID int) bool {
return a.details.CPUsInUncoreCaches(uncoreID).Size() == a.topo.CPUDetails.CPUsInUncoreCaches(uncoreID).Size()
}
// Returns true if the supplied core is fully available in `a.details`.
// "fully available" means that all the CPUs in it are free.
func (a *cpuAccumulator) isCoreFree(coreID int) bool {
return a.details.CPUsInCores(coreID).Size() == a.topo.CPUsPerCore()
}
// Returns free NUMA Node IDs as a slice sorted by sortAvailableNUMANodes().
func (a *cpuAccumulator) freeNUMANodes() []int {
free := []int{}
for _, numa := range a.sortAvailableNUMANodes() {
if a.isNUMANodeFree(numa) {
free = append(free, numa)
}
}
return free
}
// Returns free socket IDs as a slice sorted by sortAvailableSockets().
func (a *cpuAccumulator) freeSockets() []int {
free := []int{}
for _, socket := range a.sortAvailableSockets() {
if a.isSocketFree(socket) {
free = append(free, socket)
}
}
return free
}
// Returns free UncoreCache IDs as a slice sorted by sortAvailableUncoreCaches().
func (a *cpuAccumulator) freeUncoreCache() []int {
free := []int{}
for _, uncore := range a.sortAvailableUncoreCaches() {
if a.isUncoreCacheFree(uncore) {
free = append(free, uncore)
}
}
return free
}
// Returns free core IDs as a slice sorted by sortAvailableCores().
func (a *cpuAccumulator) freeCores() []int {
free := []int{}
for _, core := range a.sortAvailableCores() {
if a.isCoreFree(core) {
free = append(free, core)
}
}
return free
}
// Returns free CPU IDs as a slice sorted by the configured availableCPUSorter (packed or spread).
func (a *cpuAccumulator) freeCPUs() []int {
return a.availableCPUSorter.sort()
}
// Sorts the provided list of NUMA nodes/sockets/cores/cpus referenced in 'ids'
// by the number of available CPUs contained within them (smallest to largest).
// The 'getCPU()' parameter defines the function that should be called to
// retrieve the list of available CPUs for the type being referenced. If two
// NUMA nodes/sockets/cores/cpus have the same number of available CPUs, they
// are sorted in ascending order by their id.
func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet) {
sort.Slice(ids,
func(i, j int) bool {
iCPUs := getCPUs(ids[i])
jCPUs := getCPUs(ids[j])
if iCPUs.Size() < jCPUs.Size() {
return true
}
if iCPUs.Size() > jCPUs.Size() {
return false
}
return ids[i] < ids[j]
})
}
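// For example, with ids = []int{0, 1, 2} and free-CPU counts
// {0: 4, 1: 2, 2: 4}, sort reorders ids to [1, 0, 2]: id 1 has the fewest
// free CPUs, and the tie between 0 and 2 is broken by ascending id.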
// Sort all NUMA nodes with at least one free CPU.
//
// If NUMA nodes are higher than sockets in the memory hierarchy, they are sorted by ascending number
// of free CPUs that they contain. "higher than sockets in the memory hierarchy" means that NUMA nodes
// contain a bigger number of CPUs (free and busy) than sockets, or equivalently that each NUMA node
// contains more than one socket.
//
// If instead NUMA nodes are lower in the memory hierarchy than sockets, they are sorted as follows.
// First, they are sorted by number of free CPUs in the sockets that contain them. Then, for each
// socket they are sorted by number of free CPUs that they contain. The order is always ascending.
// In other words, the relative order of two NUMA nodes is determined as follows:
// 1. If the two NUMA nodes belong to different sockets, the NUMA node in the socket with the
// smaller amount of free CPUs appears first.
// 2. If the two NUMA nodes belong to the same socket, the NUMA node with the smaller amount of free
// CPUs appears first.
func (a *cpuAccumulator) sortAvailableNUMANodes() []int {
return a.numaOrSocketsFirst.sortAvailableNUMANodes()
}
// Sort all sockets with at least one free CPU.
//
// If sockets are higher than NUMA nodes in the memory hierarchy, they are sorted by ascending number
// of free CPUs that they contain. "higher than NUMA nodes in the memory hierarchy" means that
// sockets contain a bigger number of CPUs (free and busy) than NUMA nodes, or equivalently that each
// socket contains more than one NUMA node.
//
// If instead sockets are lower in the memory hierarchy than NUMA nodes, they are sorted as follows.
// First, they are sorted by number of free CPUs in the NUMA nodes that contain them. Then, for each
// NUMA node they are sorted by number of free CPUs that they contain. The order is always ascending.
// In other words, the relative order of two sockets is determined as follows:
// 1. If the two sockets belong to different NUMA nodes, the socket in the NUMA node with the
// smaller amount of free CPUs appears first.
// 2. If the two sockets belong to the same NUMA node, the socket with the smaller amount of free
// CPUs appears first.
func (a *cpuAccumulator) sortAvailableSockets() []int {
return a.numaOrSocketsFirst.sortAvailableSockets()
}
// Sort all cores with at least one free CPU.
//
// If sockets are higher in the memory hierarchy than NUMA nodes, meaning that sockets contain a
// bigger number of CPUs (free and busy) than NUMA nodes, or equivalently that each socket contains
// more than one NUMA node, the cores are sorted as follows. First, they are sorted by number of
// free CPUs that their sockets contain. Then, for each socket, the cores in it are sorted by number
// of free CPUs that their NUMA nodes contain. Then, for each NUMA node, the cores in it are sorted
// by number of free CPUs that they contain. The order is always ascending. In other words, the
// relative order of two cores is determined as follows:
// 1. If the two cores belong to different sockets, the core in the socket with the smaller amount of
// free CPUs appears first.
// 2. If the two cores belong to the same socket but different NUMA nodes, the core in the NUMA node
// with the smaller amount of free CPUs appears first.
// 3. If the two cores belong to the same NUMA node and socket, the core with the smaller amount of
// free CPUs appears first.
//
// If instead NUMA nodes are higher in the memory hierarchy than sockets, the sorting happens in the
// same way as described in the previous paragraph, except that the priority of NUMA nodes and
// sockets is inverted (e.g. first sort the cores by number of free CPUs in their NUMA nodes, then,
// for each NUMA node, sort the cores by number of free CPUs in their sockets, etc...).
func (a *cpuAccumulator) sortAvailableCores() []int {
return a.numaOrSocketsFirst.sortAvailableCores()
}
// Sort all free CPUs.
//
// If sockets are higher in the memory hierarchy than NUMA nodes, meaning that sockets contain a
// bigger number of CPUs (free and busy) than NUMA nodes, or equivalently that each socket contains
// more than one NUMA node, the CPUs are sorted as follows. First, they are sorted by number of
// free CPUs that their sockets contain. Then, for each socket, the CPUs in it are sorted by number
// of free CPUs that their NUMA nodes contain. Then, for each NUMA node, the CPUs in it are sorted
// by number of free CPUs that their cores contain. Finally, for each core, the CPUs in it are
// sorted by numerical ID. The order is always ascending. In other words, the relative order of two
// CPUs is determined as follows:
// 1. If the two CPUs belong to different sockets, the CPU in the socket with the smaller amount of
// free CPUs appears first.
// 2. If the two CPUs belong to the same socket but different NUMA nodes, the CPU in the NUMA node
// with the smaller amount of free CPUs appears first.
// 3. If the two CPUs belong to the same socket and NUMA node but different cores, the CPU in the
// core with the smaller amount of free CPUs appears first.
// 4. If the two CPUs belong to the same NUMA node, socket, and core, the CPU with the smaller ID
// appears first.
//
// If instead NUMA nodes are higher in the memory hierarchy than sockets, the sorting happens in the
// same way as described in the previous paragraph, except that the priority of NUMA nodes and
// sockets is inverted (e.g. first sort the CPUs by number of free CPUs in their NUMA nodes, then,
// for each NUMA node, sort the CPUs by number of free CPUs in their sockets, etc...).
func (a *cpuAccumulator) sortAvailableCPUsPacked() []int {
var result []int
for _, core := range a.sortAvailableCores() {
cpus := a.details.CPUsInCores(core).UnsortedList()
sort.Ints(cpus)
result = append(result, cpus...)
}
return result
}
// Sort all available CPUs:
// - First by socket using sortAvailableSockets().
// - Then, within each socket, sort the CPUs by ascending numerical ID.
func (a *cpuAccumulator) sortAvailableCPUsSpread() []int {
var result []int
for _, socket := range a.sortAvailableSockets() {
cpus := a.details.CPUsInSockets(socket).UnsortedList()
sort.Ints(cpus)
result = append(result, cpus...)
}
return result
}
func (a *cpuAccumulator) take(cpus cpuset.CPUSet) {
a.result = a.result.Union(cpus)
a.details = a.details.KeepOnly(a.details.CPUs().Difference(a.result))
a.numCPUsNeeded -= cpus.Size()
}
func (a *cpuAccumulator) takeFullNUMANodes() {
for _, numa := range a.freeNUMANodes() {
cpusInNUMANode := a.topo.CPUDetails.CPUsInNUMANodes(numa)
if !a.needsAtLeast(cpusInNUMANode.Size()) {
continue
}
klog.V(4).InfoS("takeFullNUMANodes: claiming NUMA node", "numa", numa)
a.take(cpusInNUMANode)
}
}
func (a *cpuAccumulator) takeFullSockets() {
for _, socket := range a.freeSockets() {
cpusInSocket := a.topo.CPUDetails.CPUsInSockets(socket)
if !a.needsAtLeast(cpusInSocket.Size()) {
continue
}
klog.V(4).InfoS("takeFullSockets: claiming socket", "socket", socket)
a.take(cpusInSocket)
}
}
func (a *cpuAccumulator) takeFullUncore() {
for _, uncore := range a.freeUncoreCache() {
cpusInUncore := a.topo.CPUDetails.CPUsInUncoreCaches(uncore)
if !a.needsAtLeast(cpusInUncore.Size()) {
continue
}
klog.V(4).InfoS("takeFullUncore: claiming uncore", "uncore", uncore)
a.take(cpusInUncore)
}
}
func (a *cpuAccumulator) takePartialUncore(uncoreID int) {
// determine the number of cores needed, whether SMT/hyperthreading is enabled or disabled
numCoresNeeded := (a.numCPUsNeeded + a.topo.CPUsPerCore() - 1) / a.topo.CPUsPerCore()
// determine the N free cores (physical CPUs) within the UncoreCache, then
// determine the M free CPUs (virtual CPUs) that correspond to those free cores
freeCores := a.details.CoresNeededInUncoreCache(numCoresNeeded, uncoreID)
freeCPUs := a.details.CPUsInCores(freeCores.UnsortedList()...)
// when SMT/hyperthreading is enabled and the remaining CPU requirement is an odd integer value:
// sort the free CPUs that were determined based on the cores that have available CPUs.
// if the number of free CPUs is greater than the CPUs needed, we can drop the last CPU,
// since the odd integer request will only require one of the two free CPUs that
// correspond to the last core
if a.numCPUsNeeded%2 != 0 && a.topo.CPUsPerCore() > 1 {
// we sort freeCPUs to ensure we pack virtual CPU allocations, meaning we allocate
// whole cores' worth of CPUs as much as possible to reduce SMT misalignment
sortFreeCPUs := freeCPUs.List()
if len(sortFreeCPUs) > a.numCPUsNeeded {
// if we are in takePartialUncore, the accumulator is not satisfied after
// takeFullUncore, so freeCPUs.Size() can't be < 1
sortFreeCPUs = sortFreeCPUs[:freeCPUs.Size()-1]
}
freeCPUs = cpuset.New(sortFreeCPUs...)
}
// claim the cpus if the free cpus within the UncoreCache can satisfy the needed cpus
claimed := (a.numCPUsNeeded == freeCPUs.Size())
klog.V(4).InfoS("takePartialUncore: trying to claim partial uncore",
"uncore", uncoreID,
"claimed", claimed,
"needed", a.numCPUsNeeded,
"cores", freeCores.String(),
"cpus", freeCPUs.String())
if !claimed {
return
}
a.take(freeCPUs)
}
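// Worked example (hypothetical topology with 2 threads per core, where CPUs
// 0/8 and 1/9 are sibling pairs): with numCPUsNeeded=3, numCoresNeeded is
// ceil(3/2)=2. If the two free cores yield freeCPUs={0,1,8,9}, the odd
// request drops the last sorted CPU, leaving {0,1,8}; since 3 CPUs are now
// free and 3 are needed, the claim succeeds with one whole core (0,8) plus
// a single thread (1).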
// First, try to take whole UncoreCaches, if available and the need is at least the size of an
// UncoreCache group. Then try to take a partial UncoreCache if the request can fit within one.
func (a *cpuAccumulator) takeUncoreCache() {
numCPUsInUncore := a.topo.CPUsPerUncore()
for _, uncore := range a.sortAvailableUncoreCaches() {
// take a full UncoreCache if the number of CPUs needed is at least the size of an UncoreCache
if a.needsAtLeast(numCPUsInUncore) {
a.takeFullUncore()
}
if a.isSatisfied() {
return
}
// take a partial UncoreCache if the number of CPUs needed is less than a full UncoreCache
a.takePartialUncore(uncore)
if a.isSatisfied() {
return
}
}
}
func (a *cpuAccumulator) takeFullCores() {
for _, core := range a.freeCores() {
cpusInCore := a.topo.CPUDetails.CPUsInCores(core)
if !a.needsAtLeast(cpusInCore.Size()) {
continue
}
klog.V(4).InfoS("takeFullCores: claiming core", "core", core)
a.take(cpusInCore)
}
}
func (a *cpuAccumulator) takeRemainingCPUs() {
for _, cpu := range a.availableCPUSorter.sort() {
klog.V(4).InfoS("takeRemainingCPUs: claiming CPU", "cpu", cpu)
a.take(cpuset.New(cpu))
if a.isSatisfied() {
return
}
}
}
// rangeNUMANodesNeededToSatisfy returns minimum and maximum (in this order) number of NUMA nodes
// needed to satisfy the cpuAccumulator's goal of accumulating `a.numCPUsNeeded` CPUs, assuming that
// CPU groups have size given by the `cpuGroupSize` argument.
func (a *cpuAccumulator) rangeNUMANodesNeededToSatisfy(cpuGroupSize int) (minNumNUMAs, maxNumNUMAs int) {
// Get the total number of NUMA nodes in the system.
numNUMANodes := a.topo.CPUDetails.NUMANodes().Size()
// Get the total number of NUMA nodes that have CPUs available on them.
numNUMANodesAvailable := a.details.NUMANodes().Size()
// Get the total number of CPUs in the system.
numCPUs := a.topo.CPUDetails.CPUs().Size()
// Get the total number of 'cpuGroups' in the system.
numCPUGroups := (numCPUs-1)/cpuGroupSize + 1
// Calculate the number of 'cpuGroups' per NUMA Node in the system (rounding up).
numCPUGroupsPerNUMANode := (numCPUGroups-1)/numNUMANodes + 1
// Calculate the number of available 'cpuGroups' across all NUMA nodes as
// well as the number of 'cpuGroups' that need to be allocated (rounding up).
numCPUGroupsNeeded := (a.numCPUsNeeded-1)/cpuGroupSize + 1
// Calculate the minimum number of numa nodes required to satisfy the
// allocation (rounding up).
minNumNUMAs = (numCPUGroupsNeeded-1)/numCPUGroupsPerNUMANode + 1
// Calculate the maximum number of numa nodes required to satisfy the allocation.
maxNumNUMAs = min(numCPUGroupsNeeded, numNUMANodesAvailable)
return
}
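// Worked example (hypothetical system): 32 CPUs spread over 4 NUMA nodes,
// cpuGroupSize=2, numCPUsNeeded=10. Then numCPUGroups=16,
// numCPUGroupsPerNUMANode=4, and numCPUGroupsNeeded=5, so
// minNumNUMAs=ceil(5/4)=2 and maxNumNUMAs=min(5, numNUMANodesAvailable).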
// needsAtLeast returns true if and only if the accumulator needs at least `n` CPUs.
// This means that needsAtLeast returns true even if more than `n` CPUs are needed.
func (a *cpuAccumulator) needsAtLeast(n int) bool {
return a.numCPUsNeeded >= n
}
// isSatisfied returns true if and only if the accumulator has all the CPUs it needs.
func (a *cpuAccumulator) isSatisfied() bool {
return a.numCPUsNeeded < 1
}
// isFailed returns true if and only if there aren't enough available CPUs in the system.
// (e.g. the accumulator needs 4 CPUs but only 3 are available).
func (a *cpuAccumulator) isFailed() bool {
return a.numCPUsNeeded > a.details.CPUs().Size()
}
// iterateCombinations walks through all n-choose-k subsets of size k in n and
// calls function 'f()' on each subset. For example, if n={0,1,2}, and k=2,
// then f() will be called on the subsets {0,1}, {0,2}, and {1,2}. If f() ever
// returns 'Break', we break early and exit the loop.
func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopControl) {
if k < 1 {
return
}
var helper func(n []int, k int, start int, accum []int, f func([]int) LoopControl) LoopControl
helper = func(n []int, k int, start int, accum []int, f func([]int) LoopControl) LoopControl {
if k == 0 {
return f(accum)
}
for i := start; i <= len(n)-k; i++ {
control := helper(n, k-1, i+1, append(accum, n[i]), f)
if control == Break {
return Break
}
}
return Continue
}
helper(n, k, 0, []int{}, f)
}
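// Illustrative call (a sketch):
//
//	a.iterateCombinations([]int{0, 1, 2}, 2, func(comb []int) LoopControl {
//	    fmt.Println(comb) // prints [0 1], then [0 2], then [1 2]
//	    return Continue
//	})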
// takeByTopologyNUMAPacked returns a CPUSet containing `numCPUs` CPUs taken from the CPUs in the
// set `availableCPUs`. `topo` describes how the CPUs are arranged between sockets, NUMA nodes
// and physical cores (if hyperthreading is on a "CPU" is a thread rather than a full physical
// core).
//
// If sockets are higher than NUMA nodes in the memory hierarchy (i.e. a socket contains more than
// one NUMA node), the CPUs are selected as follows.
//
// If `numCPUs` is bigger than the total number of CPUs in a socket, and there are free (i.e. all
// CPUs in them are free) sockets, the function takes as many entire free sockets as possible.
// If there are no free sockets, or `numCPUs` is less than a whole socket, or the remaining number
// of CPUs to take after having taken some whole sockets is less than a whole socket, the function
// tries to take whole NUMA nodes.
//
// If the remaining number of CPUs to take is bigger than the total number of CPUs in a NUMA node,
// and there are free (i.e. all CPUs in them are free) NUMA nodes, the function takes as many entire
// free NUMA nodes as possible. The free NUMA nodes are taken from one socket at a time, and the
// sockets are considered by ascending order of free CPUs in them. If there are no free NUMA nodes,
// or the remaining number of CPUs to take after having taken full sockets and NUMA nodes is less
// than a whole NUMA node, the function tries to take whole physical cores (cores).
//
// If `PreferAlignByUncoreCache` is enabled, the function will try to optimally assign Uncorecaches.
// If `numCPUs` is larger than or equal to the total number of CPUs in a Uncorecache, and there are
// free (i.e. all CPUs within the Uncorecache are free) Uncorecaches, the function takes as many entire
// cores from free Uncorecaches as possible. If/Once `numCPUs` is smaller than the total number of
// CPUs in a free Uncorecache, the function scans each Uncorecache index in numerical order to assign
// cores that will fit within the Uncorecache. If `numCPUs` cannot fit within any Uncorecache, the
// function tries to take whole physical cores.
//
// If `numCPUs` is bigger than the total number of CPUs in a core, and there are
// free (i.e. all CPUs in them are free) cores, the function takes as many entire free cores as possible.
// The cores are taken from one socket at a time, and the sockets are considered by
// ascending order of free CPUs in them. For a given socket, the cores are taken one NUMA node at a time,
// and the NUMA nodes are considered by ascending order of free CPUs in them. If there are no free
// cores, or the remaining number of CPUs to take after having taken full sockets, NUMA nodes and
// cores is less than a whole core, the function tries to take individual CPUs.
//
// The individual CPUs are taken from one socket at a time, and the sockets are considered by
// ascending order of free CPUs in them. For a given socket, the CPUs are taken one NUMA node at a time,
// and the NUMA nodes are considered by ascending order of free CPUs in them. For a given NUMA node, the
// CPUs are taken one core at a time, and the cores are considered by ascending order of free CPUs in them.
//
// If NUMA nodes are higher than Sockets in the memory hierarchy (i.e. a NUMA node contains more
// than one socket), the CPUs are selected as written above, with the only differences being that
// (1) the order with which full sockets and full NUMA nodes are acquired is swapped, and (2) the
// order with which lower-level topology elements are selected is also swapped accordingly. E.g.
// when selecting full cores, the cores are selected starting from the ones in the NUMA node with
// the least amount of free CPUs to the one with the highest amount of free CPUs (i.e. in ascending
// order of free CPUs). For any NUMA node, the cores are selected from the ones in the socket with
// the least amount of free CPUs to the one with the highest amount of free CPUs.
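//
// For illustration, consider a hypothetical machine with one socket holding
// two NUMA nodes, each with 4 physical cores and 2 threads per core (16 CPUs,
// all free). A request for 10 CPUs takes no full socket (10 < 16), then one
// full free NUMA node (8 CPUs), and finally one full core (2 CPUs) from the
// remaining NUMA node.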
func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, preferAlignByUncoreCache bool) (cpuset.CPUSet, error) {
acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
if acc.isSatisfied() {
return acc.result, nil
}
if acc.isFailed() {
return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size())
}
// Algorithm: topology-aware best-fit
// 1. Acquire whole NUMA nodes and sockets, if available and the container
// requires at least a NUMA node or socket's-worth of CPUs. If NUMA
// Nodes map to 1 or more sockets, pull from NUMA nodes first.
// Otherwise pull from sockets first.
acc.numaOrSocketsFirst.takeFullFirstLevel()
if acc.isSatisfied() {
return acc.result, nil
}
acc.numaOrSocketsFirst.takeFullSecondLevel()
if acc.isSatisfied() {
return acc.result, nil
}
// 2. If PreferAlignByUncoreCache is enabled, acquire whole UncoreCaches
//    if available and the container requires at least an UncoreCache's worth
//    of CPUs. Otherwise, acquire CPUs from as few UncoreCaches as possible.
if preferAlignByUncoreCache {
acc.takeUncoreCache()
if acc.isSatisfied() {
return acc.result, nil
}
}
// 3. Acquire whole cores, if available and the container requires at least
// a core's-worth of CPUs.
// If `CPUSortingStrategySpread` is specified, skip taking the whole core.
if cpuSortingStrategy != CPUSortingStrategySpread {
acc.takeFullCores()
if acc.isSatisfied() {
return acc.result, nil
}
}
// 4. Acquire single threads, preferring to fill partially-allocated cores
// on the same sockets as the whole cores we have already taken in this
// allocation.
acc.takeRemainingCPUs()
if acc.isSatisfied() {
return acc.result, nil
}
return cpuset.New(), fmt.Errorf("failed to allocate cpus")
}
// takeByTopologyNUMADistributed returns a CPUSet of size 'numCPUs'.
//
// It generates this CPUset by allocating CPUs from 'availableCPUs' according
// to the algorithm outlined in KEP-2902:
//
// https://github.com/kubernetes/enhancements/tree/e7f51ffbe2ee398ffd1fba4a6d854f276bfad9fb/keps/sig-node/2902-cpumanager-distribute-cpus-policy-option
//
// This algorithm evenly distributes CPUs across NUMA nodes in cases where more
// than one NUMA node is required to satisfy the allocation. This is in
// contrast to the takeByTopologyNUMAPacked algorithm, which attempts to 'pack'
// CPUs onto NUMA nodes and fill them up before moving on to the next one.
//
// At a high-level this algorithm can be summarized as:
//
// For each single NUMA node:
// - If all requested CPUs can be allocated from this NUMA node;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in that NUMA node and return
//
// Otherwise, for each pair of NUMA nodes:
// - If the set of requested CPUs (modulo 2) can be evenly split across
// the 2 NUMA nodes; AND
// - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in both NUMA nodes and return
//
// Otherwise, for each 3-tuple of NUMA nodes:
// - If the set of requested CPUs (modulo 3) can be evenly distributed
// across the 3 NUMA nodes; AND
// - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in all three NUMA nodes and return
//
// ...
//
// Otherwise, for the set of all NUMA nodes:
// - If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly
// distributed across all NUMA nodes; AND
// - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in all NUMA nodes and return
//
// If none of the above conditions can be met, then resort back to a
// best-effort fit of packing CPUs into NUMA nodes by calling
// takeByTopologyNUMAPacked() over all available CPUs.
//
// NOTE: A "balance score" will be calculated to help find the best subset of
// NUMA nodes to allocate any 'remainder' CPUs from (in cases where the total
// number of CPUs to allocate cannot be evenly distributed across the chosen
// set of NUMA nodes). This "balance score" is calculated as the standard
// deviation of how many CPUs will be available on each NUMA node after all
// evenly distributed and remainder CPUs are allocated. The subset with the
// lowest "balance score" will receive the CPUs in order to keep the overall
// allocation of CPUs as "balanced" as possible.
//
// NOTE: This algorithm has been generalized to take an additional
// 'cpuGroupSize' parameter to ensure that CPUs are always allocated in groups
// of size 'cpuGroupSize' according to the algorithm described above. This is
// important, for example, to ensure that all CPUs (i.e. all hyperthreads) from
// a single core are allocated together.
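//
// Illustrative arithmetic (hypothetical numbers, not from the original
// source): with numCPUs=14, cpuGroupSize=2 and a combo of 3 NUMA nodes, the
// even distribution per node is (14/3/2)*2 = 4 CPUs, leaving a remainder of
// 14 - 4*3 = 2 CPUs to be striped as one group of size 2 onto whichever
// subset of the combo yields the lowest "balance score".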
func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuGroupSize int, cpuSortingStrategy CPUSortingStrategy) (cpuset.CPUSet, error) {
// If the number of CPUs requested cannot be handed out in chunks of
// 'cpuGroupSize', then we just fall back to the packing algorithm since we
// can't distribute CPUs in chunks of this size.
// The PreferAlignByUncoreCache feature is not implemented here yet, so it is
// set to false. Support for PreferAlignByUncoreCache is planned for the beta release.
if (numCPUs % cpuGroupSize) != 0 {
return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
}
// Otherwise build an accumulator to start allocating CPUs from.
acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
if acc.isSatisfied() {
return acc.result, nil
}
if acc.isFailed() {
return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size())
}
// Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'.
numas := acc.sortAvailableNUMANodes()
// Calculate the minimum and maximum possible number of NUMA nodes that
// could satisfy this request. This is used to optimize how many iterations
// of the loop we need to go through below.
minNUMAs, maxNUMAs := acc.rangeNUMANodesNeededToSatisfy(cpuGroupSize)
// Try combinations of 1,2,3,... NUMA nodes until we find a combination
// where we can evenly distribute CPUs across them. To optimize things, we
// don't always start at 1 and end at len(numas). Instead, we use the
// values of 'minNUMAs' and 'maxNUMAs' calculated above.
for k := minNUMAs; k <= maxNUMAs; k++ {
// Iterate through the various n-choose-k NUMA node combinations,
// looking for the combination of NUMA nodes that can best have CPUs
// distributed across them.
var bestBalance float64 = math.MaxFloat64
var bestRemainder []int = nil
var bestCombo []int = nil
acc.iterateCombinations(numas, k, func(combo []int) LoopControl {
// If we've already found a combo with a balance of 0 in a
// different iteration, then don't bother checking any others.
if bestBalance == 0 {
return Break
}
// Check that this combination of NUMA nodes has enough CPUs to
// satisfy the allocation overall.
cpus := acc.details.CPUsInNUMANodes(combo...)
if cpus.Size() < numCPUs {
return Continue
}
// Check that CPUs can be handed out in groups of size
// 'cpuGroupSize' across the NUMA nodes in this combo.
numCPUGroups := 0
for _, numa := range combo {
numCPUGroups += (acc.details.CPUsInNUMANodes(numa).Size() / cpuGroupSize)
}
if (numCPUGroups * cpuGroupSize) < numCPUs {
return Continue
}
// Check that each NUMA node in this combination can allocate an
// even distribution of CPUs in groups of size 'cpuGroupSize',
// modulo some remainder.
distribution := (numCPUs / len(combo) / cpuGroupSize) * cpuGroupSize
for _, numa := range combo {
cpus := acc.details.CPUsInNUMANodes(numa)
if cpus.Size() < distribution {
return Continue
}
}
// Calculate how many CPUs will be available on each NUMA node in
// the system after allocating an even distribution of CPU groups
// of size 'cpuGroupSize' from each NUMA node in 'combo'. This will
// be used in the "balance score" calculation to help decide if
// this combo should ultimately be chosen.
availableAfterAllocation := make(mapIntInt, len(numas))
for _, numa := range numas {
availableAfterAllocation[numa] = acc.details.CPUsInNUMANodes(numa).Size()
}
for _, numa := range combo {
availableAfterAllocation[numa] -= distribution
}
// Check if there are any remaining CPUs to distribute across the
// NUMA nodes once CPUs have been evenly distributed in groups of
// size 'cpuGroupSize'.
remainder := numCPUs - (distribution * len(combo))
// Get a list of NUMA nodes to consider pulling the remainder CPUs
// from. This list excludes NUMA nodes that don't have at least
// 'cpuGroupSize' CPUs available after being allocated
// 'distribution' number of CPUs.
var remainderCombo []int
for _, numa := range combo {
if availableAfterAllocation[numa] >= cpuGroupSize {
remainderCombo = append(remainderCombo, numa)
}
}
// Declare a set of local variables to help track the "balance
// scores" calculated when using different subsets of
// 'remainderCombo' to allocate remainder CPUs from.
var bestLocalBalance float64 = math.MaxFloat64
var bestLocalRemainder []int = nil
// If there aren't any remainder CPUs to allocate, then calculate
// the "balance score" of this combo as the standard deviation of
// the values contained in 'availableAfterAllocation'.
if remainder == 0 {
bestLocalBalance = standardDeviation(availableAfterAllocation.Values())
bestLocalRemainder = nil
}
// Otherwise, find the best "balance score" when allocating the
// remainder CPUs across different subsets of NUMA nodes in 'remainderCombo'.
// These remainder CPUs are handed out in groups of size 'cpuGroupSize'.
// We start from k=len(remainderCombo) and walk down to k=1 so that
// we continue to distribute CPUs as much as possible across
// multiple NUMA nodes.
for k := len(remainderCombo); remainder > 0 && k >= 1; k-- {
acc.iterateCombinations(remainderCombo, k, func(subset []int) LoopControl {
// Make a local copy of 'remainder'.
remainder := remainder
// Make a local copy of 'availableAfterAllocation'.
availableAfterAllocation := availableAfterAllocation.Clone()
// If this subset is not capable of allocating all
// remainder CPUs, continue to the next one.
if sum(availableAfterAllocation.Values(subset...)) < remainder {
return Continue
}
// For all NUMA nodes in 'subset', walk through them,
// removing 'cpuGroupSize' number of CPUs from each
// until all remainder CPUs have been accounted for.
for remainder > 0 {
for _, numa := range subset {
if remainder == 0 {
break
}
if availableAfterAllocation[numa] < cpuGroupSize {
continue
}
availableAfterAllocation[numa] -= cpuGroupSize
remainder -= cpuGroupSize
}
}
// Calculate the "balance score" as the standard deviation
// of the number of CPUs available on all NUMA nodes in the
// system after the remainder CPUs have been allocated
// across 'subset' in groups of size 'cpuGroupSize'.
balance := standardDeviation(availableAfterAllocation.Values())
if balance < bestLocalBalance {
bestLocalBalance = balance
bestLocalRemainder = subset
}
return Continue
})
}
// If the best "balance score" for this combo is less than the
// lowest "balance score" of all previous combos, then update this
// combo (and remainder set) to be the best one found so far.
if bestLocalBalance < bestBalance {
bestBalance = bestLocalBalance
bestRemainder = bestLocalRemainder
bestCombo = combo
}
return Continue
})
// If we made it through all of the iterations above without finding a
// combination of NUMA nodes that can properly balance CPU allocations,
// then move on to the next larger set of NUMA node combinations.
if bestCombo == nil {
continue
}
// Otherwise, start allocating CPUs from the NUMA node combination
// chosen. First allocate an even distribution of CPUs in groups of
// size 'cpuGroupSize' from 'bestCombo'.
distribution := (numCPUs / len(bestCombo) / cpuGroupSize) * cpuGroupSize
for _, numa := range bestCombo {
cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false)
acc.take(cpus)
}
// Then allocate any remaining CPUs in groups of size 'cpuGroupSize'
// from each NUMA node in the remainder set.
remainder := numCPUs - (distribution * len(bestCombo))
for remainder > 0 {
for _, numa := range bestRemainder {
if remainder == 0 {
break
}
if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize {
continue
}
cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false)
acc.take(cpus)
remainder -= cpuGroupSize
}
}
// If we haven't allocated all of our CPUs at this point, then something
// went wrong in our accounting and we should error out.
if acc.numCPUsNeeded > 0 {
return cpuset.New(), fmt.Errorf("accounting error, not enough CPUs allocated, remaining: %v", acc.numCPUsNeeded)
}
// Likewise, if we have allocated too many CPUs at this point, then something
// went wrong in our accounting and we should error out.
if acc.numCPUsNeeded < 0 {
return cpuset.New(), fmt.Errorf("accounting error, too many CPUs allocated, remaining: %v", acc.numCPUsNeeded)
}
// Otherwise, return the result
return acc.result, nil
}
// If we never found a combination of NUMA nodes that we could properly
// distribute CPUs across, fall back to the packing algorithm.
return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"context"
"fmt"
"math"
"sync"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/utils/cpuset"
)
// ActivePodsFunc is a function that returns a list of pods to reconcile.
type ActivePodsFunc func() []*v1.Pod
type runtimeService interface {
UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
}
type policyName string
// cpuManagerStateFileName is the file name where cpu manager stores its state
const cpuManagerStateFileName = "cpu_manager_state"
// Manager interface provides methods for Kubelet to manage pod cpus.
type Manager interface {
// Start is called during Kubelet initialization.
Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error
// Called to trigger the allocation of CPUs to a container. This must be
// called at some point prior to the AddContainer() call for a container,
// e.g. at pod admission time.
Allocate(pod *v1.Pod, container *v1.Container) error
// AddContainer adds the mapping from container ID to pod UID and container name.
// The mapping is used to remove the CPU allocation when the container is removed.
AddContainer(p *v1.Pod, c *v1.Container, containerID string)
// RemoveContainer is called after Kubelet decides to kill or delete a
// container. After this call, the CPU manager stops trying to reconcile
// that container and any CPUs dedicated to the container are freed.
RemoveContainer(containerID string) error
// State returns a read-only interface to the internal CPU manager state.
State() state.Reader
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
// GetExclusiveCPUs implements the podresources.CPUsProvider interface to provide
// exclusively allocated cpus for the container
GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet
// GetPodTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment per Pod
// among this and other resource controllers.
GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint
// GetAllocatableCPUs returns the total set of CPUs available for allocation.
GetAllocatableCPUs() cpuset.CPUSet
// GetCPUAffinity returns the cpuset which includes cpus from the shared pool
// as well as exclusively allocated cpus
GetCPUAffinity(podUID, containerName string) cpuset.CPUSet
// GetAllCPUs returns all the CPUs known by cpumanager, as reported by the
// hardware discovery. Maps to the CPU capacity.
GetAllCPUs() cpuset.CPUSet
}
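// For illustration only, a minimal sketch of the lifecycle a caller such as
// the kubelet is expected to follow. All variables here (machineInfo,
// reservation, stateDir, affinity, pod, container, containerID and the Start
// arguments) are hypothetical placeholders, not names from this package:
//
//	mgr, err := NewManager("static", nil, 10*time.Second, machineInfo, cpuset.New(), reservation, stateDir, affinity)
//	if err != nil { /* handle */ }
//	_ = mgr.Start(activePods, sourcesReady, podStatusProvider, runtime, initialContainers)
//	_ = mgr.Allocate(pod, container)              // at pod admission, before AddContainer
//	mgr.AddContainer(pod, container, containerID) // once the container ID is known
//	_ = mgr.RemoveContainer(containerID)          // after the container is killed or deleted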
type manager struct {
sync.Mutex
policy Policy
// reconcilePeriod is the duration between calls to reconcileState.
reconcilePeriod time.Duration
// state allows pluggable CPU assignment policies while sharing a common
// representation of state for the system to inspect and reconcile.
state state.State
// lastUpdateState holds the state for each container from the last time it was updated.
lastUpdateState state.State
// containerRuntime is the container runtime service interface needed
// to make UpdateContainerResources() calls against the containers.
containerRuntime runtimeService
// activePods is a method for listing active pods on the node
// so all the containers can be updated in the reconciliation loop.
activePods ActivePodsFunc
// podStatusProvider provides a method for obtaining pod statuses
// and the containerID of their containers
podStatusProvider status.PodStatusProvider
// containerMap provides a mapping from (pod, container) -> containerID
// for all containers in a pod
containerMap containermap.ContainerMap
topology *topology.CPUTopology
nodeAllocatableReservation v1.ResourceList
// sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness.
// We use it to determine when we can purge inactive pods from checkpointed state.
sourcesReady config.SourcesReady
// stateFileDirectory holds the directory where the state file for checkpoints is held.
stateFileDirectory string
// allCPUs is the set of online CPUs as reported by the system
allCPUs cpuset.CPUSet
// allocatableCPUs is the set of online CPUs as reported by the system,
// and available for allocation, minus the reserved set
allocatableCPUs cpuset.CPUSet
}
var _ Manager = &manager{}
type sourcesReadyStub struct{}
func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// NewManager creates a new cpu manager based on the provided policy
func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, specificCPUs cpuset.CPUSet, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
var topo *topology.CPUTopology
var policy Policy
var err error
topo, err = topology.Discover(machineInfo)
if err != nil {
return nil, err
}
switch policyName(cpuPolicyName) {
case PolicyNone:
policy, err = NewNonePolicy(cpuPolicyOptions)
if err != nil {
return nil, fmt.Errorf("new none policy error: %w", err)
}
case PolicyStatic:
klog.InfoS("Detected CPU topology", "topology", topo)
reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU]
if !ok {
// The static policy cannot initialize without this information.
return nil, fmt.Errorf("[cpumanager] unable to determine reserved CPU resources for static policy")
}
if reservedCPUs.IsZero() {
// The static policy requires this to be nonzero. Zero CPU reservation
// would allow the shared pool to be completely exhausted. At that point
// either we would violate our guarantee of exclusivity or need to evict
// any pod that has at least one container that requires zero CPUs.
// See the comments in policy_static.go for more details.
return nil, fmt.Errorf("[cpumanager] the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero")
}
// Take the ceiling of the reservation, since fractional CPUs cannot be
// exclusively allocated.
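// For example (illustrative): a reservation of 1500m is 1.5 CPUs, which is
// rounded up to numReservedCPUs = 2.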
reservedCPUsFloat := float64(reservedCPUs.MilliValue()) / 1000
numReservedCPUs := int(math.Ceil(reservedCPUsFloat))
policy, err = NewStaticPolicy(topo, numReservedCPUs, specificCPUs, affinity, cpuPolicyOptions)
if err != nil {
return nil, fmt.Errorf("new static policy error: %w", err)
}
default:
return nil, fmt.Errorf("unknown policy: \"%s\"", cpuPolicyName)
}
manager := &manager{
policy: policy,
reconcilePeriod: reconcilePeriod,
lastUpdateState: state.NewMemoryState(),
topology: topo,
nodeAllocatableReservation: nodeAllocatableReservation,
stateFileDirectory: stateFileDirectory,
allCPUs: topo.CPUDetails.CPUs(),
}
manager.sourcesReady = &sourcesReadyStub{}
return manager, nil
}
func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
klog.InfoS("Starting CPU manager", "policy", m.policy.Name())
klog.InfoS("Reconciling", "reconcilePeriod", m.reconcilePeriod)
m.sourcesReady = sourcesReady
m.activePods = activePods
m.podStatusProvider = podStatusProvider
m.containerRuntime = containerRuntime
m.containerMap = initialContainers
stateImpl, err := state.NewCheckpointState(m.stateFileDirectory, cpuManagerStateFileName, m.policy.Name(), m.containerMap)
if err != nil {
klog.ErrorS(err, "Could not initialize checkpoint manager, please drain node and remove policy state file")
return err
}
m.state = stateImpl
err = m.policy.Start(m.state)
if err != nil {
klog.ErrorS(err, "Policy start error")
return err
}
klog.V(4).InfoS("CPU manager started", "policy", m.policy.Name())
m.allocatableCPUs = m.policy.GetAllocatableCPUs(m.state)
if m.policy.Name() == string(PolicyNone) {
return nil
}
// Periodically call m.reconcileState() to keep the CPU sets of all pods in
// sync with the guaranteed CPUs handed out among them.
go wait.Until(func() { m.reconcileState() }, m.reconcilePeriod, wait.NeverStop)
return nil
}
func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
// Garbage collect any stranded resources before allocating CPUs.
m.removeStaleState()
m.Lock()
defer m.Unlock()
// Call down into the policy to assign this container CPUs if required.
err := m.policy.Allocate(m.state, p, c)
if err != nil {
klog.ErrorS(err, "Allocate error")
return err
}
return nil
}
func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
m.Lock()
defer m.Unlock()
if cset, exists := m.state.GetCPUSet(string(pod.UID), container.Name); exists {
m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
}
m.containerMap.Add(string(pod.UID), container.Name, containerID)
}
func (m *manager) RemoveContainer(containerID string) error {
m.Lock()
defer m.Unlock()
err := m.policyRemoveContainerByID(containerID)
if err != nil {
klog.ErrorS(err, "RemoveContainer error")
return err
}
return nil
}
func (m *manager) policyRemoveContainerByID(containerID string) error {
podUID, containerName, err := m.containerMap.GetContainerRef(containerID)
if err != nil {
return nil
}
err = m.policy.RemoveContainer(m.state, podUID, containerName)
if err == nil {
m.lastUpdateState.Delete(podUID, containerName)
m.containerMap.RemoveByContainerID(containerID)
}
return err
}
func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) error {
err := m.policy.RemoveContainer(m.state, podUID, containerName)
if err == nil {
m.lastUpdateState.Delete(podUID, containerName)
m.containerMap.RemoveByContainerRef(podUID, containerName)
}
return err
}
func (m *manager) State() state.Reader {
return m.state
}
func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
// Garbage collect any stranded resources before providing TopologyHints
m.removeStaleState()
// Delegate to active policy
return m.policy.GetTopologyHints(m.state, pod, container)
}
func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
// Garbage collect any stranded resources before providing TopologyHints
m.removeStaleState()
// Delegate to active policy
return m.policy.GetPodTopologyHints(m.state, pod)
}
func (m *manager) GetAllocatableCPUs() cpuset.CPUSet {
return m.allocatableCPUs.Clone()
}
func (m *manager) GetAllCPUs() cpuset.CPUSet {
return m.allCPUs.Clone()
}
type reconciledContainer struct {
podName string
containerName string
containerID string
}
func (m *manager) removeStaleState() {
// Only once all sources are ready do we attempt to remove any stale state.
// This ensures that the call to `m.activePods()` below will succeed with
// the actual active pods list.
if !m.sourcesReady.AllReady() {
return
}
// We grab the lock to ensure that no new containers will grab CPUs while
// executing the code below. Without this lock, it's possible that we end up
// removing state that is newly added by an asynchronous call to
// AddContainer() during the execution of this code.
m.Lock()
defer m.Unlock()
// Get the list of active pods.
activePods := m.activePods()
// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
activeContainers := make(map[string]map[string]struct{})
for _, pod := range activePods {
activeContainers[string(pod.UID)] = make(map[string]struct{})
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
activeContainers[string(pod.UID)][container.Name] = struct{}{}
}
}
// Loop through the CPUManager state. Remove any state for containers not
// in the `activeContainers` list built above.
assignments := m.state.GetCPUAssignments()
for podUID := range assignments {
for containerName := range assignments[podUID] {
if _, ok := activeContainers[podUID][containerName]; ok {
klog.V(5).InfoS("RemoveStaleState: container still active", "podUID", podUID, "containerName", containerName)
continue
}
klog.V(2).InfoS("RemoveStaleState: removing container", "podUID", podUID, "containerName", containerName)
err := m.policyRemoveContainerByRef(podUID, containerName)
if err != nil {
klog.ErrorS(err, "RemoveStaleState: failed to remove container", "podUID", podUID, "containerName", containerName)
}
}
}
m.containerMap.Visit(func(podUID, containerName, containerID string) {
if _, ok := activeContainers[podUID][containerName]; ok {
klog.V(5).InfoS("RemoveStaleState: containerMap: container still active", "podUID", podUID, "containerName", containerName)
return
}
klog.V(2).InfoS("RemoveStaleState: containerMap: removing container", "podUID", podUID, "containerName", containerName)
err := m.policyRemoveContainerByRef(podUID, containerName)
if err != nil {
klog.ErrorS(err, "RemoveStaleState: containerMap: failed to remove container", "podUID", podUID, "containerName", containerName)
}
})
}
func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) {
ctx := context.Background()
success = []reconciledContainer{}
failure = []reconciledContainer{}
m.removeStaleState()
for _, pod := range m.activePods() {
pstatus, ok := m.podStatusProvider.GetPodStatus(pod.UID)
if !ok {
klog.V(5).InfoS("ReconcileState: skipping pod; status not found", "pod", klog.KObj(pod))
failure = append(failure, reconciledContainer{pod.Name, "", ""})
continue
}
allContainers := pod.Spec.InitContainers
allContainers = append(allContainers, pod.Spec.Containers...)
for _, container := range allContainers {
containerID, err := findContainerIDByName(&pstatus, container.Name)
if err != nil {
klog.V(5).InfoS("ReconcileState: skipping container; ID not found in pod status", "pod", klog.KObj(pod), "containerName", container.Name, "err", err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
continue
}
cstatus, err := findContainerStatusByName(&pstatus, container.Name)
if err != nil {
klog.V(5).InfoS("ReconcileState: skipping container; container status not found in pod status", "pod", klog.KObj(pod), "containerName", container.Name, "err", err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
continue
}
if cstatus.State.Waiting != nil ||
(cstatus.State.Waiting == nil && cstatus.State.Running == nil && cstatus.State.Terminated == nil) {
klog.V(4).InfoS("ReconcileState: skipping container; container still in the waiting state", "pod", klog.KObj(pod), "containerName", container.Name, "err", err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, ""})
continue
}
m.Lock()
if cstatus.State.Terminated != nil {
// The container is terminated but we can't call m.RemoveContainer()
// here because it could remove the allocated cpuset for the container
// which may be in the process of being restarted. That would result
// in the container losing any exclusively-allocated CPUs that it
// was allocated.
_, _, err := m.containerMap.GetContainerRef(containerID)
if err == nil {
klog.V(4).InfoS("ReconcileState: ignoring terminated container", "pod", klog.KObj(pod), "containerID", containerID)
}
m.Unlock()
continue
}
// Once we make it here we know we have a running container.
// Idempotently add it to the containerMap in case it is missing.
// This can happen after a kubelet restart, for example.
m.containerMap.Add(string(pod.UID), container.Name, containerID)
m.Unlock()
cset := m.state.GetCPUSetOrDefault(string(pod.UID), container.Name)
if cset.IsEmpty() {
// NOTE: This should not happen outside of tests.
klog.V(2).InfoS("ReconcileState: skipping container; empty cpuset assigned", "pod", klog.KObj(pod), "containerName", container.Name)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
continue
}
lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
if !cset.Equals(lcset) {
klog.V(5).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
err = m.updateContainerCPUSet(ctx, containerID, cset)
if err != nil {
klog.ErrorS(err, "ReconcileState: failed to update container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
continue
}
m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
}
success = append(success, reconciledContainer{pod.Name, container.Name, containerID})
}
}
return success, failure
}
func findContainerIDByName(status *v1.PodStatus, name string) (string, error) {
allStatuses := status.InitContainerStatuses
allStatuses = append(allStatuses, status.ContainerStatuses...)
for _, container := range allStatuses {
if container.Name == name && container.ContainerID != "" {
cid := &kubecontainer.ContainerID{}
err := cid.ParseString(container.ContainerID)
if err != nil {
return "", err
}
return cid.ID, nil
}
}
return "", fmt.Errorf("unable to find ID for container with name %v in pod status (it may not be running)", name)
}
func findContainerStatusByName(status *v1.PodStatus, name string) (*v1.ContainerStatus, error) {
for _, containerStatus := range append(status.InitContainerStatuses, status.ContainerStatuses...) {
if containerStatus.Name == name {
return &containerStatus, nil
}
}
return nil, fmt.Errorf("unable to find status for container with name %v in pod status (it may not be running)", name)
}
func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
if result, ok := m.state.GetCPUSet(podUID, containerName); ok {
return result
}
return cpuset.CPUSet{}
}
func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
return m.state.GetCPUSetOrDefault(podUID, containerName)
}
//go:build !windows
// +build !windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"context"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/utils/cpuset"
)
func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error {
// TODO: Consider adding a `ResourceConfigForContainer` helper in
// helpers_linux.go similar to what exists for pods.
// It would be better to pass the full container resources here instead of
// this patch-like partial resources.
return m.containerRuntime.UpdateContainerResources(
ctx,
containerID,
&runtimeapi.ContainerResources{
Linux: &runtimeapi.LinuxContainerResources{
CpusetCpus: cpus.String(),
},
})
}
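// For illustration: cpus.String() renders the set in the Linux cpuset list
// format the CRI expects in CpusetCpus, e.g. a set containing CPUs 0,1,2,3
// and 8 becomes "0-3,8".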
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/utils/cpuset"
)
type fakeManager struct {
state state.State
}
func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
klog.InfoS("Start()")
return nil
}
func (m *fakeManager) Policy() Policy {
klog.InfoS("Policy()")
pol, _ := NewNonePolicy(nil)
return pol
}
func (m *fakeManager) Allocate(pod *v1.Pod, container *v1.Container) error {
klog.InfoS("Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
return nil
}
func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
}
func (m *fakeManager) RemoveContainer(containerID string) error {
klog.InfoS("RemoveContainer", "containerID", containerID)
return nil
}
func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
klog.InfoS("Get container topology hints")
return map[string][]topologymanager.TopologyHint{}
}
func (m *fakeManager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
klog.InfoS("Get pod topology hints")
return map[string][]topologymanager.TopologyHint{}
}
func (m *fakeManager) State() state.Reader {
return m.state
}
func (m *fakeManager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
klog.InfoS("GetExclusiveCPUs", "podUID", podUID, "containerName", containerName)
return cpuset.CPUSet{}
}
func (m *fakeManager) GetAllocatableCPUs() cpuset.CPUSet {
klog.InfoS("Get Allocatable CPUs")
return cpuset.CPUSet{}
}
func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
klog.InfoS("GetCPUAffinity", "podUID", podUID, "containerName", containerName)
return cpuset.CPUSet{}
}
func (m *fakeManager) GetAllCPUs() cpuset.CPUSet {
klog.InfoS("GetAllCPUs")
return cpuset.CPUSet{}
}
// NewFakeManager creates an empty/fake cpu manager
func NewFakeManager() Manager {
return &fakeManager{
state: state.NewMemoryState(),
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/utils/cpuset"
)
type nonePolicy struct{}
var _ Policy = &nonePolicy{}
// PolicyNone is the name of the none policy
const PolicyNone policyName = "none"
// NewNonePolicy returns a cpuset manager policy that does nothing
func NewNonePolicy(cpuPolicyOptions map[string]string) (Policy, error) {
if len(cpuPolicyOptions) > 0 {
return nil, fmt.Errorf("None policy: received unsupported options=%v", cpuPolicyOptions)
}
return &nonePolicy{}, nil
}
func (p *nonePolicy) Name() string {
return string(PolicyNone)
}
func (p *nonePolicy) Start(s state.State) error {
klog.InfoS("None policy: Start")
return nil
}
func (p *nonePolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
return nil
}
func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
return nil
}
func (p *nonePolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
return nil
}
func (p *nonePolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
return nil
}
// Assignable CPUs are the ones that can be exclusively allocated to pods that meet the exclusivity requirement
// (i.e. guaranteed QoS class and integral CPU request).
// Assignability of CPUs as a concept is only applicable to the static policy, i.e. scenarios where workloads
// CAN get exclusive access to core(s).
// Hence, we return an empty set here: no cpus are assignable according to the above definition with this policy.
func (p *nonePolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet {
return cpuset.New()
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"strconv"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
)
// Names of the options, as part of the user interface.
const (
FullPCPUsOnlyOption string = "full-pcpus-only"
DistributeCPUsAcrossNUMAOption string = "distribute-cpus-across-numa"
AlignBySocketOption string = "align-by-socket"
DistributeCPUsAcrossCoresOption string = "distribute-cpus-across-cores"
StrictCPUReservationOption string = "strict-cpu-reservation"
PreferAlignByUnCoreCacheOption string = "prefer-align-cpus-by-uncorecache"
)
var (
alphaOptions = sets.New[string](
AlignBySocketOption,
DistributeCPUsAcrossCoresOption,
)
betaOptions = sets.New[string](
StrictCPUReservationOption,
DistributeCPUsAcrossNUMAOption,
PreferAlignByUnCoreCacheOption,
)
stableOptions = sets.New[string](
FullPCPUsOnlyOption,
)
)
// CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings.
// It returns nil on success, or an error describing the failure.
func CheckPolicyOptionAvailable(option string) error {
if !alphaOptions.Has(option) && !betaOptions.Has(option) && !stableOptions.Has(option) {
return fmt.Errorf("unknown CPU Manager Policy option: %q", option)
}
if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManagerPolicyAlphaOptions) {
return fmt.Errorf("CPU Manager Policy Alpha-level Options not enabled, but option %q provided", option)
}
if betaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManagerPolicyBetaOptions) {
return fmt.Errorf("CPU Manager Policy Beta-level Options not enabled, but option %q provided", option)
}
// if the option is stable, we need no CPUManagerPolicy*Options feature gate check
return nil
}
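// For illustration (the gate defaults assumed here are not stated in this
// file): a stable option always passes, while an alpha option is rejected
// unless CPUManagerPolicyAlphaOptions is enabled:
//
//	err := CheckPolicyOptionAvailable(FullPCPUsOnlyOption)            // nil
//	err = CheckPolicyOptionAvailable(DistributeCPUsAcrossCoresOption) // non-nil unless the alpha gate is on
//	_ = err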
// StaticPolicyOptions holds the parsed value of the policy options, ready to be consumed internally.
type StaticPolicyOptions struct {
// flag to enable extra allocation restrictions to avoid
// different containers possibly ending up on the same core.
// we consider "core" and "physical CPU" synonyms here, leaning
// towards the terminology used by k8s hints. We acknowledge this is confusing.
//
// looking at https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/,
// any possible naming scheme will lead to ambiguity to some extent.
// We picked "pcpu" because the established docs hint at vCPU already.
FullPhysicalCPUsOnly bool
// Flag to evenly distribute CPUs across NUMA nodes in cases where more
// than one NUMA node is required to satisfy the allocation.
DistributeCPUsAcrossNUMA bool
// Flag to ensure CPUs are considered aligned at socket boundary rather than
// NUMA boundary
AlignBySocket bool
// flag to enable extra allocation restrictions to spread
// cpus (HT) across different physical cores.
// This is a best-effort preference, so no error is thrown if the cpus end up packed on one physical core.
DistributeCPUsAcrossCores bool
// Flag to remove reserved cores from the list of available cores
StrictCPUReservation bool
// Flag that makes a best-effort attempt to align CPUs to an uncorecache boundary.
// As long as there are CPUs available, pods will still be admitted even if the alignment cannot be met.
PreferAlignByUncoreCacheOption bool
}
// NewStaticPolicyOptions creates a StaticPolicyOptions struct from the user configuration.
func NewStaticPolicyOptions(policyOptions map[string]string) (StaticPolicyOptions, error) {
opts := StaticPolicyOptions{}
for name, value := range policyOptions {
if err := CheckPolicyOptionAvailable(name); err != nil {
return opts, err
}
switch name {
case FullPCPUsOnlyOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.FullPhysicalCPUsOnly = optValue
case DistributeCPUsAcrossNUMAOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.DistributeCPUsAcrossNUMA = optValue
case AlignBySocketOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.AlignBySocket = optValue
case DistributeCPUsAcrossCoresOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.DistributeCPUsAcrossCores = optValue
case StrictCPUReservationOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.StrictCPUReservation = optValue
case PreferAlignByUnCoreCacheOption:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.PreferAlignByUncoreCacheOption = optValue
default:
// this should never be reached, we already detect unknown options,
// but we keep it as further safety.
return opts, fmt.Errorf("unsupported cpumanager option: %q (%s)", name, value)
}
}
if opts.FullPhysicalCPUsOnly && opts.DistributeCPUsAcrossCores {
return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", FullPCPUsOnlyOption, DistributeCPUsAcrossCoresOption)
}
// TODO(@Jeffwan): Remove this check after more compatibility tests are done.
if opts.DistributeCPUsAcrossNUMA && opts.DistributeCPUsAcrossCores {
return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", DistributeCPUsAcrossNUMAOption, DistributeCPUsAcrossCoresOption)
}
if opts.PreferAlignByUncoreCacheOption && opts.DistributeCPUsAcrossCores {
return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", PreferAlignByUnCoreCacheOption, DistributeCPUsAcrossCoresOption)
}
if opts.PreferAlignByUncoreCacheOption && opts.DistributeCPUsAcrossNUMA {
return opts, fmt.Errorf("static policy options %s and %s can not be used at the same time", PreferAlignByUnCoreCacheOption, DistributeCPUsAcrossNUMAOption)
}
return opts, nil
}
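// A minimal usage sketch (illustrative): the input map mirrors the kubelet's
// cpuManagerPolicyOptions configuration, with string-encoded boolean values:
//
//	opts, err := NewStaticPolicyOptions(map[string]string{
//		FullPCPUsOnlyOption: "true",
//	})
//	if err != nil { /* unknown option, disabled feature gate, or bad value */ }
//	_ = opts.FullPhysicalCPUsOnly // true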
// ValidateStaticPolicyOptions ensures that the requested policy options are compatible with the machine on which the CPUManager is running.
func ValidateStaticPolicyOptions(opts StaticPolicyOptions, topology *topology.CPUTopology, topologyManager topologymanager.Store) error {
if opts.AlignBySocket {
// Not compatible with topology manager single-numa-node policy option.
if topologyManager.GetPolicy().Name() == topologymanager.PolicySingleNumaNode {
return fmt.Errorf("Topolgy manager %s policy is incompatible with CPUManager %s policy option", topologymanager.PolicySingleNumaNode, AlignBySocketOption)
}
// Not compatible with topologies where the number of sockets is greater than the number of NUMA nodes.
if topology.NumSockets > topology.NumNUMANodes {
return fmt.Errorf("Align by socket is not compatible with hardware where the number of sockets is greater than the number of NUMA nodes")
}
}
return nil
}
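// For illustration (hypothetical topology): with AlignBySocket enabled on a
// machine where NumSockets=2 and NumNUMANodes=1, ValidateStaticPolicyOptions
// returns an error, since socket alignment cannot be guaranteed there.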
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cpumanager
import (
"fmt"
"strconv"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/cpuset"
)
const (
// PolicyStatic is the name of the static policy.
// Should options be given, they will be ignored and backward-compatible
// behaviour (up to and including 1.21) will be enforced
PolicyStatic policyName = "static"
// ErrorSMTAlignment represents the type of an SMTAlignmentError
ErrorSMTAlignment = "SMTAlignmentError"
)
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
CpusPerCore int
AvailablePhysicalCPUs int
CausedByPhysicalCPUs bool
}
func (e SMTAlignmentError) Error() string {
if e.CausedByPhysicalCPUs {
return fmt.Sprintf("SMT Alignment Error: not enough free physical CPUs: available physical CPUs = %d, requested CPUs = %d, CPUs per core = %d", e.AvailablePhysicalCPUs, e.RequestedCPUs, e.CpusPerCore)
}
return fmt.Sprintf("SMT Alignment Error: requested %d cpus not multiple cpus per core = %d", e.RequestedCPUs, e.CpusPerCore)
}
// Type returns human-readable type of this error. Used in the admission control to populate Admission Failure reason.
func (e SMTAlignmentError) Type() string {
return ErrorSMTAlignment
}
// staticPolicy is a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
//
// This policy allocates CPUs exclusively for a container if all the following
// conditions are met:
//
// - The pod QoS class is Guaranteed.
// - The CPU request is a positive integer.
//
// The static policy maintains the following sets of logical CPUs:
//
// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers
// run here. Initially this contains all CPU IDs on the system. As
// exclusive allocations are created and destroyed, this CPU set shrinks
// and grows, accordingly. This is stored in the state as the default
// CPU set.
//
// - RESERVED: A subset of the shared pool which is not exclusively
// allocatable. The membership of this pool is static for the lifetime of
// the Kubelet. The size of the reserved pool is
// ceil(systemreserved.cpu + kubereserved.cpu).
// Reserved CPUs are taken topologically starting with lowest-indexed
// physical core, as reported by cAdvisor.
//
// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated
// from this pool.
//
// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container.
// These are stored as explicit assignments in the state.
//
// When an exclusive allocation is made, the static policy also updates the
// default cpuset in the state abstraction. The CPU manager's periodic
// reconcile loop takes care of rewriting the cpuset in cgroupfs for any
// containers that may be running in the shared pool. For this reason,
// applications running within exclusively-allocated containers must tolerate
// potentially sharing their allocated CPUs for up to the CPU manager
// reconcile period.
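//
// For illustration (hypothetical 8-CPU machine with HT and numReservedCPUs=2,
// matching the example in NewStaticPolicy below): initially SHARED={0-7} and
// RESERVED={0,4}, so ASSIGNABLE={1,2,3,5,6,7}; an exclusive allocation of
// {1,5} then shrinks the default (shared) set to {0,2,3,4,6,7}.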
type staticPolicy struct {
// cpu socket topology
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reservedCPUs cpuset.CPUSet
// Superset of reservedCPUs. It includes not just the reservedCPUs themselves,
// but also any siblings of those reservedCPUs on the same physical core.
// NOTE: If the reserved set includes full physical CPUs from the beginning
// (e.g. only reserved pairs of core siblings) this set is expected to be
// identical to the reserved set.
reservedPhysicalCPUs cpuset.CPUSet
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
cpusToReuse map[string]cpuset.CPUSet
// options allow to fine-tune the behaviour of the policy
options StaticPolicyOptions
// we compute this value multiple times, and it's not supposed to change
// at runtime - the cpumanager can't deal with runtime topology changes anyway.
cpuGroupSize int
}
// Ensure staticPolicy implements Policy interface
var _ Policy = &staticPolicy{}
// NewStaticPolicy returns a CPU manager policy that does not change CPU
// assignments for exclusively pinned guaranteed containers after the main
// container process starts.
func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reservedCPUs cpuset.CPUSet, affinity topologymanager.Store, cpuPolicyOptions map[string]string) (Policy, error) {
opts, err := NewStaticPolicyOptions(cpuPolicyOptions)
if err != nil {
return nil, err
}
err = ValidateStaticPolicyOptions(opts, topology, affinity)
if err != nil {
return nil, err
}
cpuGroupSize := topology.CPUsPerCore()
klog.InfoS("Static policy created with configuration", "options", opts, "cpuGroupSize", cpuGroupSize)
policy := &staticPolicy{
topology: topology,
affinity: affinity,
cpusToReuse: make(map[string]cpuset.CPUSet),
options: opts,
cpuGroupSize: cpuGroupSize,
}
allCPUs := topology.CPUDetails.CPUs()
var reserved cpuset.CPUSet
if reservedCPUs.Size() > 0 {
reserved = reservedCPUs
} else {
// takeByTopology allocates CPUs associated with low-numbered cores from
// allCPUs.
//
// For example: Given a system with 8 CPUs available and HT enabled,
// if numReservedCPUs=2, then reserved={0,4}
reserved, _ = policy.takeByTopology(allCPUs, numReservedCPUs)
}
if reserved.Size() != numReservedCPUs {
err := fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs)
return nil, err
}
var reservedPhysicalCPUs cpuset.CPUSet
for _, cpu := range reserved.UnsortedList() {
core, err := topology.CPUCoreID(cpu)
if err != nil {
return nil, fmt.Errorf("[cpumanager] unable to build the reserved physical CPUs from the reserved set: %w", err)
}
reservedPhysicalCPUs = reservedPhysicalCPUs.Union(topology.CPUDetails.CPUsInCores(core))
}
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved, "reservedPhysicalCPUs", reservedPhysicalCPUs)
policy.reservedCPUs = reserved
policy.reservedPhysicalCPUs = reservedPhysicalCPUs
return policy, nil
}
func (p *staticPolicy) Name() string {
return string(PolicyStatic)
}
func (p *staticPolicy) Start(s state.State) error {
if err := p.validateState(s); err != nil {
klog.ErrorS(err, "Static policy invalid state, please drain node and remove policy state file")
return err
}
p.initializeMetrics(s)
return nil
}
func (p *staticPolicy) validateState(s state.State) error {
tmpAssignments := s.GetCPUAssignments()
tmpDefaultCPUset := s.GetDefaultCPUSet()
allCPUs := p.topology.CPUDetails.CPUs()
if p.options.StrictCPUReservation {
allCPUs = allCPUs.Difference(p.reservedCPUs)
}
// Default cpuset cannot be empty when assignments exist
if tmpDefaultCPUset.IsEmpty() {
if len(tmpAssignments) != 0 {
return fmt.Errorf("default cpuset cannot be empty")
}
// state is empty; initialize it
s.SetDefaultCPUSet(allCPUs)
klog.InfoS("Static policy initialized", "defaultCPUSet", allCPUs)
return nil
}
// State has already been initialized from file (is not empty)
// 1. Check if the reserved cpuset is not part of default cpuset because:
// - kube/system reserved have changed (increased) - may lead to some containers not being able to start
// - user tampered with file
if p.options.StrictCPUReservation {
if !p.reservedCPUs.Intersection(tmpDefaultCPUset).IsEmpty() {
return fmt.Errorf("some of strictly reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
p.reservedCPUs.Intersection(tmpDefaultCPUset).String(), tmpDefaultCPUset.String())
}
} else {
if !p.reservedCPUs.Intersection(tmpDefaultCPUset).Equals(p.reservedCPUs) {
return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
p.reservedCPUs.String(), tmpDefaultCPUset.String())
}
}
// 2. Check if state for static policy is consistent
for pod := range tmpAssignments {
for container, cset := range tmpAssignments[pod] {
// None of the CPUs in the default cpuset should appear in s.assignments
if !tmpDefaultCPUset.Intersection(cset).IsEmpty() {
return fmt.Errorf("pod: %s, container: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"",
pod, container, cset.String(), tmpDefaultCPUset.String())
}
}
}
// 3. It's possible that the set of available CPUs has changed since
// the state was written. This can happen, for example, when a CPU is
// offlined while the kubelet is not running. If this happens, the
// CPU manager will run into trouble later when it tries to
// assign non-existent CPUs to containers. Validate that the
// topology received during CPU manager startup matches the
// set of CPUs stored in the state.
totalKnownCPUs := tmpDefaultCPUset.Clone()
tmpCPUSets := []cpuset.CPUSet{}
for pod := range tmpAssignments {
for _, cset := range tmpAssignments[pod] {
tmpCPUSets = append(tmpCPUSets, cset)
}
}
totalKnownCPUs = totalKnownCPUs.Union(tmpCPUSets...)
if !totalKnownCPUs.Equals(allCPUs) {
return fmt.Errorf("current set of available CPUs \"%s\" doesn't match with CPUs in state \"%s\"",
allCPUs.String(), totalKnownCPUs.String())
}
return nil
}
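// Worked example of check 2 above (illustrative; the pod and container names
// are assumptions): with defaultCPUSet={0-3} and an assignment "p"/"c" -> {2,3},
// the intersection {2,3} is non-empty, so validateState fails and the stale
// state file must be removed before the policy can start:
//
//	s.SetDefaultCPUSet(cpuset.New(0, 1, 2, 3))
//	s.SetCPUSet("p", "c", cpuset.New(2, 3))
//	err := p.validateState(s) // cpuset "2-3" overlaps with default cpuset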
// GetAllocatableCPUs returns the total set of CPUs available for allocation.
func (p *staticPolicy) GetAllocatableCPUs(s state.State) cpuset.CPUSet {
return p.topology.CPUDetails.CPUs().Difference(p.reservedCPUs)
}
// GetAvailableCPUs returns the set of unassigned CPUs minus the reserved set.
func (p *staticPolicy) GetAvailableCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reservedCPUs)
}
func (p *staticPolicy) GetAvailablePhysicalCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reservedPhysicalCPUs)
}
func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, cset cpuset.CPUSet) {
// If entries for pods other than the current pod exist in p.cpusToReuse, delete them.
for podUID := range p.cpusToReuse {
if podUID != string(pod.UID) {
delete(p.cpusToReuse, podUID)
}
}
// If no cpuset exists for cpusToReuse by this pod yet, create one.
if _, ok := p.cpusToReuse[string(pod.UID)]; !ok {
p.cpusToReuse[string(pod.UID)] = cpuset.New()
}
// Check if the container is an init container.
// If so, add its cpuset to the cpuset of reusable CPUs for any new allocations.
for _, initContainer := range pod.Spec.InitContainers {
if container.Name == initContainer.Name {
if podutil.IsRestartableInitContainer(&initContainer) {
// If the container is a restartable init container, we should not
// reuse its cpuset, as a restartable init container can run with
// regular containers.
break
}
p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Union(cset)
return
}
}
// Otherwise it is an app container.
// Remove its cpuset from the cpuset of reusable CPUs for any new allocations.
p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Difference(cset)
}
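// Illustrative example of the reuse bookkeeping above (CPU IDs are
// assumptions): CPUs released by a regular (non-restartable) init container
// become reusable by later containers of the same pod, and are removed from
// the reusable set again once an app container claims them:
//
//	p.updateCPUsToReuse(pod, initCtr, cpuset.New(1, 5)) // reusable: {1,5}
//	p.updateCPUsToReuse(pod, appCtr, cpuset.New(1, 5))  // reusable: {}
//
// where initCtr is listed in pod.Spec.InitContainers and appCtr is not.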
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
numCPUs := p.guaranteedCPUs(pod, container)
if numCPUs == 0 {
// container belongs in the shared pool (nothing to do; use default cpuset)
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
klog.V(2).InfoS("CPU Manager allocation skipped, pod is using pod-level resources which are not supported by the static CPU manager policy", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
klog.InfoS("Static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
// container belongs in an exclusively allocated pool
metrics.CPUManagerPinningRequestsTotal.Inc()
defer func() {
if rerr != nil {
metrics.CPUManagerPinningErrorsTotal.Inc()
if p.options.FullPhysicalCPUsOnly {
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc()
}
return
}
// TODO: move into updateMetricsOnAllocate
if p.options.FullPhysicalCPUsOnly {
// increment only if we know we allocate aligned resources
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc()
}
}()
if p.options.FullPhysicalCPUsOnly {
if (numCPUs % p.cpuGroupSize) != 0 {
// Since the CPU Manager has been enabled with strict SMT alignment requested, a guaranteed pod can only be admitted
// if the number of CPUs requested is a multiple of the number of virtual CPUs per physical core.
// If the CPU request is not such a multiple, the pod is put in the Failed state with SMTAlignmentError as the reason.
// Since the allocation happens in terms of physical cores, and the scheduler is responsible for ensuring that the
// workload goes to a node that has enough CPUs, the pod is placed on a node with enough physical cores available
// to be allocated. Just like the default static policy behaviour, takeByTopology first tries to allocate CPUs from
// the same socket, and only when the request cannot be satisfied on a single socket does the allocation span sockets;
// the workload always occupies all CPUs of each physical core, so allocation of individual threads never has to occur.
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: p.cpuGroupSize,
CausedByPhysicalCPUs: false,
}
}
availablePhysicalCPUs := p.GetAvailablePhysicalCPUs(s).Size()
// It's legal to reserve CPUs which are not core siblings. In that case the CPU allocator can descend to single cores
// when picking CPUs, which would void the FullPhysicalCPUsOnly guarantee. To prevent this, we additionally treat
// all core siblings of the reserved CPUs as unavailable when computing the free CPUs, before starting the actual allocation.
// This way, by construction, every possible CPU allocation whose size is a multiple of the SMT level is correct again.
if numCPUs > availablePhysicalCPUs {
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: p.cpuGroupSize,
AvailablePhysicalCPUs: availablePhysicalCPUs,
CausedByPhysicalCPUs: true,
}
}
}
if cset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
p.updateCPUsToReuse(pod, container, cset)
klog.InfoS("Static policy: container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name)
return nil
}
// Call Topology Manager to get the aligned socket affinity across all hint providers.
hint := p.affinity.GetAffinity(string(pod.UID), container.Name)
klog.InfoS("Topology Affinity", "pod", klog.KObj(pod), "containerName", container.Name, "affinity", hint)
// Allocate CPUs according to the NUMA affinity contained in the hint.
cpuAllocation, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)])
if err != nil {
klog.ErrorS(err, "Unable to allocate CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "numCPUs", numCPUs)
return err
}
s.SetCPUSet(string(pod.UID), container.Name, cpuAllocation.CPUs)
p.updateCPUsToReuse(pod, container, cpuAllocation.CPUs)
p.updateMetricsOnAllocate(s, cpuAllocation)
klog.V(4).InfoS("Allocated exclusive CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpuset", cpuAllocation.CPUs.String())
return nil
}
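// Worked example of the SMT alignment check above (illustrative): with 2
// hyperthreads per physical core, cpuGroupSize is 2, so under
// FullPhysicalCPUsOnly a request for 3 CPUs fails admission with an
// SMTAlignmentError (3 % 2 != 0), while a request for 4 CPUs is satisfied
// with two whole physical cores.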
// getAssignedCPUsOfSiblings returns the CPUs assigned to the given container's siblings (all containers other than the given container) in the pod `podUID`.
func getAssignedCPUsOfSiblings(s state.State, podUID string, containerName string) cpuset.CPUSet {
assignments := s.GetCPUAssignments()
cset := cpuset.New()
for name, cpus := range assignments[podUID] {
if containerName == name {
continue
}
cset = cset.Union(cpus)
}
return cset
}
func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
klog.InfoS("Static policy: RemoveContainer", "podUID", podUID, "containerName", containerName)
cpusInUse := getAssignedCPUsOfSiblings(s, podUID, containerName)
if toRelease, ok := s.GetCPUSet(podUID, containerName); ok {
s.Delete(podUID, containerName)
// Mutate the shared pool, adding released cpus.
toRelease = toRelease.Difference(cpusInUse)
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
p.updateMetricsOnRelease(s, toRelease)
}
return nil
}
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (topology.Allocation, error) {
klog.InfoS("AllocateCPUs", "numCPUs", numCPUs, "socket", numaAffinity)
allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs)
// If there are aligned CPUs in numaAffinity, attempt to take those first.
result := topology.EmptyAllocation()
if numaAffinity != nil {
alignedCPUs := p.getAlignedCPUs(numaAffinity, allocatableCPUs)
numAlignedToAlloc := alignedCPUs.Size()
if numCPUs < numAlignedToAlloc {
numAlignedToAlloc = numCPUs
}
allocatedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc)
if err != nil {
return topology.EmptyAllocation(), err
}
result.CPUs = result.CPUs.Union(allocatedCPUs)
}
// Get any remaining CPUs from what's leftover after attempting to grab aligned ones.
remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result.CPUs), numCPUs-result.CPUs.Size())
if err != nil {
return topology.EmptyAllocation(), err
}
result.CPUs = result.CPUs.Union(remainingCPUs)
result.Aligned = p.topology.CheckAlignment(result.CPUs)
// Remove allocated CPUs from the shared CPUSet.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result.CPUs))
klog.InfoS("AllocateCPUs", "result", result.String())
return result, nil
}
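// Worked example (illustrative, hypothetical two-node topology): for
// numCPUs=4 with a hint covering only NUMA node 0, where node 0 has 2 free
// CPUs, the first takeByTopology pass takes those 2 aligned CPUs, and the
// second pass takes the remaining 2 CPUs from what is left elsewhere, so the
// resulting allocation contains 2 aligned and 2 unaligned CPUs.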
func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
qos := v1qos.GetPodQOS(pod)
if qos != v1.PodQOSGuaranteed {
klog.V(5).InfoS("Exclusive CPU allocation skipped, pod QoS is not guaranteed", "pod", klog.KObj(pod), "containerName", container.Name, "qos", qos)
return 0
}
cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
cpuValue := cpuQuantity.Value()
if cpuValue*1000 != cpuQuantity.MilliValue() {
klog.V(5).InfoS("Exclusive CPU allocation skipped, pod requested non-integral CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpu", cpuValue)
return 0
}
// Safe downcast to do for all systems with < 2.1 billion CPUs.
// Per the language spec, `int` is guaranteed to be at least 32 bits wide.
// https://golang.org/ref/spec#Numeric_types
return int(cpuQuantity.Value())
}
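// Illustrative examples of the integral-request check above (quantities are
// assumptions):
//
//	resource.MustParse("2")     // Value()=2, MilliValue()=2000 -> 2 exclusive CPUs
//	resource.MustParse("1500m") // Value()=2, MilliValue()=1500 -> 0, shared pool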
func (p *staticPolicy) podGuaranteedCPUs(pod *v1.Pod) int {
// The maximum CPUs requested by init containers.
requestedByInitContainers := 0
requestedByRestartableInitContainers := 0
for _, container := range pod.Spec.InitContainers {
if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok {
continue
}
requestedCPU := p.guaranteedCPUs(pod, &container)
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for the detail.
if podutil.IsRestartableInitContainer(&container) {
requestedByRestartableInitContainers += requestedCPU
} else if requestedByRestartableInitContainers+requestedCPU > requestedByInitContainers {
requestedByInitContainers = requestedByRestartableInitContainers + requestedCPU
}
}
// The sum of CPUs requested by app containers.
requestedByAppContainers := 0
for _, container := range pod.Spec.Containers {
if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok {
continue
}
requestedByAppContainers += p.guaranteedCPUs(pod, &container)
}
requestedByLongRunningContainers := requestedByAppContainers + requestedByRestartableInitContainers
if requestedByInitContainers > requestedByLongRunningContainers {
return requestedByInitContainers
}
return requestedByLongRunningContainers
}
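// Worked example of the calculation above (illustrative; see the sidecar KEP
// linked in the loop): for init containers running in the order
// [init1: 4, init2: 2, sidecar (restartable): 1] and app containers
// requesting 2+2 CPUs:
//
//	requestedByInitContainers            = max(0+4, 0+2) = 4
//	requestedByRestartableInitContainers = 1
//	requestedByAppContainers             = 2 + 2 = 4
//	result = max(4, 4+1) = 5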
func (p *staticPolicy) takeByTopology(availableCPUs cpuset.CPUSet, numCPUs int) (cpuset.CPUSet, error) {
cpuSortingStrategy := CPUSortingStrategyPacked
if p.options.DistributeCPUsAcrossCores {
cpuSortingStrategy = CPUSortingStrategySpread
}
if p.options.DistributeCPUsAcrossNUMA {
cpuGroupSize := 1
if p.options.FullPhysicalCPUsOnly {
cpuGroupSize = p.cpuGroupSize
}
return takeByTopologyNUMADistributed(p.topology, availableCPUs, numCPUs, cpuGroupSize, cpuSortingStrategy)
}
return takeByTopologyNUMAPacked(p.topology, availableCPUs, numCPUs, cpuSortingStrategy, p.options.PreferAlignByUncoreCacheOption)
}
func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
// Get a count of how many guaranteed CPUs have been requested.
requested := p.guaranteedCPUs(pod, container)
// A requested value of zero means the number of required CPUs is not an integer,
// or the container is not part of the Guaranteed QoS class. The TopologyManager
// will treat this as having no preference and ignore this resource when
// considering pod alignment.
// In terms of hints, this is equal to: TopologyHints[NUMANodeAffinity: nil, Preferred: true].
if requested == 0 {
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
klog.V(3).InfoS("CPU Manager hint generation skipped, pod is using pod-level resources which are not supported by the static CPU manager policy", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
// Short circuit to regenerate the same hints if there are already
// guaranteed CPUs allocated to the Container. This might happen after a
// kubelet restart, for example.
if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists {
if allocated.Size() != requested {
klog.InfoS("CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "requestedSize", requested, "allocatedSize", allocated.Size())
// An empty list of hints will be treated as a preference that cannot be satisfied.
// In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false].
// For all but the best-effort policy, the Topology Manager will throw a pod-admission error.
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): {},
}
}
klog.InfoS("Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod), "containerName", container.Name)
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, cpuset.CPUSet{}, requested),
}
}
// Get a list of available CPUs.
available := p.GetAvailableCPUs(s)
// Get a list of reusable CPUs (e.g. CPUs reused from initContainers).
// It should be an empty CPUSet for a newly created pod.
reusable := p.cpusToReuse[string(pod.UID)]
// Generate hints.
cpuHints := p.generateCPUTopologyHints(available, reusable, requested)
klog.InfoS("TopologyHints generated", "pod", klog.KObj(pod), "containerName", container.Name, "cpuHints", cpuHints)
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): cpuHints,
}
}
func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
// Get a count of how many guaranteed CPUs have been requested by Pod.
requested := p.podGuaranteedCPUs(pod)
// A requested value of zero means the number of required CPUs is not an integer,
// or the pod is not part of the Guaranteed QoS class. The TopologyManager
// will treat this as having no preference and ignore this resource when
// considering pod alignment.
// In terms of hints, this is equal to: TopologyHints[NUMANodeAffinity: nil, Preferred: true].
if requested == 0 {
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
klog.V(3).InfoS("CPU Manager pod hint generation skipped, pod is using pod-level resources which are not supported by the static CPU manager policy", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
assignedCPUs := cpuset.New()
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
requestedByContainer := p.guaranteedCPUs(pod, &container)
// Short circuit to regenerate the same hints if there are already
// guaranteed CPUs allocated to the Container. This might happen after a
// kubelet restart, for example.
if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists {
if allocated.Size() != requestedByContainer {
klog.InfoS("CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "allocatedSize", requested, "requestedByContainer", requestedByContainer, "allocatedSize", allocated.Size())
// An empty list of hints will be treated as a preference that cannot be satisfied.
// In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false].
// For all but the best-effort policy, the Topology Manager will throw a pod-admission error.
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): {},
}
}
// A set of CPUs already assigned to containers in this pod
assignedCPUs = assignedCPUs.Union(allocated)
}
}
if assignedCPUs.Size() == requested {
klog.InfoS("Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod))
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): p.generateCPUTopologyHints(assignedCPUs, cpuset.CPUSet{}, requested),
}
}
// Get a list of available CPUs.
available := p.GetAvailableCPUs(s)
// Get a list of reusable CPUs (e.g. CPUs reused from initContainers).
// It should be an empty CPUSet for a newly created pod.
reusable := p.cpusToReuse[string(pod.UID)]
// Ensure any CPUs already assigned to containers in this pod are included as part of the hint generation.
reusable = reusable.Union(assignedCPUs)
// Generate hints.
cpuHints := p.generateCPUTopologyHints(available, reusable, requested)
klog.InfoS("TopologyHints generated", "pod", klog.KObj(pod), "cpuHints", cpuHints)
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): cpuHints,
}
}
// generateCPUTopologyHints generates a set of TopologyHints given the set of
// available CPUs and the number of CPUs being requested.
//
// It follows the convention of marking all hints that have the same number of
// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and
// marking all others with 'Preferred: false'.
func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reusableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes.
minAffinitySize := p.topology.CPUDetails.NUMANodes().Size()
// Iterate through all combinations of numa nodes bitmask and build hints from them.
hints := []topologymanager.TopologyHint{}
bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().List(), func(mask bitmask.BitMask) {
// First, update minAffinitySize for the current request size.
cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size()
if cpusInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
}
// Then check to see if we have enough CPUs available on the current
// numa node bitmask to satisfy the CPU request.
numMatching := 0
for _, c := range reusableCPUs.List() {
// Disregard this mask if the NUMA node of any reusable CPU is not part of it.
if !mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
return
}
numMatching++
}
// Finally, check to see if enough available CPUs remain on the current
// NUMA node combination to satisfy the CPU request.
for _, c := range availableCPUs.List() {
if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
numMatching++
}
}
// If they don't, then move onto the next combination.
if numMatching < request {
return
}
// Otherwise, create a new hint from the numa node bitmask and add it to the
// list of hints. We set all hint preferences to 'false' on the first
// pass through.
hints = append(hints, topologymanager.TopologyHint{
NUMANodeAffinity: mask,
Preferred: false,
})
})
// Loop back through all hints and update the 'Preferred' field based on
// counting the number of bits sets in the affinity mask and comparing it
// to the minAffinitySize. Only those with an equal number of bits set (and
// with a minimal set of numa nodes) will be considered preferred.
for i := range hints {
if p.options.AlignBySocket && p.isHintSocketAligned(hints[i], minAffinitySize) {
hints[i].Preferred = true
continue
}
if hints[i].NUMANodeAffinity.Count() == minAffinitySize {
hints[i].Preferred = true
}
}
return hints
}
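// Worked example (illustrative, hypothetical topology): with 2 NUMA nodes of
// 4 free CPUs each, no reusable CPUs, and request=4, the masks {0}, {1} and
// {0,1} all satisfy the request, minAffinitySize becomes 1, and the hints
// come out as:
//
//	{NUMANodeAffinity: {0},   Preferred: true}
//	{NUMANodeAffinity: {1},   Preferred: true}
//	{NUMANodeAffinity: {0,1}, Preferred: false} // wider than the minimum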
// isHintSocketAligned returns true if the NUMA nodes in the hint are socket-aligned.
func (p *staticPolicy) isHintSocketAligned(hint topologymanager.TopologyHint, minAffinitySize int) bool {
numaNodesBitMask := hint.NUMANodeAffinity.GetBits()
numaNodesPerSocket := p.topology.NumNUMANodes / p.topology.NumSockets
if numaNodesPerSocket == 0 {
return false
}
// minSockets is the minimum number of sockets required to satisfy the allocation.
// A hint is considered socket-aligned if the number of sockets spanned by its NUMA nodes equals minSockets.
minSockets := (minAffinitySize + numaNodesPerSocket - 1) / numaNodesPerSocket
return p.topology.CPUDetails.SocketsInNUMANodes(numaNodesBitMask...).Size() == minSockets
}
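// Worked example of the minSockets ceiling division above (illustrative): on
// a hypothetical machine with 8 NUMA nodes and 2 sockets, numaNodesPerSocket
// is 4, so a hint with minAffinitySize=5 needs (5+4-1)/4 = 2 sockets and is
// socket-aligned only if its NUMA nodes span exactly 2 sockets.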
// getAlignedCPUs returns the set of aligned CPUs based on the NUMA affinity mask and the configured policy options.
func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableCPUs cpuset.CPUSet) cpuset.CPUSet {
alignedCPUs := cpuset.New()
numaBits := numaAffinity.GetBits()
// If the align-by-socket policy option is enabled, the NUMA-based hint is expanded
// to a socket-aligned hint. This ensures that socket-aligned available CPUs are
// allocated first, before we look for CPUs across sockets to satisfy the allocation request.
if p.options.AlignBySocket {
socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).UnsortedList()
for _, socketID := range socketBits {
alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInSockets(socketID)))
}
return alignedCPUs
}
for _, numaNodeID := range numaBits {
alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID)))
}
return alignedCPUs
}
func (p *staticPolicy) initializeMetrics(s state.State) {
metrics.CPUManagerSharedPoolSizeMilliCores.Set(float64(p.GetAvailableCPUs(s).Size() * 1000))
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Add(0) // ensure the value exists
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
metrics.CPUManagerExclusiveCPUsAllocationCount.Set(float64(totalAssignedCPUs.Size()))
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs)
}
func (p *staticPolicy) updateMetricsOnAllocate(s state.State, cpuAlloc topology.Allocation) {
ncpus := cpuAlloc.CPUs.Size()
metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(ncpus))
metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(-ncpus * 1000))
if cpuAlloc.Aligned.UncoreCache {
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Inc()
}
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs)
}
func (p *staticPolicy) updateMetricsOnRelease(s state.State, cset cpuset.CPUSet) {
ncpus := cset.Size()
metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(-ncpus))
metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(ncpus * 1000))
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs.Difference(cset))
}
func getTotalAssignedExclusiveCPUs(s state.State) cpuset.CPUSet {
totalAssignedCPUs := cpuset.New()
for _, assignment := range s.GetCPUAssignments() {
for _, cset := range assignment {
totalAssignedCPUs = totalAssignedCPUs.Union(cset)
}
}
return totalAssignedCPUs
}
func updateAllocationPerNUMAMetric(topo *topology.CPUTopology, allocatedCPUs cpuset.CPUSet) {
numaCount := make(map[int]int)
// Count CPUs allocated per NUMA node
for _, cpuID := range allocatedCPUs.UnsortedList() {
numaNode, err := topo.CPUNUMANodeID(cpuID)
if err != nil {
// NOTE: We log the error, but it is highly unlikely to occur because the CPU set
// has already been computed and validated, leaving no room for user tampering.
klog.ErrorS(err, "Unable to determine NUMA node", "cpuID", cpuID)
// Skip this CPU rather than counting it under the -1 sentinel node ID.
continue
}
numaCount[numaNode]++
}
// Update metric
for numaNode, count := range numaCount {
metrics.CPUManagerAllocationPerNUMA.WithLabelValues(strconv.Itoa(numaNode)).Set(float64(count))
}
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"encoding/json"
"fmt"
"hash/fnv"
"strings"
"k8s.io/apimachinery/pkg/util/dump"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)
var _ checkpointmanager.Checkpoint = &CPUManagerCheckpointV1{}
var _ checkpointmanager.Checkpoint = &CPUManagerCheckpointV2{}
var _ checkpointmanager.Checkpoint = &CPUManagerCheckpoint{}
// CPUManagerCheckpoint struct is used to store cpu/pod assignments in a checkpoint in v2 format
type CPUManagerCheckpoint struct {
PolicyName string `json:"policyName"`
DefaultCPUSet string `json:"defaultCpuSet"`
Entries map[string]map[string]string `json:"entries,omitempty"`
Checksum checksum.Checksum `json:"checksum"`
}
// CPUManagerCheckpointV1 struct is used to store cpu/pod assignments in a checkpoint in v1 format
type CPUManagerCheckpointV1 struct {
PolicyName string `json:"policyName"`
DefaultCPUSet string `json:"defaultCpuSet"`
Entries map[string]string `json:"entries,omitempty"`
Checksum checksum.Checksum `json:"checksum"`
}
// CPUManagerCheckpointV2 struct is used to store cpu/pod assignments in a checkpoint in v2 format
type CPUManagerCheckpointV2 = CPUManagerCheckpoint
// NewCPUManagerCheckpoint returns an instance of Checkpoint
func NewCPUManagerCheckpoint() *CPUManagerCheckpoint {
//nolint:staticcheck // unexported-type-in-api user-facing error message
return newCPUManagerCheckpointV2()
}
func newCPUManagerCheckpointV1() *CPUManagerCheckpointV1 {
return &CPUManagerCheckpointV1{
Entries: make(map[string]string),
}
}
func newCPUManagerCheckpointV2() *CPUManagerCheckpointV2 {
return &CPUManagerCheckpointV2{
Entries: make(map[string]map[string]string),
}
}
// MarshalCheckpoint returns marshalled checkpoint in v1 format
func (cp *CPUManagerCheckpointV1) MarshalCheckpoint() ([]byte, error) {
// make sure checksum wasn't set before so it doesn't affect output checksum
cp.Checksum = 0
cp.Checksum = checksum.New(cp)
return json.Marshal(*cp)
}
// MarshalCheckpoint returns marshalled checkpoint in v2 format
func (cp *CPUManagerCheckpointV2) MarshalCheckpoint() ([]byte, error) {
// make sure checksum wasn't set before so it doesn't affect output checksum
cp.Checksum = 0
cp.Checksum = checksum.New(cp)
return json.Marshal(*cp)
}
// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint in v1 format
func (cp *CPUManagerCheckpointV1) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, cp)
}
// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint in v2 format
func (cp *CPUManagerCheckpointV2) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, cp)
}
// VerifyChecksum verifies that current checksum of checkpoint is valid in v1 format
func (cp *CPUManagerCheckpointV1) VerifyChecksum() error {
if cp.Checksum == 0 {
// accept empty checksum for compatibility with old file backend
return nil
}
ck := cp.Checksum
cp.Checksum = 0
object := dump.ForHash(cp)
object = strings.Replace(object, "CPUManagerCheckpointV1", "CPUManagerCheckpoint", 1)
cp.Checksum = ck
hash := fnv.New32a()
fmt.Fprintf(hash, "%v", object)
actualCS := checksum.Checksum(hash.Sum32())
if cp.Checksum != actualCS {
return &errors.CorruptCheckpointError{
ActualCS: uint64(actualCS),
ExpectedCS: uint64(cp.Checksum),
}
}
return nil
}
// VerifyChecksum verifies that current checksum of checkpoint is valid in v2 format
func (cp *CPUManagerCheckpointV2) VerifyChecksum() error {
if cp.Checksum == 0 {
// accept empty checksum for compatibility with old file backend
return nil
}
ck := cp.Checksum
cp.Checksum = 0
err := ck.Verify(cp)
cp.Checksum = ck
return err
}
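// Illustrative round trip (not part of the original source): MarshalCheckpoint
// computes the checksum with the Checksum field zeroed, so an untampered
// marshal/unmarshal/verify sequence succeeds:
//
//	cp := NewCPUManagerCheckpoint()
//	cp.PolicyName = "static"
//	blob, _ := cp.MarshalCheckpoint()
//	restored := NewCPUManagerCheckpoint()
//	_ = restored.UnmarshalCheckpoint(blob)
//	err := restored.VerifyChecksum() // nil; non-nil if blob had been edited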
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"k8s.io/utils/cpuset"
)
// ContainerCPUAssignments type used in cpu manager state
type ContainerCPUAssignments map[string]map[string]cpuset.CPUSet
// Clone returns a copy of ContainerCPUAssignments
func (as ContainerCPUAssignments) Clone() ContainerCPUAssignments {
ret := make(ContainerCPUAssignments, len(as))
for pod := range as {
ret[pod] = make(map[string]cpuset.CPUSet, len(as[pod]))
for container, cset := range as[pod] {
ret[pod][container] = cset
}
}
return ret
}
// Reader interface used to read current cpu/pod assignment state
type Reader interface {
GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool)
GetDefaultCPUSet() cpuset.CPUSet
GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet
GetCPUAssignments() ContainerCPUAssignments
}
type writer interface {
SetCPUSet(podUID string, containerName string, cpuset cpuset.CPUSet)
SetDefaultCPUSet(cpuset cpuset.CPUSet)
SetCPUAssignments(ContainerCPUAssignments)
Delete(podUID string, containerName string)
ClearState()
}
// State interface provides methods for tracking and setting cpu/pod assignment
type State interface {
Reader
writer
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"path/filepath"
"sync"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/utils/cpuset"
)
var _ State = &stateCheckpoint{}
type stateCheckpoint struct {
mux sync.RWMutex
policyName string
cache State
checkpointManager checkpointmanager.CheckpointManager
checkpointName string
initialContainers containermap.ContainerMap
}
// NewCheckpointState creates new State for keeping track of cpu/pod assignment with checkpoint backend
func NewCheckpointState(stateDir, checkpointName, policyName string, initialContainers containermap.ContainerMap) (State, error) {
checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
}
stateCheckpoint := &stateCheckpoint{
cache: NewMemoryState(),
policyName: policyName,
checkpointManager: checkpointManager,
checkpointName: checkpointName,
initialContainers: initialContainers,
}
if err := stateCheckpoint.restoreState(); err != nil {
//nolint:staticcheck // ST1005 user-facing error message
return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the CPU manager checkpoint file %q before restarting Kubelet",
err, filepath.Join(stateDir, checkpointName))
}
return stateCheckpoint, nil
}
// migrateV1CheckpointToV2Checkpoint() converts checkpoints from the v1 format to the v2 format
func (sc *stateCheckpoint) migrateV1CheckpointToV2Checkpoint(src *CPUManagerCheckpointV1, dst *CPUManagerCheckpointV2) error {
if src.PolicyName != "" {
dst.PolicyName = src.PolicyName
}
if src.DefaultCPUSet != "" {
dst.DefaultCPUSet = src.DefaultCPUSet
}
for containerID, cset := range src.Entries {
podUID, containerName, err := sc.initialContainers.GetContainerRef(containerID)
if err != nil {
return fmt.Errorf("containerID '%v' not found in initial containers list", containerID)
}
if dst.Entries == nil {
dst.Entries = make(map[string]map[string]string)
}
if _, exists := dst.Entries[podUID]; !exists {
dst.Entries[podUID] = make(map[string]string)
}
dst.Entries[podUID][containerName] = cset
}
return nil
}
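// Illustrative example of the migration above (IDs are assumptions): v1
// entries are keyed by containerID alone, so the initial containers map is
// consulted to recover the pod UID and container name for the v2 layout:
//
//	v1.Entries = {"cid-1": "2-3"}
//	// initialContainers maps "cid-1" -> (podUID "p1", container "app")
//	v2.Entries = {"p1": {"app": "2-3"}}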
// restores state from a checkpoint and creates it if it doesn't exist
func (sc *stateCheckpoint) restoreState() error {
sc.mux.Lock()
defer sc.mux.Unlock()
var err error
checkpointV1 := newCPUManagerCheckpointV1()
checkpointV2 := newCPUManagerCheckpointV2()
if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpointV1); err != nil {
checkpointV1 = &CPUManagerCheckpointV1{} // reset it back to 0
if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpointV2); err != nil {
if err == errors.ErrCheckpointNotFound {
return sc.storeState()
}
return err
}
}
if err = sc.migrateV1CheckpointToV2Checkpoint(checkpointV1, checkpointV2); err != nil {
return fmt.Errorf("error migrating v1 checkpoint state to v2 checkpoint state: %s", err)
}
if sc.policyName != checkpointV2.PolicyName {
return fmt.Errorf("configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpointV2.PolicyName)
}
var tmpDefaultCPUSet cpuset.CPUSet
if tmpDefaultCPUSet, err = cpuset.Parse(checkpointV2.DefaultCPUSet); err != nil {
return fmt.Errorf("could not parse default cpu set %q: %v", checkpointV2.DefaultCPUSet, err)
}
var tmpContainerCPUSet cpuset.CPUSet
tmpAssignments := ContainerCPUAssignments{}
for pod := range checkpointV2.Entries {
tmpAssignments[pod] = make(map[string]cpuset.CPUSet, len(checkpointV2.Entries[pod]))
for container, cpuString := range checkpointV2.Entries[pod] {
if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil {
return fmt.Errorf("could not parse cpuset %q for container %q in pod %q: %v", cpuString, container, pod, err)
}
tmpAssignments[pod][container] = tmpContainerCPUSet
}
}
sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet)
sc.cache.SetCPUAssignments(tmpAssignments)
klog.V(2).InfoS("State checkpoint: restored state from checkpoint")
klog.V(2).InfoS("State checkpoint: defaultCPUSet", "defaultCpuSet", tmpDefaultCPUSet.String())
return nil
}
// saves state to a checkpoint, caller is responsible for locking
func (sc *stateCheckpoint) storeState() error {
checkpoint := NewCPUManagerCheckpoint()
checkpoint.PolicyName = sc.policyName
checkpoint.DefaultCPUSet = sc.cache.GetDefaultCPUSet().String()
assignments := sc.cache.GetCPUAssignments()
for pod := range assignments {
checkpoint.Entries[pod] = make(map[string]string, len(assignments[pod]))
for container, cset := range assignments[pod] {
checkpoint.Entries[pod][container] = cset.String()
}
}
err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
if err != nil {
klog.ErrorS(err, "Failed to save checkpoint")
return err
}
return nil
}
// GetCPUSet returns current CPU set
func (sc *stateCheckpoint) GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) {
sc.mux.RLock()
defer sc.mux.RUnlock()
res, ok := sc.cache.GetCPUSet(podUID, containerName)
return res, ok
}
// GetDefaultCPUSet returns default CPU set
func (sc *stateCheckpoint) GetDefaultCPUSet() cpuset.CPUSet {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetDefaultCPUSet()
}
// GetCPUSetOrDefault returns current CPU set, or default one if it wasn't changed
func (sc *stateCheckpoint) GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetCPUSetOrDefault(podUID, containerName)
}
// GetCPUAssignments returns current CPU to pod assignments
func (sc *stateCheckpoint) GetCPUAssignments() ContainerCPUAssignments {
sc.mux.RLock()
defer sc.mux.RUnlock()
return sc.cache.GetCPUAssignments()
}
// SetCPUSet sets CPU set
func (sc *stateCheckpoint) SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet) {
sc.mux.Lock()
defer sc.mux.Unlock()
sc.cache.SetCPUSet(podUID, containerName, cset)
err := sc.storeState()
if err != nil {
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
}
}
// SetDefaultCPUSet sets default CPU set
func (sc *stateCheckpoint) SetDefaultCPUSet(cset cpuset.CPUSet) {
sc.mux.Lock()
defer sc.mux.Unlock()
sc.cache.SetDefaultCPUSet(cset)
err := sc.storeState()
if err != nil {
klog.ErrorS(err, "Failed to store state to checkpoint")
}
}
// SetCPUAssignments sets CPU to pod assignments
func (sc *stateCheckpoint) SetCPUAssignments(a ContainerCPUAssignments) {
sc.mux.Lock()
defer sc.mux.Unlock()
sc.cache.SetCPUAssignments(a)
err := sc.storeState()
if err != nil {
klog.ErrorS(err, "Failed to store state to checkpoint")
}
}
// Delete deletes assignment for specified pod
func (sc *stateCheckpoint) Delete(podUID string, containerName string) {
sc.mux.Lock()
defer sc.mux.Unlock()
sc.cache.Delete(podUID, containerName)
err := sc.storeState()
if err != nil {
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
}
}
// ClearState clears the state and saves it in a checkpoint
func (sc *stateCheckpoint) ClearState() {
sc.mux.Lock()
defer sc.mux.Unlock()
sc.cache.ClearState()
err := sc.storeState()
if err != nil {
klog.ErrorS(err, "Failed to store state to checkpoint")
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"sync"
"k8s.io/klog/v2"
"k8s.io/utils/cpuset"
)
type stateMemory struct {
sync.RWMutex
assignments ContainerCPUAssignments
defaultCPUSet cpuset.CPUSet
}
var _ State = &stateMemory{}
// NewMemoryState creates new State for keeping track of cpu/pod assignment
func NewMemoryState() State {
klog.InfoS("Initialized new in-memory state store")
return &stateMemory{
assignments: ContainerCPUAssignments{},
defaultCPUSet: cpuset.New(),
}
}
func (s *stateMemory) GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) {
s.RLock()
defer s.RUnlock()
res, ok := s.assignments[podUID][containerName]
return res.Clone(), ok
}
func (s *stateMemory) GetDefaultCPUSet() cpuset.CPUSet {
s.RLock()
defer s.RUnlock()
return s.defaultCPUSet.Clone()
}
func (s *stateMemory) GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet {
if res, ok := s.GetCPUSet(podUID, containerName); ok {
return res
}
return s.GetDefaultCPUSet()
}
func (s *stateMemory) GetCPUAssignments() ContainerCPUAssignments {
s.RLock()
defer s.RUnlock()
return s.assignments.Clone()
}
func (s *stateMemory) SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet) {
s.Lock()
defer s.Unlock()
if _, ok := s.assignments[podUID]; !ok {
s.assignments[podUID] = make(map[string]cpuset.CPUSet)
}
s.assignments[podUID][containerName] = cset
klog.InfoS("Updated desired CPUSet", "podUID", podUID, "containerName", containerName, "cpuSet", cset)
}
func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) {
s.Lock()
defer s.Unlock()
s.defaultCPUSet = cset
klog.InfoS("Updated default CPUSet", "cpuSet", cset)
}
func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) {
s.Lock()
defer s.Unlock()
s.assignments = a.Clone()
klog.InfoS("Updated CPUSet assignments", "assignments", a)
}
func (s *stateMemory) Delete(podUID string, containerName string) {
s.Lock()
defer s.Unlock()
delete(s.assignments[podUID], containerName)
if len(s.assignments[podUID]) == 0 {
delete(s.assignments, podUID)
}
klog.V(2).InfoS("Deleted CPUSet assignment", "podUID", podUID, "containerName", containerName)
}
func (s *stateMemory) ClearState() {
s.Lock()
defer s.Unlock()
s.defaultCPUSet = cpuset.CPUSet{}
s.assignments = make(ContainerCPUAssignments)
klog.V(2).InfoS("Cleared state")
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topology
import (
"fmt"
"k8s.io/utils/cpuset"
)
// Alignment is metadata about a cpuset allocation
type Alignment struct {
// UncoreCache is true if all the CPUs are uncore-cache aligned,
// in other words if they all share the same uncore cache block.
// If the allocated CPU count is greater than an uncore group size,
// the CPUs can't be uncore-aligned; otherwise, they are.
// This flag tracks alignment, not interference or lack thereof.
UncoreCache bool
}
func (ca Alignment) String() string {
return fmt.Sprintf("aligned=<uncore:%v>", ca.UncoreCache)
}
// Allocation represents a CPU set plus alignment metadata
type Allocation struct {
CPUs cpuset.CPUSet
Aligned Alignment
}
func (ca Allocation) String() string {
return ca.CPUs.String() + " " + ca.Aligned.String()
}
// EmptyAllocation returns a new zero-valued CPU allocation. Note that an
// empty cpuset is considered aligned in every possible way.
func EmptyAllocation() Allocation {
return Allocation{
CPUs: cpuset.New(),
Aligned: Alignment{
UncoreCache: true,
},
}
}
func isAlignedAtUncoreCache(topo *CPUTopology, cpuList ...int) bool {
if len(cpuList) <= 1 {
return true
}
reference, ok := topo.CPUDetails[cpuList[0]]
if !ok {
return false
}
for _, cpu := range cpuList[1:] {
info, ok := topo.CPUDetails[cpu]
if !ok {
return false
}
if info.UncoreCacheID != reference.UncoreCacheID {
return false
}
}
return true
}
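// Illustrative example (hypothetical CPUDetails): two CPUs sharing
// UncoreCacheID 0 are aligned, and adding a CPU from another cache block
// breaks the alignment:
//
//	isAlignedAtUncoreCache(topo, 0, 1)    // true if 0 and 1 share a cache block
//	isAlignedAtUncoreCache(topo, 0, 1, 8) // false if CPU 8 is on another block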
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topology
import (
"fmt"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2"
"k8s.io/utils/cpuset"
)
// NUMANodeInfo is a map from NUMANode ID to a list of CPU IDs associated with
// that NUMANode.
type NUMANodeInfo map[int]cpuset.CPUSet
// CPUDetails is a map from CPU ID to Core ID, Socket ID, and NUMA ID.
type CPUDetails map[int]CPUInfo
// CPUTopology contains details of node CPUs, where:
// CPU - logical CPU, cadvisor - thread
// Core - physical CPU, cadvisor - Core
// Socket - socket, cadvisor - Socket
// NUMA Node - NUMA cell, cadvisor - Node
// UncoreCache - Split L3 Cache Topology, cadvisor
type CPUTopology struct {
NumCPUs int
NumCores int
NumUncoreCache int
NumSockets int
NumNUMANodes int
CPUDetails CPUDetails
}
// CPUsPerCore returns the number of logical CPUs that are associated with
// each core.
func (topo *CPUTopology) CPUsPerCore() int {
if topo.NumCores == 0 {
return 0
}
return topo.NumCPUs / topo.NumCores
}
// CPUsPerSocket returns the number of logical CPUs that are associated with
// each socket.
func (topo *CPUTopology) CPUsPerSocket() int {
if topo.NumSockets == 0 {
return 0
}
return topo.NumCPUs / topo.NumSockets
}
// CPUsPerUncore returns the number of logical CPUs that are associated with
// each UncoreCache
func (topo *CPUTopology) CPUsPerUncore() int {
if topo.NumUncoreCache == 0 {
return 0
}
return topo.NumCPUs / topo.NumUncoreCache
}
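// Worked example for the three ratios above (illustrative, hypothetical
// topology): a 2-socket machine with 32 logical CPUs, 16 physical cores and
// 4 uncore cache blocks yields:
//
//	CPUsPerCore()   = 32 / 16 = 2 (SMT-2)
//	CPUsPerSocket() = 32 / 2  = 16
//	CPUsPerUncore() = 32 / 4  = 8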
// CPUCoreID returns the physical core ID which the given logical CPU
// belongs to.
func (topo *CPUTopology) CPUCoreID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.CoreID, nil
}
// CPUSocketID returns the socket ID which the given logical CPU belongs to.
func (topo *CPUTopology) CPUSocketID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.SocketID, nil
}
// CPUNUMANodeID returns the NUMA node ID which the given logical CPU belongs to.
func (topo *CPUTopology) CPUNUMANodeID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.NUMANodeID, nil
}
// CheckAlignment returns alignment information for the given cpuset in
// the context of the current CPU topology
func (topo *CPUTopology) CheckAlignment(cpus cpuset.CPUSet) Alignment {
cpuList := cpus.UnsortedList()
return Alignment{
UncoreCache: isAlignedAtUncoreCache(topo, cpuList...),
}
}
// CPUInfo contains the NUMA, socket, UncoreCache and core IDs associated with a CPU.
type CPUInfo struct {
NUMANodeID int
SocketID int
CoreID int
UncoreCacheID int
}
// KeepOnly returns a new CPUDetails object with only the supplied cpus.
func (d CPUDetails) KeepOnly(cpus cpuset.CPUSet) CPUDetails {
result := CPUDetails{}
for cpu, info := range d {
if cpus.Contains(cpu) {
result[cpu] = info
}
}
return result
}
// UncoreCaches returns all of the uncore cache IDs (L3 indexes) associated with the CPUs in this CPUDetails.
func (d CPUDetails) UncoreCaches() cpuset.CPUSet {
var numUnCoreIDs []int
for _, info := range d {
numUnCoreIDs = append(numUnCoreIDs, info.UncoreCacheID)
}
return cpuset.New(numUnCoreIDs...)
}
// UncoreInNUMANodes returns all of the uncore cache IDs associated with the
// given NUMANode IDs in this CPUDetails.
func (d CPUDetails) UncoreInNUMANodes(ids ...int) cpuset.CPUSet {
var unCoreIDs []int
for _, id := range ids {
for _, info := range d {
if info.NUMANodeID == id {
unCoreIDs = append(unCoreIDs, info.UncoreCacheID)
}
}
}
return cpuset.New(unCoreIDs...)
}
// CoresNeededInUncoreCache returns either the full list of all available unique core IDs associated with the given
// UncoreCache IDs in this CPUDetails, or a subset that matches the requested number of cores.
func (d CPUDetails) CoresNeededInUncoreCache(numCoresNeeded int, ids ...int) cpuset.CPUSet {
coreIDs := d.coresInUncoreCache(ids...)
if coreIDs.Size() <= numCoresNeeded {
return coreIDs
}
tmpCoreIDs := coreIDs.List()
return cpuset.New(tmpCoreIDs[:numCoresNeeded]...)
}
// coresInUncoreCache is a helper that returns the core IDs associated with the given uncore cache IDs.
func (d CPUDetails) coresInUncoreCache(ids ...int) cpuset.CPUSet {
var coreIDs []int
for _, id := range ids {
for _, info := range d {
if info.UncoreCacheID == id {
coreIDs = append(coreIDs, info.CoreID)
}
}
}
return cpuset.New(coreIDs...)
}
// CPUsInUncoreCaches returns all the logical CPU IDs associated with the given
// UnCoreCache IDs in this CPUDetails
func (d CPUDetails) CPUsInUncoreCaches(ids ...int) cpuset.CPUSet {
var cpuIDs []int
for _, id := range ids {
for cpu, info := range d {
if info.UncoreCacheID == id {
cpuIDs = append(cpuIDs, cpu)
}
}
}
return cpuset.New(cpuIDs...)
}
// NUMANodes returns all of the NUMANode IDs associated with the CPUs in this
// CPUDetails.
func (d CPUDetails) NUMANodes() cpuset.CPUSet {
var numaNodeIDs []int
for _, info := range d {
numaNodeIDs = append(numaNodeIDs, info.NUMANodeID)
}
return cpuset.New(numaNodeIDs...)
}
// NUMANodesInSockets returns all of the logical NUMANode IDs associated with
// the given socket IDs in this CPUDetails.
func (d CPUDetails) NUMANodesInSockets(ids ...int) cpuset.CPUSet {
var numaNodeIDs []int
for _, id := range ids {
for _, info := range d {
if info.SocketID == id {
numaNodeIDs = append(numaNodeIDs, info.NUMANodeID)
}
}
}
return cpuset.New(numaNodeIDs...)
}
// Sockets returns all of the socket IDs associated with the CPUs in this
// CPUDetails.
func (d CPUDetails) Sockets() cpuset.CPUSet {
var socketIDs []int
for _, info := range d {
socketIDs = append(socketIDs, info.SocketID)
}
return cpuset.New(socketIDs...)
}
// CPUsInSockets returns all of the logical CPU IDs associated with the given
// socket IDs in this CPUDetails.
func (d CPUDetails) CPUsInSockets(ids ...int) cpuset.CPUSet {
var cpuIDs []int
for _, id := range ids {
for cpu, info := range d {
if info.SocketID == id {
cpuIDs = append(cpuIDs, cpu)
}
}
}
return cpuset.New(cpuIDs...)
}
// SocketsInNUMANodes returns all of the logical Socket IDs associated with the
// given NUMANode IDs in this CPUDetails.
func (d CPUDetails) SocketsInNUMANodes(ids ...int) cpuset.CPUSet {
var socketIDs []int
for _, id := range ids {
for _, info := range d {
if info.NUMANodeID == id {
socketIDs = append(socketIDs, info.SocketID)
}
}
}
return cpuset.New(socketIDs...)
}
// Cores returns all of the core IDs associated with the CPUs in this
// CPUDetails.
func (d CPUDetails) Cores() cpuset.CPUSet {
var coreIDs []int
for _, info := range d {
coreIDs = append(coreIDs, info.CoreID)
}
return cpuset.New(coreIDs...)
}
// CoresInNUMANodes returns all of the core IDs associated with the given
// NUMANode IDs in this CPUDetails.
func (d CPUDetails) CoresInNUMANodes(ids ...int) cpuset.CPUSet {
var coreIDs []int
for _, id := range ids {
for _, info := range d {
if info.NUMANodeID == id {
coreIDs = append(coreIDs, info.CoreID)
}
}
}
return cpuset.New(coreIDs...)
}
// CoresInSockets returns all of the core IDs associated with the given socket
// IDs in this CPUDetails.
func (d CPUDetails) CoresInSockets(ids ...int) cpuset.CPUSet {
var coreIDs []int
for _, id := range ids {
for _, info := range d {
if info.SocketID == id {
coreIDs = append(coreIDs, info.CoreID)
}
}
}
return cpuset.New(coreIDs...)
}
// CPUs returns all of the logical CPU IDs in this CPUDetails.
func (d CPUDetails) CPUs() cpuset.CPUSet {
var cpuIDs []int
for cpuID := range d {
cpuIDs = append(cpuIDs, cpuID)
}
return cpuset.New(cpuIDs...)
}
// CPUsInNUMANodes returns all of the logical CPU IDs associated with the given
// NUMANode IDs in this CPUDetails.
func (d CPUDetails) CPUsInNUMANodes(ids ...int) cpuset.CPUSet {
var cpuIDs []int
for _, id := range ids {
for cpu, info := range d {
if info.NUMANodeID == id {
cpuIDs = append(cpuIDs, cpu)
}
}
}
return cpuset.New(cpuIDs...)
}
// CPUsInCores returns all of the logical CPU IDs associated with the given
// core IDs in this CPUDetails.
func (d CPUDetails) CPUsInCores(ids ...int) cpuset.CPUSet {
var cpuIDs []int
for _, id := range ids {
for cpu, info := range d {
if info.CoreID == id {
cpuIDs = append(cpuIDs, cpu)
}
}
}
return cpuset.New(cpuIDs...)
}
func getUncoreCacheID(core cadvisorapi.Core) int {
if len(core.UncoreCaches) < 1 {
// If cAdvisor reports no uncore caches, fall back to socket alignment, since the uncore cache is not shared.
return core.SocketID
}
// Even though the cAdvisor API returns a slice, we only expect either 0 or 1 uncore caches,
// so everything past the first entry is ignored.
return core.UncoreCaches[0].Id
}
// Discover returns CPUTopology based on cadvisor node info
func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
if machineInfo.NumCores == 0 {
return nil, fmt.Errorf("could not detect number of cpus")
}
CPUDetails := CPUDetails{}
numPhysicalCores := 0
for _, node := range machineInfo.Topology {
numPhysicalCores += len(node.Cores)
for _, core := range node.Cores {
if coreID, err := getUniqueCoreID(core.Threads); err == nil {
for _, cpu := range core.Threads {
CPUDetails[cpu] = CPUInfo{
CoreID: coreID,
SocketID: core.SocketID,
NUMANodeID: node.Id,
UncoreCacheID: getUncoreCacheID(core),
}
}
} else {
klog.ErrorS(nil, "Could not get unique coreID for socket", "socket", core.SocketID, "core", core.Id, "threads", core.Threads)
return nil, err
}
}
}
return &CPUTopology{
NumCPUs: machineInfo.NumCores,
NumSockets: machineInfo.NumSockets,
NumCores: numPhysicalCores,
NumNUMANodes: CPUDetails.NUMANodes().Size(),
NumUncoreCache: CPUDetails.UncoreCaches().Size(),
CPUDetails: CPUDetails,
}, nil
}
// getUniqueCoreID computes the coreID as the lowest cpuID
// in the given threads slice. This ensures that coreIDs are
// platform-unique (unlike what cAdvisor reports).
func getUniqueCoreID(threads []int) (coreID int, err error) {
if len(threads) == 0 {
return 0, fmt.Errorf("no cpus provided")
}
if len(threads) != cpuset.New(threads...).Size() {
return 0, fmt.Errorf("cpus provided are not unique")
}
min := threads[0]
for _, thread := range threads[1:] {
if thread < min {
min = thread
}
}
return min, nil
}
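// Illustrative example (hypothetical thread IDs): for a hyperthreaded core
// whose siblings are logical CPUs 3 and 11, the platform-unique core ID is
// the lowest thread ID:
//
//	coreID, _ := getUniqueCoreID([]int{3, 11}) // coreID == 3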
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package checkpoint
import (
"encoding/json"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)
// DeviceManagerCheckpoint defines the operations to retrieve pod devices
type DeviceManagerCheckpoint interface {
checkpointmanager.Checkpoint
GetData() ([]PodDevicesEntry, map[string][]string)
}
// DevicesPerNUMA represents device ids obtained from device plugin per NUMA node id
type DevicesPerNUMA map[int64][]string
// PodDevicesEntry connects pod information to devices
type PodDevicesEntry struct {
PodUID string
ContainerName string
ResourceName string
DeviceIDs DevicesPerNUMA
AllocResp []byte
}
// checkpointData struct is used to store pod to device allocation information
// in a checkpoint file.
// TODO: add version control when we need to change checkpoint format.
type checkpointData struct {
PodDeviceEntries []PodDevicesEntry
RegisteredDevices map[string][]string
}
// Data holds checkpoint data and its checksum
type Data struct {
Data checkpointData
Checksum checksum.Checksum
}
// NewDevicesPerNUMA creates a DevicesPerNUMA map.
func NewDevicesPerNUMA() DevicesPerNUMA {
return make(DevicesPerNUMA)
}
// Devices returns all device IDs for all NUMA nodes
// and represents them as a sets.Set[string].
func (dev DevicesPerNUMA) Devices() sets.Set[string] {
result := sets.New[string]()
for _, devs := range dev {
result.Insert(devs...)
}
return result
}
// New returns an instance of Checkpoint - must be an alias for the most recent version
func New(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint {
return newV2(devEntries, devices)
}
func newV2(devEntries []PodDevicesEntry, devices map[string][]string) DeviceManagerCheckpoint {
return &Data{
Data: checkpointData{
PodDeviceEntries: devEntries,
RegisteredDevices: devices,
},
}
}
// MarshalCheckpoint returns marshalled data
func (cp *Data) MarshalCheckpoint() ([]byte, error) {
cp.Checksum = checksum.New(cp.Data)
return json.Marshal(*cp)
}
// UnmarshalCheckpoint returns unmarshalled data
func (cp *Data) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, cp)
}
// VerifyChecksum verifies that the stored checksum matches the checksum calculated from the data
func (cp *Data) VerifyChecksum() error {
return cp.Checksum.Verify(cp.Data)
}
// GetData returns device entries and registered devices in the *most recent*
// checkpoint format, *not* in the original format stored on disk.
func (cp *Data) GetData() ([]PodDevicesEntry, map[string][]string) {
return cp.Data.PodDeviceEntries, cp.Data.RegisteredDevices
}
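// exampleCheckpointRoundTrip is an illustrative sketch, not part of the
// original file, of the marshal/verify cycle: MarshalCheckpoint stamps a
// checksum over the data before serializing, and VerifyChecksum detects any
// divergence between the stored checksum and the unmarshalled payload. The
// entry values are hypothetical.
func exampleCheckpointRoundTrip() error {
entries := []PodDevicesEntry{{
PodUID: "pod-uid",
ContainerName: "ctr",
ResourceName: "vendor.example.com/gpu",
DeviceIDs: DevicesPerNUMA{0: {"dev-a"}},
}}
registered := map[string][]string{"vendor.example.com/gpu": {"dev-a"}}
blob, err := New(entries, registered).MarshalCheckpoint()
if err != nil {
return err
}
restored := New(nil, nil)
if err := restored.UnmarshalCheckpoint(blob); err != nil {
return err
}
return restored.VerifyChecksum()
}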
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"context"
"fmt"
"sync"
"time"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
plugin "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1"
)
// endpoint maps to a single registered device plugin. It is responsible
// for managing gRPC communications with the device plugin and caching
// device states reported by the device plugin.
type endpoint interface {
getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error)
allocate(devs []string) (*pluginapi.AllocateResponse, error)
preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error)
setStopTime(t time.Time)
isStopped() bool
stopGracePeriodExpired() bool
}
type endpointImpl struct {
mutex sync.Mutex
resourceName string
api pluginapi.DevicePluginClient
stopTime time.Time
client plugin.Client // for testing only
}
// newEndpointImpl creates a new endpoint for the given resourceName.
// This is to be used during normal device plugin registration.
func newEndpointImpl(p plugin.DevicePlugin) *endpointImpl {
return &endpointImpl{
api: p.API(),
resourceName: p.Resource(),
}
}
// newStoppedEndpointImpl creates a new endpoint for the given resourceName with stopTime set.
// This is to be used during Kubelet restart, before the actual device plugin re-registers.
func newStoppedEndpointImpl(resourceName string) *endpointImpl {
return &endpointImpl{
resourceName: resourceName,
stopTime: time.Now(),
}
}
func (e *endpointImpl) isStopped() bool {
e.mutex.Lock()
defer e.mutex.Unlock()
return !e.stopTime.IsZero()
}
func (e *endpointImpl) stopGracePeriodExpired() bool {
e.mutex.Lock()
defer e.mutex.Unlock()
return !e.stopTime.IsZero() && time.Since(e.stopTime) > endpointStopGracePeriod
}
func (e *endpointImpl) setStopTime(t time.Time) {
e.mutex.Lock()
defer e.mutex.Unlock()
e.stopTime = t
}
// getPreferredAllocation issues GetPreferredAllocation gRPC call to the device plugin.
func (e *endpointImpl) getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) {
if e.isStopped() {
return nil, fmt.Errorf(errEndpointStopped, e)
}
return e.api.GetPreferredAllocation(context.Background(), &pluginapi.PreferredAllocationRequest{
ContainerRequests: []*pluginapi.ContainerPreferredAllocationRequest{
{
AvailableDeviceIDs: available,
MustIncludeDeviceIDs: mustInclude,
AllocationSize: int32(size),
},
},
})
}
// allocate issues Allocate gRPC call to the device plugin.
func (e *endpointImpl) allocate(devs []string) (*pluginapi.AllocateResponse, error) {
if e.isStopped() {
return nil, fmt.Errorf(errEndpointStopped, e)
}
return e.api.Allocate(context.Background(), &pluginapi.AllocateRequest{
ContainerRequests: []*pluginapi.ContainerAllocateRequest{
{DevicesIds: devs},
},
})
}
// preStartContainer issues PreStartContainer gRPC call to the device plugin.
func (e *endpointImpl) preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error) {
if e.isStopped() {
return nil, fmt.Errorf(errEndpointStopped, e)
}
ctx, cancel := context.WithTimeout(context.Background(), pluginapi.KubeletPreStartContainerRPCTimeoutInSecs*time.Second)
defer cancel()
return e.api.PreStartContainer(ctx, &pluginapi.PreStartContainerRequest{
DevicesIds: devs,
})
}
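// exampleStoppedEndpoint is an illustrative sketch, not part of the original
// file, of the stopped-endpoint lifecycle: across a kubelet restart (or
// after a plugin disconnect) the endpoint carries a stop time, RPCs such as
// allocate fail fast while it is stopped, and once endpointStopGracePeriod
// elapses the resource becomes eligible for garbage collection. The
// resource and device names are hypothetical.
func exampleStoppedEndpoint() {
e := newStoppedEndpointImpl("vendor.example.com/gpu")
fmt.Println(e.isStopped()) // true: stopTime was set at construction
_, err := e.allocate([]string{"dev-a"})
fmt.Println(err) // fails fast with errEndpointStopped
fmt.Println(e.stopGracePeriodExpired()) // false until endpointStopGracePeriod elapses
}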
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"context"
goerrors "errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"sync"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
plugin "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
const nodeWithoutTopology = -1
// ActivePodsFunc is a function that returns a list of pods to reconcile.
type ActivePodsFunc func() []*v1.Pod
// ManagerImpl is the structure in charge of managing Device Plugins.
type ManagerImpl struct {
checkpointdir string
endpoints map[string]endpointInfo // Key is ResourceName
mutex sync.Mutex
server plugin.Server
// activePods is a method for listing active pods on the node
// so the amount of pluginResources requested by existing pods
// could be counted when updating allocated devices
activePods ActivePodsFunc
// sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness.
// We use it to determine when we can purge inactive pods from checkpointed state.
sourcesReady config.SourcesReady
// allDevices holds all the devices currently registered to the device manager
allDevices ResourceDeviceInstances
// healthyDevices contains all the registered healthy resourceNames and their exported device IDs.
healthyDevices map[string]sets.Set[string]
// unhealthyDevices contains all the unhealthy devices and their exported device IDs.
unhealthyDevices map[string]sets.Set[string]
// allocatedDevices contains allocated deviceIds, keyed by resourceName.
allocatedDevices map[string]sets.Set[string]
// podDevices contains pod to allocated device mapping.
podDevices *podDevices
checkpointManager checkpointmanager.CheckpointManager
// List of NUMA Nodes available on the underlying machine
numaNodes []int
// Store of Topology Affinities that the Device Manager can query.
topologyAffinityStore topologymanager.Store
// devicesToReuse contains devices that can be reused as they have been allocated to
// init containers.
devicesToReuse PodReusableDevices
// containerMap provides a mapping from (pod, container) -> containerID
// for all containers in a pod. Used to detect pods running across a restart
containerMap containermap.ContainerMap
// containerRunningSet identifies which container among those present in `containerMap`
// was reported running by the container runtime when `containerMap` was computed.
// Used to detect pods running across a restart
containerRunningSet sets.Set[string]
// update channel for device health updates
update chan resourceupdates.Update
}
type endpointInfo struct {
e endpoint
opts *pluginapi.DevicePluginOptions
}
type sourcesReadyStub struct{}
// PodReusableDevices is a map by pod name of devices to reuse.
type PodReusableDevices map[string]map[string]sets.Set[string]
func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// NewManagerImpl creates a new manager.
func NewManagerImpl(topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
socketPath := pluginapi.KubeletSocket
if runtime.GOOS == "windows" {
socketPath = os.Getenv("SYSTEMDRIVE") + pluginapi.KubeletSocketWindows
}
return newManagerImpl(socketPath, topology, topologyAffinityStore)
}
func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffinityStore topologymanager.Store) (*ManagerImpl, error) {
klog.V(2).InfoS("Creating Device Plugin manager", "path", socketPath)
var numaNodes []int
for _, node := range topology {
numaNodes = append(numaNodes, node.Id)
}
manager := &ManagerImpl{
endpoints: make(map[string]endpointInfo),
allDevices: NewResourceDeviceInstances(),
healthyDevices: make(map[string]sets.Set[string]),
unhealthyDevices: make(map[string]sets.Set[string]),
allocatedDevices: make(map[string]sets.Set[string]),
podDevices: newPodDevices(),
numaNodes: numaNodes,
topologyAffinityStore: topologyAffinityStore,
devicesToReuse: make(PodReusableDevices),
update: make(chan resourceupdates.Update, 100),
}
server, err := plugin.NewServer(socketPath, manager, manager)
if err != nil {
return nil, fmt.Errorf("failed to create plugin server: %v", err)
}
manager.server = server
manager.checkpointdir, _ = filepath.Split(server.SocketPath())
// The following structures are populated with real implementations in manager.Start().
// Until then, initialize them with no-op implementations.
manager.activePods = func() []*v1.Pod { return []*v1.Pod{} }
manager.sourcesReady = &sourcesReadyStub{}
checkpointManager, err := checkpointmanager.NewCheckpointManager(manager.checkpointdir)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
}
manager.checkpointManager = checkpointManager
return manager, nil
}
func (m *ManagerImpl) Updates() <-chan resourceupdates.Update {
return m.update
}
// CleanupPluginDirectory removes all existing unix sockets
// from /var/lib/kubelet/device-plugins on Device Plugin Manager start
func (m *ManagerImpl) CleanupPluginDirectory(dir string) error {
d, err := os.Open(dir)
if err != nil {
return err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return err
}
var errs []error
for _, name := range names {
filePath := filepath.Join(dir, name)
if filePath == m.checkpointFile() {
continue
}
stat, err := os.Stat(filePath)
if err != nil {
klog.ErrorS(err, "Failed to stat file", "path", filePath)
continue
}
if stat.IsDir() || stat.Mode()&os.ModeSocket == 0 {
continue
}
err = os.RemoveAll(filePath)
if err != nil {
errs = append(errs, err)
klog.ErrorS(err, "Failed to remove file", "path", filePath)
continue
}
}
return errorsutil.NewAggregate(errs)
}
// PluginConnected connects a plugin to a new endpoint.
// This is done as part of device plugin registration.
func (m *ManagerImpl) PluginConnected(resourceName string, p plugin.DevicePlugin) error {
options, err := p.API().GetDevicePluginOptions(context.Background(), &pluginapi.Empty{})
if err != nil {
return fmt.Errorf("failed to get device plugin options: %v", err)
}
e := newEndpointImpl(p)
m.mutex.Lock()
defer m.mutex.Unlock()
m.endpoints[resourceName] = endpointInfo{e, options}
klog.V(2).InfoS("Device plugin connected", "resourceName", resourceName)
return nil
}
// PluginDisconnected disconnects a plugin from an endpoint.
// This is done as part of device plugin deregistration.
func (m *ManagerImpl) PluginDisconnected(resourceName string) {
m.mutex.Lock()
defer m.mutex.Unlock()
if ep, exists := m.endpoints[resourceName]; exists {
m.markResourceUnhealthy(resourceName)
klog.V(2).InfoS("Endpoint became unhealthy", "resourceName", resourceName, "endpoint", ep)
ep.e.setStopTime(time.Now())
}
}
// PluginListAndWatchReceiver receives a ListAndWatchResponse from a device plugin
// and ensures that up-to-date state (e.g. number of devices and device health)
// is captured. It also checkpoints registered device and device-to-container
// allocation information to disk.
func (m *ManagerImpl) PluginListAndWatchReceiver(resourceName string, resp *pluginapi.ListAndWatchResponse) {
m.genericDeviceUpdateCallback(resourceName, resp.Devices)
}
func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []*pluginapi.Device) {
healthyCount := 0
m.mutex.Lock()
m.healthyDevices[resourceName] = sets.New[string]()
m.unhealthyDevices[resourceName] = sets.New[string]()
oldDevices := m.allDevices[resourceName]
podsToUpdate := sets.New[string]()
m.allDevices[resourceName] = make(map[string]*pluginapi.Device)
for _, dev := range devices {
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) {
// compare with old device's health and send update to the channel if needed
updatePodUIDFn := func(deviceID string) {
podUID, _ := m.podDevices.getPodAndContainerForDevice(deviceID)
if podUID != "" {
podsToUpdate.Insert(podUID)
}
}
if oldDev, ok := oldDevices[dev.ID]; ok {
if oldDev.Health != dev.Health {
updatePodUIDFn(dev.ID)
}
} else {
// if this is a new device, it might have existed before and disappeared for a while
// but still be assigned to a Pod. In this case, we need to send an update to the channel
updatePodUIDFn(dev.ID)
}
}
m.allDevices[resourceName][dev.ID] = dev
if dev.Health == pluginapi.Healthy {
m.healthyDevices[resourceName].Insert(dev.ID)
healthyCount++
} else {
m.unhealthyDevices[resourceName].Insert(dev.ID)
}
}
m.mutex.Unlock()
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) {
if len(podsToUpdate) > 0 {
select {
case m.update <- resourceupdates.Update{PodUIDs: podsToUpdate.UnsortedList()}:
default:
klog.ErrorS(goerrors.New("device update channel is full"), "Discarding pod update", "podsToUpdate", podsToUpdate.UnsortedList())
}
}
}
if err := m.writeCheckpoint(); err != nil {
klog.ErrorS(err, "Writing checkpoint encountered")
}
klog.V(2).InfoS("Processed device updates for resource", "resourceName", resourceName, "totalCount", len(devices), "healthyCount", healthyCount)
}
// GetWatcherHandler returns the plugin handler
func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler {
return m.server
}
// GetHealthChecker returns the health checker
func (m *ManagerImpl) GetHealthChecker() healthz.HealthChecker {
return m.server
}
// checkpointFile returns device plugin checkpoint file path.
func (m *ManagerImpl) checkpointFile() string {
return filepath.Join(m.checkpointdir, kubeletDeviceManagerCheckpoint)
}
// Start starts the Device Plugin Manager: it initializes podDevices and
// allocatedDevices information from checkpointed state and starts the
// device plugin registration service.
func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error {
klog.V(2).InfoS("Starting Device Plugin manager")
m.activePods = activePods
m.sourcesReady = sourcesReady
m.containerMap = initialContainers
m.containerRunningSet = initialContainerRunningSet
// Loads in allocatedDevices information from disk.
err := m.readCheckpoint()
if err != nil {
klog.ErrorS(err, "Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date")
}
return m.server.Start()
}
// Stop stops the plugin server.
// It can be called concurrently, more than once, and is safe to call
// without a prior Start.
func (m *ManagerImpl) Stop() error {
return m.server.Stop()
}
// Allocate allocates a set of devices for the given container
// from the registered device plugins.
func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
if _, ok := m.devicesToReuse[string(pod.UID)]; !ok {
m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string])
}
// If m.devicesToReuse contains entries for pods other than the current one, delete them.
for podUID := range m.devicesToReuse {
if podUID != string(pod.UID) {
delete(m.devicesToReuse, podUID)
}
}
// Allocate resources for init containers first as we know the caller always loops
// through init containers before looping through app containers. Should the caller
// ever change those semantics, this logic will need to be amended.
for _, initContainer := range pod.Spec.InitContainers {
if container.Name == initContainer.Name {
if err := m.allocateContainerResources(pod, container, m.devicesToReuse[string(pod.UID)]); err != nil {
return err
}
if !podutil.IsRestartableInitContainer(&initContainer) {
m.podDevices.addContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)])
} else {
// If the init container is restartable, we need to keep the
// devices allocated. In other words, we should remove them
// from the devicesToReuse.
m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)])
}
return nil
}
}
if err := m.allocateContainerResources(pod, container, m.devicesToReuse[string(pod.UID)]); err != nil {
return err
}
m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, m.devicesToReuse[string(pod.UID)])
return nil
}
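// exampleAllocatePodDevices is an illustrative sketch, not part of the
// original file, of the ordering Allocate relies on: the caller walks init
// containers in spec order before any app container, which is what lets
// devicesToReuse hand devices freed by completed (non-restartable) init
// containers to later containers of the same pod.
func exampleAllocatePodDevices(m *ManagerImpl, pod *v1.Pod) error {
for i := range pod.Spec.InitContainers {
if err := m.Allocate(pod, &pod.Spec.InitContainers[i]); err != nil {
return err
}
}
for i := range pod.Spec.Containers {
if err := m.Allocate(pod, &pod.Spec.Containers[i]); err != nil {
return err
}
}
return nil
}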
// UpdatePluginResources updates node resources based on devices already allocated to pods.
func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
pod := attrs.Pod
// quick return if no pluginResources requested
if !m.podDevices.hasPod(string(pod.UID)) {
return nil
}
m.sanitizeNodeAllocatable(node)
return nil
}
func (m *ManagerImpl) markResourceUnhealthy(resourceName string) {
klog.V(2).InfoS("Mark all resources Unhealthy for resource", "resourceName", resourceName)
healthyDevices := sets.New[string]()
if _, ok := m.healthyDevices[resourceName]; ok {
healthyDevices = m.healthyDevices[resourceName]
m.healthyDevices[resourceName] = sets.New[string]()
}
if _, ok := m.unhealthyDevices[resourceName]; !ok {
m.unhealthyDevices[resourceName] = sets.New[string]()
}
m.unhealthyDevices[resourceName] = m.unhealthyDevices[resourceName].Union(healthyDevices)
}
// GetCapacity is expected to be called when Kubelet updates its node status.
// The first returned variable contains the registered device plugin resource capacity.
// The second returned variable contains the registered device plugin resource allocatable.
// The third returned variable contains previously registered resources that are no longer active.
// Kubelet uses this information to update resource capacity/allocatable in its node status.
// After the call, device plugin can remove the inactive resources from its internal list as the
// change is already reflected in Kubelet node status.
// Note in the special case after Kubelet restarts, device plugin resource capacities can
// temporarily drop to zero until the corresponding device plugins re-register. This is OK because
// cm.UpdatePluginResource() run during predicate Admit guarantees we adjust nodeinfo
// capacity for already allocated pods so that they can continue to run. However, new pods
// requiring device plugin resources will not be scheduled till device plugin re-registers.
func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) {
needsUpdateCheckpoint := false
var capacity = v1.ResourceList{}
var allocatable = v1.ResourceList{}
deletedResources := sets.New[string]()
m.mutex.Lock()
for resourceName, devices := range m.healthyDevices {
eI, ok := m.endpoints[resourceName]
if (ok && eI.e.stopGracePeriodExpired()) || !ok {
// The resources contained in endpoints and (un)healthyDevices
// should always be consistent. Otherwise, we run with the risk
// of failing to garbage collect non-existing resources or devices.
if !ok {
klog.InfoS("Unexpected: healthyDevices and endpoints are out of sync")
}
delete(m.endpoints, resourceName)
delete(m.healthyDevices, resourceName)
deletedResources.Insert(resourceName)
needsUpdateCheckpoint = true
} else {
capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
allocatable[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
}
}
for resourceName, devices := range m.unhealthyDevices {
eI, ok := m.endpoints[resourceName]
if (ok && eI.e.stopGracePeriodExpired()) || !ok {
if !ok {
klog.InfoS("Unexpected: unhealthyDevices and endpoints became out of sync")
}
delete(m.endpoints, resourceName)
delete(m.unhealthyDevices, resourceName)
deletedResources.Insert(resourceName)
needsUpdateCheckpoint = true
} else {
capacityCount := capacity[v1.ResourceName(resourceName)]
unhealthyCount := *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI)
capacityCount.Add(unhealthyCount)
capacity[v1.ResourceName(resourceName)] = capacityCount
}
}
m.mutex.Unlock()
if needsUpdateCheckpoint {
if err := m.writeCheckpoint(); err != nil {
klog.ErrorS(err, "Failed to write checkpoint file")
}
}
return capacity, allocatable, deletedResources.UnsortedList()
}
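// exampleConsumeCapacity is an illustrative sketch, not part of the original
// file, of how a caller such as the kubelet node-status loop might consume
// GetCapacity: capacity and allocatable feed the node status, while the
// deleted list identifies resources to zero out because their plugin is gone.
func exampleConsumeCapacity(m *ManagerImpl) (v1.ResourceList, v1.ResourceList) {
capacity, allocatable, deleted := m.GetCapacity()
for _, resourceName := range deleted {
capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(0, resource.DecimalSI)
}
return capacity, allocatable
}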
// Checkpoints device to container allocation information to disk.
func (m *ManagerImpl) writeCheckpoint() error {
m.mutex.Lock()
registeredDevs := make(map[string][]string)
for resource, devices := range m.healthyDevices {
registeredDevs[resource] = devices.UnsortedList()
}
data := checkpoint.New(m.podDevices.toCheckpointData(),
registeredDevs)
m.mutex.Unlock()
err := m.checkpointManager.CreateCheckpoint(kubeletDeviceManagerCheckpoint, data)
if err != nil {
err2 := fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err)
klog.ErrorS(err, "Failed to write checkpoint file")
return err2
}
klog.V(4).InfoS("Checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
return nil
}
// Reads device to container allocation information from disk, and populates
// m.allocatedDevices accordingly.
func (m *ManagerImpl) readCheckpoint() error {
cp, err := m.getCheckpoint()
if err != nil {
if err == errors.ErrCheckpointNotFound {
// no point in trying anything else
klog.ErrorS(err, "Failed to read data from checkpoint", "checkpoint", kubeletDeviceManagerCheckpoint)
return nil
}
return err
}
m.mutex.Lock()
defer m.mutex.Unlock()
podDevices, registeredDevs := cp.GetData()
m.podDevices.fromCheckpointData(podDevices)
m.allocatedDevices = m.podDevices.devices()
for resource := range registeredDevs {
// During startup, create an empty healthyDevices list so that the resource capacity
// stays zero until the corresponding device plugin re-registers.
m.healthyDevices[resource] = sets.New[string]()
m.unhealthyDevices[resource] = sets.New[string]()
m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
}
klog.V(4).InfoS("Read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
return nil
}
func (m *ManagerImpl) getCheckpoint() (checkpoint.DeviceManagerCheckpoint, error) {
registeredDevs := make(map[string][]string)
devEntries := make([]checkpoint.PodDevicesEntry, 0)
cp := checkpoint.New(devEntries, registeredDevs)
err := m.checkpointManager.GetCheckpoint(kubeletDeviceManagerCheckpoint, cp)
return cp, err
}
// UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
func (m *ManagerImpl) UpdateAllocatedDevices() {
activePods := m.activePods()
if !m.sourcesReady.AllReady() {
return
}
m.mutex.Lock()
defer m.mutex.Unlock()
podsToBeRemoved := m.podDevices.pods()
for _, pod := range activePods {
podsToBeRemoved.Delete(string(pod.UID))
}
if len(podsToBeRemoved) <= 0 {
return
}
klog.V(3).InfoS("Pods to be removed", "podUIDs", sets.List(podsToBeRemoved))
m.podDevices.delete(sets.List(podsToBeRemoved))
// Regenerate allocatedDevices after we update pod allocation information.
m.allocatedDevices = m.podDevices.devices()
}
// Returns the list of device IDs we need to allocate with the Allocate rpc call.
// Returns an empty list in case we don't need to issue the Allocate rpc call.
func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.Set[string]) (sets.Set[string], error) {
m.mutex.Lock()
defer m.mutex.Unlock()
needed := required
// Gets list of devices that have already been allocated.
// This can happen if a container restarts for example.
devices := m.podDevices.containerDevices(podUID, contName, resource)
if devices != nil {
klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", podUID, "devices", sets.List(devices))
needed = needed - devices.Len()
// A pod's resource is not expected to change once admitted by the API server,
// so just fail loudly here. We can revisit this part if this no longer holds.
if needed != 0 {
return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", podUID, contName, resource, devices.Len(), required)
}
}
// We have 3 major flows to handle:
// 1. kubelet running, normal allocation (needed > 0, container being [re]created). Steady state and by far the most common case.
// 2. kubelet restart. In this scenario every other component of the stack (device plugins, app container, runtime) is still running.
// 3. node reboot. In this scenario device plugins may not be running yet when we try to allocate devices.
// note: if we get this far the runtime is surely running. This is usually enforced at OS level by startup system service dependencies.
// First we take care of the exceptional flows (scenarios 2 and 3). In both flows, kubelet is reinitializing, and while kubelet is reinitializing, sources are NOT all ready.
// Is this a simple kubelet restart (scenario 2)? To distinguish, we use the information we got from the runtime. If we are asked to allocate devices for containers reported
// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. So if a container was reported running, it can only be
// because it already has access to all the required devices; we have nothing to do and can bail out.
if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
klog.V(3).InfoS("Container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
return nil, nil
}
// We dealt with scenario 2. If we got this far it's either scenario 3 (node reboot) or scenario 1 (steady state, normal flow).
klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
healthyDevices, hasRegistered := m.healthyDevices[resource]
// The following checks are expected to fail only in scenario 3 (node reboot).
// The kubelet is reinitializing and got a container from sources. But there's no ordering, so an app container may attempt allocation _before_ the device plugin has been created,
// registered and reported its devices back to kubelet.
// This can only happen in scenario 3 because at steady state (scenario 1) the scheduler prevents pods from being sent to nodes which don't report enough devices.
// Note: we need to check the device health and registration status *before* we check how many devices are needed; doing otherwise caused issue #109595.
// Note: if the scheduler is bypassed, we fall back to scenario 1, so we still need these checks.
if !hasRegistered {
return nil, fmt.Errorf("cannot allocate unregistered device %s", resource)
}
// Check if registered resource has healthy devices
if healthyDevices.Len() == 0 {
return nil, fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resource)
}
// Check if all the previously allocated devices are healthy
if !healthyDevices.IsSuperset(devices) {
return nil, fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resource)
}
// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back to a common path.
// We cover container restart on kubelet steady state with the same flow.
if needed == 0 {
klog.V(3).InfoS("No devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
// No change, no work.
return nil, nil
}
// Declare the list of allocated devices.
// This will be populated and returned below.
allocated := sets.New[string]()
// Create a closure to help with device allocation
// Returns 'true' once no more devices need to be allocated.
allocateRemainingFrom := func(devices sets.Set[string]) bool {
// When we call callGetPreferredAllocationIfAvailable below, we will release
// the lock and call the device plugin. If someone calls ListResource concurrently,
// device manager will recalculate the allocatedDevices map. Some entries with
// empty sets may be removed, so we reinit here.
if m.allocatedDevices[resource] == nil {
m.allocatedDevices[resource] = sets.New[string]()
}
for device := range devices.Difference(allocated) {
m.allocatedDevices[resource].Insert(device)
allocated.Insert(device)
needed--
if needed == 0 {
return true
}
}
return false
}
// Allocates from reusableDevices list first.
if allocateRemainingFrom(reusableDevices) {
return allocated, nil
}
// Gets Devices in use.
devicesInUse := m.allocatedDevices[resource]
// Gets Available devices.
available := m.healthyDevices[resource].Difference(devicesInUse)
if available.Len() < needed {
return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len())
}
// Filters available Devices based on NUMA affinity.
aligned, unaligned, noAffinity := m.filterByAffinity(podUID, contName, resource, available)
// If we can allocate all remaining devices from the set of aligned ones, then
// give the plugin the chance to influence which ones to allocate from that set.
if needed < aligned.Len() {
// First allocate from the preferred devices list (if available).
preferred, err := m.callGetPreferredAllocationIfAvailable(podUID, contName, resource, aligned.Union(allocated), allocated, required)
if err != nil {
return nil, err
}
if allocateRemainingFrom(preferred.Intersection(aligned)) {
return allocated, nil
}
// Then fallback to allocate from the aligned set if no preferred list
// is returned (or not enough devices are returned in that list).
if allocateRemainingFrom(aligned) {
return allocated, nil
}
return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed)
}
// If we can't allocate all remaining devices from the set of aligned ones,
// then start by first allocating all the aligned devices (to ensure
// that the alignment guaranteed by the TopologyManager is honored).
if allocateRemainingFrom(aligned) {
return allocated, nil
}
// Then give the plugin the chance to influence the decision on any
// remaining devices to allocate.
preferred, err := m.callGetPreferredAllocationIfAvailable(podUID, contName, resource, available.Union(allocated), allocated, required)
if err != nil {
return nil, err
}
if allocateRemainingFrom(preferred.Intersection(available)) {
return allocated, nil
}
// Finally, if the plugin did not return a preferred allocation (or didn't
// return a large enough one), then fall back to allocating the remaining
// devices from the 'unaligned' and 'noAffinity' sets.
if allocateRemainingFrom(unaligned) {
return allocated, nil
}
if allocateRemainingFrom(noAffinity) {
return allocated, nil
}
return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed)
}
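// exampleAllocationOrder is an illustrative sketch, not part of the original
// file, of the fallback ordering devicesToAllocate implements above:
// reusable devices first, then NUMA-aligned devices (consulting the
// plugin's preferred allocation where possible), then unaligned devices,
// then devices with no NUMA affinity. The sets are hypothetical inputs.
func exampleAllocationOrder(reusable, aligned, unaligned, noAffinity sets.Set[string], needed int) sets.Set[string] {
allocated := sets.New[string]()
for _, candidates := range []sets.Set[string]{reusable, aligned, unaligned, noAffinity} {
for device := range candidates.Difference(allocated) {
if needed <= 0 {
return allocated
}
allocated.Insert(device)
needed--
}
}
return allocated // fewer devices than needed were available
}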
func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) {
// If alignment information is not available, just pass the available list back.
hint := m.topologyAffinityStore.GetAffinity(podUID, contName)
if !m.deviceHasTopologyAlignment(resource) || hint.NUMANodeAffinity == nil {
return sets.New[string](), sets.New[string](), available
}
// Build a map of NUMA Nodes to the devices associated with them. A
// device may be associated to multiple NUMA nodes at the same time. If an
// available device does not have any NUMA Nodes associated with it, add it
// to a list of NUMA Nodes for the fake NUMANode -1.
perNodeDevices := make(map[int]sets.Set[string])
for d := range available {
if m.allDevices[resource][d].Topology == nil || len(m.allDevices[resource][d].Topology.Nodes) == 0 {
if _, ok := perNodeDevices[nodeWithoutTopology]; !ok {
perNodeDevices[nodeWithoutTopology] = sets.New[string]()
}
perNodeDevices[nodeWithoutTopology].Insert(d)
continue
}
for _, node := range m.allDevices[resource][d].Topology.Nodes {
if _, ok := perNodeDevices[int(node.ID)]; !ok {
perNodeDevices[int(node.ID)] = sets.New[string]()
}
perNodeDevices[int(node.ID)].Insert(d)
}
}
// Get a flat list of all the nodes associated with available devices.
var nodes []int
for node := range perNodeDevices {
nodes = append(nodes, node)
}
// Sort the list of nodes by:
// 1) Nodes contained in the 'hint's affinity set
// 2) Nodes not contained in the 'hint's affinity set
// 3) The fake NUMANode of -1 (assuming it is included in the list)
// Within each of the groups above, sort the nodes by how many devices they contain
sort.Slice(nodes, func(i, j int) bool {
// If one or the other of nodes[i] or nodes[j] is in the 'hint's affinity set
if hint.NUMANodeAffinity.IsSet(nodes[i]) && hint.NUMANodeAffinity.IsSet(nodes[j]) {
return perNodeDevices[nodes[i]].Len() < perNodeDevices[nodes[j]].Len()
}
if hint.NUMANodeAffinity.IsSet(nodes[i]) {
return true
}
if hint.NUMANodeAffinity.IsSet(nodes[j]) {
return false
}
// If one or the other of nodes[i] or nodes[j] is the fake NUMA node -1 (they can't both be)
if nodes[i] == nodeWithoutTopology {
return false
}
if nodes[j] == nodeWithoutTopology {
return true
}
// Otherwise both nodes[i] and nodes[j] are real NUMA nodes that are not in the 'hint's' affinity list.
return perNodeDevices[nodes[i]].Len() < perNodeDevices[nodes[j]].Len()
})
// Generate three sorted lists of devices. Devices in the first list come
// from valid NUMA Nodes contained in the affinity mask. Devices in the
// second list come from valid NUMA Nodes not in the affinity mask. Devices
// in the third list come from devices with no NUMA Node association (i.e.
// those mapped to the fake NUMA Node -1). Because we loop through the
// sorted list of NUMA nodes in order, within each list, devices are sorted
// by their connection to NUMA Nodes with more devices on them.
var fromAffinity []string
var notFromAffinity []string
var withoutTopology []string
for d := range available {
// Since the same device may be associated with multiple NUMA Nodes, we
// need to be careful not to add it to more than one list. The logic
// below ensures this by breaking out of the loop after the first NUMA
// node that has the device is encountered.
for _, n := range nodes {
if perNodeDevices[n].Has(d) {
if n == nodeWithoutTopology {
withoutTopology = append(withoutTopology, d)
} else if hint.NUMANodeAffinity.IsSet(n) {
fromAffinity = append(fromAffinity, d)
} else {
notFromAffinity = append(notFromAffinity, d)
}
break
}
}
}
// Return all three lists containing the full set of devices across them.
return sets.New[string](fromAffinity...), sets.New[string](notFromAffinity...), sets.New[string](withoutTopology...)
}
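// exampleFilterByAffinity is an illustrative sketch, not part of the
// original file: with a TopologyManager hint covering NUMA node 0, devices
// attached to node 0 come back in the first (aligned) set, devices attached
// only to other nodes in the second (unaligned) set, and devices reporting
// no Topology in the third set. All identifiers are hypothetical.
func exampleFilterByAffinity(m *ManagerImpl, available sets.Set[string]) {
aligned, unaligned, noAffinity := m.filterByAffinity("pod-uid", "ctr", "vendor.example.com/gpu", available)
// e.g. aligned: {"gpu-0"}, unaligned: {"gpu-1"}, noAffinity: {"gpu-x"}
_, _, _ = aligned, unaligned, noAffinity
}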
// allocateContainerResources attempts to allocate all of required device
// plugin resources for the input container, issues an Allocate rpc request
// for each new device resource requirement, processes their AllocateResponses,
// and updates the cached containerDevices on success.
func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.Set[string]) error {
podUID := string(pod.UID)
contName := container.Name
allocatedDevicesUpdated := false
needsUpdateCheckpoint := false
// Extended resources are not allowed to be overcommitted.
// Since device plugins advertise extended resources,
// Requests must be equal to Limits, so iterating
// over the Limits is sufficient.
for k, v := range container.Resources.Limits {
resource := string(k)
needed := int(v.Value())
klog.V(3).InfoS("Looking for needed resources", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "needed", needed)
if !m.isDevicePluginResource(resource) {
continue
}
// Updates allocatedDevices to garbage collect any stranded resources
// before doing the device plugin allocation.
if !allocatedDevicesUpdated {
m.UpdateAllocatedDevices()
allocatedDevicesUpdated = true
}
allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource])
if err != nil {
return err
}
if len(allocDevices) == 0 {
continue
}
needsUpdateCheckpoint = true
startRPCTime := time.Now()
// Manager.Allocate involves RPC calls to device plugin, which
// could be heavy-weight. Therefore we want to perform this operation outside
// mutex lock. Note if Allocate call fails, we may leave container resources
// partially allocated for the failed container. We rely on UpdateAllocatedDevices()
// to garbage collect these resources later. Another side effect is that if
// we have X resource A and Y resource B in total, and two containers, container1
// and container2 both require X resource A and Y resource B. Both allocation
// requests may fail if we serve them in mixed order.
// TODO: may revisit this part later if we see inefficient resource allocation
// in real use as the result of this. Should also consider to parallelize device
// plugin Allocate grpc calls if it becomes common that a container may require
// resources from multiple device plugins.
m.mutex.Lock()
eI, ok := m.endpoints[resource]
m.mutex.Unlock()
if !ok {
m.mutex.Lock()
m.allocatedDevices = m.podDevices.devices()
m.mutex.Unlock()
return fmt.Errorf("unknown Device Plugin %s", resource)
}
devs := allocDevices.UnsortedList()
// TODO: refactor this part of code to just append a ContainerAllocationRequest
// in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
klog.V(4).InfoS("Making allocation request for device plugin", "devices", devs, "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name)
resp, err := eI.e.allocate(devs)
metrics.DevicePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
if err != nil {
// In case of allocation failure, we want to restore m.allocatedDevices
// to the actual allocated state from m.podDevices.
m.mutex.Lock()
m.allocatedDevices = m.podDevices.devices()
m.mutex.Unlock()
return err
}
if len(resp.ContainerResponses) == 0 {
return fmt.Errorf("no containers return in allocation response %v", resp)
}
allocDevicesWithNUMA := checkpoint.NewDevicesPerNUMA()
// Update internal cached podDevices state.
m.mutex.Lock()
for dev := range allocDevices {
if m.allDevices[resource][dev].Topology == nil || len(m.allDevices[resource][dev].Topology.Nodes) == 0 {
allocDevicesWithNUMA[nodeWithoutTopology] = append(allocDevicesWithNUMA[nodeWithoutTopology], dev)
continue
}
for idx := range m.allDevices[resource][dev].Topology.Nodes {
node := m.allDevices[resource][dev].Topology.Nodes[idx]
allocDevicesWithNUMA[node.ID] = append(allocDevicesWithNUMA[node.ID], dev)
}
}
m.mutex.Unlock()
m.podDevices.insert(podUID, contName, resource, allocDevicesWithNUMA, resp.ContainerResponses[0])
}
if needsUpdateCheckpoint {
return m.writeCheckpoint()
}
return nil
}
// checkPodActive checks if the given pod is still in the activePods list
func (m *ManagerImpl) checkPodActive(pod *v1.Pod) bool {
activePods := m.activePods()
for _, activePod := range activePods {
if activePod.UID == pod.UID {
return true
}
}
return false
}
// GetDeviceRunContainerOptions checks whether we have cached containerDevices
// for the passed-in <pod, container> and returns its DeviceRunContainerOptions
// for the found one. An empty struct is returned in case no cached state is found.
func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*DeviceRunContainerOptions, error) {
podUID := string(pod.UID)
contName := container.Name
needsReAllocate := false
for k, v := range container.Resources.Limits {
resource := string(k)
if !m.isDevicePluginResource(resource) || v.Value() == 0 {
continue
}
err := m.callPreStartContainerIfNeeded(podUID, contName, resource)
if err != nil {
return nil, err
}
if !m.checkPodActive(pod) {
klog.V(5).InfoS("Pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
continue
}
// This is a device plugin resource yet we don't have cached
// resource state. This is likely due to a race during node
// restart. We re-issue allocate request to cover this race.
if m.podDevices.containerDevices(podUID, contName, resource) == nil {
needsReAllocate = true
}
}
if needsReAllocate {
klog.V(2).InfoS("Needs to re-allocate device plugin resources for pod", "pod", klog.KObj(pod), "containerName", container.Name)
if err := m.Allocate(pod, container); err != nil {
return nil, err
}
}
return m.podDevices.deviceRunContainerOptions(string(pod.UID), container.Name), nil
}
// callPreStartContainerIfNeeded issues PreStartContainer grpc call for device plugin resource
// with PreStartRequired option set.
func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource string) error {
m.mutex.Lock()
eI, ok := m.endpoints[resource]
if !ok {
m.mutex.Unlock()
return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
}
if eI.opts == nil || !eI.opts.PreStartRequired {
m.mutex.Unlock()
klog.V(5).InfoS("Plugin options indicate to skip PreStartContainer for resource", "podUID", podUID, "resourceName", resource, "containerName", contName)
return nil
}
devices := m.podDevices.containerDevices(podUID, contName, resource)
if devices == nil {
m.mutex.Unlock()
return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource)
}
m.mutex.Unlock()
devs := devices.UnsortedList()
klog.V(4).InfoS("Issuing a PreStartContainer call for container", "containerName", contName, "podUID", podUID)
_, err := eI.e.preStartContainer(devs)
if err != nil {
return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err)
}
// TODO: Add metrics support for init RPC
return nil
}
// callGetPreferredAllocationIfAvailable issues GetPreferredAllocation grpc
// call for device plugin resource with GetPreferredAllocationAvailable option set.
func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.Set[string], size int) (sets.Set[string], error) {
eI, ok := m.endpoints[resource]
if !ok {
return nil, fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
}
if eI.opts == nil || !eI.opts.GetPreferredAllocationAvailable {
klog.V(5).InfoS("Plugin options indicate to skip GetPreferredAllocation for resource", "resourceName", resource, "podUID", podUID, "containerName", contName)
return nil, nil
}
m.mutex.Unlock()
klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "resourceName", resource, "containerName", contName, "podUID", podUID)
resp, err := eI.e.getPreferredAllocation(available.UnsortedList(), mustInclude.UnsortedList(), size)
m.mutex.Lock()
if err != nil {
return nil, fmt.Errorf("device plugin GetPreferredAllocation rpc failed with err: %v", err)
}
if resp != nil && len(resp.ContainerResponses) > 0 {
return sets.New[string](resp.ContainerResponses[0].DeviceIDs...), nil
}
return sets.New[string](), nil
}
// sanitizeNodeAllocatable scans through allocatedDevices in the device manager
// and if necessary, updates allocatableResource in nodeInfo to at least equal to
// the allocated capacity. This allows pods that have already been scheduled on
// the node to pass GeneralPredicates admission checking even upon device plugin failure.
func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulerframework.NodeInfo) {
var newAllocatableResource *schedulerframework.Resource
allocatableResource := node.Allocatable
if allocatableResource.ScalarResources == nil {
allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
}
m.mutex.Lock()
defer m.mutex.Unlock()
for resource, devices := range m.allocatedDevices {
needed := devices.Len()
quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)]
if ok && int(quant) >= needed {
continue
}
// Needs to update nodeInfo.AllocatableResource to make sure
// NodeInfo.allocatableResource at least equal to the capacity already allocated.
if newAllocatableResource == nil {
newAllocatableResource = allocatableResource.Clone()
}
newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed)
}
if newAllocatableResource != nil {
node.Allocatable = newAllocatableResource
}
}
func (m *ManagerImpl) isDevicePluginResource(resource string) bool {
m.mutex.Lock()
defer m.mutex.Unlock()
_, registeredResource := m.healthyDevices[resource]
_, allocatedResource := m.allocatedDevices[resource]
// Return true if this is either an active device plugin resource or
// a resource we have previously allocated.
return registeredResource || allocatedResource
}
// GetAllocatableDevices returns information about all the healthy devices known to the manager
func (m *ManagerImpl) GetAllocatableDevices() ResourceDeviceInstances {
m.mutex.Lock()
defer m.mutex.Unlock()
resp := m.allDevices.Filter(m.healthyDevices)
klog.V(4).InfoS("GetAllocatableDevices", "known", len(m.allDevices), "allocatable", len(resp))
return resp
}
// GetDevices returns the devices used by the specified container
func (m *ManagerImpl) GetDevices(podUID, containerName string) ResourceDeviceInstances {
return m.podDevices.getContainerDevices(podUID, containerName)
}
func (m *ManagerImpl) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) {
m.mutex.Lock()
defer m.mutex.Unlock()
// Today we ignore edge cases that are not likely to happen:
// - update statuses for containers that are in spec, but not in status
// - update statuses for resources requested in spec, but with no information in podDevices
for i, containerStatus := range status.ContainerStatuses {
devices := m.podDevices.getContainerDevices(string(pod.UID), containerStatus.Name)
for resourceName, deviceInstances := range devices {
for id, d := range deviceInstances {
health := pluginapi.Healthy
// this is unlikely, but check for existence here anyway
if r, ok := m.allDevices[resourceName]; ok {
if _, ok := r[id]; ok {
health = m.allDevices[resourceName][id].Health
}
}
d.Health = health
deviceInstances[id] = d
}
}
for resourceName, dI := range devices {
resourceStatus := v1.ResourceStatus{
Name: v1.ResourceName(resourceName),
Resources: []v1.ResourceHealth{},
}
for id, d := range dI {
health := v1.ResourceHealthStatusHealthy
if d.Health != pluginapi.Healthy {
health = v1.ResourceHealthStatusUnhealthy
}
resourceStatus.Resources = append(resourceStatus.Resources, v1.ResourceHealth{
ResourceID: v1.ResourceID(id),
Health: health,
})
}
if status.ContainerStatuses[i].AllocatedResourcesStatus == nil {
status.ContainerStatuses[i].AllocatedResourcesStatus = []v1.ResourceStatus{}
}
// look up the resource status by name and update it
found := false
for j, rs := range status.ContainerStatuses[i].AllocatedResourcesStatus {
if rs.Name == resourceStatus.Name {
status.ContainerStatuses[i].AllocatedResourcesStatus[j] = resourceStatus
found = true
break
}
}
if !found {
status.ContainerStatuses[i].AllocatedResourcesStatus = append(status.ContainerStatuses[i].AllocatedResourcesStatus, resourceStatus)
}
}
}
}
// ShouldResetExtendedResourceCapacity returns whether the extended resources should be zeroed or not,
// depending on whether the node has been recreated. Absence of the checkpoint file strongly indicates the node
// has been recreated.
func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
checkpoints, err := m.checkpointManager.ListCheckpoints()
if err != nil {
return false
}
return len(checkpoints) == 0
}
func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
cntID, err := m.containerMap.GetContainerID(podUID, cntName)
if err != nil {
klog.ErrorS(err, "Container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
return false
}
// note that if container runtime is down when kubelet restarts, this set will be empty,
// so on kubelet restart containers will again fail admission, hitting https://github.com/kubernetes/kubernetes/issues/118559 again.
// This scenario should however be rare enough.
if !m.containerRunningSet.Has(cntID) {
klog.V(4).InfoS("Container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
return false
}
// Once we make it here we know we have a running container.
klog.V(4).InfoS("Container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
return true
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"net"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/klog/v2"
api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)
// DevicePlugin interface provides methods for accessing Device Plugin resources, API and unix socket.
type DevicePlugin interface {
API() api.DevicePluginClient
Resource() string
SocketPath() string
}
// Client interface provides methods for establishing/closing gRPC connection and running the device plugin gRPC client.
type Client interface {
Connect() error
Run()
Disconnect() error
}
type client struct {
mutex sync.Mutex
resource string
socket string
grpc *grpc.ClientConn
handler ClientHandler
client api.DevicePluginClient
}
// NewPluginClient returns an initialized device plugin client.
func NewPluginClient(r string, socketPath string, h ClientHandler) Client {
return &client{
resource: r,
socket: socketPath,
handler: h,
}
}
// Connect establishes a gRPC connection between the device manager and the device plugin.
func (c *client) Connect() error {
client, conn, err := dial(c.socket)
if err != nil {
klog.ErrorS(err, "Unable to connect to device plugin client with socket path", "path", c.socket)
return err
}
c.mutex.Lock()
c.grpc = conn
c.client = client
c.mutex.Unlock()
return c.handler.PluginConnected(c.resource, c)
}
// Run runs the device plugin gRPC client, processing ListAndWatch updates until the stream ends.
func (c *client) Run() {
stream, err := c.client.ListAndWatch(context.Background(), &api.Empty{})
if err != nil {
klog.ErrorS(err, "ListAndWatch ended unexpectedly for device plugin", "resource", c.resource)
return
}
for {
response, err := stream.Recv()
if err != nil {
klog.ErrorS(err, "ListAndWatch ended unexpectedly for device plugin", "resource", c.resource)
return
}
klog.V(2).InfoS("State pushed for device plugin", "resource", c.resource, "resourceCapacity", len(response.Devices))
c.handler.PluginListAndWatchReceiver(c.resource, response)
}
}
// Disconnect closes the gRPC connection between the device manager and the device plugin.
func (c *client) Disconnect() error {
c.mutex.Lock()
if c.grpc != nil {
if err := c.grpc.Close(); err != nil {
klog.V(2).ErrorS(err, "Failed to close grcp connection", "resource", c.Resource())
}
c.grpc = nil
}
c.mutex.Unlock()
c.handler.PluginDisconnected(c.resource)
klog.V(2).InfoS("Device plugin disconnected", "resource", c.resource)
return nil
}
func (c *client) Resource() string {
return c.resource
}
func (c *client) API() api.DevicePluginClient {
return c.client
}
func (c *client) SocketPath() string {
return c.socket
}
// dial establishes the gRPC communication with the registered device plugin. https://godoc.org/google.golang.org/grpc#Dial
func dial(unixSocketPath string) (api.DevicePluginClient, *grpc.ClientConn, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
c, err := grpc.DialContext(ctx, unixSocketPath,
grpc.WithAuthority("localhost"),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "unix", addr)
}),
)
if err != nil {
return nil, nil, fmt.Errorf(errFailedToDialDevicePlugin+" %v", err)
}
return api.NewDevicePluginClient(c), c, nil
}
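// exampleDial is an illustrative sketch, not part of the original file:
// dial is what Connect ultimately relies on; given a plugin's unix socket
// it returns a ready DevicePluginClient. The socket path is hypothetical.
func exampleDial() error {
client, conn, err := dial("/var/lib/kubelet/device-plugins/vendor.sock")
if err != nil {
return err
}
defer conn.Close()
_, err = client.GetDevicePluginOptions(context.Background(), &api.Empty{})
return err
}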
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"os"
"time"
core "k8s.io/api/core/v1"
"k8s.io/klog/v2"
api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
func (s *server) GetPluginHandler() cache.PluginHandler {
if f, err := os.Create(s.socketDir + "DEPRECATION"); err != nil {
klog.ErrorS(err, "Failed to create deprecation file at socket dir", "path", s.socketDir)
} else {
f.Close()
klog.V(4).InfoS("Created deprecation file", "path", f.Name())
}
return s
}
func (s *server) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error {
klog.V(2).InfoS("Registering plugin at endpoint", "plugin", pluginName, "endpoint", endpoint)
return s.connectClient(pluginName, endpoint)
}
func (s *server) DeRegisterPlugin(pluginName, endpoint string) {
klog.V(2).InfoS("Deregistering plugin", "plugin", pluginName, "endpoint", endpoint)
client := s.getClient(pluginName)
if client != nil {
s.disconnectClient(pluginName, client)
}
}
func (s *server) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
klog.V(2).InfoS("Got plugin at endpoint with versions", "plugin", pluginName, "endpoint", endpoint, "versions", versions)
if !s.isVersionCompatibleWithPlugin(versions...) {
return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", api.Version, versions)
}
if !v1helper.IsExtendedResourceName(core.ResourceName(pluginName)) {
return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName))
}
klog.V(2).InfoS("Device plugin validated", "plugin", pluginName, "endpoint", endpoint, "versions", versions)
return nil
}
func (s *server) connectClient(name string, socketPath string) error {
c := NewPluginClient(name, socketPath, s.chandler)
s.registerClient(name, c)
if err := c.Connect(); err != nil {
s.deregisterClient(name)
klog.ErrorS(err, "Failed to connect to new client", "resource", name)
return err
}
klog.V(2).InfoS("Connected to new client", "resource", name)
go func() {
s.runClient(name, c)
}()
return nil
}
func (s *server) disconnectClient(name string, c Client) error {
s.deregisterClient(name)
return c.Disconnect()
}
func (s *server) registerClient(name string, c Client) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.clients[name] = c
klog.V(2).InfoS("Registered client", "name", name)
}
func (s *server) deregisterClient(name string) {
s.mutex.Lock()
defer s.mutex.Unlock()
delete(s.clients, name)
klog.V(2).InfoS("Deregistered client", "name", name)
}
func (s *server) runClient(name string, c Client) {
c.Run()
c = s.getClient(name)
if c == nil {
return
}
if err := s.disconnectClient(name, c); err != nil {
klog.ErrorS(err, "Unable to disconnect client", "resource", name, "client", c)
}
}
func (s *server) getClient(name string) Client {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.clients[name]
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"sync"
"github.com/opencontainers/selinux/go-selinux"
"google.golang.org/grpc"
core "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/klog/v2"
api "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
// Server interface provides methods for Device plugin registration server.
type Server interface {
cache.PluginHandler
healthz.HealthChecker
Start() error
Stop() error
SocketPath() string
}
type server struct {
socketName string
socketDir string
mutex sync.Mutex
wg sync.WaitGroup
grpc *grpc.Server
rhandler RegistrationHandler
chandler ClientHandler
clients map[string]Client
// isStarted indicates whether the service has started successfully.
isStarted bool
api.UnsafeRegistrationServer
}
// NewServer returns an initialized device plugin registration server.
func NewServer(socketPath string, rh RegistrationHandler, ch ClientHandler) (Server, error) {
if socketPath == "" || !filepath.IsAbs(socketPath) {
return nil, fmt.Errorf(errBadSocket+" %s", socketPath)
}
dir, name := filepath.Split(socketPath)
klog.V(2).InfoS("Creating device plugin registration server", "version", api.Version, "socket", socketPath)
s := &server{
socketName: name,
socketDir: dir,
rhandler: rh,
chandler: ch,
clients: make(map[string]Client),
}
return s, nil
}
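// Illustrative sketch (not part of the original source): the expected server
// lifecycle, assuming rh and ch are existing RegistrationHandler and
// ClientHandler implementations and the socket path is hypothetical:
//
//	srv, err := NewServer("/var/lib/kubelet/device-plugins/kubelet.sock", rh, ch)
//	if err != nil {
//		return err
//	}
//	if err := srv.Start(); err != nil {
//		return err
//	}
//	defer srv.Stop()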
func (s *server) Start() error {
klog.V(2).InfoS("Starting device plugin registration server")
if err := os.MkdirAll(s.socketDir, 0750); err != nil {
klog.ErrorS(err, "Failed to create the device plugin socket directory", "directory", s.socketDir)
return err
}
if selinux.GetEnabled() {
if err := selinux.SetFileLabel(s.socketDir, config.KubeletPluginsDirSELinuxLabel); err != nil {
klog.ErrorS(err, "Unprivileged containerized plugins might not work. Could not set selinux context on socket dir", "path", s.socketDir)
}
}
// For now, we leave cleanup of the *entire* directory up to the Handler
// (even though we should in theory be able to just wipe the whole directory)
// because the Handler stores its checkpoint file (amongst others) in here.
if err := s.rhandler.CleanupPluginDirectory(s.socketDir); err != nil {
klog.ErrorS(err, "Failed to cleanup the device plugin directory", "directory", s.socketDir)
return err
}
ln, err := net.Listen("unix", s.SocketPath())
if err != nil {
klog.ErrorS(err, "Failed to listen to socket while starting device plugin registry")
return err
}
s.wg.Add(1)
s.grpc = grpc.NewServer([]grpc.ServerOption{}...)
api.RegisterRegistrationServer(s.grpc, s)
go func() {
defer s.wg.Done()
s.setHealthy()
if err = s.grpc.Serve(ln); err != nil {
s.setUnhealthy()
klog.ErrorS(err, "Error while serving device plugin registration grpc server")
}
}()
return nil
}
func (s *server) Stop() error {
s.visitClients(func(r string, c Client) {
if err := s.disconnectClient(r, c); err != nil {
klog.ErrorS(err, "Failed to disconnect device plugin client", "resourceName", r)
}
})
s.mutex.Lock()
defer s.mutex.Unlock()
if s.grpc == nil {
return nil
}
s.grpc.Stop()
s.wg.Wait()
s.grpc = nil
// During kubelet termination, we do not need the registration server,
// and we consider the kubelet to be healthy even when it is down.
s.setHealthy()
klog.V(2).InfoS("Stopping device plugin registration server")
return nil
}
func (s *server) SocketPath() string {
return filepath.Join(s.socketDir, s.socketName)
}
func (s *server) Register(ctx context.Context, r *api.RegisterRequest) (*api.Empty, error) {
klog.InfoS("Got registration request from device plugin with resource", "resourceName", r.ResourceName)
metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc()
if !s.isVersionCompatibleWithPlugin(r.Version) {
err := fmt.Errorf(errUnsupportedVersion, r.Version, api.SupportedVersions)
klog.ErrorS(err, "Bad registration request from device plugin with resource", "resourceName", r.ResourceName)
return &api.Empty{}, err
}
if !v1helper.IsExtendedResourceName(core.ResourceName(r.ResourceName)) {
err := fmt.Errorf(errInvalidResourceName, r.ResourceName)
klog.ErrorS(err, "Bad registration request from device plugin")
return &api.Empty{}, err
}
if err := s.connectClient(r.ResourceName, filepath.Join(s.socketDir, r.Endpoint)); err != nil {
klog.ErrorS(err, "Error connecting to device plugin client")
return &api.Empty{}, err
}
return &api.Empty{}, nil
}
func (s *server) isVersionCompatibleWithPlugin(versions ...string) bool {
// TODO(vikasc): Currently this is fine as we only have a single supported version. When we do need to support
// multiple versions in the future, we may need to extend this function to return a supported version.
// E.g., say kubelet supports v1beta1 and v1beta2, and we get v1alpha1 and v1beta1 from a device plugin,
// this function should return v1beta1
for _, version := range versions {
for _, supportedVersion := range api.SupportedVersions {
if version == supportedVersion {
return true
}
}
}
return false
}
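// visitClients calls visit for each registered client. The mutex is released
// while visit runs so that the callback can re-acquire it (as the disconnect
// path in Stop does); consequently the client map may change between visits.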
func (s *server) visitClients(visit func(r string, c Client)) {
s.mutex.Lock()
for r, c := range s.clients {
s.mutex.Unlock()
visit(r, c)
s.mutex.Lock()
}
s.mutex.Unlock()
}
func (s *server) Name() string {
return "device-plugin"
}
func (s *server) Check(_ *http.Request) error {
if s.isStarted {
return nil
}
return fmt.Errorf("device plugin registration gRPC server failed and no device plugins can register")
}
// setHealthy sets the health status of the gRPC server.
func (s *server) setHealthy() {
s.isStarted = true
}
// setUnhealthy sets the health status of the gRPC server to unhealthy.
func (s *server) setUnhealthy() {
s.isStarted = false
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"net"
"os"
"path/filepath"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)
// Stub implementation for DevicePlugin.
type Stub struct {
devs []*pluginapi.Device
socket string
resourceName string
preStartContainerFlag bool
getPreferredAllocationFlag bool
stop chan interface{}
wg sync.WaitGroup
update chan []*pluginapi.Device
server *grpc.Server
// allocFunc is used for handling allocation requests
allocFunc stubAllocFunc
// getPreferredAllocFunc is used for handling getPreferredAllocation requests
getPreferredAllocFunc stubGetPreferredAllocFunc
// registerControlFunc is used to control auto-registration with the kubelet
registerControlFunc stubRegisterControlFunc
registrationStatus chan watcherapi.RegistrationStatus // for testing
endpoint string // for testing
kubeletRestartWatcher *fsnotify.Watcher
pluginapi.UnsafeDevicePluginServer
watcherapi.UnsafeRegistrationServer
}
// stubGetPreferredAllocFunc is the function called when a getPreferredAllocation request is received from Kubelet
type stubGetPreferredAllocFunc func(r *pluginapi.PreferredAllocationRequest, devs map[string]*pluginapi.Device) (*pluginapi.PreferredAllocationResponse, error)
func defaultGetPreferredAllocFunc(r *pluginapi.PreferredAllocationRequest, devs map[string]*pluginapi.Device) (*pluginapi.PreferredAllocationResponse, error) {
var response pluginapi.PreferredAllocationResponse
return &response, nil
}
// stubAllocFunc is the function called when an allocation request is received from Kubelet
type stubAllocFunc func(r *pluginapi.AllocateRequest, devs map[string]*pluginapi.Device) (*pluginapi.AllocateResponse, error)
func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]*pluginapi.Device) (*pluginapi.AllocateResponse, error) {
var response pluginapi.AllocateResponse
return &response, nil
}
// stubRegisterControlFunc is the function called when a registration request is received from Kubelet
type stubRegisterControlFunc func() bool
func defaultRegisterControlFunc() bool {
return true
}
// NewDevicePluginStub returns an initialized DevicePlugin Stub.
func NewDevicePluginStub(devs []*pluginapi.Device, socket string, name string, preStartContainerFlag bool, getPreferredAllocationFlag bool) *Stub {
watcher, err := fsnotify.NewWatcher()
if err != nil {
klog.ErrorS(err, "Watcher creation failed")
panic(err)
}
return &Stub{
devs: devs,
socket: socket,
resourceName: name,
preStartContainerFlag: preStartContainerFlag,
getPreferredAllocationFlag: getPreferredAllocationFlag,
registerControlFunc: defaultRegisterControlFunc,
stop: make(chan interface{}),
update: make(chan []*pluginapi.Device),
allocFunc: defaultAllocFunc,
getPreferredAllocFunc: defaultGetPreferredAllocFunc,
kubeletRestartWatcher: watcher,
}
}
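// Illustrative sketch (not part of the original source): a test might start
// the stub on a hypothetical socket like this:
//
//	devs := []*pluginapi.Device{{ID: "dev-1", Health: pluginapi.Healthy}}
//	stub := NewDevicePluginStub(devs, "/tmp/device-plugin.sock", "example.com/resource", false, false)
//	if err := stub.Start(); err != nil {
//		panic(err)
//	}
//	defer stub.Stop()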
// SetGetPreferredAllocFunc sets getPreferredAllocFunc of the device plugin
func (m *Stub) SetGetPreferredAllocFunc(f stubGetPreferredAllocFunc) {
m.getPreferredAllocFunc = f
}
// SetAllocFunc sets allocFunc of the device plugin
func (m *Stub) SetAllocFunc(f stubAllocFunc) {
m.allocFunc = f
}
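// Illustrative sketch (not part of the original source): a test can replace
// the default no-op allocation with a custom stubAllocFunc, for example one
// that injects a hypothetical device mount for every container request:
//
//	stub.SetAllocFunc(func(r *pluginapi.AllocateRequest, devs map[string]*pluginapi.Device) (*pluginapi.AllocateResponse, error) {
//		resp := &pluginapi.AllocateResponse{}
//		for range r.ContainerRequests {
//			resp.ContainerResponses = append(resp.ContainerResponses, &pluginapi.ContainerAllocateResponse{
//				Mounts: []*pluginapi.Mount{{ContainerPath: "/dev/example", HostPath: "/dev/example"}},
//			})
//		}
//		return resp, nil
//	})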
// SetRegisterControlFunc sets RegisterControlFunc of the device plugin
func (m *Stub) SetRegisterControlFunc(f stubRegisterControlFunc) {
m.registerControlFunc = f
}
// Start starts the gRPC server of the device plugin. Can only
// be called once.
func (m *Stub) Start() error {
klog.InfoS("Starting device plugin server")
err := m.cleanup()
if err != nil {
return err
}
sock, err := net.Listen("unix", m.socket)
if err != nil {
return err
}
m.wg.Add(1)
m.server = grpc.NewServer([]grpc.ServerOption{}...)
pluginapi.RegisterDevicePluginServer(m.server, m)
watcherapi.RegisterRegistrationServer(m.server, m)
err = m.kubeletRestartWatcher.Add(filepath.Dir(m.socket))
if err != nil {
klog.ErrorS(err, "Failed to add watch", "devicePluginPath", pluginapi.DevicePluginPath)
return err
}
go func() {
defer m.wg.Done()
if err = m.server.Serve(sock); err != nil {
klog.ErrorS(err, "Error while serving device plugin registration grpc server")
}
}()
var lastDialErr error
wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
var conn *grpc.ClientConn
_, conn, lastDialErr = dial(m.socket)
if lastDialErr != nil {
return false, nil
}
conn.Close()
return true, nil
})
if lastDialErr != nil {
return lastDialErr
}
klog.InfoS("Starting to serve on socket", "socket", m.socket)
return nil
}
func (m *Stub) Restart() error {
klog.InfoS("Restarting Device Plugin server")
if m.server == nil {
return nil
}
m.server.Stop()
m.server = nil
return m.Start()
}
// Stop stops the gRPC server. Can be called without a prior Start
// and more than once. Not safe to be called concurrently by different
// goroutines!
func (m *Stub) Stop() error {
klog.InfoS("Stopping device plugin server")
if m.server == nil {
return nil
}
m.kubeletRestartWatcher.Close()
m.server.Stop()
m.wg.Wait()
m.server = nil
close(m.stop) // This prevents re-starting the server.
return m.cleanup()
}
func (m *Stub) Watch(kubeletEndpoint, resourceName, pluginSockDir string) {
for {
select {
// Detect a kubelet restart by watching for a newly created
// 'pluginapi.KubeletSocket' file. When this occurs, restart
// the device plugin server
case event := <-m.kubeletRestartWatcher.Events:
if event.Name == kubeletEndpoint && event.Op&fsnotify.Create == fsnotify.Create {
klog.InfoS("inotify: file created, restarting", "kubeletEndpoint", kubeletEndpoint)
var lastErr error
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 2*time.Minute, false, func(context.Context) (done bool, err error) {
restartErr := m.Restart()
if restartErr == nil {
return true, nil
}
klog.ErrorS(restartErr, "Retrying after error")
lastErr = restartErr
return false, nil
})
if err != nil {
klog.ErrorS(err, "Unable to restart server: wait timed out", "lastErr", lastErr.Error())
panic(err)
}
if ok := m.registerControlFunc(); ok {
if err := m.Register(kubeletEndpoint, resourceName, pluginSockDir); err != nil {
klog.ErrorS(err, "Unable to register to kubelet")
panic(err)
}
}
}
// Watch for any other fs errors and log them.
case err := <-m.kubeletRestartWatcher.Errors:
klog.ErrorS(err, "inotify error")
}
}
}
// GetInfo is the RPC which returns the pluginInfo
func (m *Stub) GetInfo(ctx context.Context, req *watcherapi.InfoRequest) (*watcherapi.PluginInfo, error) {
klog.InfoS("GetInfo")
return &watcherapi.PluginInfo{
Type: watcherapi.DevicePlugin,
Name: m.resourceName,
Endpoint: m.endpoint,
SupportedVersions: []string{pluginapi.Version}}, nil
}
// NotifyRegistrationStatus receives the registration notification from watcher
func (m *Stub) NotifyRegistrationStatus(ctx context.Context, status *watcherapi.RegistrationStatus) (*watcherapi.RegistrationStatusResponse, error) {
if m.registrationStatus != nil {
m.registrationStatus <- *status
}
if !status.PluginRegistered {
klog.InfoS("Registration failed", "err", status.Error)
}
return &watcherapi.RegistrationStatusResponse{}, nil
}
// Register registers the device plugin for the given resourceName with Kubelet.
func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir string) error {
klog.InfoS("Register", "kubeletEndpoint", kubeletEndpoint, "resourceName", resourceName, "socket", pluginSockDir)
if pluginSockDir != "" {
if _, err := os.Stat(pluginSockDir + "DEPRECATION"); err == nil {
klog.InfoS("Deprecation file found. Skip registration")
return nil
}
}
klog.InfoS("Deprecation file not found. Invoke registration")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, kubeletEndpoint,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "unix", addr)
}))
if err != nil {
return err
}
defer conn.Close()
client := pluginapi.NewRegistrationClient(conn)
reqt := &pluginapi.RegisterRequest{
Version: pluginapi.Version,
Endpoint: filepath.Base(m.socket),
ResourceName: resourceName,
Options: &pluginapi.DevicePluginOptions{
PreStartRequired: m.preStartContainerFlag,
GetPreferredAllocationAvailable: m.getPreferredAllocationFlag,
},
}
_, err = client.Register(context.Background(), reqt)
if err != nil {
// Stop server
m.server.Stop()
klog.ErrorS(err, "Client unable to register to kubelet")
return err
}
klog.InfoS("Device Plugin registered with the Kubelet")
return err
}
// GetDevicePluginOptions returns DevicePluginOptions settings for the device plugin.
func (m *Stub) GetDevicePluginOptions(ctx context.Context, e *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
options := &pluginapi.DevicePluginOptions{
PreStartRequired: m.preStartContainerFlag,
GetPreferredAllocationAvailable: m.getPreferredAllocationFlag,
}
return options, nil
}
// PreStartContainer resets the devices received
func (m *Stub) PreStartContainer(ctx context.Context, r *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) {
klog.InfoS("PreStartContainer", "request", r)
return &pluginapi.PreStartContainerResponse{}, nil
}
// ListAndWatch lists devices and updates that list according to the Update call
func (m *Stub) ListAndWatch(e *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error {
klog.InfoS("ListAndWatch")
s.Send(&pluginapi.ListAndWatchResponse{Devices: m.devs})
for {
select {
case <-m.stop:
return nil
case updated := <-m.update:
s.Send(&pluginapi.ListAndWatchResponse{Devices: updated})
}
}
}
// Update allows the device plugin to send new devices through ListAndWatch
func (m *Stub) Update(devs []*pluginapi.Device) {
m.update <- devs
}
// GetPreferredAllocation gets the preferred allocation from a set of available devices
func (m *Stub) GetPreferredAllocation(ctx context.Context, r *pluginapi.PreferredAllocationRequest) (*pluginapi.PreferredAllocationResponse, error) {
klog.InfoS("GetPreferredAllocation", "request", r)
devs := make(map[string]*pluginapi.Device)
for _, dev := range m.devs {
devs[dev.ID] = dev
}
return m.getPreferredAllocFunc(r, devs)
}
// Allocate does a mock allocation
func (m *Stub) Allocate(ctx context.Context, r *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) {
klog.InfoS("Allocate", "request", r)
devs := make(map[string]*pluginapi.Device)
for _, dev := range m.devs {
devs[dev.ID] = dev
}
return m.allocFunc(r, devs)
}
func (m *Stub) cleanup() error {
if err := os.Remove(m.socket); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"maps"
"sync"
"google.golang.org/protobuf/proto"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/sets"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type deviceAllocateInfo struct {
// deviceIds contains device Ids allocated to this container for the given resourceName.
deviceIds checkpoint.DevicesPerNUMA
// allocResp contains cached rpc AllocateResponse.
allocResp *pluginapi.ContainerAllocateResponse
}
type resourceAllocateInfo map[string]deviceAllocateInfo // Keyed by resourceName.
type containerDevices map[string]resourceAllocateInfo // Keyed by containerName.
type podDevices struct {
sync.RWMutex
devs map[string]containerDevices // Keyed by podUID.
}
// newPodDevices returns a podDevices object guarded by its own RWMutex,
// wrapping a map where the key is a pod UID and the value contains
// container devices information of type containerDevices.
func newPodDevices() *podDevices {
return &podDevices{devs: make(map[string]containerDevices)}
}
func (pdev *podDevices) pods() sets.Set[string] {
pdev.RLock()
defer pdev.RUnlock()
ret := sets.New[string]()
for k := range pdev.devs {
ret.Insert(k)
}
return ret
}
func (pdev *podDevices) size() int {
pdev.RLock()
defer pdev.RUnlock()
return len(pdev.devs)
}
func (pdev *podDevices) hasPod(podUID string) bool {
pdev.RLock()
defer pdev.RUnlock()
_, podExists := pdev.devs[podUID]
return podExists
}
func (pdev *podDevices) insert(podUID, contName, resource string, devices checkpoint.DevicesPerNUMA, resp *pluginapi.ContainerAllocateResponse) {
pdev.Lock()
defer pdev.Unlock()
if _, podExists := pdev.devs[podUID]; !podExists {
pdev.devs[podUID] = make(containerDevices)
}
if _, contExists := pdev.devs[podUID][contName]; !contExists {
pdev.devs[podUID][contName] = make(resourceAllocateInfo)
}
pdev.devs[podUID][contName][resource] = deviceAllocateInfo{
deviceIds: devices,
allocResp: resp,
}
}
func (pdev *podDevices) delete(pods []string) {
pdev.Lock()
defer pdev.Unlock()
for _, uid := range pods {
delete(pdev.devs, uid)
}
}
// Returns the set of device Ids allocated to the given pod for the given resource.
// Returns an empty set if we don't have cached state for the given <podUID, resource>.
func (pdev *podDevices) podDevices(podUID, resource string) sets.Set[string] {
pdev.RLock()
defer pdev.RUnlock()
ret := sets.New[string]()
for contName := range pdev.devs[podUID] {
ret = ret.Union(pdev.containerDevices(podUID, contName, resource))
}
return ret
}
// Returns the set of device Ids allocated to the given container for the given resource.
// Returns nil if we don't have cached state for the given <podUID, contName, resource>.
func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.Set[string] {
pdev.RLock()
defer pdev.RUnlock()
if _, podExists := pdev.devs[podUID]; !podExists {
return nil
}
if _, contExists := pdev.devs[podUID][contName]; !contExists {
return nil
}
devs, resourceExists := pdev.devs[podUID][contName][resource]
if !resourceExists {
return nil
}
return devs.deviceIds.Devices()
}
// Populates allocatedResources with the device resources allocated to the specified <podUID, contName>.
func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
pdev.RLock()
defer pdev.RUnlock()
containers, exists := pdev.devs[podUID]
if !exists {
return
}
resources, exists := containers[contName]
if !exists {
return
}
for resource, devices := range resources {
allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds.Devices())
}
}
// Removes the device resources allocated to the specified <podUID, contName> from allocatedResources.
func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
pdev.RLock()
defer pdev.RUnlock()
containers, exists := pdev.devs[podUID]
if !exists {
return
}
resources, exists := containers[contName]
if !exists {
return
}
for resource, devices := range resources {
allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds.Devices())
}
}
// Returns all devices allocated to the pods being tracked, keyed by resourceName.
func (pdev *podDevices) devices() map[string]sets.Set[string] {
ret := make(map[string]sets.Set[string])
pdev.RLock()
defer pdev.RUnlock()
for _, containerDevices := range pdev.devs {
for _, resources := range containerDevices {
for resource, devices := range resources {
if _, exists := ret[resource]; !exists {
ret[resource] = sets.New[string]()
}
if devices.allocResp != nil {
ret[resource] = ret[resource].Union(devices.deviceIds.Devices())
}
}
}
}
return ret
}
// Returns podUID and containerName for a device
func (pdev *podDevices) getPodAndContainerForDevice(deviceID string) (string, string) {
pdev.RLock()
defer pdev.RUnlock()
for podUID, containerDevices := range pdev.devs {
for containerName, resources := range containerDevices {
for _, devices := range resources {
if devices.deviceIds.Devices().Has(deviceID) {
return podUID, containerName
}
}
}
}
return "", ""
}
// Turns podDevices into checkpointData.
func (pdev *podDevices) toCheckpointData() []checkpoint.PodDevicesEntry {
var data []checkpoint.PodDevicesEntry
pdev.RLock()
defer pdev.RUnlock()
for podUID, containerDevices := range pdev.devs {
for conName, resources := range containerDevices {
for resource, devices := range resources {
if devices.allocResp == nil {
klog.ErrorS(nil, "Can't marshal allocResp, allocation response is missing", "podUID", podUID, "containerName", conName, "resourceName", resource)
continue
}
allocResp, err := proto.Marshal(devices.allocResp)
if err != nil {
klog.ErrorS(err, "Can't marshal allocResp", "podUID", podUID, "containerName", conName, "resourceName", resource)
continue
}
data = append(data, checkpoint.PodDevicesEntry{
PodUID: podUID,
ContainerName: conName,
ResourceName: resource,
DeviceIDs: devices.deviceIds,
AllocResp: allocResp})
}
}
}
return data
}
// Populates podDevices from the passed in checkpointData.
func (pdev *podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) {
for _, entry := range data {
klog.V(2).InfoS("Get checkpoint entry",
"podUID", entry.PodUID, "containerName", entry.ContainerName,
"resourceName", entry.ResourceName, "deviceIDs", entry.DeviceIDs, "allocated", entry.AllocResp)
allocResp := &pluginapi.ContainerAllocateResponse{}
err := proto.Unmarshal(entry.AllocResp, allocResp)
if err != nil {
klog.ErrorS(err, "Can't unmarshal allocResp", "podUID", entry.PodUID, "containerName", entry.ContainerName, "resourceName", entry.ResourceName)
continue
}
pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, allocResp)
}
}
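// Illustrative sketch (not part of the original source): toCheckpointData and
// fromCheckpointData are symmetric, so restoring state across a kubelet
// restart amounts to:
//
//	data := pdev.toCheckpointData() // persisted via the checkpoint manager
//	restored := newPodDevices()
//	restored.fromCheckpointData(data)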
// Returns combined container runtime settings to consume the container's allocated devices.
func (pdev *podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions {
pdev.RLock()
defer pdev.RUnlock()
containers, exists := pdev.devs[podUID]
if !exists {
return nil
}
resources, exists := containers[contName]
if !exists {
return nil
}
opts := &DeviceRunContainerOptions{}
// Maps to detect duplicate settings.
devsMap := make(map[string]string)
mountsMap := make(map[string]string)
envsMap := make(map[string]string)
annotationsMap := make(map[string]string)
// Keep track of all CDI devices requested for the container.
allCDIDevices := sets.New[string]()
// Loops through AllocationResponses of all cached device resources.
for _, devices := range resources {
resp := devices.allocResp
// Each Allocate response has the following artifacts.
// Environment variables
// Mount points
// Device files
// Container annotations
// CDI device IDs
// These artifacts are per resource per container.
// Updates RunContainerOptions.Envs.
for k, v := range resp.Envs {
if e, ok := envsMap[k]; ok {
klog.V(4).InfoS("Skip existing env", "envKey", k, "envValue", v)
if e != v {
klog.ErrorS(nil, "Environment variable has conflicting setting", "envKey", k, "expected", v, "got", e)
}
continue
}
klog.V(4).InfoS("Add env", "envKey", k, "envValue", v)
envsMap[k] = v
opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v})
}
// Updates RunContainerOptions.Devices.
for _, dev := range resp.Devices {
if d, ok := devsMap[dev.ContainerPath]; ok {
klog.V(4).InfoS("Skip existing device", "containerPath", dev.ContainerPath, "hostPath", dev.HostPath)
if d != dev.HostPath {
klog.ErrorS(nil, "Container device has conflicting mapping host devices",
"containerPath", dev.ContainerPath, "got", d, "expected", dev.HostPath)
}
continue
}
klog.V(4).InfoS("Add device", "containerPath", dev.ContainerPath, "hostPath", dev.HostPath)
devsMap[dev.ContainerPath] = dev.HostPath
opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{
PathOnHost: dev.HostPath,
PathInContainer: dev.ContainerPath,
Permissions: dev.Permissions,
})
}
// Updates RunContainerOptions.Mounts.
for _, mount := range resp.Mounts {
if m, ok := mountsMap[mount.ContainerPath]; ok {
klog.V(4).InfoS("Skip existing mount", "containerPath", mount.ContainerPath, "hostPath", mount.HostPath)
if m != mount.HostPath {
klog.ErrorS(nil, "Container mount has conflicting mapping host mounts",
"containerPath", mount.ContainerPath, "conflictingPath", m, "hostPath", mount.HostPath)
}
continue
}
klog.V(4).InfoS("Add mount", "containerPath", mount.ContainerPath, "hostPath", mount.HostPath)
mountsMap[mount.ContainerPath] = mount.HostPath
opts.Mounts = append(opts.Mounts, kubecontainer.Mount{
Name: mount.ContainerPath,
ContainerPath: mount.ContainerPath,
HostPath: mount.HostPath,
ReadOnly: mount.ReadOnly,
// TODO: This may need to be part of Device plugin API.
SELinuxRelabel: false,
})
}
// Updates for Annotations
for k, v := range resp.Annotations {
if e, ok := annotationsMap[k]; ok {
klog.V(4).InfoS("Skip existing annotation", "annotationKey", k, "annotationValue", v)
if e != v {
klog.ErrorS(nil, "Annotation has conflicting setting", "annotationKey", k, "expected", e, "got", v)
}
continue
}
klog.V(4).InfoS("Add annotation", "annotationKey", k, "annotationValue", v)
annotationsMap[k] = v
opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v})
}
// Updates for CDI devices.
cdiDevices := getCDIDeviceInfo(resp, allCDIDevices)
opts.CDIDevices = append(opts.CDIDevices, cdiDevices...)
}
return opts
}
// getCDIDeviceInfo returns CDI devices from an allocate response
func getCDIDeviceInfo(resp *pluginapi.ContainerAllocateResponse, knownCDIDevices sets.Set[string]) []kubecontainer.CDIDevice {
var cdiDevices []kubecontainer.CDIDevice
for _, cdiDevice := range resp.CdiDevices {
if knownCDIDevices.Has(cdiDevice.Name) {
klog.V(4).InfoS("Skip existing CDI Device", "name", cdiDevice.Name)
continue
}
klog.V(4).InfoS("Add CDI device", "name", cdiDevice.Name)
knownCDIDevices.Insert(cdiDevice.Name)
device := kubecontainer.CDIDevice{
Name: cdiDevice.Name,
}
cdiDevices = append(cdiDevices, device)
}
return cdiDevices
}
// getContainerDevices returns the devices assigned to the provided container for all ResourceNames
func (pdev *podDevices) getContainerDevices(podUID, contName string) ResourceDeviceInstances {
pdev.RLock()
defer pdev.RUnlock()
if _, podExists := pdev.devs[podUID]; !podExists {
return nil
}
if _, contExists := pdev.devs[podUID][contName]; !contExists {
return nil
}
resDev := NewResourceDeviceInstances()
for resource, allocateInfo := range pdev.devs[podUID][contName] {
if len(allocateInfo.deviceIds) == 0 {
continue
}
devicePluginMap := make(map[string]*pluginapi.Device)
for numaid, devlist := range allocateInfo.deviceIds {
for _, devID := range devlist {
var topology *pluginapi.TopologyInfo
if numaid != nodeWithoutTopology {
NUMANodes := []*pluginapi.NUMANode{{ID: numaid}}
if pDev, ok := devicePluginMap[devID]; ok && pDev.Topology != nil {
if nodes := pDev.Topology.GetNodes(); nodes != nil {
NUMANodes = append(NUMANodes, nodes...)
}
}
// ID and Healthy are not relevant here.
topology = &pluginapi.TopologyInfo{Nodes: NUMANodes}
}
devicePluginMap[devID] = &pluginapi.Device{
Topology: topology,
}
}
}
resDev[resource] = devicePluginMap
}
return resDev
}
// DeviceInstances is a mapping device name -> plugin device data
type DeviceInstances map[string]*pluginapi.Device
// ResourceDeviceInstances is a mapping resource name -> DeviceInstances
type ResourceDeviceInstances map[string]DeviceInstances
// NewResourceDeviceInstances returns a new ResourceDeviceInstances
func NewResourceDeviceInstances() ResourceDeviceInstances {
return make(ResourceDeviceInstances)
}
// Clone returns a clone of ResourceDeviceInstances
func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances {
clone := NewResourceDeviceInstances()
for resourceName, resourceDevs := range rdev {
clone[resourceName] = maps.Clone(resourceDevs)
}
return clone
}
// Filter takes a condition set expressed as map[string]sets.Set[string] and returns a new
// ResourceDeviceInstances with only the devices matching the condition set.
func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.Set[string]) ResourceDeviceInstances {
filtered := NewResourceDeviceInstances()
for resourceName, filterIDs := range cond {
if _, exists := rdev[resourceName]; !exists {
continue
}
filtered[resourceName] = DeviceInstances{}
for instanceID, instance := range rdev[resourceName] {
if filterIDs.Has(instanceID) {
filtered[resourceName][instanceID] = instance
}
}
}
return filtered
}
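// Illustrative sketch (not part of the original source): filtering the devices
// of a hypothetical resource down to an allocated subset:
//
//	rdev := ResourceDeviceInstances{
//		"example.com/gpu": DeviceInstances{"dev-0": {}, "dev-1": {}},
//	}
//	cond := map[string]sets.Set[string]{"example.com/gpu": sets.New("dev-1")}
//	subset := rdev.Filter(cond) // contains only "dev-1"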
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
// GetTopologyHints implements the TopologyManager HintProvider Interface which
// ensures the Device Manager is consulted when Topology Aware Hints for each
// container are created.
func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
// Garbage collect any stranded device resources before providing TopologyHints
m.UpdateAllocatedDevices()
// Loop through all device resources and generate TopologyHints for them.
deviceHints := make(map[string][]topologymanager.TopologyHint)
accumulatedResourceRequests := m.getContainerDeviceRequest(container)
m.mutex.Lock()
defer m.mutex.Unlock()
for resource, requested := range accumulatedResourceRequests {
// Only consider devices that actually contain topology information.
if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested)
deviceHints[resource] = nil
continue
}
// Short circuit to regenerate the same hints if there are already
// devices allocated to the Container. This might happen after a
// kubelet restart, for example.
allocated := m.podDevices.containerDevices(string(pod.UID), container.Name, resource)
if allocated.Len() > 0 {
if allocated.Len() != requested {
klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "allocated", allocated.Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name)
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
continue
}
// Get the list of available devices, for which TopologyHints should be generated.
available := m.getAvailableDevices(resource)
reusable := m.devicesToReuse[string(pod.UID)][resource]
if available.Union(reusable).Len() < requested {
klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "available", available.Union(reusable).Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
// Generate TopologyHints for this resource given the current
// request size and the list of available devices.
deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, reusable, requested)
}
return deviceHints
}
// GetPodTopologyHints implements the topologymanager.HintProvider Interface which
// ensures the Device Manager is consulted when Topology Aware Hints for Pod are created.
func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
// Garbage collect any stranded device resources before providing TopologyHints
m.UpdateAllocatedDevices()
deviceHints := make(map[string][]topologymanager.TopologyHint)
accumulatedResourceRequests := m.getPodDeviceRequest(pod)
m.mutex.Lock()
defer m.mutex.Unlock()
for resource, requested := range accumulatedResourceRequests {
// Only consider devices that actually contain topology information.
if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "request", requested)
deviceHints[resource] = nil
continue
}
// Short circuit to regenerate the same hints if there are already
// devices allocated to the Pod. This might happen after a
// kubelet restart, for example.
allocated := m.podDevices.podDevices(string(pod.UID), resource)
if allocated.Len() > 0 {
if allocated.Len() != requested {
klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "allocated", allocated.Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "allocated", allocated.Len())
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
continue
}
// Get the list of available devices, for which TopologyHints should be generated.
available := m.getAvailableDevices(resource)
if available.Len() < requested {
klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "available", available.Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
// Generate TopologyHints for this resource given the current
// request size and the list of available devices.
deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested)
}
return deviceHints
}
func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool {
// If any device has Topology NUMANodes available, we assume they care about alignment.
for _, device := range m.allDevices[resource] {
if device.Topology != nil && len(device.Topology.Nodes) > 0 {
return true
}
}
return false
}
func (m *ManagerImpl) getAvailableDevices(resource string) sets.Set[string] {
// Strip all devices in use from the list of healthy ones.
return m.healthyDevices[resource].Difference(m.allocatedDevices[resource])
}
func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.Set[string], reusable sets.Set[string], request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes
minAffinitySize := len(m.numaNodes)
// Iterate through all combinations of NUMA Nodes and build hints from them.
hints := []topologymanager.TopologyHint{}
bitmask.IterateBitMasks(m.numaNodes, func(mask bitmask.BitMask) {
// First, update minAffinitySize for the current request size.
devicesInMask := 0
for _, device := range m.allDevices[resource] {
if mask.AnySet(m.getNUMANodeIds(device.Topology)) {
devicesInMask++
}
}
if devicesInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
}
// Then check to see if all the reusable devices are part of the bitmask.
numMatching := 0
for d := range reusable {
// Skip the device if it doesn't specify any topology info.
if m.allDevices[resource][d].Topology == nil {
continue
}
// Otherwise disregard this mask if its NUMANode isn't part of it.
if !mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) {
return
}
numMatching++
}
// Finally, check to see if enough available devices remain on the
// current NUMA node combination to satisfy the device request.
for d := range available {
if mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) {
numMatching++
}
}
// If they don't, then move onto the next combination.
if numMatching < request {
return
}
// Otherwise, create a new hint from the NUMA mask and add it to the
// list of hints. We set all hint preferences to 'false' on the first
// pass through.
hints = append(hints, topologymanager.TopologyHint{
NUMANodeAffinity: mask,
Preferred: false,
})
})
// Loop back through all hints and update the 'Preferred' field based on
// counting the number of bits sets in the affinity mask and comparing it
// to the minAffinity. Only those with an equal number of bits set will be
// considered preferred.
for i := range hints {
if hints[i].NUMANodeAffinity.Count() == minAffinitySize {
hints[i].Preferred = true
}
}
return hints
}
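// Worked example (illustrative): with numaNodes = [0, 1], a request for 2
// devices, no reusable devices, and 2 available devices on each NUMA node,
// the masks {0}, {1}, and {0,1} all satisfy the request. minAffinitySize
// ends up as 1, so the single-node hints {0} and {1} are marked Preferred
// while {0,1} is not.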
func (m *ManagerImpl) getNUMANodeIds(topology *pluginapi.TopologyInfo) []int {
if topology == nil {
return nil
}
var ids []int
for _, n := range topology.Nodes {
ids = append(ids, int(n.ID))
}
return ids
}
func (m *ManagerImpl) getPodDeviceRequest(pod *v1.Pod) map[string]int {
// for these device plugin resources, requests == limits
limits := resource.PodLimits(pod, resource.PodResourcesOptions{
ExcludeOverhead: true,
})
podRequests := make(map[string]int)
for resourceName, quantity := range limits {
if !m.isDevicePluginResource(string(resourceName)) {
continue
}
podRequests[string(resourceName)] = int(quantity.Value())
}
return podRequests
}
func (m *ManagerImpl) getContainerDeviceRequest(container *v1.Container) map[string]int {
containerRequests := make(map[string]int)
for resourceObj, requestedObj := range container.Resources.Limits {
resource := string(resourceObj)
requested := int(requestedObj.Value())
if !m.isDevicePluginResource(resource) {
continue
}
containerRequests[resource] = requested
}
return containerRequests
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
"errors"
"fmt"
"slices"
"strings"
"sync"
"github.com/go-logr/logr"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/dra/state"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
)
// ClaimInfo holds information required
// to prepare and unprepare a resource claim.
// +k8s:deepcopy-gen=true
type ClaimInfo struct {
state.ClaimInfoState
prepared bool
}
// claimInfoCache is a cache of processed resource claims keyed by namespace/claimname.
type claimInfoCache struct {
logger klog.Logger
sync.RWMutex
checkpointer state.Checkpointer
claimInfo map[string]*ClaimInfo
}
// newClaimInfoFromClaim creates a new claim info from a resource claim.
// It verifies that the kubelet can handle the claim.
func newClaimInfoFromClaim(claim *resourceapi.ResourceClaim) (*ClaimInfo, error) {
claimInfoState := state.ClaimInfoState{
ClaimUID: claim.UID,
ClaimName: claim.Name,
Namespace: claim.Namespace,
PodUIDs: sets.New[string](),
DriverState: make(map[string]state.DriverState),
}
if claim.Status.Allocation == nil {
return nil, errors.New("not allocated")
}
for _, result := range claim.Status.Allocation.Devices.Results {
claimInfoState.DriverState[result.Driver] = state.DriverState{}
}
info := &ClaimInfo{
ClaimInfoState: claimInfoState,
prepared: false,
}
return info, nil
}
// newClaimInfoFromState creates a new claim info from a checkpointed claim info state object.
func newClaimInfoFromState(state *state.ClaimInfoState) *ClaimInfo {
info := &ClaimInfo{
ClaimInfoState: *state.DeepCopy(),
prepared: false,
}
return info
}
// addDevice adds a device's state to the claim info under the given driver name.
func (info *ClaimInfo) addDevice(driverName string, deviceState state.Device) {
if info.DriverState == nil {
info.DriverState = make(map[string]state.DriverState)
}
driverState := info.DriverState[driverName]
driverState.Devices = append(driverState.Devices, deviceState)
info.DriverState[driverName] = driverState
}
// addPodReference adds a pod reference to the claim info.
func (info *ClaimInfo) addPodReference(podUID types.UID) {
info.PodUIDs.Insert(string(podUID))
}
// hasPodReference checks if a pod reference exists in the claim info.
func (info *ClaimInfo) hasPodReference(podUID types.UID) bool {
return info.PodUIDs.Has(string(podUID))
}
// deletePodReference deletes a pod reference from the claim info.
func (info *ClaimInfo) deletePodReference(podUID types.UID) {
info.PodUIDs.Delete(string(podUID))
}
// setPrepared marks the claim info as prepared.
func (info *ClaimInfo) setPrepared() {
info.prepared = true
}
// isPrepared checks if claim info is prepared or not.
func (info *ClaimInfo) isPrepared() bool {
return info.prepared
}
// cdiDevicesAsList returns a list of CDIDevices from the provided claim info.
// When the request name is non-empty, only devices relevant for that request
// are returned.
func (info *ClaimInfo) cdiDevicesAsList(requestName string) []kubecontainer.CDIDevice {
var cdiDevices []kubecontainer.CDIDevice
for _, driverData := range info.DriverState {
for _, device := range driverData.Devices {
if requestName == "" || len(device.RequestNames) == 0 || slices.Contains(device.RequestNames, requestName) {
for _, cdiDeviceID := range device.CDIDeviceIDs {
cdiDevices = append(cdiDevices, kubecontainer.CDIDevice{Name: cdiDeviceID})
}
}
}
}
return cdiDevices
}
// newClaimInfoCache creates a new claim info cache object, pre-populated from a checkpoint (if present).
func newClaimInfoCache(logger klog.Logger, stateDir, checkpointName string) (*claimInfoCache, error) {
checkpointer, err := state.NewCheckpointer(stateDir, checkpointName)
if err != nil {
return nil, fmt.Errorf("could not initialize checkpoint manager, please drain node and remove DRA state file, err: %w", err)
}
checkpoint, err := checkpointer.GetOrCreate()
if err != nil {
return nil, fmt.Errorf("GetOrCreate() on checkpoint state: %w", err)
}
cache := &claimInfoCache{
logger: logger,
checkpointer: checkpointer,
claimInfo: make(map[string]*ClaimInfo),
}
entries, err := checkpoint.GetClaimInfoStateList()
if err != nil {
return nil, fmt.Errorf("GetEntries() on checkpoint: %w", err)
}
for _, entry := range entries {
info := newClaimInfoFromState(&entry)
cache.claimInfo[info.Namespace+"/"+info.ClaimName] = info
}
return cache, nil
}
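// Illustrative sketch (not part of the original source): the cache is created
// once at manager startup and then accessed under its lock; the state
// directory and checkpoint name below are hypothetical.
//
//	cache, err := newClaimInfoCache(klog.Background(), "/var/lib/kubelet/dra", "dra_manager_state")
//	if err != nil {
//		return err
//	}
//	_ = cache.withRLock(func() error {
//		info, found := cache.get("my-claim", "default")
//		_, _ = info, found
//		return nil
//	})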
// withLock runs a function while holding the claimInfoCache lock.
// It logs changes.
func (cache *claimInfoCache) withLock(f func() error) error {
cache.Lock()
defer cache.Unlock()
if loggerV := cache.logger.V(5); loggerV.Enabled() {
claimsInUseBefore := cache.claimsInUse()
defer func() {
claimsInUseAfter := cache.claimsInUse()
delta := claimsInUseDelta(claimsInUseBefore, claimsInUseAfter)
changed := false
for _, inUse := range delta {
if inUse.Delta != 0 {
changed = true
break
}
}
if changed {
cache.logger.V(5).Info("ResourceClaim usage changed", "claimsInUse", delta)
}
}()
}
return f()
}
// withRLock runs a function while holding the claimInfoCache rlock.
func (cache *claimInfoCache) withRLock(f func() error) error {
cache.RLock()
defer cache.RUnlock()
return f()
}
// add adds a new claim info object into the claim info cache.
func (cache *claimInfoCache) add(info *ClaimInfo) {
cache.claimInfo[info.Namespace+"/"+info.ClaimName] = info
}
// contains checks to see if a specific claim info object is already in the cache.
func (cache *claimInfoCache) contains(claimName, namespace string) bool {
_, exists := cache.claimInfo[namespace+"/"+claimName]
return exists
}
// get gets a specific claim info object from the cache.
func (cache *claimInfoCache) get(claimName, namespace string) (*ClaimInfo, bool) {
info, exists := cache.claimInfo[namespace+"/"+claimName]
return info, exists
}
// delete deletes a specific claim info object from the cache.
func (cache *claimInfoCache) delete(claimName, namespace string) {
delete(cache.claimInfo, namespace+"/"+claimName)
}
// hasPodReference checks if there is at least one claim
// that is referenced by the pod with the given UID.
// This function is used indirectly by the status manager
// to check if the pod can enter termination status.
func (cache *claimInfoCache) hasPodReference(uid types.UID) bool {
for _, claimInfo := range cache.claimInfo {
if claimInfo.hasPodReference(uid) {
return true
}
}
return false
}
// syncToCheckpoint syncs the full claim info cache state to a checkpoint.
func (cache *claimInfoCache) syncToCheckpoint() error {
claimInfoStateList := make(state.ClaimInfoStateList, 0, len(cache.claimInfo))
for _, infoClaim := range cache.claimInfo {
claimInfoStateList = append(claimInfoStateList, infoClaim.ClaimInfoState)
}
checkpoint, err := state.NewCheckpoint(claimInfoStateList)
if err != nil {
return err
}
return cache.checkpointer.Store(checkpoint)
}
// claimsInUse computes the current counter vector for DRAResourceClaimsInUse.
// It returns a map of driver name to number of claims which have been prepared using
// the driver. The [kubeletmetrics.DRAResourceClaimsInUseAnyDriver] key stands for
// all prepared claims.
//
// Must be called while the rlock is held.
func (cache *claimInfoCache) claimsInUse() map[string]int {
counts := make(map[string]int)
total := 0
for _, claimInfo := range cache.claimInfo {
if !claimInfo.isPrepared() {
continue
}
total++
for driverName := range claimInfo.DriverState {
counts[driverName]++
}
}
counts[kubeletmetrics.DRAResourceClaimsInUseAnyDriver] = total
return counts
}
// claimsInUseDelta compares two maps returned by claimsInUse.
// The type can be used as value in structured logging.
func claimsInUseDelta(before, after map[string]int) ClaimsInUseDelta {
var delta ClaimsInUseDelta
for driverName, count := range before {
if _, stillSet := after[driverName]; !stillSet {
delta = append(delta, ClaimsInUse{DriverName: driverName, Count: 0, Delta: -count})
}
}
for driverName, count := range after {
delta = append(delta, ClaimsInUse{DriverName: driverName, Count: count, Delta: count - before[driverName]})
}
return delta
}
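// Worked example (illustrative): if before = {"driver-a": 2} and
// after = {"driver-b": 1}, the delta contains driver-a with Count 0 and
// Delta -2 (all of its prepared claims went away) and driver-b with
// Count 1 and Delta +1.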
// ClaimsInUseDelta provides String (for text logging) and MarshalLog (for structured logging).
type ClaimsInUseDelta []ClaimsInUse
var _ fmt.Stringer = ClaimsInUseDelta{}
var _ logr.Marshaler = ClaimsInUseDelta{}
func (d ClaimsInUseDelta) String() string {
d = d.sort()
var buffer strings.Builder
for i, inUse := range d {
if i > 0 {
buffer.WriteByte('\n')
}
buffer.WriteString(fmt.Sprintf("%s: %d (%+d)", inUse.DriverName, inUse.Count, inUse.Delta))
}
return buffer.String()
}
func (d ClaimsInUseDelta) MarshalLog() any {
d = d.sort()
return []ClaimsInUse(d)
}
// sort returns a sorted copy of the slice.
func (d ClaimsInUseDelta) sort() ClaimsInUseDelta {
d = slices.Clone(d)
slices.SortFunc(d, func(a, b ClaimsInUse) int {
return strings.Compare(a.DriverName, b.DriverName)
})
return d
}
type ClaimsInUse struct {
DriverName string
Count int
Delta int
}
// claimInfoCollector provides metrics for a claimInfoCache.
type claimInfoCollector struct {
metrics.BaseStableCollector
cache *claimInfoCache
}
var _ metrics.StableCollector = &claimInfoCollector{}
// DescribeWithStability implements the metrics.StableCollector interface.
func (collector *claimInfoCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- kubeletmetrics.DRAResourceClaimsInUseDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (collector *claimInfoCollector) CollectWithStability(ch chan<- metrics.Metric) {
var claimsInUse map[string]int
_ = collector.cache.withRLock(func() error {
claimsInUse = collector.cache.claimsInUse()
return nil
})
// Only currently known drivers are listed. If a driver had active
// claims in the past, no longer does and then gets uninstalled, it no
// longer shows up. This avoids the memory leak problem in a normal
// GaugeVec which could grow over time unless obsolete drivers are
// actively deleted.
//
// The empty driver name provides the overall count of all active
// ResourceClaims regardless of the driver.
for driverName, count := range claimsInUse {
ch <- metrics.NewLazyConstMetric(kubeletmetrics.DRAResourceClaimsInUseDesc, metrics.GaugeValue, float64(count), driverName)
}
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/dra/state"
)
// TODO(#133118): Make health timeout configurable.
const (
healthTimeout = 30 * time.Second
)
// healthInfoCache is a cache of known device health.
type healthInfoCache struct {
sync.RWMutex
HealthInfo *state.DevicesHealthMap
stateFile string
}
// newHealthInfoCache creates a new cache, loading from a checkpoint if present.
func newHealthInfoCache(stateFile string) (*healthInfoCache, error) {
cache := &healthInfoCache{
HealthInfo: &state.DevicesHealthMap{},
stateFile: stateFile,
}
if err := cache.loadFromCheckpoint(); err != nil {
klog.Background().Error(err, "Failed to load health checkpoint, proceeding with empty cache")
}
return cache, nil
}
// loadFromCheckpoint loads the cache from the state file.
func (cache *healthInfoCache) loadFromCheckpoint() error {
if cache.stateFile == "" {
return nil
}
data, err := os.ReadFile(cache.stateFile)
if err != nil {
if os.IsNotExist(err) {
cache.HealthInfo = &state.DevicesHealthMap{}
return nil
}
return err
}
return json.Unmarshal(data, cache.HealthInfo)
}
// withLock runs a function while holding the healthInfoCache lock.
func (cache *healthInfoCache) withLock(f func() error) error {
cache.Lock()
defer cache.Unlock()
return f()
}
// withRLock runs a function while holding the healthInfoCache rlock.
func (cache *healthInfoCache) withRLock(f func() error) error {
cache.RLock()
defer cache.RUnlock()
return f()
}
// saveToCheckpointInternal does the actual saving without locking.
// Assumes the caller holds the necessary lock.
func (cache *healthInfoCache) saveToCheckpointInternal() error {
if cache.stateFile == "" {
return nil
}
data, err := json.Marshal(cache.HealthInfo)
if err != nil {
return fmt.Errorf("failed to marshal health info: %w", err)
}
tempFile, err := os.CreateTemp(filepath.Dir(cache.stateFile), filepath.Base(cache.stateFile)+".tmp")
if err != nil {
return fmt.Errorf("failed to create temp checkpoint file: %w", err)
}
defer func() {
if err := os.Remove(tempFile.Name()); err != nil && !os.IsNotExist(err) {
klog.Background().Error(err, "Failed to remove temporary checkpoint file", "path", tempFile.Name())
}
}()
if _, err := tempFile.Write(data); err != nil {
_ = tempFile.Close()
return fmt.Errorf("failed to write to temporary file: %w", err)
}
if err := tempFile.Close(); err != nil {
return fmt.Errorf("failed to close temporary file: %w", err)
}
if err := os.Rename(tempFile.Name(), cache.stateFile); err != nil {
return fmt.Errorf("failed to rename temporary file to state file: %w", err)
}
return nil
}
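// The write-to-temp-then-rename sequence above makes checkpoint updates
// atomic on POSIX filesystems: readers observe either the old or the new
// state file, never a partially written one. The deferred Remove only
// matters on failure paths; after a successful rename there is no temp
// file left to delete.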
// getHealthInfo returns the current health info, adjusting for timeouts.
func (cache *healthInfoCache) getHealthInfo(driverName, poolName, deviceName string) state.DeviceHealthStatus {
res := state.DeviceHealthStatusUnknown
_ = cache.withRLock(func() error {
now := time.Now()
if driver, ok := (*cache.HealthInfo)[driverName]; ok {
key := poolName + "/" + deviceName
if device, ok := driver.Devices[key]; ok {
if now.Sub(device.LastUpdated) > healthTimeout {
res = state.DeviceHealthStatusUnknown
} else {
res = device.Health
}
}
}
return nil
})
return res
}
// updateHealthInfo reconciles the cache with a fresh list of device health states
// from a plugin. It identifies which devices have changed state and handles devices
// that are no longer being reported by the plugin.
func (cache *healthInfoCache) updateHealthInfo(driverName string, devices []state.DeviceHealth) ([]state.DeviceHealth, error) {
changedDevices := []state.DeviceHealth{}
err := cache.withLock(func() error {
now := time.Now()
currentDriver, exists := (*cache.HealthInfo)[driverName]
if !exists {
currentDriver = state.DriverHealthState{Devices: make(map[string]state.DeviceHealth)}
(*cache.HealthInfo)[driverName] = currentDriver
}
reportedKeys := make(map[string]struct{})
// Phase 1: Process the incoming report from the plugin.
// Update existing devices, add new ones, and record all devices
// present in this report.
for _, reportedDevice := range devices {
reportedDevice.LastUpdated = now
key := reportedDevice.PoolName + "/" + reportedDevice.DeviceName
reportedKeys[key] = struct{}{}
existingDevice, ok := currentDriver.Devices[key]
if !ok || existingDevice.Health != reportedDevice.Health {
changedDevices = append(changedDevices, reportedDevice)
}
currentDriver.Devices[key] = reportedDevice
}
// Phase 2: Handle devices that are in the cache but were not in the report.
// These devices may have been removed or the plugin may have stopped monitoring
// them. Mark them as "Unknown" if their status has timed out.
for key, existingDevice := range currentDriver.Devices {
if _, wasReported := reportedKeys[key]; !wasReported {
if existingDevice.Health != state.DeviceHealthStatusUnknown && now.Sub(existingDevice.LastUpdated) > healthTimeout {
existingDevice.Health = state.DeviceHealthStatusUnknown
existingDevice.LastUpdated = now
currentDriver.Devices[key] = existingDevice
changedDevices = append(changedDevices, existingDevice)
}
}
}
// Phase 3: Persist changes to the checkpoint file if any state changed.
if len(changedDevices) > 0 {
if err := cache.saveToCheckpointInternal(); err != nil {
klog.Background().Error(err, "Failed to save health checkpoint after update. Kubelet restart may lose the device health information.")
}
}
return nil
})
if err != nil {
return nil, err
}
return changedDevices, nil
}
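// exampleReconcile is an illustrative sketch, not called anywhere in kubelet:
// it demonstrates the updateHealthInfo contract. Only devices whose health
// actually changed are returned, so a caller can fan out notifications without
// diffing the cache itself. The driver and device names are hypothetical, and
// LastUpdated may be left unset because updateHealthInfo overwrites it.
func exampleReconcile(cache *healthInfoCache) ([]state.DeviceHealth, error) {
report := []state.DeviceHealth{
{PoolName: "pool-a", DeviceName: "gpu-0", Health: state.DeviceHealthStatusHealthy},
{PoolName: "pool-a", DeviceName: "gpu-1", Health: state.DeviceHealthStatusUnhealthy},
}
return cache.updateHealthInfo("gpu.example.com", report)
}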
// clearDriver clears all health data for a specific driver.
func (cache *healthInfoCache) clearDriver(driverName string) error {
return cache.withLock(func() error {
delete(*cache.HealthInfo, driverName)
return cache.saveToCheckpointInternal()
})
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dra
import (
"context"
"errors"
"fmt"
"io"
"path/filepath"
"strconv"
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics"
"k8s.io/dynamic-resource-allocation/resourceclaim"
"k8s.io/klog/v2"
drahealthv1alpha1 "k8s.io/kubelet/pkg/apis/dra-health/v1alpha1"
drapb "k8s.io/kubelet/pkg/apis/dra/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubefeatures "k8s.io/kubernetes/pkg/features"
draplugin "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin"
"k8s.io/kubernetes/pkg/kubelet/cm/dra/state"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
// draManagerStateFileName is the file name where dra manager stores its state
const draManagerStateFileName = "dra_manager_state"
// defaultReconcilePeriod is the default reconciliation period to keep all claim info state in sync.
const defaultReconcilePeriod = 60 * time.Second
// The time that DRA drivers have to come back after being unregistered
// before the kubelet removes their ResourceSlices.
//
// This must be long enough to actually allow stopping a pod and
// starting the replacement (otherwise ResourceSlices get deleted
// unnecessarily) and not too long (otherwise the time window where
// pods might still get scheduled to the node after removal of a
// driver is too long).
//
// 30 seconds might be long enough for a simple container restart.
// If a DRA driver wants to be sure that slices don't get wiped,
// it should use rolling updates.
const defaultWipingDelay = 30 * time.Second
// ActivePodsFunc is a function that returns a list of pods to reconcile.
type ActivePodsFunc func() []*v1.Pod
// GetNodeFunc is a function that returns the node object using the kubelet's node lister.
type GetNodeFunc func() (*v1.Node, error)
// Manager is responsible for managing ResourceClaims.
// It ensures that they are prepared before starting pods
// and that they are unprepared before the last consuming
// pod is declared as terminated.
type Manager struct {
// draPlugins manages the registered plugins.
draPlugins *draplugin.DRAPluginManager
// cache contains cached claim info
cache *claimInfoCache
// reconcilePeriod is the duration between calls to reconcileLoop.
reconcilePeriod time.Duration
// activePods is a method for listing active pods on the node
// so all claim info state can be updated in the reconciliation loop.
activePods ActivePodsFunc
// sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness.
// We use it to determine when we can treat pods as inactive and react appropriately.
sourcesReady config.SourcesReady
// KubeClient reference
kubeClient clientset.Interface
// healthInfoCache contains cached health info
healthInfoCache *healthInfoCache
// update channel for resource updates
update chan resourceupdates.Update
}
// NewManager creates a new DRA manager.
//
// Most errors returned by the manager show up in the context of a pod.
// They try to adhere to the following convention:
// - Don't include the pod.
// - Use terms that are familiar to users.
// - Don't include the namespace; it can be inferred from the context.
// - Avoid repeated "failed to ...: failed to ..." when wrapping errors.
// - Avoid wrapping when it does not provide relevant additional information to keep the user-visible error short.
func NewManager(logger klog.Logger, kubeClient clientset.Interface, stateFileDirectory string) (*Manager, error) {
claimInfoCache, err := newClaimInfoCache(logger, stateFileDirectory, draManagerStateFileName)
if err != nil {
return nil, fmt.Errorf("create ResourceClaim cache: %w", err)
}
healthInfoCache, err := newHealthInfoCache(filepath.Join(stateFileDirectory, "dra_health_state"))
if err != nil {
return nil, fmt.Errorf("failed to create healthInfo cache: %w", err)
}
// TODO: for now the reconcile period is not configurable.
// We should consider making it configurable in the future.
reconcilePeriod := defaultReconcilePeriod
manager := &Manager{
cache: claimInfoCache,
kubeClient: kubeClient,
reconcilePeriod: reconcilePeriod,
activePods: nil,
sourcesReady: nil,
healthInfoCache: healthInfoCache,
update: make(chan resourceupdates.Update, 100),
}
return manager, nil
}
func (m *Manager) NewMetricsCollector() metrics.StableCollector {
return &claimInfoCollector{cache: m.cache}
}
// GetWatcherHandler must be called after Start; it indirectly depends
// on parameters which only get passed to Start, for example the context.
func (m *Manager) GetWatcherHandler() cache.PluginHandler {
return m.draPlugins
}
// Start starts the reconcile loop of the manager.
func (m *Manager) Start(ctx context.Context, activePods ActivePodsFunc, getNode GetNodeFunc, sourcesReady config.SourcesReady) error {
m.initDRAPluginManager(ctx, getNode, defaultWipingDelay)
m.activePods = activePods
m.sourcesReady = sourcesReady
go wait.UntilWithContext(ctx, func(ctx context.Context) { m.reconcileLoop(ctx) }, m.reconcilePeriod)
return nil
}
// initDRAPluginManager can be used instead of Start to make the manager usable
// for calls to prepare/unprepare. It exists primarily for testing purposes.
func (m *Manager) initDRAPluginManager(ctx context.Context, getNode GetNodeFunc, wipingDelay time.Duration) {
m.draPlugins = draplugin.NewDRAPluginManager(ctx, m.kubeClient, getNode, m, wipingDelay)
}
// reconcileLoop ensures that any stale state in the manager's claimInfoCache gets periodically reconciled.
func (m *Manager) reconcileLoop(ctx context.Context) {
logger := klog.FromContext(ctx)
// Only once all sources are ready do we attempt to reconcile.
// This ensures that the call to m.activePods() below will succeed with
// the actual active pods list.
if m.sourcesReady == nil || !m.sourcesReady.AllReady() {
return
}
// Get the full list of active pods.
activePods := sets.New[string]()
for _, p := range m.activePods() {
activePods.Insert(string(p.UID))
}
// Get the list of inactive pods still referenced by any claimInfos.
type podClaims struct {
uid types.UID
namespace string
claimNames []string
}
inactivePodClaims := make(map[string]*podClaims)
m.cache.RLock()
for _, claimInfo := range m.cache.claimInfo {
for podUID := range claimInfo.PodUIDs {
if activePods.Has(podUID) {
continue
}
if inactivePodClaims[podUID] == nil {
inactivePodClaims[podUID] = &podClaims{
uid: types.UID(podUID),
namespace: claimInfo.Namespace,
claimNames: []string{},
}
}
inactivePodClaims[podUID].claimNames = append(inactivePodClaims[podUID].claimNames, claimInfo.ClaimName)
}
}
m.cache.RUnlock()
// Loop through all inactive pods and call UnprepareResources on them.
for _, podClaims := range inactivePodClaims {
if err := m.unprepareResources(ctx, podClaims.uid, podClaims.namespace, podClaims.claimNames); err != nil {
logger.Info("Unpreparing pod resources in reconcile loop failed, will retry", "podUID", podClaims.uid, "err", err)
}
}
}
// PrepareResources attempts to prepare all of the required resources
// for the input container, issue NodePrepareResources rpc requests
// for each new resource requirement, process their responses and update the cached
// containerResources on success.
func (m *Manager) PrepareResources(ctx context.Context, pod *v1.Pod) error {
startTime := time.Now()
err := m.prepareResources(ctx, pod)
kubeletmetrics.DRAOperationsDuration.WithLabelValues("PrepareResources", strconv.FormatBool(err == nil)).Observe(time.Since(startTime).Seconds())
if err != nil {
return fmt.Errorf("prepare dynamic resources: %w", err)
}
return nil
}
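// examplePodStartup is an illustrative sketch, not called anywhere in kubelet:
// it shows the intended call order. Resources are prepared once for the whole
// pod, and only afterwards can the per-container CDI devices be looked up and
// handed to the container runtime.
func examplePodStartup(ctx context.Context, m *Manager, pod *v1.Pod) error {
if err := m.PrepareResources(ctx, pod); err != nil {
return err
}
for i := range pod.Spec.Containers {
info, err := m.GetResources(pod, &pod.Spec.Containers[i])
if err != nil {
return err
}
_ = info.CDIDevices // injected into the container config via the CRI
}
return nil
}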
func (m *Manager) prepareResources(ctx context.Context, pod *v1.Pod) error {
var err error
logger := klog.FromContext(ctx)
batches := make(map[*draplugin.DRAPlugin][]*drapb.Claim)
resourceClaims := make(map[types.UID]*resourceapi.ResourceClaim)
// Do a validation pass *without* changing the claim info cache.
// If anything goes wrong, we don't proceed. This has the advantage
// that the failing pod can be deleted without getting stuck.
//
// If we added the claim and pod to the cache, UnprepareResources
// would have to assume that NodePrepareResources was called and
// try to call NodeUnprepareResources. This is particularly bad
// when the driver never has been installed on the node and
// remains unavailable.
podResourceClaims := pod.Spec.ResourceClaims
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DRAExtendedResource) {
if pod.Status.ExtendedResourceClaimStatus != nil {
extendedResourceClaim := v1.PodResourceClaim{
ResourceClaimName: &pod.Status.ExtendedResourceClaimStatus.ResourceClaimName,
}
podResourceClaims = make([]v1.PodResourceClaim, 0, len(pod.Spec.ResourceClaims)+1)
podResourceClaims = append(podResourceClaims, pod.Spec.ResourceClaims...)
podResourceClaims = append(podResourceClaims, extendedResourceClaim)
}
}
infos := make([]struct {
resourceClaim *resourceapi.ResourceClaim
podClaim *v1.PodResourceClaim
claimInfo *ClaimInfo
plugins map[string]*draplugin.DRAPlugin
}, len(podResourceClaims))
for i := range podResourceClaims {
podClaim := &podResourceClaims[i]
infos[i].podClaim = podClaim
logger.V(3).Info("Processing resource", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
claimName, mustCheckOwner, err := resourceclaim.Name(pod, podClaim)
if err != nil {
return err
}
if claimName == nil {
// Nothing to do.
continue
}
// Query claim object from the API server
resourceClaim, err := m.kubeClient.ResourceV1().ResourceClaims(pod.Namespace).Get(
ctx,
*claimName,
metav1.GetOptions{})
if err != nil {
return fmt.Errorf("fetch ResourceClaim %s: %w", *claimName, err)
}
if mustCheckOwner {
if err = resourceclaim.IsForPod(pod, resourceClaim); err != nil {
// No wrapping, error is already informative.
return err
}
}
// Check if pod is in the ReservedFor for the claim
if !resourceclaim.IsReservedForPod(pod, resourceClaim) {
return fmt.Errorf("pod %s (%s) is not allowed to use ResourceClaim %s (%s)",
pod.Name, pod.UID, *claimName, resourceClaim.UID)
}
// At this point we assume that we have to prepare the claim and thus need
// the driver. If the driver is currently unavailable, it is better to fail
// even if the claim is already prepared because something is wrong with
// the node.
infos[i].resourceClaim = resourceClaim
claimInfo, err := newClaimInfoFromClaim(resourceClaim)
if err != nil {
return fmt.Errorf("ResourceClaim %s: %w", resourceClaim.Name, err)
}
infos[i].claimInfo = claimInfo
infos[i].plugins = make(map[string]*draplugin.DRAPlugin, len(claimInfo.DriverState))
for driverName := range claimInfo.DriverState {
if plugin := infos[i].plugins[driverName]; plugin != nil {
continue
}
plugin, err := m.draPlugins.GetPlugin(driverName)
if err != nil {
// No wrapping, error includes driver name already.
return err
}
infos[i].plugins[driverName] = plugin
}
}
// Now that we have everything that we need, we can update the claim info cache.
// Almost nothing can go wrong anymore at this point.
err = m.cache.withLock(func() error {
for i := range podResourceClaims {
resourceClaim := infos[i].resourceClaim
podClaim := infos[i].podClaim
if resourceClaim == nil {
logger.V(5).Info("No need to prepare resources, no claim generated", "pod", klog.KObj(pod), "podClaim", podClaim.Name)
continue
}
// Get a reference to the claim info for this claim from the cache.
// If there isn't one yet, then add it to the cache.
claimInfo, exists := m.cache.get(resourceClaim.Name, resourceClaim.Namespace)
if !exists {
claimInfo = infos[i].claimInfo
m.cache.add(claimInfo)
logger.V(6).Info("Created new claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
} else {
if claimInfo.ClaimUID != resourceClaim.UID {
return fmt.Errorf("old ResourceClaim with same name %s and different UID %s still exists (previous pod force-deleted?!)", resourceClaim.Name, claimInfo.ClaimUID)
}
logger.V(6).Info("Found existing claim info cache entry", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim), "claimInfoEntry", claimInfo)
}
// Add a reference to the current pod in the claim info.
claimInfo.addPodReference(pod.UID)
// Checkpoint to ensure all claims we plan to prepare are tracked.
// If something goes wrong and the newly referenced pod gets
// deleted without a successful prepare call, we will catch
// that in the reconcile loop and take the appropriate action.
if err := m.cache.syncToCheckpoint(); err != nil {
return fmt.Errorf("checkpoint ResourceClaim cache: %w", err)
}
// If this claim is already prepared, there is no need to prepare it again.
if claimInfo.isPrepared() {
logger.V(5).Info("Resources already prepared", "pod", klog.KObj(pod), "podClaim", podClaim.Name, "claim", klog.KObj(resourceClaim))
continue
}
// This saved claim will be used to update ClaimInfo cache
// after NodePrepareResources GRPC succeeds
resourceClaims[claimInfo.ClaimUID] = resourceClaim
// Loop through all drivers and prepare for calling NodePrepareResources.
claim := &drapb.Claim{
Namespace: claimInfo.Namespace,
Uid: string(claimInfo.ClaimUID),
Name: claimInfo.ClaimName,
}
for driverName := range claimInfo.DriverState {
plugin := infos[i].plugins[driverName]
batches[plugin] = append(batches[plugin], claim)
}
}
return nil
})
if err != nil {
// No error wrapping because there is no additional context needed.
// What we get here are the errors from our own callback above.
return err
}
// Call NodePrepareResources for all claims in each batch.
// If there is any error, processing gets aborted.
// We could try to continue, but that would make the code more complex.
for plugin, claims := range batches {
// Call NodePrepareResources RPC for all resource handles.
response, err := plugin.NodePrepareResources(ctx, &drapb.NodePrepareResourcesRequest{Claims: claims})
if err != nil {
// General error unrelated to any particular claim.
return fmt.Errorf("NodePrepareResources: %w", err)
}
for claimUID, result := range response.Claims {
reqClaim := lookupClaimRequest(claims, claimUID)
if reqClaim == nil {
return fmt.Errorf("NodePrepareResources returned result for unknown claim UID %s", claimUID)
}
if result.GetError() != "" {
return fmt.Errorf("NodePrepareResources failed for ResourceClaim %s: %s", reqClaim.Name, result.Error)
}
claim := resourceClaims[types.UID(claimUID)]
// Add the prepared CDI devices to the claim info
err := m.cache.withLock(func() error {
info, exists := m.cache.get(claim.Name, claim.Namespace)
if !exists {
return fmt.Errorf("internal error: unable to get claim info for ResourceClaim %s", claim.Name)
}
for _, device := range result.GetDevices() {
info.addDevice(plugin.DriverName(), state.Device{PoolName: device.PoolName, DeviceName: device.DeviceName, RequestNames: device.RequestNames, CDIDeviceIDs: device.CdiDeviceIds})
}
return nil
})
if err != nil {
// No wrapping, this is the error above.
return err
}
}
unfinished := len(claims) - len(response.Claims)
if unfinished != 0 {
return fmt.Errorf("NodePrepareResources skipped %d ResourceClaims", unfinished)
}
}
// Atomically perform some operations on the claimInfo cache.
err = m.cache.withLock(func() error {
// Mark all pod claims as prepared.
for _, claim := range resourceClaims {
info, exists := m.cache.get(claim.Name, claim.Namespace)
if !exists {
return fmt.Errorf("internal error: unable to get claim info for ResourceClaim %s", claim.Name)
}
info.setPrepared()
}
// Checkpoint to ensure all prepared claims are tracked with their list
// of CDI devices attached.
if err := m.cache.syncToCheckpoint(); err != nil {
return fmt.Errorf("checkpoint ResourceClaim state: %w", err)
}
return nil
})
if err != nil {
// No wrapping, this is the error above.
return err
}
return nil
}
func lookupClaimRequest(claims []*drapb.Claim, claimUID string) *drapb.Claim {
for _, claim := range claims {
if claim.Uid == claimUID {
return claim
}
}
return nil
}
// GetResources gets a ContainerInfo object from the claimInfo cache.
// This information is used by the caller to update a container config.
func (m *Manager) GetResources(pod *v1.Pod, container *v1.Container) (*ContainerInfo, error) {
cdiDevices := []kubecontainer.CDIDevice{}
for i := range pod.Spec.ResourceClaims {
podClaim := &pod.Spec.ResourceClaims[i]
claimName, _, err := resourceclaim.Name(pod, podClaim)
if err != nil {
// No wrapping, error is already informative.
return nil, err
}
// The claim name might be nil if no underlying resource claim
// was generated for the referenced claim. There are valid use
// cases when this might happen, so we simply skip it.
if claimName == nil {
continue
}
for _, claim := range container.Resources.Claims {
if podClaim.Name != claim.Name {
continue
}
err := m.cache.withRLock(func() error {
claimInfo, exists := m.cache.get(*claimName, pod.Namespace)
if !exists {
return fmt.Errorf("internal error: unable to get claim info for ResourceClaim %s", *claimName)
}
// As of Kubernetes 1.31, CDI device IDs are not passed via annotations anymore.
cdiDevices = append(cdiDevices, claimInfo.cdiDevicesAsList(claim.Request)...)
return nil
})
if err != nil {
// No wrapping, this is the error above.
return nil, err
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DRAExtendedResource) && pod.Status.ExtendedResourceClaimStatus != nil {
claimName := pod.Status.ExtendedResourceClaimStatus.ResourceClaimName
// if the container has requests for extended resources backed by DRA,
// they must have been allocated via the extendedResourceClaim created
// by the kube-scheduler.
err := m.cache.withRLock(func() error {
claimInfo, exists := m.cache.get(claimName, pod.Namespace)
if !exists {
return fmt.Errorf("unable to get claim info for claim %s in namespace %s", claimName, pod.Namespace)
}
for rName, rValue := range container.Resources.Requests {
if rValue.IsZero() {
// We only care about the resources requested by the pod
continue
}
if v1helper.IsExtendedResourceName(rName) {
requestName := ""
for _, rm := range pod.Status.ExtendedResourceClaimStatus.RequestMappings {
if rm.ContainerName == container.Name && rm.ResourceName == rName.String() {
requestName = rm.RequestName
break
}
}
if requestName != "" {
// As of Kubernetes 1.31, CDI device IDs are not passed via annotations anymore.
cdiDevices = append(cdiDevices, claimInfo.cdiDevicesAsList(requestName)...)
}
}
}
return nil
})
if err != nil {
return nil, err
}
}
return &ContainerInfo{CDIDevices: cdiDevices}, nil
}
// UnprepareResources calls a driver's NodeUnprepareResource API for each resource claim owned by a pod.
// This function is idempotent and may be called multiple times against the same pod.
// As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have
// already been successfully unprepared.
func (m *Manager) UnprepareResources(ctx context.Context, pod *v1.Pod) error {
startTime := time.Now()
err := m.unprepareResourcesForPod(ctx, pod)
kubeletmetrics.DRAOperationsDuration.WithLabelValues("UnprepareResources", strconv.FormatBool(err == nil)).Observe(time.Since(startTime).Seconds())
if err != nil {
return fmt.Errorf("unprepare dynamic resources: %w", err)
}
return nil
}
func (m *Manager) unprepareResourcesForPod(ctx context.Context, pod *v1.Pod) error {
var claimNames []string
for i := range pod.Spec.ResourceClaims {
claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
if err != nil {
// No wrapping, the error is already informative.
return err
}
// The claim name might be nil if no underlying resource claim
// was generated for the referenced claim. There are valid use
// cases when this might happen, so we simply skip it.
if claimName == nil {
continue
}
claimNames = append(claimNames, *claimName)
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DRAExtendedResource) {
if pod.Status.ExtendedResourceClaimStatus != nil {
claimNames = append(claimNames, pod.Status.ExtendedResourceClaimStatus.ResourceClaimName)
}
}
return m.unprepareResources(ctx, pod.UID, pod.Namespace, claimNames)
}
func (m *Manager) unprepareResources(ctx context.Context, podUID types.UID, namespace string, claimNames []string) error {
logger := klog.FromContext(ctx)
batches := make(map[string][]*drapb.Claim)
claimNamesMap := make(map[types.UID]string)
for _, claimName := range claimNames {
// Atomically perform some operations on the claimInfo cache.
err := m.cache.withLock(func() error {
// Get the claim info from the cache
claimInfo, exists := m.cache.get(claimName, namespace)
// Skip calling NodeUnprepareResource if claim info is not cached
if !exists {
return nil
}
// Skip calling NodeUnprepareResource if other pods are still referencing it
if len(claimInfo.PodUIDs) > 1 {
// We delay checkpointing of this change until
// UnprepareResources returns successfully. It is OK to do
// this because we will only return successfully from this call
// if the checkpoint has succeeded. That means if the kubelet
// is ever restarted before this checkpoint succeeds, we will
// simply call into this (idempotent) function again.
claimInfo.deletePodReference(podUID)
return nil
}
// This claimInfo name will be used to update ClaimInfo cache
// after NodeUnprepareResources GRPC succeeds
claimNamesMap[claimInfo.ClaimUID] = claimInfo.ClaimName
// Loop through all drivers and prepare for calling NodeUnprepareResources.
claim := &drapb.Claim{
Namespace: claimInfo.Namespace,
Uid: string(claimInfo.ClaimUID),
Name: claimInfo.ClaimName,
}
for driverName := range claimInfo.DriverState {
batches[driverName] = append(batches[driverName], claim)
}
return nil
})
if err != nil {
// No wrapping, this is the error above.
return err
}
}
// Call NodeUnprepareResources for all claims in each batch.
// If there is any error, processing gets aborted.
// We could try to continue, but that would make the code more complex.
for driverName, claims := range batches {
// Call NodeUnprepareResources RPC for all resource handles.
plugin, err := m.draPlugins.GetPlugin(driverName)
if err != nil {
// No wrapping, error includes driver name already.
return err
}
response, err := plugin.NodeUnprepareResources(ctx, &drapb.NodeUnprepareResourcesRequest{Claims: claims})
if err != nil {
// General error unrelated to any particular claim.
return fmt.Errorf("NodeUnprepareResources: %w", err)
}
for claimUID, result := range response.Claims {
reqClaim := lookupClaimRequest(claims, claimUID)
if reqClaim == nil {
return fmt.Errorf("NodeUnprepareResources returned result for unknown claim UID %s", claimUID)
}
if result.GetError() != "" {
return fmt.Errorf("NodeUnprepareResources failed for ResourceClaim %s: %s", reqClaim.Name, result.Error)
}
}
unfinished := len(claims) - len(response.Claims)
if unfinished != 0 {
return fmt.Errorf("NodeUnprepareResources skipped %d ResourceClaims", unfinished)
}
}
// Atomically perform some operations on the claimInfo cache.
err := m.cache.withLock(func() error {
// TODO(#132978): Re-evaluate this logic to support post-mortem health updates.
// As of the initial implementation, we immediately delete the claim info upon
// unprepare. This means a late-arriving health update for a terminated pod
// will be missed. A future enhancement could be to "tombstone" this entry for
// a grace period instead of deleting it.
// Delete all claimInfos from the cache that have just been unprepared.
for _, claimName := range claimNamesMap {
claimInfo, _ := m.cache.get(claimName, namespace)
m.cache.delete(claimName, namespace)
logger.V(6).Info("Deleted claim info cache entry", "claim", klog.KRef(namespace, claimName), "claimInfoEntry", claimInfo)
}
// Atomically sync the cache back to the checkpoint.
if err := m.cache.syncToCheckpoint(); err != nil {
return fmt.Errorf("checkpoint ResourceClaim state: %w", err)
}
return nil
})
if err != nil {
// No wrapping, this is the error above.
return err
}
return nil
}
// PodMightNeedToUnprepareResources returns true if the pod might need to
// unprepare resources.
func (m *Manager) PodMightNeedToUnprepareResources(uid types.UID) bool {
m.cache.Lock()
defer m.cache.Unlock()
return m.cache.hasPodReference(uid)
}
// GetContainerClaimInfos gets Container's ClaimInfo
func (m *Manager) GetContainerClaimInfos(pod *v1.Pod, container *v1.Container) ([]*ClaimInfo, error) {
claimInfos := make([]*ClaimInfo, 0, len(pod.Spec.ResourceClaims))
for i, podResourceClaim := range pod.Spec.ResourceClaims {
claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i])
if err != nil {
// No wrapping, the error is already informative.
return nil, err
}
if claimName == nil {
// No ResourceClaim needed.
continue
}
// Ownership doesn't get checked here, this should have been done before.
for _, claim := range container.Resources.Claims {
if podResourceClaim.Name != claim.Name {
continue
}
err := m.cache.withRLock(func() error {
claimInfo, exists := m.cache.get(*claimName, pod.Namespace)
if !exists {
return fmt.Errorf("unable to get information for ResourceClaim %s", *claimName)
}
claimInfos = append(claimInfos, claimInfo.DeepCopy())
return nil
})
if err != nil {
// No wrapping, this is the error above.
return nil, err
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DRAExtendedResource) {
// Handle the special claim for extended resources backed by DRA in the pod
if pod.Status.ExtendedResourceClaimStatus != nil {
var hasExtendedResourceClaim bool
for _, n := range pod.Status.ExtendedResourceClaimStatus.RequestMappings {
if n.ContainerName == container.Name {
hasExtendedResourceClaim = true
break
}
}
if !hasExtendedResourceClaim {
return claimInfos, nil
}
claimName := &pod.Status.ExtendedResourceClaimStatus.ResourceClaimName
err := m.cache.withRLock(func() error {
claimInfo, exists := m.cache.get(*claimName, pod.Namespace)
if !exists {
return fmt.Errorf("unable to get claim info for claim %s in namespace %s", *claimName, pod.Namespace)
}
claimInfos = append(claimInfos, claimInfo.DeepCopy())
return nil
})
if err != nil {
// No wrapping, this is the error above.
return nil, err
}
}
}
return claimInfos, nil
}
// UpdateAllocatedResourcesStatus updates the health status of allocated DRA resources in the pod's container statuses.
func (m *Manager) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) {
logger := klog.FromContext(context.Background())
for _, container := range pod.Spec.Containers {
// Get all the DRA claim details associated with this specific container.
claimInfos, err := m.GetContainerClaimInfos(pod, &container)
if err != nil {
logger.Error(err, "Failed to get claim infos for container", "pod", klog.KObj(pod), "container", container.Name)
continue
}
// Find the corresponding container status
for i, containerStatus := range status.ContainerStatuses {
if containerStatus.Name != container.Name {
continue
}
// Ensure the slice exists. Use a map for efficient updates by resource name.
resourceStatusMap := make(map[v1.ResourceName]*v1.ResourceStatus)
if status.ContainerStatuses[i].AllocatedResourcesStatus != nil {
for idx := range status.ContainerStatuses[i].AllocatedResourcesStatus {
// Store pointers to modify in place
resourceStatusMap[status.ContainerStatuses[i].AllocatedResourcesStatus[idx].Name] = &status.ContainerStatuses[i].AllocatedResourcesStatus[idx]
}
} else {
status.ContainerStatuses[i].AllocatedResourcesStatus = []v1.ResourceStatus{}
}
// Loop through each claim associated with the container
for _, claimInfo := range claimInfos {
var resourceName v1.ResourceName
foundClaimInSpec := false
for _, cClaim := range container.Resources.Claims {
if cClaim.Name == claimInfo.ClaimName {
if cClaim.Request == "" {
resourceName = v1.ResourceName(fmt.Sprintf("claim:%s", cClaim.Name))
} else {
resourceName = v1.ResourceName(fmt.Sprintf("claim:%s/%s", cClaim.Name, cClaim.Request))
}
foundClaimInSpec = true
break
}
}
if !foundClaimInSpec {
logger.V(4).Info("Could not find matching resource claim in container spec", "pod", klog.KObj(pod), "container", container.Name, "claimName", claimInfo.ClaimName)
continue
}
// Get or create the ResourceStatus entry for this claim
resStatus, ok := resourceStatusMap[resourceName]
if !ok {
// Create a new entry and add it to the map and the slice
newStatus := v1.ResourceStatus{
Name: resourceName,
Resources: []v1.ResourceHealth{},
}
status.ContainerStatuses[i].AllocatedResourcesStatus = append(status.ContainerStatuses[i].AllocatedResourcesStatus, newStatus)
// Get pointer to the newly added element *after* appending
resStatus = &status.ContainerStatuses[i].AllocatedResourcesStatus[len(status.ContainerStatuses[i].AllocatedResourcesStatus)-1]
resourceStatusMap[resourceName] = resStatus
}
// Clear previous health entries for this resource before adding current ones
// Ensures we only report current health for allocated devices.
resStatus.Resources = []v1.ResourceHealth{}
// Iterate through the map holding the state specific to each driver
for driverName, driverState := range claimInfo.DriverState {
// Iterate through each specific device allocated by this driver
for _, device := range driverState.Devices {
healthStr := m.healthInfoCache.getHealthInfo(driverName, device.PoolName, device.DeviceName)
// Convert internal health string to API type
var health v1.ResourceHealthStatus
switch healthStr {
case "Healthy":
health = v1.ResourceHealthStatusHealthy
case "Unhealthy":
health = v1.ResourceHealthStatusUnhealthy
default: // Catches "Unknown" or any other case
health = v1.ResourceHealthStatusUnknown
}
// Create the ResourceHealth entry
resourceHealth := v1.ResourceHealth{
Health: health,
}
// Use first CDI device ID as ResourceID, with fallback
if len(device.CDIDeviceIDs) > 0 {
resourceHealth.ResourceID = v1.ResourceID(device.CDIDeviceIDs[0])
} else {
// Fallback ID if no CDI ID is present
resourceHealth.ResourceID = v1.ResourceID(fmt.Sprintf("%s/%s/%s", driverName, device.PoolName, device.DeviceName))
}
// Append the health status for this specific device/resource ID
resStatus.Resources = append(resStatus.Resources, resourceHealth)
}
}
}
// Rebuild the slice from the map values to ensure correctness
finalStatuses := make([]v1.ResourceStatus, 0, len(resourceStatusMap))
for _, rs := range resourceStatusMap {
// Only add if it actually has resource health entries populated
if len(rs.Resources) > 0 {
finalStatuses = append(finalStatuses, *rs)
}
}
status.ContainerStatuses[i].AllocatedResourcesStatus = finalStatuses
}
}
}
// HandleWatchResourcesStream processes health updates from the DRA plugin.
func (m *Manager) HandleWatchResourcesStream(ctx context.Context, stream drahealthv1alpha1.DRAResourceHealth_NodeWatchResourcesClient, pluginName string) error {
logger := klog.FromContext(ctx)
defer func() {
logger.V(4).Info("Clearing health cache for driver upon stream exit", "pluginName", pluginName)
// Use a separate context for clearDriver if needed, though background should be fine.
if err := m.healthInfoCache.clearDriver(pluginName); err != nil {
logger.Error(err, "Failed to clear health info cache for driver", "pluginName", pluginName)
}
}()
for {
resp, err := stream.Recv()
if err != nil {
// Context canceled, normal shutdown.
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
logger.V(4).Info("Stopping health monitoring due to context cancellation", "pluginName", pluginName, "reason", err)
return err
}
// Stream closed cleanly by the server; we get a normal EOF.
if errors.Is(err, io.EOF) {
logger.V(4).Info("Stream ended with EOF", "pluginName", pluginName)
return nil
}
// Other errors are unexpected, log & return.
logger.Error(err, "Error receiving from WatchResources stream", "pluginName", pluginName)
return err
}
// Convert drahealthv1alpha1.DeviceHealth to state.DeviceHealth
devices := make([]state.DeviceHealth, len(resp.GetDevices()))
for i, d := range resp.GetDevices() {
var health state.DeviceHealthStatus
switch d.GetHealth() {
case drahealthv1alpha1.HealthStatus_HEALTHY:
health = state.DeviceHealthStatusHealthy
case drahealthv1alpha1.HealthStatus_UNHEALTHY:
health = state.DeviceHealthStatusUnhealthy
default:
health = state.DeviceHealthStatusUnknown
}
devices[i] = state.DeviceHealth{
PoolName: d.GetDevice().GetPoolName(),
DeviceName: d.GetDevice().GetDeviceName(),
Health: health,
LastUpdated: time.Unix(d.GetLastUpdatedTime(), 0),
}
}
changedDevices, updateErr := m.healthInfoCache.updateHealthInfo(pluginName, devices)
if updateErr != nil {
logger.Error(updateErr, "Failed to update health info cache", "pluginName", pluginName)
}
if len(changedDevices) > 0 {
logger.V(4).Info("Health info changed, checking affected pods", "pluginName", pluginName, "changedDevicesCount", len(changedDevices))
podsToUpdate := sets.New[string]()
m.cache.RLock()
for _, dev := range changedDevices {
for _, cInfo := range m.cache.claimInfo {
if driverState, ok := cInfo.DriverState[pluginName]; ok {
for _, allocatedDevice := range driverState.Devices {
if allocatedDevice.PoolName == dev.PoolName && allocatedDevice.DeviceName == dev.DeviceName {
podsToUpdate.Insert(cInfo.PodUIDs.UnsortedList()...)
break
}
}
}
}
}
m.cache.RUnlock()
if podsToUpdate.Len() > 0 {
podUIDs := podsToUpdate.UnsortedList()
logger.Info("Sending health update notification for pods", "pluginName", pluginName, "pods", podUIDs)
select {
case m.update <- resourceupdates.Update{PodUIDs: podUIDs}:
default:
logger.Error(nil, "DRA health update channel is full, discarding pod update notification", "pluginName", pluginName, "pods", podUIDs)
}
} else {
logger.V(4).Info("Health info changed, but no active pods found using the affected devices", "pluginName", pluginName)
}
}
}
}
// Updates returns the channel that provides resource updates.
func (m *Manager) Updates() <-chan resourceupdates.Update {
// Return the internal channel that HandleWatchResourcesStream writes to.
return m.update
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"context"
"errors"
"fmt"
"net"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"k8s.io/klog/v2"
drahealthv1alpha1 "k8s.io/kubelet/pkg/apis/dra-health/v1alpha1"
drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1"
drapbv1beta1 "k8s.io/kubelet/pkg/apis/dra/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
// defaultClientCallTimeout is the default amount of time that a DRA driver has
// to respond to any of the gRPC calls. kubelet uses this value by passing nil
// to RegisterPlugin. Some tests use a different, usually shorter timeout to
// speed up testing.
//
// This is half of the kubelet retry period (according to
// https://github.com/kubernetes/kubernetes/commit/0449cef8fd5217d394c5cd331d852bd50983e6b3).
const defaultClientCallTimeout = 45 * time.Second
// All API versions supported by this wrapper.
// Sorted by most recent first, oldest last.
var servicesSupportedByKubelet = []string{
drapbv1.DRAPluginService,
drapbv1beta1.DRAPluginService,
}
// DRAPlugin contains information about one registered plugin of a DRA driver.
// It implements the kubelet operations for preparing/unpreparing by calling
// a gRPC interface that is implemented by the plugin.
type DRAPlugin struct {
driverName string
conn *grpc.ClientConn
endpoint string
chosenService string // e.g. drapbv1.DRAPluginService
clientCallTimeout time.Duration
mutex sync.Mutex
backgroundCtx context.Context
healthClient drahealthv1alpha1.DRAResourceHealthClient
healthStreamCtx context.Context
healthStreamCancel context.CancelFunc
}
func (p *DRAPlugin) getOrCreateGRPCConn() (*grpc.ClientConn, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
// If connection exists and is ready, return it.
if p.conn != nil && p.conn.GetState() != connectivity.Shutdown {
// Initialize health client if connection exists but client is nil
// This allows lazy init if connection was established before health was added.
if p.healthClient == nil {
p.healthClient = drahealthv1alpha1.NewDRAResourceHealthClient(p.conn)
klog.FromContext(p.backgroundCtx).V(4).Info("Initialized DRAResourceHealthClient lazily")
}
return p.conn, nil
}
// If the connection is dead, clean it up before creating a new one.
if p.conn != nil {
if err := p.conn.Close(); err != nil {
return nil, fmt.Errorf("failed to close stale gRPC connection to %s: %w", p.endpoint, err)
}
p.conn = nil
p.healthClient = nil
}
ctx := p.backgroundCtx
logger := klog.FromContext(ctx)
network := "unix"
logger.V(4).Info("Creating new gRPC connection", "protocol", network, "endpoint", p.endpoint)
// grpc.Dial is deprecated. grpc.NewClient should be used instead.
// For now this gets ignored because this function is meant to establish
// the connection, with the one second timeout below. Perhaps that
// approach should be reconsidered?
//nolint:staticcheck
conn, err := grpc.Dial(
p.endpoint,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, target)
}),
grpc.WithChainUnaryInterceptor(newMetricsInterceptor(p.driverName)),
)
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if ok := conn.WaitForStateChange(ctx, connectivity.Connecting); !ok {
return nil, errors.New("timed out waiting for gRPC connection to be ready")
}
p.conn = conn
p.healthClient = drahealthv1alpha1.NewDRAResourceHealthClient(p.conn)
return p.conn, nil
}
func (p *DRAPlugin) DriverName() string {
return p.driverName
}
func (p *DRAPlugin) NodePrepareResources(
ctx context.Context,
req *drapbv1.NodePrepareResourcesRequest,
opts ...grpc.CallOption,
) (*drapbv1.NodePrepareResourcesResponse, error) {
logger := klog.FromContext(ctx)
logger = klog.LoggerWithValues(logger, "driverName", p.driverName, "endpoint", p.endpoint)
ctx = klog.NewContext(ctx, logger)
logger.V(4).Info("Calling NodePrepareResources rpc", "request", req)
ctx, cancel := context.WithTimeout(ctx, p.clientCallTimeout)
defer cancel()
var err error
var response *drapbv1.NodePrepareResourcesResponse
switch p.chosenService {
case drapbv1beta1.DRAPluginService:
client := drapbv1beta1.NewDRAPluginClient(p.conn)
response, err = drapbv1beta1.V1Beta1ClientWrapper{DRAPluginClient: client}.NodePrepareResources(ctx, req)
case drapbv1.DRAPluginService:
client := drapbv1.NewDRAPluginClient(p.conn)
response, err = client.NodePrepareResources(ctx, req)
default:
// Shouldn't happen, validateSupportedServices should only
// return services we support here.
return nil, fmt.Errorf("internal error: unsupported chosen service: %q", p.chosenService)
}
logger.V(4).Info("Done calling NodePrepareResources rpc", "response", response, "err", err)
return response, err
}
func (p *DRAPlugin) NodeUnprepareResources(
ctx context.Context,
req *drapbv1.NodeUnprepareResourcesRequest,
opts ...grpc.CallOption,
) (*drapbv1.NodeUnprepareResourcesResponse, error) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Calling NodeUnprepareResource rpc", "request", req)
logger = klog.LoggerWithValues(logger, "driverName", p.driverName, "endpoint", p.endpoint)
ctx = klog.NewContext(ctx, logger)
ctx, cancel := context.WithTimeout(ctx, p.clientCallTimeout)
defer cancel()
var err error
var response *drapbv1.NodeUnprepareResourcesResponse
switch p.chosenService {
case drapbv1beta1.DRAPluginService:
client := drapbv1beta1.NewDRAPluginClient(p.conn)
response, err = drapbv1beta1.V1Beta1ClientWrapper{DRAPluginClient: client}.NodeUnprepareResources(ctx, req)
case drapbv1.DRAPluginService:
client := drapbv1.NewDRAPluginClient(p.conn)
response, err = client.NodeUnprepareResources(ctx, req)
default:
// Shouldn't happen, validateSupportedServices should only
// return services we support here.
return nil, fmt.Errorf("internal error: unsupported chosen service: %q", p.chosenService)
}
logger.V(4).Info("Done calling NodeUnprepareResources rpc", "response", response, "err", err)
return response, err
}
func newMetricsInterceptor(driverName string) grpc.UnaryClientInterceptor {
return func(ctx context.Context, method string, req, reply any, conn *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
start := time.Now()
err := invoker(ctx, method, req, reply, conn, opts...)
metrics.DRAGRPCOperationsDuration.WithLabelValues(driverName, method, status.Code(err).String()).Observe(time.Since(start).Seconds())
return err
}
}
// SetHealthStream stores the context and cancel function for the active health stream.
func (p *DRAPlugin) SetHealthStream(ctx context.Context, cancel context.CancelFunc) {
p.mutex.Lock()
defer p.mutex.Unlock()
p.healthStreamCtx = ctx
p.healthStreamCancel = cancel
}
// HealthStreamCancel returns the cancel function for the current health stream, if any.
func (p *DRAPlugin) HealthStreamCancel() context.CancelFunc {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.healthStreamCancel
}
// NodeWatchResources establishes a stream to receive health updates from the DRA plugin.
func (p *DRAPlugin) NodeWatchResources(ctx context.Context) (drahealthv1alpha1.DRAResourceHealth_NodeWatchResourcesClient, error) {
// Ensure a connection and the health client exist before proceeding.
// This call is idempotent and will create them if they don't exist.
_, err := p.getOrCreateGRPCConn()
if err != nil {
klog.FromContext(p.backgroundCtx).Error(err, "Failed to get gRPC connection for health client")
return nil, err
}
logger := klog.FromContext(ctx).WithValues("pluginName", p.driverName)
logger.V(4).Info("Starting WatchResources stream")
stream, err := p.healthClient.NodeWatchResources(ctx, &drahealthv1alpha1.NodeWatchResourcesRequest{})
if err != nil {
logger.Error(err, "NodeWatchResources RPC call failed")
return nil, err
}
logger.V(4).Info("NodeWatchResources stream initiated successfully")
return stream, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"context"
"errors"
"fmt"
"slices"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
grpcstats "google.golang.org/grpc/stats"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
timedworkers "k8s.io/kubernetes/pkg/controller/tainteviction" // TODO (?): move this common helper somewhere else?
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/utils/ptr"
)
// DRAPluginManager keeps track of how to reach plugins registered for DRA drivers.
// Each plugin has a gRPC endpoint. There may be more than one plugin per driver.
//
// To be informed about available plugins, the DRAPluginManager implements the
// [cache.PluginHandler] interface and needs to be added to the
// plugin manager.
//
// The zero value of DRAPluginManager is not usable; use NewDRAPluginManager.
type DRAPluginManager struct {
// backgroundCtx is used for all future activities of the DRAPluginManager.
// This is necessary because it implements APIs which don't
// provide a context.
backgroundCtx context.Context
cancel func(err error)
kubeClient kubernetes.Interface
getNode func() (*v1.Node, error)
wipingDelay time.Duration
streamHandler StreamHandler
// withIdleTimeout is only for unit testing and is ignored if <= 0.
withIdleTimeout time.Duration
wg sync.WaitGroup
mutex sync.RWMutex
// driver name -> DRAPlugin in the order in which they got added
store map[string][]*monitoredPlugin
// pendingWipes tracks at which time ResourceSlices for a
// DRA driver should be removed. The removal then happens in
// the background in a callback function that is invoked
// by the TimedWorkerQueue.
//
// TimedWorkerQueue uses namespace/name as key. We use
// the driver name as name with no namespace.
pendingWipes *timedworkers.TimedWorkerQueue
}
var _ cache.PluginHandler = &DRAPluginManager{}
// monitoredPlugin tracks whether the gRPC connection of a plugin is
// currently connected. For that it implements the [grpcstats.Handler]
// interface.
//
// The tagging functions might be useful for contextual logging. But
// for now all that matters is HandleConn.
type monitoredPlugin struct {
*DRAPlugin
pm *DRAPluginManager
// connected is protected by store.mutex.
connected bool
}
var _ grpcstats.Handler = &monitoredPlugin{}
func (m *monitoredPlugin) TagRPC(ctx context.Context, info *grpcstats.RPCTagInfo) context.Context {
return ctx
}
func (m *monitoredPlugin) HandleRPC(context.Context, grpcstats.RPCStats) {
}
func (m *monitoredPlugin) TagConn(ctx context.Context, info *grpcstats.ConnTagInfo) context.Context {
return ctx
}
func (m *monitoredPlugin) HandleConn(_ context.Context, stats grpcstats.ConnStats) {
connected := false
switch stats.(type) {
case *grpcstats.ConnBegin:
connected = true
case *grpcstats.ConnEnd:
// We have to ask for a reconnect, otherwise gRPC wouldn't try and
// thus we wouldn't be notified about a restart of the plugin.
//
// This must be done in a goroutine because gRPC deadlocks
// when called directly from inside HandleConn when a connection
// goes idle (and only then). It looks like cc.idlenessMgr.ExitIdleMode
// in Connect tries to lock a mutex that is already locked by
// the caller of HandleConn.
go m.conn.Connect()
default:
return
}
if m.pm.backgroundCtx.Err() != nil {
// Shutting down, no longer interested in connection changes...
return
}
logger := klog.FromContext(m.pm.backgroundCtx)
m.pm.mutex.Lock()
defer m.pm.mutex.Unlock()
logger.V(2).Info("Connection changed", "driverName", m.driverName, "endpoint", m.endpoint, "connected", connected)
m.connected = connected
m.pm.sync(m.driverName)
}
// NewDRAPluginManager creates a new DRAPluginManager, with support for wiping ResourceSlices
// when the plugin(s) for a DRA driver remain unavailable for too long.
//
// The context can be used to cancel all background activities.
// If desired, Stop can be called in addition or instead of canceling
// the context. It then also waits for background activities to stop.
func NewDRAPluginManager(ctx context.Context, kubeClient kubernetes.Interface, getNode func() (*v1.Node, error), streamHandler StreamHandler, wipingDelay time.Duration) *DRAPluginManager {
ctx, cancel := context.WithCancelCause(ctx)
pm := &DRAPluginManager{
backgroundCtx: klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "DRA registration handler")),
cancel: cancel,
kubeClient: kubeClient,
getNode: getNode,
wipingDelay: wipingDelay,
streamHandler: streamHandler,
}
pm.pendingWipes = timedworkers.CreateWorkerQueue(func(ctx context.Context, fireAt time.Time, args *timedworkers.WorkArgs) error {
pm.wipeResourceSlices(ctx, args.Object.Name)
return nil
})
// When kubelet starts up, no DRA driver has registered yet. None of
// the drivers are usable until they come back, which might not happen
// at all. Therefore it is better to not advertise any local resources
// because pods could get stuck on the node waiting for the driver
// to start up.
//
// This has to run in the background.
pm.wg.Add(1)
go func() {
defer pm.wg.Done()
ctx := pm.backgroundCtx
logger := klog.LoggerWithName(klog.FromContext(ctx), "startup")
ctx = klog.NewContext(ctx, logger)
pm.wipeResourceSlices(ctx, "" /* all drivers */)
}()
return pm
}
// Stop cancels any remaining background activities and blocks until all goroutines have stopped,
// with one caveat: goroutines created dynamically for wiping ResourceSlices are not tracked.
// They won't do anything because of the context cancellation.
func (pm *DRAPluginManager) Stop() {
defer pm.wg.Wait() // Must run after unlocking our mutex.
pm.mutex.Lock()
defer pm.mutex.Unlock()
logger := klog.FromContext(pm.backgroundCtx)
pm.cancel(errors.New("Stop was called"))
// Close all connections, otherwise gRPC keeps doing things in the background.
// Also cancel all pending wiping.
for driverName, plugins := range pm.store {
workerArg := timedworkers.NewWorkArgs(driverName, "")
pm.pendingWipes.CancelWork(logger, workerArg.KeyFromWorkArgs())
for _, plugin := range plugins {
if err := plugin.conn.Close(); err != nil {
logger.Error(err, "Closing gRPC connection", "driverName", plugin.driverName, "endpoint", plugin.endpoint)
}
}
}
}
// wipeResourceSlices deletes ResourceSlices of the node, optionally just for a specific driver.
//
// It gets called in a stand-alone goroutine at kubelet startup and as callback
// of a TimedWorkersQueue. In both cases the caller has no way of handling errors,
// so wipeResourceSlices must implement its own retry mechanism.
//
// Can be canceled by canceling the context.
func (pm *DRAPluginManager) wipeResourceSlices(ctx context.Context, driver string) {
if pm.kubeClient == nil {
return
}
logger := klog.FromContext(ctx)
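// Retry after 1s, 2s, 4s, ... with 20% jitter, capped at 5 minutes between
// attempts; with 100 steps this keeps retrying for hours unless the context
// gets canceled first.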
backoff := wait.Backoff{
Duration: time.Second,
Factor: 2,
Jitter: 0.2,
Cap: 5 * time.Minute,
Steps: 100,
}
// Error logging is done inside the loop. Context cancellation doesn't get logged.
_ = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
node, err := pm.getNode()
if apierrors.IsNotFound(err) {
return false, nil
}
if err != nil {
logger.Error(err, "Unexpected error checking for node")
return false, nil
}
fieldSelector := fields.Set{resourceapi.ResourceSliceSelectorNodeName: node.Name}
if driver != "" {
fieldSelector[resourceapi.ResourceSliceSelectorDriver] = driver
}
err = pm.kubeClient.ResourceV1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
switch {
case err == nil:
logger.V(3).Info("Deleted ResourceSlices", "fieldSelector", fieldSelector)
return true, nil
case apierrors.IsUnauthorized(err):
// This can happen while kubelet is still figuring out
// its credentials.
logger.V(5).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err)
return false, nil
case apierrors.IsNotFound(err):
logger.V(5).Info("ResourceSlices not found, nothing to delete.", "fieldSelector", fieldSelector)
return true, nil
default:
// Log and retry for other errors.
logger.V(3).Info("Deleting ResourceSlice failed, retrying", "fieldSelector", fieldSelector, "err", err)
return false, nil
}
})
}
// GetPlugin returns a wrapper around those gRPC methods of a DRA
// driver kubelet plugin which need to be called by kubelet. The wrapper
// handles gRPC connection management and logging. Connections are reused
// across different calls.
//
// It returns an informative error message including the driver name
// with an explanation why the driver is not usable.
func (pm *DRAPluginManager) GetPlugin(driverName string) (*DRAPlugin, error) {
if driverName == "" {
return nil, errors.New("DRA driver name is empty")
}
plugin := pm.get(driverName)
if plugin == nil {
return nil, fmt.Errorf("DRA driver %s is not registered", driverName)
}
return plugin, nil
}
// get retrieves a DRAPlugin by driver name.
func (pm *DRAPluginManager) get(driverName string) *DRAPlugin {
pm.mutex.RLock()
defer pm.mutex.RUnlock()
logger := klog.FromContext(pm.backgroundCtx)
plugins := pm.store[driverName]
if len(plugins) == 0 {
logger.V(5).Info("No plugin registered", "driverName", driverName)
return nil
}
// Heuristic: pick the most recent one. It's most likely
// the newest, except when kubelet got restarted and registered
// all running plugins in random order.
//
// Prefer plugins which are connected, otherwise also
// disconnected ones.
for i := len(plugins) - 1; i >= 0; i-- {
if plugin := plugins[i]; plugin.connected {
logger.V(5).Info("Preferring connected plugin", "driverName", driverName, "endpoint", plugin.endpoint)
return plugin.DRAPlugin
}
}
plugin := plugins[len(plugins)-1]
logger.V(5).Info("No plugin connected, using latest one", "driverName", driverName, "endpoint", plugin.endpoint)
return plugin.DRAPlugin
}
// RegisterPlugin implements [cache.PluginHandler].
// It is called by the plugin manager when a plugin is ready to be registered.
//
// Plugins of a DRA driver are required to register under the name of
// the DRA driver.
//
// DRA uses the version array in the registration API to enumerate all gRPC
// services that the plugin provides, using the "<gRPC package name>.<service
// name>" format (e.g. "v1beta1.DRAPlugin"). This allows kubelet to determine
// in advance which version to use and which optional services the plugin
// supports.
func (pm *DRAPluginManager) RegisterPlugin(driverName string, endpoint string, supportedServices []string, pluginClientTimeout *time.Duration) error {
chosenService, err := pm.validateSupportedServices(driverName, supportedServices)
if err != nil {
return fmt.Errorf("invalid supported gRPC versions of DRA driver plugin %s at endpoint %s: %w", driverName, endpoint, err)
}
timeout := ptr.Deref(pluginClientTimeout, defaultClientCallTimeout)
// Store the endpoint of the newly registered plugin in the map, keyed by the DRA
// driver name, so that the manager can look the plugin up when it needs to call it.
if err := pm.add(driverName, endpoint, chosenService, timeout); err != nil {
// No wrapping, the error already contains details.
return err
}
return nil
}
func (pm *DRAPluginManager) add(driverName string, endpoint string, chosenService string, clientCallTimeout time.Duration) error {
pm.mutex.Lock()
defer pm.mutex.Unlock()
p := &DRAPlugin{
driverName: driverName,
endpoint: endpoint,
chosenService: chosenService,
clientCallTimeout: clientCallTimeout,
backgroundCtx: pm.backgroundCtx,
}
if pm.store == nil {
pm.store = make(map[string][]*monitoredPlugin)
}
for _, oldP := range pm.store[driverName] {
if oldP.endpoint == endpoint {
// One plugin instance cannot hijack the endpoint of another instance.
return fmt.Errorf("endpoint %s already registered for DRA driver plugin %s", endpoint, driverName)
}
}
logger := klog.FromContext(pm.backgroundCtx)
mp := &monitoredPlugin{
DRAPlugin: p,
pm: pm,
}
// The gRPC connection gets created once. gRPC then connects to the gRPC server on demand.
target := "unix:" + endpoint
logger.V(4).Info("Creating new gRPC connection", "target", target)
options := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithChainUnaryInterceptor(newMetricsInterceptor(driverName)),
grpc.WithStatsHandler(mp),
}
if pm.withIdleTimeout > 0 {
options = append(options, grpc.WithIdleTimeout(pm.withIdleTimeout))
}
conn, err := grpc.NewClient(target, options...)
if err != nil {
return fmt.Errorf("create gRPC connection to DRA driver %s plugin at endpoint %s: %w", driverName, endpoint, err)
}
p.conn = conn
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) {
pm.wg.Add(1)
go func() {
defer pm.wg.Done()
streamCtx, streamCancel := context.WithCancel(p.backgroundCtx)
p.SetHealthStream(streamCtx, streamCancel)
wait.UntilWithContext(streamCtx, func(ctx context.Context) {
logger.V(4).Info("Attempting to start WatchResources health stream")
stream, err := p.NodeWatchResources(ctx)
if err != nil {
logger.V(3).Error(err, "Failed to establish WatchResources stream, will retry")
return
}
logger.V(2).Info("Successfully started WatchResources health stream")
err = pm.streamHandler.HandleWatchResourcesStream(ctx, stream, driverName)
logger.V(2).Info("WatchResources health stream has ended", "error", err)
}, 5*time.Second)
}()
}
// Ensure that gRPC tries to connect even if we don't call any gRPC method.
// This is necessary to detect early whether a plugin is really available.
// Connect is currently an experimental gRPC API. Should it be removed, we
// would need to do something else, like sending a dummy gRPC method call.
conn.Connect()
pm.store[p.driverName] = append(pm.store[p.driverName], mp)
logger.V(3).Info("Registered DRA plugin", "driverName", p.driverName, "endpoint", p.endpoint, "chosenService", p.chosenService, "numPlugins", len(pm.store[p.driverName]))
pm.sync(p.driverName)
return nil
}
// DeRegisterPlugin implements [cache.PluginHandler].
//
// The plugin manager calls it after it has detected that
// the plugin removed its registration socket,
// signaling that it is no longer available.
func (pm *DRAPluginManager) DeRegisterPlugin(driverName, endpoint string) {
// remove could be removed (no pun intended) but is kept for the sake of symmetry.
pm.remove(driverName, endpoint)
}
func (pm *DRAPluginManager) remove(driverName, endpoint string) {
logger := klog.FromContext(pm.backgroundCtx)
var p *monitoredPlugin
defer func() {
// Defer is used to avoid holding the mutex while closing the connection.
// This is necessary because closing the connection may cause deadlock
// with DRAPlugin.HandleConn, which can be called while holding the mutex.
if p != nil && p.conn != nil {
// Close the gRPC connection, so that it doesn't leak and
// doesn't try to reconnect to an unregistered plugin.
if err := p.conn.Close(); err != nil {
logger.Error(err, "Closing gRPC connection", "driverName", driverName, "endpoint", endpoint)
}
}
}()
pm.mutex.Lock()
defer pm.mutex.Unlock()
plugins := pm.store[driverName]
i := slices.IndexFunc(plugins, func(mp *monitoredPlugin) bool { return mp.driverName == driverName && mp.endpoint == endpoint })
if i == -1 {
return
}
p = plugins[i]
last := len(plugins) == 1
if last {
delete(pm.store, driverName)
} else {
pm.store[driverName] = slices.Delete(plugins, i, i+1)
}
// Cancel the plugin's health stream if it was active.
healthCancel := p.HealthStreamCancel()
if healthCancel != nil {
logger.V(4).Info("Canceling health stream during deregistration")
healthCancel()
}
logger.V(3).Info("Unregistered DRA plugin", "driverName", driverName, "endpoint", endpoint, "numPlugins", len(pm.store[driverName]))
pm.sync(driverName)
}
// sync must be called each time the information about a plugin changes.
// The mutex must be locked for writing.
func (pm *DRAPluginManager) sync(driverName string) {
if pm.kubeClient == nil {
// Cannot wipe.
return
}
ctx := pm.backgroundCtx
logger := klog.FromContext(pm.backgroundCtx)
workerArgs := timedworkers.NewWorkArgs(driverName, "")
// Is the DRA driver usable again?
if pm.usable(driverName) {
// Yes: cancel any pending ResourceSlice wiping for the DRA driver.
pm.pendingWipes.CancelWork(logger, workerArgs.KeyFromWorkArgs())
return
}
// No: ensure that we wipe ResourceSlices of the driver.
// If this was already queued earlier, the original timeout
// continues to apply because nothing changed.
if pm.pendingWipes.GetWorkerUnsafe(workerArgs.KeyFromWorkArgs()) != nil {
// Already queued or potentially already running.
//
// There's a small time-of-check-time-of-use race here,
// but that's fine: if wiping starts after we retrieve
// the pointer and before checking it, the work gets
// done, which is what we want.
return
}
now := time.Now()
fireAt := now.Add(pm.wipingDelay)
logger = klog.LoggerWithName(logger, "driver-cleanup")
logger = klog.LoggerWithValues(logger, "driverName", driverName)
ctx = klog.NewContext(ctx, logger)
pm.pendingWipes.AddWork(ctx, timedworkers.NewWorkArgs(driverName, ""), now, fireAt)
}
// usable returns true if at least one endpoint is ready to handle gRPC calls for the DRA driver.
// Must be called while holding the mutex.
func (pm *DRAPluginManager) usable(driverName string) bool {
for _, mp := range pm.store[driverName] {
if mp.connected {
return true
}
}
return false
}
// ValidatePlugin implements [cache.PluginHandler].
//
// The plugin manager calls it upon detection of a new registration socket
// opened by DRA plugin.
func (pm *DRAPluginManager) ValidatePlugin(driverName string, endpoint string, supportedServices []string) error {
_, err := pm.validateSupportedServices(driverName, supportedServices)
if err != nil {
return fmt.Errorf("invalid supported gRPC versions of DRA driver plugin %s at endpoint %s: %w", driverName, endpoint, err)
}
return err
}
// validateSupportedServices identifies the highest supported gRPC service for
// NodePrepareResources and NodeUnprepareResources and returns its name
// (e.g. [drapbv1beta1.DRAPluginService]). An error is returned if the plugin
// is unusable.
func (pm *DRAPluginManager) validateSupportedServices(driverName string, supportedServices []string) (string, error) {
if len(supportedServices) == 0 {
return "", errors.New("empty list of supported gRPC services (aka supported versions)")
}
// Pick the first service advertised by the plugin that the kubelet also
// supports. Plugins are expected to advertise their most recent service
// first, so this normally selects the most recent version.
chosenService := ""
for _, service := range supportedServices {
if slices.Contains(servicesSupportedByKubelet, service) {
chosenService = service
break
}
}
// No overlap between the services advertised by the plugin and those
// supported by the kubelet: the plugin is unusable.
if chosenService == "" {
return "", fmt.Errorf("none of services supported by the plugin (%q) are supported by the kubelet (%q)", supportedServices, servicesSupportedByKubelet)
}
return chosenService, nil
}
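// Illustrative sketch (not part of the original file; the driver name is
// a placeholder): assuming the kubelet advertises "v1beta1.DRAPlugin" in
// servicesSupportedByKubelet, a plugin registering with that service gets
// it chosen, while an empty or disjoint list is rejected:
//
//	chosen, err := pm.validateSupportedServices("example.com/driver", []string{"v1beta1.DRAPlugin"})
//	// chosen == "v1beta1.DRAPlugin", err == nil
//	_, err = pm.validateSupportedServices("example.com/driver", nil)
//	// err != nil: empty list of supported gRPC services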
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"encoding/json"
"hash/crc32"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)
const (
CheckpointAPIGroup = "checkpoint.dra.kubelet.k8s.io"
CheckpointKind = "DRACheckpoint"
CheckpointAPIVersion = CheckpointAPIGroup + "/v1"
)
// Checkpoint represents a structure to store DRA checkpoint data
type Checkpoint struct {
// Data is the JSON-serialized checkpoint data
Data string
// Checksum is a checksum of Data
Checksum uint32
}
type CheckpointData struct {
metav1.TypeMeta
ClaimInfoStateList ClaimInfoStateList
}
// NewCheckpoint creates a new checkpoint from a list of claim info states
func NewCheckpoint(data ClaimInfoStateList) (*Checkpoint, error) {
cpData := &CheckpointData{
TypeMeta: metav1.TypeMeta{
Kind: CheckpointKind,
APIVersion: CheckpointAPIVersion,
},
ClaimInfoStateList: data,
}
cpDataBytes, err := json.Marshal(cpData)
if err != nil {
return nil, err
}
cp := &Checkpoint{
Data: string(cpDataBytes),
Checksum: crc32.ChecksumIEEE(cpDataBytes),
}
return cp, nil
}
// MarshalCheckpoint marshals checkpoint to JSON
func (cp *Checkpoint) MarshalCheckpoint() ([]byte, error) {
return json.Marshal(cp)
}
// UnmarshalCheckpoint unmarshals checkpoint from JSON
// and verifies its data checksum
func (cp *Checkpoint) UnmarshalCheckpoint(blob []byte) error {
if err := json.Unmarshal(blob, cp); err != nil {
return err
}
// verify checksum
if err := cp.VerifyChecksum(); err != nil {
return err
}
return nil
}
// VerifyChecksum verifies that current checksum
// of checkpointed Data is valid
func (cp *Checkpoint) VerifyChecksum() error {
expectedCS := crc32.ChecksumIEEE([]byte(cp.Data))
if expectedCS != cp.Checksum {
return &errors.CorruptCheckpointError{ActualCS: uint64(cp.Checksum), ExpectedCS: uint64(expectedCS)}
}
return nil
}
// GetClaimInfoStateList returns list of claim info states from checkpoint
func (cp *Checkpoint) GetClaimInfoStateList() (ClaimInfoStateList, error) {
var data CheckpointData
if err := json.Unmarshal([]byte(cp.Data), &data); err != nil {
return nil, err
}
return data.ClaimInfoStateList, nil
}
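// Illustrative sketch (not part of the original file): a typical
// round-trip through the checkpoint API, using a nil state list as a
// stand-in for real claim info:
//
//	cp, err := NewCheckpoint(nil)
//	if err != nil { /* handle */ }
//	blob, err := cp.MarshalCheckpoint()
//	if err != nil { /* handle */ }
//	restored := &Checkpoint{}
//	// UnmarshalCheckpoint re-verifies the CRC32 checksum, so a corrupted
//	// blob surfaces as a CorruptCheckpointError here.
//	if err := restored.UnmarshalCheckpoint(blob); err != nil { /* handle */ }
//	states, err := restored.GetClaimInfoStateList()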
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"errors"
"fmt"
"sync"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
checkpointerrors "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)
type Checkpointer interface {
GetOrCreate() (*Checkpoint, error)
Store(*Checkpoint) error
}
type checkpointer struct {
sync.RWMutex
checkpointManager checkpointmanager.CheckpointManager
checkpointName string
}
// NewCheckpointer creates a new checkpointer for keeping track of claim info with a checkpoint backend
func NewCheckpointer(stateDir, checkpointName string) (Checkpointer, error) {
if len(checkpointName) == 0 {
return nil, fmt.Errorf("received empty string instead of checkpointName")
}
checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager: %w", err)
}
checkpointer := &checkpointer{
checkpointManager: checkpointManager,
checkpointName: checkpointName,
}
return checkpointer, nil
}
// GetOrCreate gets the checkpoint from its backing store, creating and
// storing an empty checkpoint if one doesn't exist yet.
func (sc *checkpointer) GetOrCreate() (*Checkpoint, error) {
sc.Lock()
defer sc.Unlock()
checkpoint, err := NewCheckpoint(nil)
if err != nil {
return nil, fmt.Errorf("failed to create new checkpoint: %w", err)
}
err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint)
if errors.Is(err, checkpointerrors.ErrCheckpointNotFound) {
err = sc.store(checkpoint)
if err != nil {
return nil, fmt.Errorf("failed to store checkpoint %v: %w", sc.checkpointName, err)
}
return checkpoint, nil
}
if err != nil {
return nil, fmt.Errorf("failed to get checkpoint %v: %w", sc.checkpointName, err)
}
return checkpoint, nil
}
// Store stores checkpoint to the file
func (sc *checkpointer) Store(checkpoint *Checkpoint) error {
sc.Lock()
defer sc.Unlock()
return sc.store(checkpoint)
}
// store saves state to a checkpoint; the caller is responsible for locking.
func (sc *checkpointer) store(checkpoint *Checkpoint) error {
if err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint); err != nil {
return fmt.Errorf("could not save checkpoint %s: %w", sc.checkpointName, err)
}
return nil
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package state
import (
sets "k8s.io/apimachinery/pkg/util/sets"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimInfoState) DeepCopyInto(out *ClaimInfoState) {
*out = *in
if in.PodUIDs != nil {
in, out := &in.PodUIDs, &out.PodUIDs
*out = make(sets.Set[string], len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.DriverState != nil {
in, out := &in.DriverState, &out.DriverState
*out = make(map[string]DriverState, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimInfoState.
func (in *ClaimInfoState) DeepCopy() *ClaimInfoState {
if in == nil {
return nil
}
out := new(ClaimInfoState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Device) DeepCopyInto(out *Device) {
*out = *in
if in.RequestNames != nil {
in, out := &in.RequestNames, &out.RequestNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.CDIDeviceIDs != nil {
in, out := &in.CDIDeviceIDs, &out.CDIDeviceIDs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
func (in *Device) DeepCopy() *Device {
if in == nil {
return nil
}
out := new(Device)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverState) DeepCopyInto(out *DriverState) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]Device, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverState.
func (in *DriverState) DeepCopy() *DriverState {
if in == nil {
return nil
}
out := new(DriverState)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package dra
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimInfo) DeepCopyInto(out *ClaimInfo) {
*out = *in
in.ClaimInfoState.DeepCopyInto(&out.ClaimInfoState)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimInfo.
func (in *ClaimInfo) DeepCopy() *ClaimInfo {
if in == nil {
return nil
}
out := new(ClaimInfo)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/server/healthz"
internalapi "k8s.io/cri-api/pkg/apis"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)
type FakeContainerManager struct {
sync.Mutex
CalledFunctions []string
PodContainerManager *FakePodContainerManager
shouldResetExtendedResourceCapacity bool
nodeConfig NodeConfig
memoryManager memorymanager.Manager
}
var _ ContainerManager = &FakeContainerManager{}
func NewFakeContainerManager() *FakeContainerManager {
return &FakeContainerManager{
PodContainerManager: NewFakePodContainerManager(),
memoryManager: memorymanager.NewFakeManager(context.TODO()),
}
}
func NewFakeContainerManagerWithNodeConfig(nodeConfig NodeConfig) *FakeContainerManager {
return &FakeContainerManager{
PodContainerManager: NewFakePodContainerManager(),
nodeConfig: nodeConfig,
}
}
func (cm *FakeContainerManager) Start(_ context.Context, _ *v1.Node, _ ActivePodsFunc, _ GetNodeFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService, _ bool) error {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "Start")
return nil
}
func (cm *FakeContainerManager) SystemCgroupsLimit() v1.ResourceList {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "SystemCgroupsLimit")
return v1.ResourceList{}
}
func (cm *FakeContainerManager) GetNodeConfig() NodeConfig {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetNodeConfig")
return cm.nodeConfig
}
func (cm *FakeContainerManager) GetMountedSubsystems() *CgroupSubsystems {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetMountedSubsystems")
return &CgroupSubsystems{}
}
func (cm *FakeContainerManager) GetQOSContainersInfo() QOSContainersInfo {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "QOSContainersInfo")
return QOSContainersInfo{}
}
func (cm *FakeContainerManager) UpdateQOSCgroups() error {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "UpdateQOSCgroups")
return nil
}
func (cm *FakeContainerManager) Status() Status {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "Status")
return Status{}
}
func (cm *FakeContainerManager) GetNodeAllocatableReservation() v1.ResourceList {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetNodeAllocatableReservation")
return nil
}
func (cm *FakeContainerManager) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetCapacity")
if !localStorageCapacityIsolation {
return v1.ResourceList{}
}
c := v1.ResourceList{
v1.ResourceEphemeralStorage: *resource.NewQuantity(
int64(0),
resource.BinarySI),
}
return c
}
func (cm *FakeContainerManager) GetPluginRegistrationHandlers() map[string]cache.PluginHandler {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetPluginRegistrationHandlers")
return nil
}
func (cm *FakeContainerManager) GetHealthCheckers() []healthz.HealthChecker {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetPluginRegistrationServerChecker")
return []healthz.HealthChecker{}
}
func (cm *FakeContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetDevicePluginResourceCapacity")
return nil, nil, []string{}
}
func (cm *FakeContainerManager) NewPodContainerManager() PodContainerManager {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "PodContainerManager")
return cm.PodContainerManager
}
func (cm *FakeContainerManager) GetResources(ctx context.Context, pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetResources")
return &kubecontainer.RunContainerOptions{}, nil
}
func (cm *FakeContainerManager) UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "UpdatePluginResources")
return nil
}
func (cm *FakeContainerManager) InternalContainerLifecycle() InternalContainerLifecycle {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "InternalContainerLifecycle")
return &internalContainerLifecycleImpl{cpumanager.NewFakeManager(), cm.memoryManager, topologymanager.NewFakeManager()}
}
func (cm *FakeContainerManager) GetPodCgroupRoot() string {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupRoot")
return ""
}
func (cm *FakeContainerManager) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetDevices")
return nil
}
func (cm *FakeContainerManager) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocatableDevices")
return nil
}
func (cm *FakeContainerManager) ShouldResetExtendedResourceCapacity() bool {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "ShouldResetExtendedResourceCapacity")
return cm.shouldResetExtendedResourceCapacity
}
func (cm *FakeContainerManager) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetAllocateResourcesPodAdmitHandler")
return topologymanager.NewFakeManager()
}
func (cm *FakeContainerManager) UpdateAllocatedDevices() {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "UpdateAllocatedDevices")
return
}
func (cm *FakeContainerManager) GetCPUs(_, _ string) []int64 {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetCPUs")
return nil
}
func (cm *FakeContainerManager) GetAllocatableCPUs() []int64 {
cm.Lock()
defer cm.Unlock()
return nil
}
func (cm *FakeContainerManager) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetMemory")
return nil
}
func (cm *FakeContainerManager) GetAllocatableMemory() []*podresourcesapi.ContainerMemory {
cm.Lock()
defer cm.Unlock()
return nil
}
func (cm *FakeContainerManager) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*podresourcesapi.DynamicResource {
return nil
}
func (cm *FakeContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList {
cm.Lock()
defer cm.Unlock()
return v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
}
}
func (cm *FakeContainerManager) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return nil
}
func (cm *FakeContainerManager) UnprepareDynamicResources(context.Context, *v1.Pod) error {
return nil
}
func (cm *FakeContainerManager) PodMightNeedToUnprepareResources(UID types.UID) bool {
return false
}
func (cm *FakeContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, status *v1.PodStatus) {
}
func (cm *FakeContainerManager) Updates() <-chan resourceupdates.Update {
return nil
}
func (cm *FakeContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool {
return false
}
func (cm *FakeContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
return false
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
func NewFakeInternalContainerLifecycle() *fakeInternalContainerLifecycle {
return &fakeInternalContainerLifecycle{}
}
type fakeInternalContainerLifecycle struct{}
func (f *fakeInternalContainerLifecycle) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
return nil
}
func (f *fakeInternalContainerLifecycle) PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error {
return nil
}
func (f *fakeInternalContainerLifecycle) PostStopContainer(containerID string) error {
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"reflect"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type FakePodContainerManager struct {
sync.Mutex
CalledFunctions []string
Cgroups map[types.UID]CgroupName
}
var _ PodContainerManager = &FakePodContainerManager{}
func NewFakePodContainerManager() *FakePodContainerManager {
return &FakePodContainerManager{
Cgroups: make(map[types.UID]CgroupName),
}
}
func (m *FakePodContainerManager) AddPodFromCgroups(pod *kubecontainer.Pod) {
m.Lock()
defer m.Unlock()
m.Cgroups[pod.ID] = []string{pod.Name}
}
func (m *FakePodContainerManager) Exists(_ *v1.Pod) bool {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "Exists")
return true
}
func (m *FakePodContainerManager) EnsureExists(_ *v1.Pod) error {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "EnsureExists")
return nil
}
func (m *FakePodContainerManager) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "GetPodContainerName")
return nil, ""
}
func (m *FakePodContainerManager) Destroy(name CgroupName) error {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "Destroy")
for key, cgname := range m.Cgroups {
if reflect.DeepEqual(cgname, name) {
delete(m.Cgroups, key)
return nil
}
}
return nil
}
func (m *FakePodContainerManager) ReduceCPULimits(_ CgroupName) error {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "ReduceCPULimits")
return nil
}
func (m *FakePodContainerManager) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "GetAllPodsFromCgroups")
// return a copy for the race detector
grp := make(map[types.UID]CgroupName)
for key, value := range m.Cgroups {
grp[key] = value
}
return grp, nil
}
func (m *FakePodContainerManager) IsPodCgroup(cgroupfs string) (bool, types.UID) {
m.Lock()
defer m.Unlock()
m.CalledFunctions = append(m.CalledFunctions, "IsPodCgroup")
return false, types.UID("")
}
func (cm *FakePodContainerManager) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupMemoryUsage")
return 0, nil
}
func (cm *FakePodContainerManager) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "GetPodCgroupConfig")
return nil, nil
}
func (cm *FakePodContainerManager) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error {
cm.Lock()
defer cm.Unlock()
cm.CalledFunctions = append(cm.CalledFunctions, "SetPodCgroupConfig")
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)
// for typecheck across platforms
var _ func(int64, int64) int64 = MilliCPUToQuota
var _ func(int64) uint64 = MilliCPUToShares
var _ func(*v1.Pod, bool, uint64, bool) *ResourceConfig = ResourceConfigForPod
var _ func() (*CgroupSubsystems, error) = GetCgroupSubsystems
var _ func(string) ([]int, error) = getCgroupProcs
var _ func(types.UID) string = GetPodCgroupNameSuffix
var _ func(string, bool, string) string = NodeAllocatableRoot
var _ func(string) (string, error) = GetKubeletContainer
// hardEvictionReservation returns a ResourceList that includes resources reserved based on hard eviction thresholds.
func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList {
if len(thresholds) == 0 {
return nil
}
ret := v1.ResourceList{}
for _, threshold := range thresholds {
if threshold.Operator != evictionapi.OpLessThan {
continue
}
switch threshold.Signal {
case evictionapi.SignalMemoryAvailable:
memoryCapacity := capacity[v1.ResourceMemory]
value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity)
ret[v1.ResourceMemory] = *value
case evictionapi.SignalNodeFsAvailable:
storageCapacity := capacity[v1.ResourceEphemeralStorage]
value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity)
ret[v1.ResourceEphemeralStorage] = *value
}
}
return ret
}
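// Illustrative sketch (threshold values are assumptions, not from the
// original file; apiresource stands for k8s.io/apimachinery/pkg/api/resource
// and capacity is a placeholder v1.ResourceList): a hard eviction
// threshold of memory.available<500Mi reserves 500Mi of memory:
//
//	q := apiresource.MustParse("500Mi")
//	thresholds := []evictionapi.Threshold{{
//		Signal:   evictionapi.SignalMemoryAvailable,
//		Operator: evictionapi.OpLessThan,
//		Value:    evictionapi.ThresholdValue{Quantity: &q},
//	}}
//	reserved := hardEvictionReservation(thresholds, capacity)
//	// reserved[v1.ResourceMemory] == 500Mi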
func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.Set[string]) {
podSandboxMap := make(map[string]string)
podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil)
for _, p := range podSandboxList {
podSandboxMap[p.Id] = p.Metadata.Uid
}
runningSet := sets.New[string]()
containerMap := containermap.NewContainerMap()
containerList, _ := runtimeService.ListContainers(ctx, nil)
for _, c := range containerList {
if _, exists := podSandboxMap[c.PodSandboxId]; !exists {
klog.InfoS("No PodSandBox found for the container", "podSandboxId", c.PodSandboxId, "containerName", c.Metadata.Name, "containerId", c.Id)
continue
}
podUID := podSandboxMap[c.PodSandboxId]
containerMap.Add(podUID, c.Metadata.Name, c.Id)
if c.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
klog.V(4).InfoS("Container reported running", "podSandboxId", c.PodSandboxId, "podUID", podUID, "containerName", c.Metadata.Name, "containerId", c.Id)
runningSet.Insert(c.Id)
}
}
return containerMap, runningSet
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
libcontainercgroups "github.com/opencontainers/cgroups"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/util"
)
const (
// These limits are defined in the kernel:
// https://github.com/torvalds/linux/blob/0bddd227f3dc55975e2b8dfa7fc6f959b062a2c7/kernel/sched/sched.h#L427-L428
MinShares = 2
MaxShares = 262144
SharesPerCPU = 1024
MilliCPUToCPU = 1000
// 100000 microseconds is equivalent to 100ms
QuotaPeriod = 100000
// 1000 microseconds is equivalent to 1ms
// defined here:
// https://github.com/torvalds/linux/blob/cac03ac368fabff0122853de2422d4e17a32de08/kernel/sched/core.c#L10546
MinQuotaPeriod = 1000
// From the inverse of the conversion in MilliCPUToQuota:
// MinQuotaPeriod * MilliCPUToCPU / QuotaPeriod = 1000 * 1000 / 100000 = 10
MinMilliCPULimit = 10
)
// MilliCPUToQuota converts milliCPU to CFS quota and period values.
// The period parameter and the returned quota value are both in microseconds.
func MilliCPUToQuota(milliCPU int64, period int64) (quota int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the period over which usage is measured, supplied by the period parameter)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, the same amounts simply scale up
// see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
if milliCPU == 0 {
return
}
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
period = QuotaPeriod
}
// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * period) / MilliCPUToCPU
// quota needs to be a minimum of 1ms.
if quota < MinQuotaPeriod {
quota = MinQuotaPeriod
}
return
}
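// Worked example (illustrative, with the CPUCFSQuotaPeriod gate at its
// default, so the period is forced to QuotaPeriod):
//
//	_ = MilliCPUToQuota(500, QuotaPeriod) // (500 * 100000) / 1000 = 50000us, half of one CPU
//	_ = MilliCPUToQuota(5, QuotaPeriod)   // 500us computed, clamped up to MinQuotaPeriod (1000us)
//	_ = MilliCPUToQuota(0, QuotaPeriod)   // 0, i.e. no quota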
// MilliCPUToShares converts the milliCPU to CFS shares.
func MilliCPUToShares(milliCPU int64) uint64 {
if milliCPU == 0 {
// Docker converts zero milliCPU to unset, which maps to the kernel
// default of 1024. Return MinShares (2) here instead, to really match
// the kernel minimum for zero milliCPU.
return MinShares
}
// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
shares := (milliCPU * SharesPerCPU) / MilliCPUToCPU
if shares < MinShares {
return MinShares
}
if shares > MaxShares {
return MaxShares
}
return uint64(shares)
}
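// Worked example (illustrative):
//
//	_ = MilliCPUToShares(250) // (250 * 1024) / 1000 = 256 shares
//	_ = MilliCPUToShares(0)   // MinShares (2), see the comment above
//	_ = MilliCPUToShares(1)   // 1 computed, clamped up to MinShares (2)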
// HugePageLimits converts the API representation to a map
// from huge page size (in bytes) to huge page limit (in bytes).
func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
hugePageLimits := map[int64]int64{}
for k, v := range resourceList {
if v1helper.IsHugePageResourceName(k) {
pageSize, _ := v1helper.HugePageSizeFromResourceName(k)
if value, exists := hugePageLimits[pageSize.Value()]; exists {
hugePageLimits[pageSize.Value()] = value + v.Value()
} else {
hugePageLimits[pageSize.Value()] = v.Value()
}
}
}
return hugePageLimits
}
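// Illustrative sketch (the resource values are assumptions; apiresource
// stands for k8s.io/apimachinery/pkg/api/resource, not the
// component-helpers resource package imported by this file): a request
// for 4Mi of 2Mi huge pages yields one entry keyed by page size in bytes:
//
//	limits := HugePageLimits(v1.ResourceList{
//		"hugepages-2Mi": apiresource.MustParse("4Mi"),
//	})
//	// limits == map[int64]int64{2097152: 4194304}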
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources)
// sum requests and limits.
reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
UseStatusResources: false,
})
// track if limits were applied for each resource.
memoryLimitsDeclared := true
cpuLimitsDeclared := true
limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
if res.Cpu().IsZero() {
cpuLimitsDeclared = false
}
if res.Memory().IsZero() {
memoryLimitsDeclared = false
}
},
})
if podLevelResourcesEnabled && resource.IsPodLevelResourcesSet(allocatedPod) {
if !allocatedPod.Spec.Resources.Limits.Cpu().IsZero() {
cpuLimitsDeclared = true
}
if !allocatedPod.Spec.Resources.Limits.Memory().IsZero() {
memoryLimitsDeclared = true
}
}
// map hugepage pagesize (bytes) to limits (bytes)
hugePageLimits := HugePageLimits(reqs)
cpuRequests := int64(0)
cpuLimits := int64(0)
memoryLimits := int64(0)
if request, found := reqs[v1.ResourceCPU]; found {
cpuRequests = request.MilliValue()
}
if limit, found := limits[v1.ResourceCPU]; found {
cpuLimits = limit.MilliValue()
}
if limit, found := limits[v1.ResourceMemory]; found {
memoryLimits = limit.Value()
}
// convert to CFS values
cpuShares := MilliCPUToShares(cpuRequests)
cpuQuota := MilliCPUToQuota(cpuLimits, int64(cpuPeriod))
// quota is not capped when cfs quota is disabled
if !enforceCPULimits {
cpuQuota = int64(-1)
}
// determine the qos class
qosClass := v1qos.GetPodQOS(allocatedPod)
// build the result
result := &ResourceConfig{}
if qosClass == v1.PodQOSGuaranteed {
result.CPUShares = &cpuShares
result.CPUQuota = &cpuQuota
result.CPUPeriod = &cpuPeriod
result.Memory = &memoryLimits
} else if qosClass == v1.PodQOSBurstable {
result.CPUShares = &cpuShares
if cpuLimitsDeclared {
result.CPUQuota = &cpuQuota
result.CPUPeriod = &cpuPeriod
}
if memoryLimitsDeclared {
result.Memory = &memoryLimits
}
} else {
shares := uint64(MinShares)
result.CPUShares = &shares
}
result.HugePageLimit = hugePageLimits
if enforceMemoryQoS {
memoryMin := int64(0)
if request, found := reqs[v1.ResourceMemory]; found {
memoryMin = request.Value()
}
if memoryMin > 0 {
result.Unified = map[string]string{
Cgroup2MemoryMin: strconv.FormatInt(memoryMin, 10),
}
}
}
return result
}
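// Illustrative sketch (the pod spec is an assumption): for a Burstable
// pod whose single container requests cpu: 250m and declares no limits,
// only the CPU shares are populated; quota, period, and memory limits
// remain unset:
//
//	cfg := ResourceConfigForPod(pod, true /* enforceCPULimits */, QuotaPeriod, false /* enforceMemoryQoS */)
//	// *cfg.CPUShares == 256, cfg.CPUQuota == nil, cfg.CPUPeriod == nil, cfg.Memory == nil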
// getCgroupSubsystemsV1 returns information about the mounted cgroup v1 subsystems
func getCgroupSubsystemsV1() (*CgroupSubsystems, error) {
// get all cgroup mounts.
allCgroups, err := libcontainercgroups.GetCgroupMounts(true)
if err != nil {
return &CgroupSubsystems{}, err
}
if len(allCgroups) == 0 {
return &CgroupSubsystems{}, fmt.Errorf("failed to find cgroup mounts")
}
mountPoints := make(map[string]string, len(allCgroups))
for _, mount := range allCgroups {
// BEFORE: the kubelet used an arbitrary mount point per cgroup subsystem.
// NOW: more deterministic, the kubelet uses the mount point with the shortest path.
// FUTURE: the expected behavior should be clearly determined and documented.
// ref. issue: https://github.com/kubernetes/kubernetes/issues/95488
for _, subsystem := range mount.Subsystems {
previous := mountPoints[subsystem]
if previous == "" || len(mount.Mountpoint) < len(previous) {
mountPoints[subsystem] = mount.Mountpoint
}
}
}
return &CgroupSubsystems{
Mounts: allCgroups,
MountPoints: mountPoints,
}, nil
}
// getCgroupSubsystemsV2 returns information about the enabled cgroup v2 subsystems
func getCgroupSubsystemsV2() (*CgroupSubsystems, error) {
controllers, err := libcontainercgroups.GetAllSubsystems()
if err != nil {
return nil, err
}
mounts := []libcontainercgroups.Mount{}
mountPoints := make(map[string]string, len(controllers))
for _, controller := range controllers {
mountPoints[controller] = util.CgroupRoot
m := libcontainercgroups.Mount{
Mountpoint: util.CgroupRoot,
Root: util.CgroupRoot,
Subsystems: []string{controller},
}
mounts = append(mounts, m)
}
return &CgroupSubsystems{
Mounts: mounts,
MountPoints: mountPoints,
}, nil
}
// GetCgroupSubsystems returns information about the mounted cgroup subsystems
func GetCgroupSubsystems() (*CgroupSubsystems, error) {
if libcontainercgroups.IsCgroup2UnifiedMode() {
return getCgroupSubsystemsV2()
}
return getCgroupSubsystemsV1()
}
// getCgroupProcs takes a cgroup directory name as an argument, reads
// through the cgroup's procs file, and returns a list of TGIDs. It
// returns an empty list if the procs file doesn't exist.
func getCgroupProcs(dir string) ([]int, error) {
procsFile := filepath.Join(dir, "cgroup.procs")
f, err := os.Open(procsFile)
if err != nil {
if os.IsNotExist(err) {
// The procs file does not exist, so there are no PIDs attached to this directory.
return []int{}, nil
}
return nil, err
}
defer f.Close()
s := bufio.NewScanner(f)
out := []int{}
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, fmt.Errorf("unexpected line in %v; could not convert to pid: %v", procsFile, err)
}
out = append(out, pid)
}
}
return out, nil
}
// GetPodCgroupNameSuffix returns the last element of the pod CgroupName identifier
func GetPodCgroupNameSuffix(podUID types.UID) string {
return podCgroupNamePrefix + string(podUID)
}
// NodeAllocatableRoot returns the literal cgroup path for the node allocatable cgroup
func NodeAllocatableRoot(cgroupRoot string, cgroupsPerQOS bool, cgroupDriver string) string {
nodeAllocatableRoot := ParseCgroupfsToCgroupName(cgroupRoot)
if cgroupsPerQOS {
nodeAllocatableRoot = NewCgroupName(nodeAllocatableRoot, defaultNodeAllocatableCgroupName)
}
if cgroupDriver == "systemd" {
return nodeAllocatableRoot.ToSystemd()
}
return nodeAllocatableRoot.ToCgroupfs()
}
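// Illustrative sketch (assumes defaultNodeAllocatableCgroupName is
// "kubepods", as in the kubelet defaults):
//
//	_ = NodeAllocatableRoot("/", true, "systemd")  // "/kubepods.slice"
//	_ = NodeAllocatableRoot("/", true, "cgroupfs") // "/kubepods"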
// GetKubeletContainer returns the cgroup the kubelet will use
func GetKubeletContainer(kubeletCgroups string) (string, error) {
if kubeletCgroups == "" {
cont, err := getContainer(os.Getpid())
if err != nil {
return "", err
}
return cont, nil
}
return kubeletCgroups, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
)
type InternalContainerLifecycle interface {
PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error
PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error
PostStopContainer(containerID string) error
}
// Implements InternalContainerLifecycle interface.
type internalContainerLifecycleImpl struct {
cpuManager cpumanager.Manager
memoryManager memorymanager.Manager
topologyManager topologymanager.Manager
}
func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error {
if i.cpuManager != nil {
i.cpuManager.AddContainer(pod, container, containerID)
}
if i.memoryManager != nil {
i.memoryManager.AddContainer(context.TODO(), pod, container, containerID)
}
i.topologyManager.AddContainer(pod, container, containerID)
return nil
}
func (i *internalContainerLifecycleImpl) PostStopContainer(containerID string) error {
return i.topologyManager.RemoveContainer(containerID)
}
//go:build linux
// +build linux
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"context"
"strconv"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
if i.cpuManager != nil {
allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name)
if !allocatedCPUs.IsEmpty() {
containerConfig.Linux.Resources.CpusetCpus = allocatedCPUs.String()
}
}
if i.memoryManager != nil {
numaNodes := i.memoryManager.GetMemoryNUMANodes(context.TODO(), pod, container)
if numaNodes.Len() > 0 {
var affinity []string
for _, numaNode := range sets.List(numaNodes) {
affinity = append(affinity, strconv.Itoa(numaNode))
}
containerConfig.Linux.Resources.CpusetMems = strings.Join(affinity, ",")
}
}
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memorymanager
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/status"
)
type fakeManager struct {
state state.State
}
func (m *fakeManager) Start(ctx context.Context, activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
logger := klog.FromContext(ctx)
logger.Info("Start()")
return nil
}
func (m *fakeManager) Policy(ctx context.Context) Policy {
logger := klog.FromContext(ctx)
logger.Info("Policy()")
return NewPolicyNone(ctx)
}
func (m *fakeManager) Allocate(pod *v1.Pod, container *v1.Container) error {
ctx := context.TODO()
logger := klog.FromContext(ctx)
logger.Info("Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
return nil
}
func (m *fakeManager) AddContainer(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID string) {
logger := klog.FromContext(ctx)
logger.Info("Add container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
}
func (m *fakeManager) GetMemoryNUMANodes(ctx context.Context, pod *v1.Pod, container *v1.Container) sets.Set[int] {
logger := klog.FromContext(ctx)
logger.Info("Get MemoryNUMANodes", "pod", klog.KObj(pod), "containerName", container.Name)
return nil
}
func (m *fakeManager) RemoveContainer(ctx context.Context, containerID string) error {
logger := klog.FromContext(ctx)
logger.Info("RemoveContainer", "containerID", containerID)
return nil
}
func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
ctx := context.TODO()
logger := klog.FromContext(ctx)
logger.Info("Get Topology Hints", "pod", klog.KObj(pod), "containerName", container.Name)
return map[string][]topologymanager.TopologyHint{}
}
func (m *fakeManager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
ctx := context.TODO()
logger := klog.FromContext(ctx)
logger.Info("Get Pod Topology Hints", "pod", klog.KObj(pod))
return map[string][]topologymanager.TopologyHint{}
}
func (m *fakeManager) State() state.Reader {
return m.state
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (m *fakeManager) GetAllocatableMemory(ctx context.Context) []state.Block {
logger := klog.FromContext(ctx)
logger.Info("Get Allocatable Memory")
return []state.Block{}
}
// GetMemory returns the memory allocated by a container from NUMA nodes
func (m *fakeManager) GetMemory(ctx context.Context, podUID, containerName string) []state.Block {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "podUID", podUID, "containerName", containerName)
logger.Info("Get Memory")
return []state.Block{}
}
// NewFakeManager creates an empty/fake memory manager
func NewFakeManager(ctx context.Context) Manager {
logger := klog.LoggerWithName(klog.FromContext(ctx), "memory-mgr.fake")
return &fakeManager{
// logger: logger,
state: state.NewMemoryState(logger),
}
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memorymanager
import (
"context"
"fmt"
"runtime"
"sync"
cadvisorapi "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/status"
)
// memoryManagerStateFileName is the file name where memory manager stores its state
const memoryManagerStateFileName = "memory_manager_state"
// ActivePodsFunc is a function that returns a list of active pods
type ActivePodsFunc func() []*v1.Pod
type runtimeService interface {
UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error
}
type sourcesReadyStub struct{}
func (s *sourcesReadyStub) AddSource(source string) {}
func (s *sourcesReadyStub) AllReady() bool { return true }
// Manager interface provides methods for Kubelet to manage pod memory.
type Manager interface {
// Start is called during Kubelet initialization.
Start(ctx context.Context, activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error
// AddContainer adds the mapping from a container ID to the pod UID and
// the container name. The mapping is used to remove the memory
// allocation when the container is removed.
AddContainer(ctx context.Context, p *v1.Pod, c *v1.Container, containerID string)
// Allocate is called to pre-allocate memory resources during Pod admission.
// This must be called at some point prior to the AddContainer() call for a container, e.g. at pod admission time.
Allocate(pod *v1.Pod, container *v1.Container) error
// RemoveContainer is called after Kubelet decides to kill or delete a
// container. After this call, any memory allocated to the container is freed.
RemoveContainer(ctx context.Context, containerID string) error
// State returns a read-only interface to the internal memory manager state.
State() state.Reader
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
// GetPodTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
GetPodTopologyHints(*v1.Pod) map[string][]topologymanager.TopologyHint
// GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory
GetMemoryNUMANodes(ctx context.Context, pod *v1.Pod, container *v1.Container) sets.Set[int]
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
GetAllocatableMemory(ctx context.Context) []state.Block
// GetMemory returns the memory allocated by a container from NUMA nodes
GetMemory(ctx context.Context, podUID, containerName string) []state.Block
}
type manager struct {
sync.Mutex
policy Policy
// state allows restoring information about memory allocation for
// guaranteed pods across kubelet restarts
state state.State
// containerRuntime is the container runtime service interface needed
// to make UpdateContainerResources() calls against the containers.
containerRuntime runtimeService
// activePods is a method for listing active pods on the node,
// so that all containers can be updated during calls to removeStaleState.
activePods ActivePodsFunc
// podStatusProvider provides a method for obtaining pod statuses
// and the containerID of their containers
podStatusProvider status.PodStatusProvider
// containerMap provides a mapping from (pod, container) -> containerID
// for all containers of a pod
containerMap containermap.ContainerMap
// sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness.
// We use it to determine when we can purge inactive pods from checkpointed state.
sourcesReady config.SourcesReady
// stateFileDirectory holds the directory where the state file for checkpoints is held.
stateFileDirectory string
// allocatableMemory holds the allocatable memory for each NUMA node
allocatableMemory []state.Block
}
var _ Manager = &manager{}
// NewManager returns a new instance of the memory manager
func NewManager(ctx context.Context, policyName string, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
var policy Policy
switch policyType(policyName) {
case policyTypeNone:
policy = NewPolicyNone(ctx)
case PolicyTypeStatic:
if runtime.GOOS == "windows" {
return nil, fmt.Errorf("policy %q is not available on Windows", PolicyTypeStatic)
}
systemReserved, err := getSystemReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory)
if err != nil {
return nil, err
}
policy, err = NewPolicyStatic(ctx, machineInfo, systemReserved, affinity)
if err != nil {
return nil, err
}
case policyTypeBestEffort:
if runtime.GOOS == "windows" {
systemReserved, err := getSystemReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory)
if err != nil {
return nil, err
}
policy, err = NewPolicyBestEffort(ctx, machineInfo, systemReserved, affinity)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("policy %q is not available for platform %q", policyTypeBestEffort, runtime.GOOS)
}
default:
return nil, fmt.Errorf("unknown policy: %q", policyName)
}
manager := &manager{
policy: policy,
stateFileDirectory: stateFileDirectory,
}
manager.sourcesReady = &sourcesReadyStub{}
return manager, nil
}
// Start starts the memory manager under the kubelet and calls policy start
func (m *manager) Start(ctx context.Context, activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
logger := klog.FromContext(ctx)
logger.Info("Starting memorymanager", "policy", m.policy.Name())
m.sourcesReady = sourcesReady
m.activePods = activePods
m.podStatusProvider = podStatusProvider
m.containerRuntime = containerRuntime
m.containerMap = initialContainers
stateImpl, err := state.NewCheckpointState(logger, m.stateFileDirectory, memoryManagerStateFileName, m.policy.Name())
if err != nil {
logger.Error(err, "Could not initialize checkpoint manager, please drain node and remove policy state file")
return err
}
m.state = stateImpl
err = m.policy.Start(ctx, m.state)
if err != nil {
logger.Error(err, "Policy start error")
return err
}
m.allocatableMemory = m.policy.GetAllocatableMemory(ctx, m.state)
logger.V(4).Info("memorymanager started", "policy", m.policy.Name())
return nil
}
// AddContainer saves the value of requested memory for the guaranteed pod under the state and sets memory affinity according to the topology manager
func (m *manager) AddContainer(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID string) {
m.Lock()
defer m.Unlock()
m.containerMap.Add(string(pod.UID), container.Name, containerID)
// Since we know that each init container always runs to completion before
// the next container starts, we can safely remove references to any previously
// started init containers. This will free up the memory from these init containers
// for use in other pods. If the current container happens to be an init container,
// we skip deletion of it until the next container is added, and this is called again.
for _, initContainer := range pod.Spec.InitContainers {
if initContainer.Name == container.Name {
break
}
// Since a restartable init container remains running for the full
// duration of the pod's lifecycle, we should not remove it from the
// memory manager state.
if podutil.IsRestartableInitContainer(&initContainer) {
continue
}
m.policyRemoveContainerByRef(string(pod.UID), initContainer.Name)
}
}
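// A worked trace of the loop above, as a sketch (illustrative, not from the
// original source): for a pod with init containers [init1, init2] and app
// container app1, the AddContainer calls arrive in start order and each call
// frees the state of the regular init containers that ran before it:
//
//	AddContainer(ctx, pod, init1, id1) // removes nothing, init1 runs first
//	AddContainer(ctx, pod, init2, id2) // removes init1's memory state
//	AddContainer(ctx, pod, app1, id3)  // removes init2's memory state
//
// Restartable (sidecar) init containers are skipped and keep their
// assignment for the pod's whole lifetime.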
// GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory
func (m *manager) GetMemoryNUMANodes(ctx context.Context, pod *v1.Pod, container *v1.Container) sets.Set[int] {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "pod", klog.KObj(pod), "containerName", container.Name)
// Get NUMA node affinity of blocks assigned to the container during Allocate()
numaNodes := sets.New[int]()
for _, block := range m.state.GetMemoryBlocks(string(pod.UID), container.Name) {
for _, nodeID := range block.NUMAAffinity {
// avoid nodes duplication when hugepages and memory blocks pinned to the same NUMA node
numaNodes.Insert(nodeID)
}
}
if numaNodes.Len() == 0 {
logger.V(5).Info("NUMA nodes not available for allocation")
return nil
}
logger.Info("Memory affinity", "numaNodes", numaNodes)
return numaNodes
}
// Allocate is called to pre-allocate memory resources during Pod admission.
func (m *manager) Allocate(pod *v1.Pod, container *v1.Container) error {
// Garbage collect any stranded resources before allocation
ctx := context.TODO()
logger := klog.FromContext(ctx)
m.removeStaleState(logger)
m.Lock()
defer m.Unlock()
// Call down into the policy to assign this container memory if required.
if err := m.policy.Allocate(ctx, m.state, pod, container); err != nil {
logger.Error(err, "Allocate error", "pod", klog.KObj(pod), "containerName", container.Name)
return err
}
return nil
}
// RemoveContainer removes the container from the state
func (m *manager) RemoveContainer(ctx context.Context, containerID string) error {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "containerID", containerID)
m.Lock()
defer m.Unlock()
// if an error appears it means the container entry no longer exists under the container map
podUID, containerName, err := m.containerMap.GetContainerRef(containerID)
if err != nil {
logger.Error(err, "Failed to get container from container map")
return nil
}
m.policyRemoveContainerByRef(podUID, containerName)
return nil
}
// State returns the state of the manager
func (m *manager) State() state.Reader {
return m.state
}
// GetPodTopologyHints returns the topology hints for the topology manager
func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
// Use context.TODO() because we currently do not have a proper context to pass in.
// This should be replaced with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
// Garbage collect any stranded resources before providing TopologyHints
m.removeStaleState(klog.FromContext(ctx))
// Delegate to active policy
return m.policy.GetPodTopologyHints(ctx, m.state, pod)
}
// GetTopologyHints returns the topology hints for the topology manager
func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
// Garbage collect any stranded resources before providing TopologyHints
m.removeStaleState(klog.TODO())
// Delegate to active policy
return m.policy.GetTopologyHints(context.TODO(), m.state, pod, container)
}
// TODO: move the method to the upper level, to re-use it under the CPU and memory managers
func (m *manager) removeStaleState(logger klog.Logger) {
// Only once all sources are ready do we attempt to remove any stale state.
// This ensures that the call to `m.activePods()` below will succeed with
// the actual active pods list.
if !m.sourcesReady.AllReady() {
return
}
// We grab the lock to ensure that no new containers will grab memory block while
// executing the code below. Without this lock, it's possible that we end up
// removing state that is newly added by an asynchronous call to
// AddContainer() during the execution of this code.
m.Lock()
defer m.Unlock()
// Get the list of active pods.
activePods := m.activePods()
// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
activeContainers := make(map[string]map[string]struct{})
for _, pod := range activePods {
activeContainers[string(pod.UID)] = make(map[string]struct{})
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
activeContainers[string(pod.UID)][container.Name] = struct{}{}
}
}
// Loop through the MemoryManager state. Remove any state for containers not
// in the `activeContainers` list built above.
assignments := m.state.GetMemoryAssignments()
for podUID := range assignments {
for containerName := range assignments[podUID] {
if _, ok := activeContainers[podUID][containerName]; !ok {
logger.V(2).Info("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
m.policyRemoveContainerByRef(podUID, containerName)
}
}
}
m.containerMap.Visit(func(podUID, containerName, containerID string) {
if _, ok := activeContainers[podUID][containerName]; !ok {
logger.V(2).Info("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
m.policyRemoveContainerByRef(podUID, containerName)
}
})
}
func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) {
m.policy.RemoveContainer(context.TODO(), m.state, podUID, containerName)
m.containerMap.RemoveByContainerRef(podUID, containerName)
}
func getTotalMemoryTypeReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory []kubeletconfig.MemoryReservation) (map[v1.ResourceName]resource.Quantity, error) {
totalMemoryType := map[v1.ResourceName]resource.Quantity{}
numaNodes := map[int]bool{}
for _, numaNode := range machineInfo.Topology {
numaNodes[numaNode.Id] = true
}
for _, reservation := range reservedMemory {
if !numaNodes[int(reservation.NumaNode)] {
return nil, fmt.Errorf("the reserved memory configuration references a NUMA node %d that does not exist on this machine", reservation.NumaNode)
}
for resourceName, q := range reservation.Limits {
if value, ok := totalMemoryType[resourceName]; ok {
q.Add(value)
}
totalMemoryType[resourceName] = q
}
}
return totalMemoryType, nil
}
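// For example (a sketch, not from the original source), the function sums the
// limits per resource type across all NUMA nodes of the reservation list:
//
//	reservedMemory := []kubeletconfig.MemoryReservation{
//		{NumaNode: 0, Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")}},
//		{NumaNode: 1, Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("512Mi")}},
//	}
//	// getTotalMemoryTypeReserved would report 1536Mi of reserved memory,
//	// which validateReservedMemory below requires to match the memory
//	// reservation derived from Node Allocatable.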
func validateReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation) error {
totalMemoryType, err := getTotalMemoryTypeReserved(machineInfo, reservedMemory)
if err != nil {
return err
}
commonMemoryTypeSet := make(map[v1.ResourceName]bool)
for resourceType := range totalMemoryType {
commonMemoryTypeSet[resourceType] = true
}
for resourceType := range nodeAllocatableReservation {
if !(corev1helper.IsHugePageResourceName(resourceType) || resourceType == v1.ResourceMemory) {
continue
}
commonMemoryTypeSet[resourceType] = true
}
for resourceType := range commonMemoryTypeSet {
nodeAllocatableMemory := resource.NewQuantity(0, resource.DecimalSI)
if memValue, set := nodeAllocatableReservation[resourceType]; set {
nodeAllocatableMemory.Add(memValue)
}
reservedMemory := resource.NewQuantity(0, resource.DecimalSI)
if memValue, set := totalMemoryType[resourceType]; set {
reservedMemory.Add(memValue)
}
if !(*nodeAllocatableMemory).Equal(*reservedMemory) {
return fmt.Errorf("the total amount %q of type %q is not equal to the value %q determined by Node Allocatable feature", reservedMemory.String(), resourceType, nodeAllocatableMemory.String())
}
}
return nil
}
func convertReserved(machineInfo *cadvisorapi.MachineInfo, reservedMemory []kubeletconfig.MemoryReservation) (systemReservedMemory, error) {
reservedMemoryConverted := make(map[int]map[v1.ResourceName]uint64)
for _, node := range machineInfo.Topology {
reservedMemoryConverted[node.Id] = make(map[v1.ResourceName]uint64)
}
for _, reservation := range reservedMemory {
for resourceName, q := range reservation.Limits {
val, success := q.AsInt64()
if !success {
return nil, fmt.Errorf("could not covert a variable of type Quantity to int64")
}
reservedMemoryConverted[int(reservation.NumaNode)][resourceName] = uint64(val)
}
}
return reservedMemoryConverted, nil
}
func getSystemReservedMemory(machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, reservedMemory []kubeletconfig.MemoryReservation) (systemReservedMemory, error) {
if err := validateReservedMemory(machineInfo, nodeAllocatableReservation, reservedMemory); err != nil {
return nil, err
}
reservedMemoryConverted, err := convertReserved(machineInfo, reservedMemory)
if err != nil {
return nil, err
}
return reservedMemoryConverted, nil
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (m *manager) GetAllocatableMemory(ctx context.Context) []state.Block {
return m.allocatableMemory
}
// GetMemory returns the memory allocated by a container from NUMA nodes
func (m *manager) GetMemory(ctx context.Context, podUID, containerName string) []state.Block {
return m.state.GetMemoryBlocks(podUID, containerName)
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memorymanager
import (
"context"
cadvisorapi "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
)
// On Windows we want to use the same logic as the Static policy to compute the memory topology hints,
// but unlike Linux-based systems, on Windows NUMA nodes cannot be directly assigned or guaranteed via Windows APIs
// (the Windows scheduler will use the NUMA node that is closest to the assigned CPU, therefore respecting the NUMA
// node assignment as a best effort). Because of this we don't want users to specify the "Static" policy for the
// memory manager via the kubelet configuration. Instead we use the "BestEffort" policy, which reuses the
// StaticPolicy logic and thereby reduces code duplication.
const policyTypeBestEffort policyType = "BestEffort"
// bestEffortPolicy is an implementation of the policy interface for the BestEffort policy
type bestEffortPolicy struct {
static *staticPolicy
}
var _ Policy = &bestEffortPolicy{}
func NewPolicyBestEffort(ctx context.Context, machineInfo *cadvisorapi.MachineInfo, reserved systemReservedMemory, affinity topologymanager.Store) (Policy, error) {
p, err := NewPolicyStatic(ctx, machineInfo, reserved, affinity)
if err != nil {
return nil, err
}
return &bestEffortPolicy{
static: p.(*staticPolicy),
}, nil
}
func (p *bestEffortPolicy) Name() string {
return string(policyTypeBestEffort)
}
func (p *bestEffortPolicy) Start(ctx context.Context, s state.State) error {
return p.static.Start(ctx, s)
}
func (p *bestEffortPolicy) Allocate(ctx context.Context, s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
return p.static.Allocate(ctx, s, pod, container)
}
func (p *bestEffortPolicy) RemoveContainer(ctx context.Context, s state.State, podUID string, containerName string) {
p.static.RemoveContainer(ctx, s, podUID, containerName)
}
func (p *bestEffortPolicy) GetPodTopologyHints(ctx context.Context, s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
return p.static.GetPodTopologyHints(ctx, s, pod)
}
func (p *bestEffortPolicy) GetTopologyHints(ctx context.Context, s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
return p.static.GetTopologyHints(ctx, s, pod, container)
}
func (p *bestEffortPolicy) GetAllocatableMemory(ctx context.Context, s state.State) []state.Block {
return p.static.GetAllocatableMemory(ctx, s)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memorymanager
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
)
const policyTypeNone policyType = "None"
// none is an implementation of the policy interface for the none policy; using the none
// policy is the same as disabling memory management
type none struct{}
var _ Policy = &none{}
// NewPolicyNone returns new none policy instance
func NewPolicyNone(ctx context.Context) Policy {
return &none{}
}
func (p *none) Name() string {
return string(policyTypeNone)
}
func (p *none) Start(ctx context.Context, s state.State) error {
logger := klog.FromContext(ctx)
logger.Info("Start")
return nil
}
// Allocate call is idempotent
func (p *none) Allocate(_ context.Context, s state.State, pod *v1.Pod, container *v1.Container) error {
return nil
}
// RemoveContainer call is idempotent
func (p *none) RemoveContainer(_ context.Context, s state.State, podUID string, containerName string) {
}
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
func (p *none) GetTopologyHints(_ context.Context, s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
return nil
}
// GetPodTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
func (p *none) GetPodTopologyHints(_ context.Context, s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
return nil
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (p *none) GetAllocatableMemory(_ context.Context, s state.State) []state.Block {
return []state.Block{}
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memorymanager
import (
"context"
"fmt"
"sort"
"github.com/go-logr/logr"
cadvisorapi "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
corehelper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
const PolicyTypeStatic policyType = "Static"
type systemReservedMemory map[int]map[v1.ResourceName]uint64
type reusableMemory map[string]map[string]map[v1.ResourceName]uint64
// staticPolicy is an implementation of the policy interface for the static policy
type staticPolicy struct {
// machineInfo contains machine memory related information
machineInfo *cadvisorapi.MachineInfo
// systemReserved contains the memory reserved for kube and system processes
systemReserved systemReservedMemory
// affinity is the topology manager reference used to get the container topology affinity
affinity topologymanager.Store
// initContainersReusableMemory contains the memory allocated for init
// containers that can be reused.
// Note that the restartable init container memory is not included here,
// because it is not reusable.
initContainersReusableMemory reusableMemory
}
var _ Policy = &staticPolicy{}
// NewPolicyStatic returns new static policy instance
func NewPolicyStatic(ctx context.Context, machineInfo *cadvisorapi.MachineInfo, reserved systemReservedMemory, affinity topologymanager.Store) (Policy, error) {
var totalSystemReserved uint64
for _, node := range reserved {
if _, ok := node[v1.ResourceMemory]; !ok {
continue
}
totalSystemReserved += node[v1.ResourceMemory]
}
// check if we have some reserved memory for the system
if totalSystemReserved == 0 {
return nil, fmt.Errorf("[memorymanager] you should specify the system reserved memory")
}
return &staticPolicy{
machineInfo: machineInfo,
systemReserved: reserved,
affinity: affinity,
initContainersReusableMemory: reusableMemory{},
}, nil
}
func (p *staticPolicy) Name() string {
return string(PolicyTypeStatic)
}
func (p *staticPolicy) Start(ctx context.Context, s state.State) error {
logger := klog.FromContext(ctx)
if err := p.validateState(logger, s); err != nil {
logger.Error(err, "Invalid state, please drain node and remove policy state file")
return err
}
return nil
}
// Allocate call is idempotent
func (p *staticPolicy) Allocate(ctx context.Context, s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
// allocate the memory only for guaranteed pods
logger := klog.FromContext(ctx)
logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod), "containerName", container.Name)
qos := v1qos.GetPodQOS(pod)
if qos != v1.PodQOSGuaranteed {
logger.V(5).Info("Exclusive memory allocation skipped, pod QoS is not guaranteed", "qos", qos)
return nil
}
podUID := string(pod.UID)
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
logger.V(2).Info("Allocation skipped, pod is using pod-level resources which are not supported by the static Memory manager policy", "podUID", podUID)
return nil
}
logger.Info("Allocate")
// container belongs in an exclusively allocated pool
metrics.MemoryManagerPinningRequestTotal.Inc()
defer func() {
if rerr != nil {
metrics.MemoryManagerPinningErrorsTotal.Inc()
}
}()
if blocks := s.GetMemoryBlocks(podUID, container.Name); blocks != nil {
p.updatePodReusableMemory(pod, container, blocks)
logger.Info("Container already present in state, skipping")
return nil
}
// Call Topology Manager to get the aligned affinity across all hint providers.
hint := p.affinity.GetAffinity(podUID, container.Name)
logger.Info("Got topology affinity", "hint", hint)
requestedResources, err := getRequestedResources(pod, container)
if err != nil {
return err
}
machineState := s.GetMachineState()
bestHint := &hint
// the topology manager returned a hint with nil NUMA affinity,
// so we should use the default NUMA affinity, calculated the same way as the topology manager calculates it
if hint.NUMANodeAffinity == nil {
defaultHint, err := p.getDefaultHint(machineState, pod, requestedResources)
if err != nil {
return err
}
if !defaultHint.Preferred && bestHint.Preferred {
return fmt.Errorf("[memorymanager] failed to find the default preferred hint")
}
bestHint = defaultHint
}
// the topology manager returned a hint that does not fully satisfy the container request,
// so we should extend this hint to one that will satisfy the request and that includes the current hint
if !isAffinitySatisfyRequest(machineState, bestHint.NUMANodeAffinity, requestedResources) {
extendedHint, err := p.extendTopologyManagerHint(machineState, pod, requestedResources, bestHint.NUMANodeAffinity)
if err != nil {
return err
}
if !extendedHint.Preferred && bestHint.Preferred {
return fmt.Errorf("[memorymanager] failed to find the extended preferred hint")
}
bestHint = extendedHint
}
// the best hint might violate the NUMA allocation rule, under which a
// NUMA node cannot have both single and cross NUMA node allocations
// https://kubernetes.io/blog/2021/08/11/kubernetes-1-22-feature-memory-manager-moves-to-beta/#single-vs-cross-numa-node-allocation
if isAffinityViolatingNUMAAllocations(machineState, bestHint.NUMANodeAffinity) {
return fmt.Errorf("[memorymanager] preferred hint violates NUMA node allocation")
}
var containerBlocks []state.Block
maskBits := bestHint.NUMANodeAffinity.GetBits()
for resourceName, requestedSize := range requestedResources {
// update memory blocks
containerBlocks = append(containerBlocks, state.Block{
NUMAAffinity: maskBits,
Size: requestedSize,
Type: resourceName,
})
podReusableMemory := p.getPodReusableMemory(pod, bestHint.NUMANodeAffinity, resourceName)
if podReusableMemory >= requestedSize {
requestedSize = 0
} else {
requestedSize -= podReusableMemory
}
// Update nodes memory state
p.updateMachineState(machineState, maskBits, resourceName, requestedSize)
}
p.updatePodReusableMemory(pod, container, containerBlocks)
s.SetMachineState(machineState)
s.SetMemoryBlocks(podUID, container.Name, containerBlocks)
// update the init containers' memory blocks to reflect the fact that we reused init container memory;
// it is possible that the size of an init container memory block will be 0 when all the memory
// allocated for it was reused
// we only do this so that sum(memory_for_all_containers) == total amount of memory allocated to the pod, even
// though the final state here doesn't accurately reflect what was (in reality) allocated to each container
// TODO: we should refactor our state structs to reflect the amount of the reused memory
p.updateInitContainersMemoryBlocks(logger, s, pod, container, containerBlocks)
logger.V(4).Info("Allocated exclusive memory")
return nil
}
func (p *staticPolicy) updateMachineState(machineState state.NUMANodeMap, numaAffinity []int, resourceName v1.ResourceName, requestedSize uint64) {
for _, nodeID := range numaAffinity {
machineState[nodeID].NumberOfAssignments++
machineState[nodeID].Cells = numaAffinity
// we need to continue to update all affinity mask nodes
if requestedSize == 0 {
continue
}
// update the node memory state
nodeResourceMemoryState := machineState[nodeID].MemoryMap[resourceName]
if nodeResourceMemoryState.Free <= 0 {
continue
}
// the node has enough memory to satisfy the request
if nodeResourceMemoryState.Free >= requestedSize {
nodeResourceMemoryState.Reserved += requestedSize
nodeResourceMemoryState.Free -= requestedSize
requestedSize = 0
continue
}
// the node does not have enough memory, use the node remaining memory and move to the next node
requestedSize -= nodeResourceMemoryState.Free
nodeResourceMemoryState.Reserved += nodeResourceMemoryState.Free
nodeResourceMemoryState.Free = 0
}
}
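// A worked trace of the spreading logic above (a sketch, not from the
// original source), for a 3Gi request against the affinity mask {0, 1}
// where each node has 2Gi free:
//
//	node 0: Reserved 0 -> 2Gi, Free 2Gi -> 0, remaining request = 1Gi
//	node 1: Reserved 0 -> 1Gi, Free 2Gi -> 1Gi, remaining request = 0
//	both nodes: NumberOfAssignments++, Cells = [0, 1]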
func (p *staticPolicy) getPodReusableMemory(pod *v1.Pod, numaAffinity bitmask.BitMask, resourceName v1.ResourceName) uint64 {
podReusableMemory, ok := p.initContainersReusableMemory[string(pod.UID)]
if !ok {
return 0
}
numaReusableMemory, ok := podReusableMemory[numaAffinity.String()]
if !ok {
return 0
}
return numaReusableMemory[resourceName]
}
// RemoveContainer call is idempotent
func (p *staticPolicy) RemoveContainer(ctx context.Context, s state.State, podUID string, containerName string) {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "podUID", podUID, "containerName", containerName)
blocks := s.GetMemoryBlocks(podUID, containerName)
if blocks == nil {
return
}
logger.Info("RemoveContainer", "podUID", podUID, "containerName", containerName)
s.Delete(podUID, containerName)
// Mutate machine memory state to update free and reserved memory
machineState := s.GetMachineState()
for _, b := range blocks {
releasedSize := b.Size
for _, nodeID := range b.NUMAAffinity {
machineState[nodeID].NumberOfAssignments--
// once we do not have any memory allocations on this node, clear node groups
if machineState[nodeID].NumberOfAssignments == 0 {
machineState[nodeID].Cells = []int{nodeID}
}
// we still need to pass over all NUMA nodes under the affinity mask to update them
if releasedSize == 0 {
continue
}
nodeResourceMemoryState := machineState[nodeID].MemoryMap[b.Type]
// if the node does not have reserved memory to free, continue to the next node
if nodeResourceMemoryState.Reserved == 0 {
continue
}
// the reserved memory is smaller than the amount of memory that should be released;
// release as much as possible and move to the next node
if nodeResourceMemoryState.Reserved < releasedSize {
releasedSize -= nodeResourceMemoryState.Reserved
nodeResourceMemoryState.Free += nodeResourceMemoryState.Reserved
nodeResourceMemoryState.Reserved = 0
continue
}
// the reserved memory is big enough to satisfy the released memory
nodeResourceMemoryState.Free += releasedSize
nodeResourceMemoryState.Reserved -= releasedSize
releasedSize = 0
}
}
s.SetMachineState(machineState)
}
func regenerateHints(logger logr.Logger, pod *v1.Pod, ctn *v1.Container, ctnBlocks []state.Block, reqRsrc map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
hints := map[string][]topologymanager.TopologyHint{}
for resourceName := range reqRsrc {
hints[string(resourceName)] = []topologymanager.TopologyHint{}
}
if len(ctnBlocks) != len(reqRsrc) {
logger.Info("The number of requested resources by the container differs from the number of memory blocks", "containerName", ctn.Name)
return nil
}
for _, b := range ctnBlocks {
if _, ok := reqRsrc[b.Type]; !ok {
logger.Info("Container requested resources but none available of this type", "containerName", ctn.Name, "type", b.Type)
return nil
}
if b.Size != reqRsrc[b.Type] {
logger.Info("Memory already allocated with different numbers than requested", "containerName", ctn.Name, "type", b.Type, "requestedResource", reqRsrc[b.Type], "allocatedSize", b.Size)
return nil
}
containerNUMAAffinity, err := bitmask.NewBitMask(b.NUMAAffinity...)
if err != nil {
logger.Error(err, "Failed to generate NUMA bitmask", "containerName", ctn.Name, "type", b.Type)
return nil
}
logger.Info("Regenerating TopologyHints, resource was already allocated to pod", "resourceName", b.Type, "podUID", pod.UID, "containerName", ctn.Name)
hints[string(b.Type)] = append(hints[string(b.Type)], topologymanager.TopologyHint{
NUMANodeAffinity: containerNUMAAffinity,
Preferred: true,
})
}
return hints
}
func getPodRequestedResources(pod *v1.Pod) (map[v1.ResourceName]uint64, error) {
// Maximum resources requested by init containers at any given time.
reqRsrcsByInitCtrs := make(map[v1.ResourceName]uint64)
// Total resources requested by restartable init containers.
reqRsrcsByRestartableInitCtrs := make(map[v1.ResourceName]uint64)
for _, ctr := range pod.Spec.InitContainers {
reqRsrcs, err := getRequestedResources(pod, &ctr)
if err != nil {
return nil, err
}
for rsrcName, qty := range reqRsrcs {
if _, ok := reqRsrcsByInitCtrs[rsrcName]; !ok {
reqRsrcsByInitCtrs[rsrcName] = uint64(0)
}
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for the detail.
if podutil.IsRestartableInitContainer(&ctr) {
reqRsrcsByRestartableInitCtrs[rsrcName] += qty
} else if reqRsrcsByRestartableInitCtrs[rsrcName]+qty > reqRsrcsByInitCtrs[rsrcName] {
reqRsrcsByInitCtrs[rsrcName] = reqRsrcsByRestartableInitCtrs[rsrcName] + qty
}
}
}
reqRsrcsByAppCtrs := make(map[v1.ResourceName]uint64)
for _, ctr := range pod.Spec.Containers {
reqRsrcs, err := getRequestedResources(pod, &ctr)
if err != nil {
return nil, err
}
for rsrcName, qty := range reqRsrcs {
if _, ok := reqRsrcsByAppCtrs[rsrcName]; !ok {
reqRsrcsByAppCtrs[rsrcName] = uint64(0)
}
reqRsrcsByAppCtrs[rsrcName] += qty
}
}
reqRsrcs := make(map[v1.ResourceName]uint64)
for rsrcName := range reqRsrcsByAppCtrs {
// Total resources requested by long-running containers.
reqRsrcsByLongRunningCtrs := reqRsrcsByAppCtrs[rsrcName] + reqRsrcsByRestartableInitCtrs[rsrcName]
reqRsrcs[rsrcName] = reqRsrcsByLongRunningCtrs
if reqRsrcs[rsrcName] < reqRsrcsByInitCtrs[rsrcName] {
reqRsrcs[rsrcName] = reqRsrcsByInitCtrs[rsrcName]
}
}
return reqRsrcs, nil
}
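// A worked example of the calculation above (a sketch, not from the original
// source): for a pod with a regular init container requesting 2Gi, a
// restartable init container requesting 256Mi, and two app containers
// requesting 1Gi and 512Mi of memory:
//
//	peak init demand   = 2Gi (the regular init container runs before the restartable one)
//	long-running total = 256Mi + 1Gi + 512Mi = 1792Mi
//	pod request        = max(2048Mi, 1792Mi) = 2Gi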
func (p *staticPolicy) GetPodTopologyHints(ctx context.Context, s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "pod", klog.KObj(pod))
if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
return nil
}
reqRsrcs, err := getPodRequestedResources(pod)
if err != nil {
logger.Error(err, "Failed to get pod requested resources", "podUID", pod.UID)
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
logger.V(3).Info("Topology hints generation skipped, pod is using pod-level resources which are not supported by the static Memory manager policy", "podUID", pod.UID)
return nil
}
for _, ctn := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
containerBlocks := s.GetMemoryBlocks(string(pod.UID), ctn.Name)
// Short circuit to regenerate the same hints if memory has already
// been allocated for the container. This might happen after a
// kubelet restart, for example.
if containerBlocks != nil {
return regenerateHints(logger, pod, &ctn, containerBlocks, reqRsrcs)
}
}
// the pod topology hints are calculated only once for all containers, so there is no need to pass reusable state
return p.calculateHints(s.GetMachineState(), pod, reqRsrcs)
}
// GetTopologyHints implements the topologymanager.HintProvider Interface
// and is consulted to achieve NUMA aware resource alignment among this
// and other resource controllers.
func (p *staticPolicy) GetTopologyHints(ctx context.Context, s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "pod", klog.KObj(pod))
if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
return nil
}
requestedResources, err := getRequestedResources(pod, container)
if err != nil {
logger.Error(err, "Failed to get container requested resources", "podUID", pod.UID, "containerName", container.Name)
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
logger.V(3).Info("Topology hints generation skipped, pod is using pod-level resources which are not supported by the static Memory manager policy", "podUID", pod.UID)
return nil
}
containerBlocks := s.GetMemoryBlocks(string(pod.UID), container.Name)
// Short circuit to regenerate the same hints if memory has already
// been allocated for the container. This might happen after a
// kubelet restart, for example.
if containerBlocks != nil {
return regenerateHints(logger, pod, container, containerBlocks, requestedResources)
}
return p.calculateHints(s.GetMachineState(), pod, requestedResources)
}
func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.ResourceName]uint64, error) {
requestedResources := map[v1.ResourceName]uint64{}
for resourceName, quantity := range container.Resources.Requests {
if resourceName != v1.ResourceMemory && !corehelper.IsHugePageResourceName(resourceName) {
continue
}
requestedSize, succeed := quantity.AsInt64()
if !succeed {
return nil, fmt.Errorf("[memorymanager] failed to represent quantity as int64")
}
requestedResources[resourceName] = uint64(requestedSize)
}
return requestedResources, nil
}
func (p *staticPolicy) calculateHints(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) map[string][]topologymanager.TopologyHint {
var numaNodes []int
for n := range machineState {
numaNodes = append(numaNodes, n)
}
sort.Ints(numaNodes)
// Initialize minAffinitySize to include all NUMA Cells.
minAffinitySize := len(numaNodes)
hints := map[string][]topologymanager.TopologyHint{}
bitmask.IterateBitMasks(numaNodes, func(mask bitmask.BitMask) {
maskBits := mask.GetBits()
singleNUMAHint := len(maskBits) == 1
totalFreeSize := map[v1.ResourceName]uint64{}
totalAllocatableSize := map[v1.ResourceName]uint64{}
// calculate total free and allocatable memory for the node mask
for _, nodeID := range maskBits {
for resourceName := range requestedResources {
if _, ok := totalFreeSize[resourceName]; !ok {
totalFreeSize[resourceName] = 0
}
totalFreeSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Free
if _, ok := totalAllocatableSize[resourceName]; !ok {
totalAllocatableSize[resourceName] = 0
}
totalAllocatableSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Allocatable
}
}
// verify that for all memory types the node mask has enough allocatable resources
for resourceName, requestedSize := range requestedResources {
if totalAllocatableSize[resourceName] < requestedSize {
return
}
}
// set the minimum number of NUMA nodes that can satisfy the container resource requests
if mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
}
// the node is already in a group with another node; it cannot be used for a single NUMA node allocation
if singleNUMAHint && len(machineState[maskBits[0]].Cells) > 1 {
return
}
for _, nodeID := range maskBits {
// the node is already used for memory allocation
if !singleNUMAHint && machineState[nodeID].NumberOfAssignments > 0 {
// the node is used for a single NUMA node memory allocation; it cannot be used for a multi NUMA node allocation
if len(machineState[nodeID].Cells) == 1 {
return
}
// the node is already used with a different group of nodes; it cannot be used within the current hint
if !areGroupsEqual(machineState[nodeID].Cells, maskBits) {
return
}
}
}
// verify that for all memory types the node mask has enough free resources
for resourceName, requestedSize := range requestedResources {
podReusableMemory := p.getPodReusableMemory(pod, mask, resourceName)
if totalFreeSize[resourceName]+podReusableMemory < requestedSize {
return
}
}
// add the node mask as topology hint for all memory types
for resourceName := range requestedResources {
if _, ok := hints[string(resourceName)]; !ok {
hints[string(resourceName)] = []topologymanager.TopologyHint{}
}
hints[string(resourceName)] = append(hints[string(resourceName)], topologymanager.TopologyHint{
NUMANodeAffinity: mask,
Preferred: false,
})
}
})
// update the hints' preferred field according to multiNUMAGroups; when it wasn't provided, the default
// behaviour of preferring the minimal number of NUMA nodes will be used
for resourceName := range requestedResources {
for i, hint := range hints[string(resourceName)] {
hints[string(resourceName)][i].Preferred = p.isHintPreferred(hint.NUMANodeAffinity.GetBits(), minAffinitySize)
}
}
return hints
}
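// As an illustration of the iteration above (a sketch, not from the original
// source): on a machine with two NUMA nodes, 2Gi of free and allocatable
// memory per node, and a 3Gi request, the masks {0} and {1} are rejected by
// the allocatable check while {0, 1} passes, so the only generated hint has
// NUMANodeAffinity={0, 1}; minAffinitySize ends up as 2 and that hint is
// marked Preferred by the final loop of calculateHints via isHintPreferred.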
func (p *staticPolicy) isHintPreferred(maskBits []int, minAffinitySize int) bool {
return len(maskBits) == minAffinitySize
}
// areGroupsEqual reports whether the two NUMA node groups contain the same
// node IDs; note that it sorts both input slices in place.
func areGroupsEqual(group1, group2 []int) bool {
sort.Ints(group1)
sort.Ints(group2)
if len(group1) != len(group2) {
return false
}
for i, elm := range group1 {
if group2[i] != elm {
return false
}
}
return true
}
func (p *staticPolicy) validateState(logger logr.Logger, s state.State) error {
machineState := s.GetMachineState()
memoryAssignments := s.GetMemoryAssignments()
if len(machineState) == 0 {
// Machine state cannot be empty when assignments exist
if len(memoryAssignments) != 0 {
return fmt.Errorf("[memorymanager] machine state can not be empty when it has memory assignments")
}
defaultMachineState := p.getDefaultMachineState()
s.SetMachineState(defaultMachineState)
return nil
}
// calculate all memory assigned to containers
expectedMachineState := p.getDefaultMachineState()
for pod, container := range memoryAssignments {
for containerName, blocks := range container {
for _, b := range blocks {
requestedSize := b.Size
for _, nodeID := range b.NUMAAffinity {
nodeState, ok := expectedMachineState[nodeID]
if !ok {
return fmt.Errorf("[memorymanager] (pod: %s, container: %s) the memory assignment uses the NUMA that does not exist", pod, containerName)
}
nodeState.NumberOfAssignments++
nodeState.Cells = b.NUMAAffinity
memoryState, ok := nodeState.MemoryMap[b.Type]
if !ok {
return fmt.Errorf("[memorymanager] (pod: %s, container: %s) the memory assignment uses memory resource that does not exist", pod, containerName)
}
if requestedSize == 0 {
continue
}
// this node does not have enough memory, continue to the next one
if memoryState.Free <= 0 {
continue
}
// the node has enough memory to satisfy the request
if memoryState.Free >= requestedSize {
memoryState.Reserved += requestedSize
memoryState.Free -= requestedSize
requestedSize = 0
continue
}
// the node does not have enough memory, use the node remaining memory and move to the next node
requestedSize -= memoryState.Free
memoryState.Reserved += memoryState.Free
memoryState.Free = 0
}
}
}
}
// State has already been initialized from file (is not empty)
// Validate that the total size, system reserved and reserved memory have not changed; changes can happen when:
// - a physical memory bank is added to or removed from the node
// - the kubelet system-reserved, kube-reserved or pre-reserved-memory-zone parameters change
if !areMachineStatesEqual(logger, machineState, expectedMachineState) {
return fmt.Errorf("[memorymanager] the expected machine state is different from the real one")
}
return nil
}
func areMachineStatesEqual(logger logr.Logger, ms1, ms2 state.NUMANodeMap) bool {
if len(ms1) != len(ms2) {
logger.Info("Node states were different", "lengthNode1", len(ms1), "lengthNode2", len(ms2))
return false
}
for nodeID, nodeState1 := range ms1 {
nodeState2, ok := ms2[nodeID]
if !ok {
logger.Info("Node state didn't have node ID", "nodeID", nodeID)
return false
}
if nodeState1.NumberOfAssignments != nodeState2.NumberOfAssignments {
logger.Info("Node state had a different number of memory assignments.", "assignment1", nodeState1.NumberOfAssignments, "assignment2", nodeState2.NumberOfAssignments)
return false
}
if !areGroupsEqual(nodeState1.Cells, nodeState2.Cells) {
logger.Info("Node states had different groups", "stateNode1", nodeState1.Cells, "stateNode2", nodeState2.Cells)
return false
}
if len(nodeState1.MemoryMap) != len(nodeState2.MemoryMap) {
logger.Info("Node state had memory maps of different lengths", "lengthNode1", len(nodeState1.MemoryMap), "lengthNode2", len(nodeState2.MemoryMap))
return false
}
for resourceName, memoryState1 := range nodeState1.MemoryMap {
memoryState2, ok := nodeState2.MemoryMap[resourceName]
if !ok {
logger.Info("Memory state didn't have resource", "resource", resourceName)
return false
}
if !areMemoryStatesEqual(logger, memoryState1, memoryState2, nodeID, resourceName) {
return false
}
tmpState1 := state.MemoryTable{}
tmpState2 := state.MemoryTable{}
for _, nodeID := range nodeState1.Cells {
tmpState1.Free += ms1[nodeID].MemoryMap[resourceName].Free
tmpState1.Reserved += ms1[nodeID].MemoryMap[resourceName].Reserved
tmpState2.Free += ms2[nodeID].MemoryMap[resourceName].Free
tmpState2.Reserved += ms2[nodeID].MemoryMap[resourceName].Reserved
}
if tmpState1.Free != tmpState2.Free {
logger.Info("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "free", "free1", tmpState1.Free, "free2", tmpState2.Free, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
return false
}
if tmpState1.Reserved != tmpState2.Reserved {
logger.Info("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "reserved", "reserved1", tmpState1.Reserved, "reserved2", tmpState2.Reserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
return false
}
}
}
return true
}
func areMemoryStatesEqual(logger logr.Logger, memoryState1, memoryState2 *state.MemoryTable, nodeID int, resourceName v1.ResourceName) bool {
loggerWithValues := klog.LoggerWithValues(logger, "node", nodeID, "resource", resourceName, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
if memoryState1.TotalMemSize != memoryState2.TotalMemSize {
logger.Info("Memory states for the NUMA node and resource are different", "field", "TotalMemSize", "TotalMemSize1", memoryState1.TotalMemSize, "TotalMemSize2", memoryState2.TotalMemSize)
return false
}
if memoryState1.SystemReserved != memoryState2.SystemReserved {
loggerWithValues.Info("Memory states for the NUMA node and resource are different", "field", "SystemReserved", "SystemReserved1", memoryState1.SystemReserved, "SystemReserved2", memoryState2.SystemReserved)
return false
}
if memoryState1.Allocatable != memoryState2.Allocatable {
loggerWithValues.Info("Memory states for the NUMA node and resource are different", "field", "Allocatable", "Allocatable1", memoryState1.Allocatable, "Allocatable2", memoryState2.Allocatable)
return false
}
return true
}
func (p *staticPolicy) getDefaultMachineState() state.NUMANodeMap {
defaultMachineState := state.NUMANodeMap{}
nodeHugepages := map[int]uint64{}
for _, node := range p.machineInfo.Topology {
defaultMachineState[node.Id] = &state.NUMANodeState{
NumberOfAssignments: 0,
MemoryMap: map[v1.ResourceName]*state.MemoryTable{},
Cells: []int{node.Id},
}
// fill memory table with huge pages values
for _, hugepage := range node.HugePages {
hugepageQuantity := resource.NewQuantity(int64(hugepage.PageSize)*1024, resource.BinarySI)
resourceName := corehelper.HugePageResourceName(*hugepageQuantity)
systemReserved := p.getResourceSystemReserved(node.Id, resourceName)
totalHugepagesSize := hugepage.NumPages * hugepage.PageSize * 1024
allocatable := totalHugepagesSize - systemReserved
defaultMachineState[node.Id].MemoryMap[resourceName] = &state.MemoryTable{
Allocatable: allocatable,
Free: allocatable,
Reserved: 0,
SystemReserved: systemReserved,
TotalMemSize: totalHugepagesSize,
}
if _, ok := nodeHugepages[node.Id]; !ok {
nodeHugepages[node.Id] = 0
}
nodeHugepages[node.Id] += totalHugepagesSize
}
// fill memory table with regular memory values
systemReserved := p.getResourceSystemReserved(node.Id, v1.ResourceMemory)
allocatable := node.Memory - systemReserved
// remove memory allocated by hugepages
if allocatedByHugepages, ok := nodeHugepages[node.Id]; ok {
allocatable -= allocatedByHugepages
}
defaultMachineState[node.Id].MemoryMap[v1.ResourceMemory] = &state.MemoryTable{
Allocatable: allocatable,
Free: allocatable,
Reserved: 0,
SystemReserved: systemReserved,
TotalMemSize: node.Memory,
}
}
return defaultMachineState
}
func (p *staticPolicy) getResourceSystemReserved(nodeID int, resourceName v1.ResourceName) uint64 {
var systemReserved uint64
if nodeSystemReserved, ok := p.systemReserved[nodeID]; ok {
if nodeMemorySystemReserved, ok := nodeSystemReserved[resourceName]; ok {
systemReserved = nodeMemorySystemReserved
}
}
return systemReserved
}
func (p *staticPolicy) getDefaultHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64) (*topologymanager.TopologyHint, error) {
hints := p.calculateHints(machineState, pod, requestedResources)
if len(hints) < 1 {
return nil, fmt.Errorf("[memorymanager] failed to get the default NUMA affinity, no NUMA nodes with enough memory is available")
}
// hints for all memory types should be the same, so we will check hints only for regular memory type
return findBestHint(hints[string(v1.ResourceMemory)]), nil
}
func isAffinitySatisfyRequest(machineState state.NUMANodeMap, mask bitmask.BitMask, requestedResources map[v1.ResourceName]uint64) bool {
totalFreeSize := map[v1.ResourceName]uint64{}
for _, nodeID := range mask.GetBits() {
for resourceName := range requestedResources {
if _, ok := totalFreeSize[resourceName]; !ok {
totalFreeSize[resourceName] = 0
}
totalFreeSize[resourceName] += machineState[nodeID].MemoryMap[resourceName].Free
}
}
// verify that for all memory types the node mask has enough resources
for resourceName, requestedSize := range requestedResources {
if totalFreeSize[resourceName] < requestedSize {
return false
}
}
return true
}
// extendTopologyManagerHint extends the topology manager hint when it does not satisfy the container request.
// The topology manager uses bitwise AND to merge all topology hints into the best one, so in the case of the restricted policy
// it is possible that we will get a subset of the hint that we provided to the topology manager; in this case we want to extend
// it back to the original one
func (p *staticPolicy) extendTopologyManagerHint(machineState state.NUMANodeMap, pod *v1.Pod, requestedResources map[v1.ResourceName]uint64, mask bitmask.BitMask) (*topologymanager.TopologyHint, error) {
hints := p.calculateHints(machineState, pod, requestedResources)
var filteredHints []topologymanager.TopologyHint
// hints for all memory types should be the same, so we will check hints only for regular memory type
for _, hint := range hints[string(v1.ResourceMemory)] {
affinityBits := hint.NUMANodeAffinity.GetBits()
// filter out all hints that do not include the current hint
if isHintInGroup(mask.GetBits(), affinityBits) {
filteredHints = append(filteredHints, hint)
}
}
if len(filteredHints) < 1 {
return nil, fmt.Errorf("[memorymanager] failed to find NUMA nodes to extend the current topology hint")
}
// try to find the preferred hint with the minimal number of NUMA nodes, relevant for the restricted policy
return findBestHint(filteredHints), nil
}
func isHintInGroup(hint []int, group []int) bool {
sort.Ints(hint)
sort.Ints(group)
hintIndex := 0
for i := range group {
if hintIndex == len(hint) {
return true
}
if group[i] != hint[hintIndex] {
continue
}
hintIndex++
}
return hintIndex == len(hint)
}
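// For example (a sketch, not from the original source), the subset check
// above behaves as follows:
//
//	isHintInGroup([]int{0}, []int{0, 1})    // true:  {0} is contained in {0, 1}
//	isHintInGroup([]int{0, 2}, []int{0, 1}) // false: node 2 is missing from the group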
func findBestHint(hints []topologymanager.TopologyHint) *topologymanager.TopologyHint {
// try to find the preferred hint with the minimal number of NUMA nodes, relevant for the restricted policy
bestHint := topologymanager.TopologyHint{}
for _, hint := range hints {
if bestHint.NUMANodeAffinity == nil {
bestHint = hint
continue
}
// take the current hint when it is preferred and the best hint so far is not
if hint.Preferred && !bestHint.Preferred {
bestHint = hint
continue
}
// both hints have the same preferred value, but the current hint covers fewer NUMA nodes than the best one so far
if hint.Preferred == bestHint.Preferred && hint.NUMANodeAffinity.IsNarrowerThan(bestHint.NUMANodeAffinity) {
bestHint = hint
}
}
return &bestHint
}
// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
func (p *staticPolicy) GetAllocatableMemory(_ context.Context, s state.State) []state.Block {
var allocatableMemory []state.Block
machineState := s.GetMachineState()
for numaNodeID, numaNodeState := range machineState {
for resourceName, memoryTable := range numaNodeState.MemoryMap {
if memoryTable.Allocatable == 0 {
continue
}
block := state.Block{
NUMAAffinity: []int{numaNodeID},
Type: resourceName,
Size: memoryTable.Allocatable,
}
allocatableMemory = append(allocatableMemory, block)
}
}
return allocatableMemory
}
func (p *staticPolicy) updatePodReusableMemory(pod *v1.Pod, container *v1.Container, memoryBlocks []state.Block) {
podUID := string(pod.UID)
// If entries for pods other than the current pod exist in p.initContainersReusableMemory, delete them.
for uid := range p.initContainersReusableMemory {
if podUID != uid {
delete(p.initContainersReusableMemory, uid)
}
}
if isRegularInitContainer(pod, container) {
if _, ok := p.initContainersReusableMemory[podUID]; !ok {
p.initContainersReusableMemory[podUID] = map[string]map[v1.ResourceName]uint64{}
}
for _, block := range memoryBlocks {
blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
blockBitMaskString := blockBitMask.String()
if _, ok := p.initContainersReusableMemory[podUID][blockBitMaskString]; !ok {
p.initContainersReusableMemory[podUID][blockBitMaskString] = map[v1.ResourceName]uint64{}
}
if blockReusableMemory := p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type]; block.Size > blockReusableMemory {
p.initContainersReusableMemory[podUID][blockBitMaskString][block.Type] = block.Size
}
}
return
}
// update the reusable memory once it is used by the app container
for _, block := range memoryBlocks {
blockBitMask, _ := bitmask.NewBitMask(block.NUMAAffinity...)
if podReusableMemory := p.getPodReusableMemory(pod, blockBitMask, block.Type); podReusableMemory != 0 {
if block.Size >= podReusableMemory {
p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] = 0
} else {
p.initContainersReusableMemory[podUID][blockBitMask.String()][block.Type] -= block.Size
}
}
}
}
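// A worked example of the bookkeeping above (a sketch, not from the original
// source): if a regular init container allocates 1Gi on the mask for nodes
// {0, 1}, the app container allocating 768Mi on the same mask reduces the
// reusable amount from 1Gi to 256Mi, and a second app container allocating
// 512Mi then consumes the remaining 256Mi and sets it to 0.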
func (p *staticPolicy) updateInitContainersMemoryBlocks(logger logr.Logger, s state.State, pod *v1.Pod, container *v1.Container, containerMemoryBlocks []state.Block) {
podUID := string(pod.UID)
for _, containerBlock := range containerMemoryBlocks {
blockSize := containerBlock.Size
for _, initContainer := range pod.Spec.InitContainers {
// we do not want to continue updates once we reach the current container
if initContainer.Name == container.Name {
break
}
if blockSize == 0 {
break
}
if podutil.IsRestartableInitContainer(&initContainer) {
// we should not reuse the resource from any restartable init
// container
continue
}
initContainerBlocks := s.GetMemoryBlocks(podUID, initContainer.Name)
if len(initContainerBlocks) == 0 {
continue
}
for i := range initContainerBlocks {
initContainerBlock := &initContainerBlocks[i]
if initContainerBlock.Size == 0 {
continue
}
if initContainerBlock.Type != containerBlock.Type {
continue
}
if !isNUMAAffinitiesEqual(logger, initContainerBlock.NUMAAffinity, containerBlock.NUMAAffinity) {
continue
}
if initContainerBlock.Size > blockSize {
initContainerBlock.Size -= blockSize
blockSize = 0
} else {
blockSize -= initContainerBlock.Size
initContainerBlock.Size = 0
}
}
s.SetMemoryBlocks(podUID, initContainer.Name, initContainerBlocks)
}
}
}
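// A worked trace of the deduction above (a sketch, not from the original
// source): if init1 holds a 1Gi block on nodes {0, 1} and the current app
// container was allocated 768Mi with the same type and NUMA affinity,
// init1's block shrinks to 256Mi, so the per-container blocks still sum to
// the total amount of memory reserved for the pod.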
func isRegularInitContainer(pod *v1.Pod, container *v1.Container) bool {
for _, initContainer := range pod.Spec.InitContainers {
if initContainer.Name == container.Name {
return !podutil.IsRestartableInitContainer(&initContainer)
}
}
return false
}
func isNUMAAffinitiesEqual(logger logr.Logger, numaAffinity1, numaAffinity2 []int) bool {
bitMask1, err := bitmask.NewBitMask(numaAffinity1...)
if err != nil {
logger.Error(err, "failed to create bit mask", "numaAffinity1", numaAffinity1)
return false
}
bitMask2, err := bitmask.NewBitMask(numaAffinity2...)
if err != nil {
logger.Error(err, "failed to create bit mask", "numaAffinity2", numaAffinity2)
return false
}
return bitMask1.IsEqual(bitMask2)
}
func isAffinityViolatingNUMAAllocations(machineState state.NUMANodeMap, mask bitmask.BitMask) bool {
maskBits := mask.GetBits()
singleNUMAHint := len(maskBits) == 1
for _, nodeID := range mask.GetBits() {
// the node was never used for the memory allocation
if machineState[nodeID].NumberOfAssignments == 0 {
continue
}
if singleNUMAHint {
continue
}
// the node used for the single NUMA memory allocation, it cannot be used for the multi NUMA node allocation
if len(machineState[nodeID].Cells) == 1 {
return true
}
// the node already used with a different group of nodes, it cannot be used within the current hint
if !areGroupsEqual(machineState[nodeID].Cells, maskBits) {
return true
}
}
return false
}
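// For example (a sketch, not from the original source): if node 0 already
// serves a single NUMA node allocation (Cells == [0]) and the candidate mask
// is {0, 1}, the function returns true, because mixing single and cross NUMA
// node allocations on the same node would break the rule linked above.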
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"encoding/json"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)
var _ checkpointmanager.Checkpoint = &MemoryManagerCheckpoint{}
// MemoryManagerCheckpoint struct is used to store memory/pod assignments in a checkpoint
type MemoryManagerCheckpoint struct {
PolicyName string `json:"policyName"`
MachineState NUMANodeMap `json:"machineState"`
Entries ContainerMemoryAssignments `json:"entries,omitempty"`
Checksum checksum.Checksum `json:"checksum"`
}
// NewMemoryManagerCheckpoint returns an instance of Checkpoint
func NewMemoryManagerCheckpoint() *MemoryManagerCheckpoint {
//nolint:staticcheck // unexported-type-in-api user-facing error message
return &MemoryManagerCheckpoint{
Entries: ContainerMemoryAssignments{},
MachineState: NUMANodeMap{},
}
}
// MarshalCheckpoint returns marshalled checkpoint
func (mp *MemoryManagerCheckpoint) MarshalCheckpoint() ([]byte, error) {
// make sure checksum wasn't set before so it doesn't affect output checksum
mp.Checksum = 0
mp.Checksum = checksum.New(mp)
return json.Marshal(*mp)
}
// UnmarshalCheckpoint tries to unmarshal passed bytes to checkpoint
func (mp *MemoryManagerCheckpoint) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, mp)
}
// VerifyChecksum verifies that current checksum of checkpoint is valid
func (mp *MemoryManagerCheckpoint) VerifyChecksum() error {
ck := mp.Checksum
mp.Checksum = 0
err := ck.Verify(mp)
mp.Checksum = ck
return err
}
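// A minimal round-trip sketch for the checkpoint type above (illustrative
// only, not part of the original source):
//
//	cp := NewMemoryManagerCheckpoint()
//	cp.PolicyName = "Static"
//	blob, err := cp.MarshalCheckpoint() // the checksum is computed here
//
//	restored := NewMemoryManagerCheckpoint()
//	if err = restored.UnmarshalCheckpoint(blob); err == nil {
//		// a checksum error here means the state file was corrupted or edited by hand
//		err = restored.VerifyChecksum()
//	}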
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
v1 "k8s.io/api/core/v1"
)
// MemoryTable contains memory information
type MemoryTable struct {
TotalMemSize uint64 `json:"total"`
SystemReserved uint64 `json:"systemReserved"`
Allocatable uint64 `json:"allocatable"`
Reserved uint64 `json:"reserved"`
Free uint64 `json:"free"`
}
// NUMANodeState contains NUMA node related information
type NUMANodeState struct {
// NumberOfAssignments contains the number of memory assignments from this node
// When a container requires both memory and hugepages, it increases the number of assignments by two
NumberOfAssignments int `json:"numberOfAssignments"`
// MemoryTable contains NUMA node memory related information
MemoryMap map[v1.ResourceName]*MemoryTable `json:"memoryMap"`
// Cells contains the current NUMA node and all other nodes that are in a group with the current NUMA node
// This parameter indicates whether the current node is used for a multiple NUMA node memory allocation
// For example, if a container has its memory pinned to NUMA nodes 0,1,2, each of those nodes will have
// this parameter equal to [0, 1, 2]
Cells []int `json:"cells"`
}
// NUMANodeMap contains memory information for each NUMA node.
type NUMANodeMap map[int]*NUMANodeState
// Clone returns a copy of NUMANodeMap
func (nm NUMANodeMap) Clone() NUMANodeMap {
clone := make(NUMANodeMap)
for node, s := range nm {
if s == nil {
clone[node] = nil
continue
}
clone[node] = &NUMANodeState{}
clone[node].NumberOfAssignments = s.NumberOfAssignments
clone[node].Cells = append([]int{}, s.Cells...)
if s.MemoryMap == nil {
continue
}
clone[node].MemoryMap = map[v1.ResourceName]*MemoryTable{}
for memoryType, memoryTable := range s.MemoryMap {
clone[node].MemoryMap[memoryType] = &MemoryTable{
Allocatable: memoryTable.Allocatable,
Free: memoryTable.Free,
Reserved: memoryTable.Reserved,
SystemReserved: memoryTable.SystemReserved,
TotalMemSize: memoryTable.TotalMemSize,
}
}
}
return clone
}
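// Illustrative deep-copy semantics (a sketch, not from the original source):
//
//	orig := NUMANodeMap{0: {NumberOfAssignments: 1, Cells: []int{0}}}
//	cp := orig.Clone()
//	cp[0].NumberOfAssignments = 2 // orig[0].NumberOfAssignments is still 1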
// Block is a data structure used to represent a certain amount of memory
type Block struct {
// NUMAAffinity contains the list of NUMA node IDs that represents the NUMA affinity bitmask
NUMAAffinity []int `json:"numaAffinity"`
Type v1.ResourceName `json:"type"`
Size uint64 `json:"size"`
}
// ContainerMemoryAssignments stores memory assignments of containers
type ContainerMemoryAssignments map[string]map[string][]Block
// Clone returns a copy of ContainerMemoryAssignments
func (as ContainerMemoryAssignments) Clone() ContainerMemoryAssignments {
clone := make(ContainerMemoryAssignments)
for pod := range as {
clone[pod] = make(map[string][]Block)
for container, blocks := range as[pod] {
clone[pod][container] = append([]Block{}, blocks...)
}
}
return clone
}
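// Illustrative shape of the two-level map (hypothetical pod and container
// names): the outer key is the pod UID, the inner key is the container name:
//
//	as := ContainerMemoryAssignments{
//		"pod-uid-1": {
//			"app": []Block{{NUMAAffinity: []int{0}, Type: v1.ResourceMemory, Size: 1 << 30}},
//		},
//	}
//	blocks := as.Clone()["pod-uid-1"]["app"] // an independent copy of the slice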
// Reader interface used to read current memory/pod assignment state
type Reader interface {
// GetMachineState returns Memory Map stored in the State
GetMachineState() NUMANodeMap
// GetMemoryBlocks returns memory assignments of a container
GetMemoryBlocks(podUID string, containerName string) []Block
// GetMemoryAssignments returns ContainerMemoryAssignments
GetMemoryAssignments() ContainerMemoryAssignments
}
type writer interface {
// SetMachineState stores NUMANodeMap in State
SetMachineState(memoryMap NUMANodeMap)
// SetMemoryBlocks stores memory assignments of a container
SetMemoryBlocks(podUID string, containerName string, blocks []Block)
// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
SetMemoryAssignments(assignments ContainerMemoryAssignments)
// Delete deletes corresponding Blocks from ContainerMemoryAssignments
Delete(podUID string, containerName string)
// ClearState clears machineState and ContainerMemoryAssignments
ClearState()
}
// State interface provides methods for tracking and setting memory/pod assignment
type State interface {
Reader
writer
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"path/filepath"
"sync"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)
var _ State = &stateCheckpoint{}
type stateCheckpoint struct {
sync.RWMutex
logger klog.Logger
cache State
policyName string
checkpointManager checkpointmanager.CheckpointManager
checkpointName string
}
// NewCheckpointState creates a new State for keeping track of memory/pod assignments with a checkpoint backend
func NewCheckpointState(logger klog.Logger, stateDir, checkpointName, policyName string) (State, error) {
logger = klog.LoggerWithName(logger, "Memory Manager state checkpoint")
checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager: %v", err)
}
stateCheckpoint := &stateCheckpoint{
logger: logger,
cache: NewMemoryState(logger),
policyName: policyName,
checkpointManager: checkpointManager,
checkpointName: checkpointName,
}
if err := stateCheckpoint.restoreState(); err != nil {
//nolint:staticcheck // ST1005 user-facing error message
return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete the memory manager checkpoint file %q before restarting Kubelet",
err, filepath.Join(stateDir, checkpointName))
}
return stateCheckpoint, nil
}
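// Illustrative usage (hypothetical directory, checkpoint and policy names,
// and a hypothetical logger variable):
//
//	st, err := NewCheckpointState(logger, "/var/lib/kubelet", "memory_manager_state", "Static")
//	if err != nil {
//		// restore failed; the node may need to be drained and the
//		// checkpoint file deleted before restarting the kubelet
//	}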
// restores state from a checkpoint and creates it if it doesn't exist
func (sc *stateCheckpoint) restoreState() error {
sc.Lock()
defer sc.Unlock()
var err error
checkpoint := NewMemoryManagerCheckpoint()
if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
if err == errors.ErrCheckpointNotFound {
return sc.storeState()
}
return err
}
if sc.policyName != checkpoint.PolicyName {
return fmt.Errorf("[memorymanager] configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpoint.PolicyName)
}
sc.cache.SetMachineState(checkpoint.MachineState)
sc.cache.SetMemoryAssignments(checkpoint.Entries)
sc.logger.V(2).Info("State checkpoint: restored state from checkpoint")
return nil
}
// saves state to a checkpoint; the caller is responsible for locking
func (sc *stateCheckpoint) storeState() error {
checkpoint := NewMemoryManagerCheckpoint()
checkpoint.PolicyName = sc.policyName
checkpoint.MachineState = sc.cache.GetMachineState()
checkpoint.Entries = sc.cache.GetMemoryAssignments()
err := sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
if err != nil {
sc.logger.Error(err, "Could not save checkpoint")
return err
}
return nil
}
// GetMachineState returns the NUMANodeMap stored in the State
func (sc *stateCheckpoint) GetMachineState() NUMANodeMap {
sc.RLock()
defer sc.RUnlock()
return sc.cache.GetMachineState()
}
// GetMemoryBlocks returns memory assignments of a container
func (sc *stateCheckpoint) GetMemoryBlocks(podUID string, containerName string) []Block {
sc.RLock()
defer sc.RUnlock()
return sc.cache.GetMemoryBlocks(podUID, containerName)
}
// GetMemoryAssignments returns ContainerMemoryAssignments
func (sc *stateCheckpoint) GetMemoryAssignments() ContainerMemoryAssignments {
sc.RLock()
defer sc.RUnlock()
return sc.cache.GetMemoryAssignments()
}
// SetMachineState stores NUMANodeMap in State
func (sc *stateCheckpoint) SetMachineState(memoryMap NUMANodeMap) {
sc.Lock()
defer sc.Unlock()
sc.cache.SetMachineState(memoryMap)
err := sc.storeState()
if err != nil {
sc.logger.Error(err, "Failed to store state to checkpoint")
}
}
// SetMemoryBlocks stores memory assignments of a container
func (sc *stateCheckpoint) SetMemoryBlocks(podUID string, containerName string, blocks []Block) {
sc.Lock()
defer sc.Unlock()
sc.cache.SetMemoryBlocks(podUID, containerName, blocks)
err := sc.storeState()
if err != nil {
sc.logger.Error(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
}
}
// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
func (sc *stateCheckpoint) SetMemoryAssignments(assignments ContainerMemoryAssignments) {
sc.Lock()
defer sc.Unlock()
sc.cache.SetMemoryAssignments(assignments)
err := sc.storeState()
if err != nil {
sc.logger.Error(err, "Failed to store state to checkpoint")
}
}
// Delete deletes corresponding Blocks from ContainerMemoryAssignments
func (sc *stateCheckpoint) Delete(podUID string, containerName string) {
sc.Lock()
defer sc.Unlock()
sc.cache.Delete(podUID, containerName)
err := sc.storeState()
if err != nil {
sc.logger.Error(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
}
}
// ClearState clears machineState and ContainerMemoryAssignments
func (sc *stateCheckpoint) ClearState() {
sc.Lock()
defer sc.Unlock()
sc.cache.ClearState()
err := sc.storeState()
if err != nil {
sc.logger.Error(err, "Failed to store state to checkpoint")
}
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"sync"
"k8s.io/klog/v2"
)
type stateMemory struct {
sync.RWMutex
logger klog.Logger
assignments ContainerMemoryAssignments
machineState NUMANodeMap
}
var _ State = &stateMemory{}
// NewMemoryState creates a new State for keeping track of memory/pod assignments
func NewMemoryState(logger klog.Logger) State {
logger.Info("Initializing new in-memory state store")
return &stateMemory{
logger: logger,
assignments: ContainerMemoryAssignments{},
machineState: NUMANodeMap{},
}
}
// GetMachineState returns the NUMANodeMap stored in the State
func (s *stateMemory) GetMachineState() NUMANodeMap {
s.RLock()
defer s.RUnlock()
return s.machineState.Clone()
}
// GetMemoryBlocks returns memory assignments of a container
func (s *stateMemory) GetMemoryBlocks(podUID string, containerName string) []Block {
s.RLock()
defer s.RUnlock()
if res, ok := s.assignments[podUID][containerName]; ok {
return append([]Block{}, res...)
}
return nil
}
// GetMemoryAssignments returns ContainerMemoryAssignments
func (s *stateMemory) GetMemoryAssignments() ContainerMemoryAssignments {
s.RLock()
defer s.RUnlock()
return s.assignments.Clone()
}
// SetMachineState stores NUMANodeMap in State
func (s *stateMemory) SetMachineState(nodeMap NUMANodeMap) {
s.Lock()
defer s.Unlock()
s.machineState = nodeMap.Clone()
s.logger.Info("Updated machine memory state")
}
// SetMemoryBlocks stores memory assignments of a container
func (s *stateMemory) SetMemoryBlocks(podUID string, containerName string, blocks []Block) {
s.Lock()
defer s.Unlock()
if _, ok := s.assignments[podUID]; !ok {
s.assignments[podUID] = map[string][]Block{}
}
s.assignments[podUID][containerName] = append([]Block{}, blocks...)
s.logger.Info("Updated memory state", "podUID", podUID, "containerName", containerName)
}
// SetMemoryAssignments sets ContainerMemoryAssignments by using the passed parameter
func (s *stateMemory) SetMemoryAssignments(assignments ContainerMemoryAssignments) {
s.Lock()
defer s.Unlock()
s.assignments = assignments.Clone()
s.logger.V(5).Info("Updated Memory assignments", "assignments", assignments)
}
// Delete deletes corresponding Blocks from ContainerMemoryAssignments
func (s *stateMemory) Delete(podUID string, containerName string) {
s.Lock()
defer s.Unlock()
if _, ok := s.assignments[podUID]; !ok {
return
}
delete(s.assignments[podUID], containerName)
if len(s.assignments[podUID]) == 0 {
delete(s.assignments, podUID)
}
s.logger.V(2).Info("Deleted memory assignment", "podUID", podUID, "containerName", containerName)
}
// ClearState clears machineState and ContainerMemoryAssignments
func (s *stateMemory) ClearState() {
s.Lock()
defer s.Unlock()
s.machineState = NUMANodeMap{}
s.assignments = make(ContainerMemoryAssignments)
s.logger.V(2).Info("Cleared state")
}
//go:build linux
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
defaultNodeAllocatableCgroupName = "kubepods"
)
// createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
nodeAllocatable := cm.internalCapacity
// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
nc := cm.NodeConfig.NodeAllocatableConfig
if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) {
nodeAllocatable = cm.getNodeAllocatableInternalAbsolute()
}
cgroupConfig := &CgroupConfig{
Name: cm.cgroupRoot,
// The default limits for cpu shares can be very low which can lead to CPU starvation for pods.
ResourceParameters: cm.getCgroupConfig(nodeAllocatable, false),
}
if cm.cgroupManager.Exists(cgroupConfig.Name) {
return nil
}
if err := cm.cgroupManager.Create(cgroupConfig); err != nil {
klog.ErrorS(err, "Failed to create cgroup", "cgroupName", cm.cgroupRoot)
return err
}
return nil
}
// enforceNodeAllocatableCgroups enforces Node Allocatable Cgroup settings.
func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
nc := cm.NodeConfig.NodeAllocatableConfig
// We need to update limits on node allocatable cgroup no matter what because
// default cpu shares on cgroups are low and can cause cpu starvation.
nodeAllocatable := cm.internalCapacity
// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) {
nodeAllocatable = cm.getNodeAllocatableInternalAbsolute()
}
klog.V(4).InfoS("Attempting to enforce Node Allocatable", "config", nc)
cgroupConfig := &CgroupConfig{
Name: cm.cgroupRoot,
ResourceParameters: cm.getCgroupConfig(nodeAllocatable, false),
}
// Use an ObjectReference for events, as the node may not be cached; refer to #42701 for detail.
nodeRef := nodeRefFromNode(cm.nodeInfo.Name)
// If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value,
// existing memory usage across pods might be higher than current Node Allocatable Memory Limits.
// Pod Evictions are expected to bring down memory usage to below Node Allocatable limits.
// Until evictions happen, retry cgroup updates.
// Update limits on non root cgroup-root to be safe since the default limits for CPU can be too low.
// Check if cgroupRoot is set to a non-empty value (empty would be the root container)
if len(cm.cgroupRoot) > 0 {
go func() {
for {
err := cm.cgroupManager.Update(cgroupConfig)
if err == nil {
cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
return
}
message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
time.Sleep(time.Minute)
}
}()
}
// Now apply kube reserved and system reserved limits if required.
if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved, false); err != nil {
message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
return errors.New(message)
}
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
}
if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved, false); err != nil {
message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
return errors.New(message)
}
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
}
if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedCompressibleEnforcementKey) {
klog.V(2).InfoS("Enforcing system reserved compressible on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved, true); err != nil {
message := fmt.Sprintf("Failed to enforce System Reserved Compressible Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
return errors.New(message)
}
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
}
if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedCompressibleEnforcementKey) {
klog.V(2).InfoS("Enforcing kube reserved compressible on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved, true); err != nil {
message := fmt.Sprintf("Failed to enforce Kube Reserved Compressible Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
return errors.New(message)
}
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
}
return nil
}
// enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface.
func (cm *containerManagerImpl) enforceExistingCgroup(cNameStr string, rl v1.ResourceList, compressibleResources bool) error {
cName := cm.cgroupManager.CgroupName(cNameStr)
rp := cm.getCgroupConfig(rl, compressibleResources)
if rp == nil {
return fmt.Errorf("%q cgroup is not configured properly", cName)
}
// Enforce MemoryQoS for cgroups of kube-reserved/system-reserved. For more information,
// see https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) {
if rp.Memory != nil {
if rp.Unified == nil {
rp.Unified = make(map[string]string)
}
rp.Unified[Cgroup2MemoryMin] = strconv.FormatInt(*rp.Memory, 10)
}
}
cgroupConfig := &CgroupConfig{
Name: cName,
ResourceParameters: rp,
}
klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
if err := cm.cgroupManager.Validate(cgroupConfig.Name); err != nil {
return err
}
if err := cm.cgroupManager.Update(cgroupConfig); err != nil {
return err
}
return nil
}
// getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface.
func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList, compressibleResourcesOnly bool) *ResourceConfig {
rc := getCgroupConfigInternal(rl, compressibleResourcesOnly)
if rc == nil {
return nil
}
// In the case of a None policy, cgroupv2 and systemd cgroup manager, we must make sure systemd is aware of the cpuset cgroup.
// By default, systemd will not create it, as we've not chosen to delegate it, and we haven't included it in the Apply() request.
// However, this causes a bug where kubelet restarts unnecessarily (cpuset cgroup is created in the cgroupfs, but systemd
// doesn't know about it and deletes it, and then kubelet doesn't continue because the cgroup isn't configured as expected).
// An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer,
// and this is sufficient.
// Only do so on None policy, as Static policy will do its own updating of the cpuset.
// Please see the comment on policy none's GetAllocatableCPUs
if cm.cpuManager.GetAllocatableCPUs().IsEmpty() {
rc.CPUSet = cm.cpuManager.GetAllCPUs()
}
return rc
}
// getCgroupConfigInternal contains the pieces of getCgroupConfig that don't require the cm object.
// It is split out so it can be unit tested without creating a full containerManager
func getCgroupConfigInternal(rl v1.ResourceList, compressibleResourcesOnly bool) *ResourceConfig {
// TODO(vishh): Set CPU Quota if necessary.
if rl == nil {
return nil
}
var rc ResourceConfig
setCompressibleResources := func() {
if q, exists := rl[v1.ResourceCPU]; exists {
// CPU is defined in milli-cores.
val := MilliCPUToShares(q.MilliValue())
rc.CPUShares = &val
}
}
// Only return compressible resources
if compressibleResourcesOnly {
setCompressibleResources()
} else {
if q, exists := rl[v1.ResourceMemory]; exists {
// Memory is defined in bytes.
val := q.Value()
rc.Memory = &val
}
setCompressibleResources()
if q, exists := rl[pidlimit.PIDs]; exists {
val := q.Value()
rc.PidsLimit = &val
}
rc.HugePageLimit = HugePageLimits(rl)
}
return &rc
}
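// Illustrative sketch (hypothetical values): for a 500m CPU / 1Gi memory
// resource list with compressibleResourcesOnly=false, both the CPU shares
// and the memory limit are populated:
//
//	rl := v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("500m"),
//		v1.ResourceMemory: resource.MustParse("1Gi"),
//	}
//	rc := getCgroupConfigInternal(rl, false)
//	// *rc.CPUShares == MilliCPUToShares(500), i.e. 512 assuming the
//	// usual 1024 shares per CPU; *rc.Memory == 1073741824 (1Gi in bytes)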
// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement.
// Note that not all resources that are available on the node are included in the returned list of resources.
// Returns a ResourceList.
func (cm *containerManagerImpl) GetNodeAllocatableAbsolute() v1.ResourceList {
return cm.getNodeAllocatableAbsoluteImpl(cm.capacity)
}
func (cm *containerManagerImpl) getNodeAllocatableAbsoluteImpl(capacity v1.ResourceList) v1.ResourceList {
result := make(v1.ResourceList)
for k, v := range capacity {
value := v.DeepCopy()
if cm.NodeConfig.SystemReserved != nil {
value.Sub(cm.NodeConfig.SystemReserved[k])
}
if cm.NodeConfig.KubeReserved != nil {
value.Sub(cm.NodeConfig.KubeReserved[k])
}
if value.Sign() < 0 {
// Negative Allocatable resources don't make sense.
value.Set(0)
}
result[k] = value
}
return result
}
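// Illustrative arithmetic (hypothetical values): with 8Gi memory capacity,
// 512Mi system-reserved and 1Gi kube-reserved, the allocatable value is
// 8Gi - 512Mi - 1Gi = 6.5Gi; a negative result would be clamped to zero:
//
//	value := resource.MustParse("8Gi")
//	value.Sub(resource.MustParse("512Mi"))
//	value.Sub(resource.MustParse("1Gi")) // value is now 6656Mi (6.5Gi)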
// getNodeAllocatableInternalAbsolute is similar to getNodeAllocatableAbsolute except that
// it also includes internal resources (currently process IDs). It is intended for setting
// up top level cgroups only.
func (cm *containerManagerImpl) getNodeAllocatableInternalAbsolute() v1.ResourceList {
return cm.getNodeAllocatableAbsoluteImpl(cm.internalCapacity)
}
// GetNodeAllocatableReservation returns the amount of compute and storage resources that have to be reserved on this node, away from scheduling.
func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList {
evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity)
result := make(v1.ResourceList)
for k := range cm.capacity {
value := resource.NewQuantity(0, resource.DecimalSI)
if cm.NodeConfig.SystemReserved != nil {
value.Add(cm.NodeConfig.SystemReserved[k])
}
if cm.NodeConfig.KubeReserved != nil {
value.Add(cm.NodeConfig.KubeReserved[k])
}
if evictionReservation != nil {
value.Add(evictionReservation[k])
}
if !value.IsZero() {
result[k] = *value
}
}
return result
}
// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity.
// Returns error if the configuration is invalid, nil otherwise.
func (cm *containerManagerImpl) validateNodeAllocatable() error {
var errors []string
nar := cm.GetNodeAllocatableReservation()
for k, v := range nar {
value := cm.capacity[k].DeepCopy()
value.Sub(v)
if value.Sign() < 0 {
errors = append(errors, fmt.Sprintf("Resource %q has a reservation of %v but capacity of %v. Expected capacity >= reservation.", k, v, cm.capacity[k]))
}
}
if len(errors) > 0 {
return fmt.Errorf("invalid Node Allocatable configuration. %s", strings.Join(errors, " "))
}
return nil
}
// Use an ObjectReference for events, as the node may not be cached; refer to #42701 for detail.
func nodeRefFromNode(nodeName string) *v1.ObjectReference {
return &v1.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeName),
Namespace: "",
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"errors"
"fmt"
"os"
"path"
"strings"
libcontainercgroups "github.com/opencontainers/cgroups"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
kubefeatures "k8s.io/kubernetes/pkg/features"
)
const (
podCgroupNamePrefix = "pod"
)
// podContainerManagerImpl implements podContainerManager interface.
// It is the general implementation which allows pod level container
// management if qos Cgroup is enabled.
type podContainerManagerImpl struct {
// qosContainersInfo hold absolute paths of the top level qos containers
qosContainersInfo QOSContainersInfo
// Stores the mounted cgroup subsystems
subsystems *CgroupSubsystems
// cgroupManager is the cgroup Manager Object responsible for managing all
// pod cgroups.
cgroupManager CgroupManager
// Maximum number of pids in a pod
podPidsLimit int64
// enforceCPULimits controls whether cfs quota is enforced or not
enforceCPULimits bool
// cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per
// node for all containers in usec
cpuCFSQuotaPeriod uint64
// podContainerManager is the ContainerManager running on the machine
podContainerManager ContainerManager
}
// Make sure that podContainerManagerImpl implements the PodContainerManager interface
var _ PodContainerManager = &podContainerManagerImpl{}
// Exists checks if the pod's cgroup already exists
func (m *podContainerManagerImpl) Exists(pod *v1.Pod) bool {
podContainerName, _ := m.GetPodContainerName(pod)
return m.cgroupManager.Exists(podContainerName)
}
// EnsureExists takes a pod as an argument and makes sure that
// the pod cgroup exists if the qos cgroup hierarchy flag is enabled.
// If the pod level container doesn't already exist it is created.
func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
// check if the pod cgroup already exists
alreadyExists := m.Exists(pod)
if !alreadyExists {
enforceCPULimits := m.enforceCPULimits
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.podContainerManager.PodHasExclusiveCPUs(pod) {
klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod))
enforceCPULimits = false
}
enforceMemoryQoS := false
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
libcontainercgroups.IsCgroup2UnifiedMode() {
enforceMemoryQoS = true
}
// Create the pod container
podContainerName, _ := m.GetPodContainerName(pod)
containerConfig := &CgroupConfig{
Name: podContainerName,
ResourceParameters: ResourceConfigForPod(pod, enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS),
}
if m.podPidsLimit > 0 {
containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
}
if enforceMemoryQoS {
klog.V(4).InfoS("MemoryQoS config for pod", "pod", klog.KObj(pod), "unified", containerConfig.ResourceParameters.Unified)
}
if err := m.cgroupManager.Create(containerConfig); err != nil {
return fmt.Errorf("failed to create container for %v : %v", podContainerName, err)
}
}
return nil
}
// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) {
podQOS := v1qos.GetPodQOS(pod)
// Get the parent QOS container name
var parentContainer CgroupName
switch podQOS {
case v1.PodQOSGuaranteed:
parentContainer = m.qosContainersInfo.Guaranteed
case v1.PodQOSBurstable:
parentContainer = m.qosContainersInfo.Burstable
case v1.PodQOSBestEffort:
parentContainer = m.qosContainersInfo.BestEffort
}
podContainer := GetPodCgroupNameSuffix(pod.UID)
// Get the absolute path of the cgroup
cgroupName := NewCgroupName(parentContainer, podContainer)
// Get the literal cgroupfs name
cgroupfsName := m.cgroupManager.Name(cgroupName)
return cgroupName, cgroupfsName
}
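// Illustrative result (assuming the cgroupfs driver and a hypothetical pod
// UID "8dbc5577"): a burstable pod maps to
//
//	cgroupName, cgroupfsName := m.GetPodContainerName(pod)
//	// cgroupName   == CgroupName{"kubepods", "burstable", "pod8dbc5577"}
//	// cgroupfsName == "/kubepods/burstable/pod8dbc5577"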
func (m *podContainerManagerImpl) GetPodCgroupMemoryUsage(pod *v1.Pod) (uint64, error) {
podCgroupName, _ := m.GetPodContainerName(pod)
memUsage, err := m.cgroupManager.MemoryUsage(podCgroupName)
if err != nil {
return 0, err
}
return uint64(memUsage), nil
}
func (m *podContainerManagerImpl) GetPodCgroupConfig(pod *v1.Pod, resource v1.ResourceName) (*ResourceConfig, error) {
podCgroupName, _ := m.GetPodContainerName(pod)
return m.cgroupManager.GetCgroupConfig(podCgroupName, resource)
}
func (m *podContainerManagerImpl) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *ResourceConfig) error {
podCgroupName, _ := m.GetPodContainerName(pod)
return m.cgroupManager.SetCgroupConfig(podCgroupName, resourceConfig)
}
// Kill one process ID
func (m *podContainerManagerImpl) killOnePid(pid int) error {
// os.FindProcess never returns an error on POSIX
// https://go-review.googlesource.com/c/go/+/19093
p, _ := os.FindProcess(pid)
if err := p.Kill(); err != nil {
// If the process already exited, that's fine.
if errors.Is(err, os.ErrProcessDone) {
klog.V(3).InfoS("Process no longer exists", "pid", pid)
return nil
}
return err
}
return nil
}
// Scan through the whole cgroup directory and kill all processes either
// attached to the pod cgroup or to a container cgroup under the pod cgroup
func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName) error {
pidsToKill := m.cgroupManager.Pids(podCgroup)
// No pids charged to the terminated pod cgroup; nothing to kill
if len(pidsToKill) == 0 {
return nil
}
var errlist []error
// os.Kill often errors out, so we try killing all the pids multiple times
removed := map[int]bool{}
for i := 0; i < 5; i++ {
if i != 0 {
klog.V(3).InfoS("Attempt failed to kill all unwanted process from cgroup, retrying", "attempt", i, "cgroupName", podCgroup)
}
errlist = []error{}
for _, pid := range pidsToKill {
if _, ok := removed[pid]; ok {
continue
}
klog.V(3).InfoS("Attempting to kill process from cgroup", "pid", pid, "cgroupName", podCgroup)
if err := m.killOnePid(pid); err != nil {
klog.V(3).InfoS("Failed to kill process from cgroup", "pid", pid, "cgroupName", podCgroup, "err", err)
errlist = append(errlist, err)
} else {
removed[pid] = true
}
}
if len(errlist) == 0 {
klog.V(3).InfoS("Successfully killed all unwanted processes from cgroup", "cgroupName", podCgroup)
return nil
}
}
return utilerrors.NewAggregate(errlist)
}
// Destroy destroys the pod container cgroup paths
func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error {
// Try killing all the processes attached to the pod cgroup
if err := m.tryKillingCgroupProcesses(podCgroup); err != nil {
klog.InfoS("Failed to kill all the processes attached to cgroup", "cgroupName", podCgroup, "err", err)
return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err)
}
// Now it's safe to remove the pod's cgroup
containerConfig := &CgroupConfig{
Name: podCgroup,
ResourceParameters: &ResourceConfig{},
}
if err := m.cgroupManager.Destroy(containerConfig); err != nil {
klog.InfoS("Failed to delete cgroup paths", "cgroupName", podCgroup, "err", err)
return fmt.Errorf("failed to delete cgroup paths for %v : %v", podCgroup, err)
}
return nil
}
// ReduceCPULimits reduces the CPU CFS values to the minimum amount of shares.
func (m *podContainerManagerImpl) ReduceCPULimits(podCgroup CgroupName) error {
return m.cgroupManager.ReduceCPULimits(podCgroup)
}
// IsPodCgroup returns true if the literal cgroupfs name corresponds to a pod
func (m *podContainerManagerImpl) IsPodCgroup(cgroupfs string) (bool, types.UID) {
// convert the literal cgroupfs form to the driver specific value
cgroupName := m.cgroupManager.CgroupName(cgroupfs)
qosContainersList := [3]CgroupName{m.qosContainersInfo.BestEffort, m.qosContainersInfo.Burstable, m.qosContainersInfo.Guaranteed}
basePath := ""
for _, qosContainerName := range qosContainersList {
// a pod cgroup is a direct child of a qos node, so check if it's a match
if len(cgroupName) == len(qosContainerName)+1 {
basePath = cgroupName[len(qosContainerName)]
}
}
if basePath == "" {
return false, types.UID("")
}
if !strings.HasPrefix(basePath, podCgroupNamePrefix) {
return false, types.UID("")
}
parts := strings.Split(basePath, podCgroupNamePrefix)
if len(parts) != 2 {
return false, types.UID("")
}
return true, types.UID(parts[1])
}
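// Illustrative parse (hypothetical cgroupfs name): "/kubepods/burstable/pod8dbc5577"
// sits one level below the burstable QoS node, its base segment "pod8dbc5577"
// carries the pod prefix, and splitting on the prefix yields
// (true, types.UID("8dbc5577")); anything else returns (false, "").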
// GetAllPodsFromCgroups scans through all the subsystems of pod cgroups
// and gets the list of pods whose cgroups still exist on the cgroup mounts
func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) {
// Map for storing all the found pods on the disk
foundPods := make(map[types.UID]CgroupName)
qosContainersList := [3]CgroupName{m.qosContainersInfo.BestEffort, m.qosContainersInfo.Burstable, m.qosContainersInfo.Guaranteed}
// Scan through all the subsystem mounts
// and through each QoS cgroup directory for each subsystem mount
// If a pod cgroup exists in even a single subsystem mount
// we will attempt to delete it
for _, val := range m.subsystems.MountPoints {
for _, qosContainerName := range qosContainersList {
// get the subsystem's QoS cgroup absolute name
qcConversion := m.cgroupManager.Name(qosContainerName)
qc := path.Join(val, qcConversion)
dirInfo, err := os.ReadDir(qc)
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, fmt.Errorf("failed to read the cgroup directory %v : %v", qc, err)
}
for i := range dirInfo {
// it's not a directory, so continue on...
if !dirInfo[i].IsDir() {
continue
}
// convert the concrete cgroupfs name back to an internal identifier
// this is needed to handle path conversion for systemd environments.
// we pass the fully qualified path so decoding can work as expected
// since systemd encodes the path in each segment.
cgroupfsPath := path.Join(qcConversion, dirInfo[i].Name())
internalPath := m.cgroupManager.CgroupName(cgroupfsPath)
// we only care about base segment of the converted path since that
// is what we are reading currently to know if it is a pod or not.
basePath := internalPath[len(internalPath)-1]
if !strings.Contains(basePath, podCgroupNamePrefix) {
continue
}
// we then split the name on the pod prefix to determine the uid
parts := strings.Split(basePath, podCgroupNamePrefix)
// the uid is missing, so log the unexpected cgroup that is not of the form pod<uid>
if len(parts) != 2 {
klog.InfoS("Pod cgroup manager ignored unexpected cgroup because it is not a pod", "path", cgroupfsPath)
continue
}
podUID := parts[1]
foundPods[types.UID(podUID)] = internalPath
}
}
}
return foundPods, nil
}
// podContainerManagerNoop implements podContainerManager interface.
// It is a no-op implementation and basically does nothing
// podContainerManagerNoop is used when the QoS cgroup hierarchy is not
// enabled; Exists() always returns true, as the cgroupRoot
// is expected to always exist.
type podContainerManagerNoop struct {
cgroupRoot CgroupName
}
// Make sure that podContainerManagerNoop implements the PodContainerManager interface
var _ PodContainerManager = &podContainerManagerNoop{}
func (m *podContainerManagerNoop) Exists(_ *v1.Pod) bool {
return true
}
func (m *podContainerManagerNoop) EnsureExists(_ *v1.Pod) error {
return nil
}
func (m *podContainerManagerNoop) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
return m.cgroupRoot, ""
}
func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *v1.Pod) string {
return ""
}
// Destroy destroys the pod container cgroup paths
func (m *podContainerManagerNoop) Destroy(_ CgroupName) error {
return nil
}
func (m *podContainerManagerNoop) ReduceCPULimits(_ CgroupName) error {
return nil
}
func (m *podContainerManagerNoop) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) {
return nil, nil
}
func (m *podContainerManagerNoop) IsPodCgroup(cgroupfs string) (bool, types.UID) {
return false, types.UID("")
}
func (m *podContainerManagerNoop) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) {
return 0, nil
}
func (m *podContainerManagerNoop) GetPodCgroupConfig(_ *v1.Pod, _ v1.ResourceName) (*ResourceConfig, error) {
return nil, nil
}
func (m *podContainerManagerNoop) SetPodCgroupConfig(_ *v1.Pod, _ *ResourceConfig) error {
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
type podContainerManagerStub struct {
}
var _ PodContainerManager = &podContainerManagerStub{}
func (m *podContainerManagerStub) Exists(_ *v1.Pod) bool {
return true
}
func (m *podContainerManagerStub) EnsureExists(_ *v1.Pod) error {
return nil
}
func (m *podContainerManagerStub) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
return nil, ""
}
func (m *podContainerManagerStub) Destroy(_ CgroupName) error {
return nil
}
func (m *podContainerManagerStub) ReduceCPULimits(_ CgroupName) error {
return nil
}
func (m *podContainerManagerStub) GetAllPodsFromCgroups() (map[types.UID]CgroupName, error) {
return nil, nil
}
func (m *podContainerManagerStub) IsPodCgroup(cgroupfs string) (bool, types.UID) {
return false, types.UID("")
}
func (m *podContainerManagerStub) GetPodCgroupMemoryUsage(_ *v1.Pod) (uint64, error) {
return 0, nil
}
func (m *podContainerManagerStub) GetPodCgroupMemoryLimit(_ *v1.Pod) (uint64, error) {
return 0, nil
}
func (m *podContainerManagerStub) GetPodCgroupCpuLimit(_ *v1.Pod) (int64, uint64, uint64, error) {
return 0, 0, 0, nil
}
func (m *podContainerManagerStub) SetPodCgroupMemoryLimit(_ *v1.Pod, _ int64) error {
return nil
}
func (m *podContainerManagerStub) SetPodCgroupCpuLimit(_ *v1.Pod, _ *int64, _, _ *uint64) error {
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/wait"
units "github.com/docker/go-units"
libcontainercgroups "github.com/opencontainers/cgroups"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
kubefeatures "k8s.io/kubernetes/pkg/features"
)
const (
// how often the qos cgroup manager will perform periodic update
// of the qos level cgroup resource constraints
periodicQOSCgroupUpdateInterval = 1 * time.Minute
)
type QOSContainerManager interface {
Start(func() v1.ResourceList, ActivePodsFunc) error
GetQOSContainersInfo() QOSContainersInfo
UpdateCgroups() error
}
type qosContainerManagerImpl struct {
sync.Mutex
qosContainersInfo QOSContainersInfo
subsystems *CgroupSubsystems
cgroupManager CgroupManager
activePods ActivePodsFunc
getNodeAllocatable func() v1.ResourceList
cgroupRoot CgroupName
qosReserved map[v1.ResourceName]int64
}
func NewQOSContainerManager(subsystems *CgroupSubsystems, cgroupRoot CgroupName, nodeConfig NodeConfig, cgroupManager CgroupManager) (QOSContainerManager, error) {
if !nodeConfig.CgroupsPerQOS {
return &qosContainerManagerNoop{
cgroupRoot: cgroupRoot,
}, nil
}
return &qosContainerManagerImpl{
subsystems: subsystems,
cgroupManager: cgroupManager,
cgroupRoot: cgroupRoot,
qosReserved: nodeConfig.QOSReserved,
}, nil
}
func (m *qosContainerManagerImpl) GetQOSContainersInfo() QOSContainersInfo {
return m.qosContainersInfo
}
func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceList, activePods ActivePodsFunc) error {
cm := m.cgroupManager
rootContainer := m.cgroupRoot
if err := cm.Validate(rootContainer); err != nil {
return fmt.Errorf("error validating root container %v : %w", rootContainer, err)
}
// Top level QoS containers are created only for the Burstable
// and Best Effort classes
qosClasses := map[v1.PodQOSClass]CgroupName{
v1.PodQOSBurstable: NewCgroupName(rootContainer, strings.ToLower(string(v1.PodQOSBurstable))),
v1.PodQOSBestEffort: NewCgroupName(rootContainer, strings.ToLower(string(v1.PodQOSBestEffort))),
}
// Create containers for both qos classes
for qosClass, containerName := range qosClasses {
resourceParameters := &ResourceConfig{}
// the BestEffort QoS class has a statically configured minShares value
if qosClass == v1.PodQOSBestEffort {
minShares := uint64(MinShares)
resourceParameters.CPUShares = &minShares
}
// containerConfig object stores the cgroup specifications
containerConfig := &CgroupConfig{
Name: containerName,
ResourceParameters: resourceParameters,
}
// for each enumerated huge page size, the qos tiers are unbounded
m.setHugePagesUnbounded(containerConfig)
// check if it exists
if !cm.Exists(containerName) {
if err := cm.Create(containerConfig); err != nil {
return fmt.Errorf("failed to create top level %v QOS cgroup : %v", qosClass, err)
}
} else {
// to ensure we actually have the right state, we update the config on startup
if err := cm.Update(containerConfig); err != nil {
return fmt.Errorf("failed to update top level %v QOS cgroup : %v", qosClass, err)
}
}
}
// Store the top level qos container names
m.qosContainersInfo = QOSContainersInfo{
Guaranteed: rootContainer,
Burstable: qosClasses[v1.PodQOSBurstable],
BestEffort: qosClasses[v1.PodQOSBestEffort],
}
m.getNodeAllocatable = getNodeAllocatable
m.activePods = activePods
// update qos cgroup tiers on startup and in periodic intervals
// to ensure desired state is in sync with actual state.
go wait.Until(func() {
err := m.UpdateCgroups()
if err != nil {
klog.InfoS("Failed to reserve QoS requests", "err", err)
}
}, periodicQOSCgroupUpdateInterval, wait.NeverStop)
return nil
}
// setHugePagesUnbounded ensures hugetlb is effectively unbounded
func (m *qosContainerManagerImpl) setHugePagesUnbounded(cgroupConfig *CgroupConfig) error {
hugePageLimit := map[int64]int64{}
for _, pageSize := range libcontainercgroups.HugePageSizes() {
pageSizeBytes, err := units.RAMInBytes(pageSize)
if err != nil {
return err
}
hugePageLimit[pageSizeBytes] = int64(1 << 62)
}
cgroupConfig.ResourceParameters.HugePageLimit = hugePageLimit
return nil
}
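// Illustrative entry (hypothetical host with 2Mi huge pages): the page size
// string reported by libcontainer (e.g. "2MB") parses via units.RAMInBytes
// to 2097152 bytes, so the map gains hugePageLimit[2097152] = 1 << 62,
// which is effectively no limit.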
func (m *qosContainerManagerImpl) setHugePagesConfig(configs map[v1.PodQOSClass]*CgroupConfig) error {
for _, v := range configs {
if err := m.setHugePagesUnbounded(v); err != nil {
return err
}
}
return nil
}
func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]*CgroupConfig) error {
pods := m.activePods()
burstablePodCPURequest := int64(0)
reuseReqs := make(v1.ResourceList, 4)
for i := range pods {
pod := pods[i]
qosClass := v1qos.GetPodQOS(pod)
if qosClass != v1.PodQOSBurstable {
// we only care about the burstable qos tier
continue
}
req := resource.PodRequests(pod, resource.PodResourcesOptions{
Reuse: reuseReqs,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources),
})
if request, found := req[v1.ResourceCPU]; found {
burstablePodCPURequest += request.MilliValue()
}
}
// make sure best effort is always 2 shares
bestEffortCPUShares := uint64(MinShares)
configs[v1.PodQOSBestEffort].ResourceParameters.CPUShares = &bestEffortCPUShares
// set burstable shares based on the currently observed state
burstableCPUShares := MilliCPUToShares(burstablePodCPURequest)
configs[v1.PodQOSBurstable].ResourceParameters.CPUShares = &burstableCPUShares
return nil
}
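// Illustrative arithmetic (hypothetical pods): two burstable pods requesting
// 250m and 750m CPU sum to 1000m, so the burstable tier receives
// MilliCPUToShares(1000) CPU shares (1024, assuming the usual 1024 shares
// per CPU), while best effort stays pinned at MinShares.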
// getQoSMemoryRequests sums and returns the memory request of all pods for
// guaranteed and burstable qos classes.
func (m *qosContainerManagerImpl) getQoSMemoryRequests() map[v1.PodQOSClass]int64 {
qosMemoryRequests := map[v1.PodQOSClass]int64{
v1.PodQOSGuaranteed: 0,
v1.PodQOSBurstable: 0,
}
// Sum the pod requests for pods in each QOS class
pods := m.activePods()
reuseReqs := make(v1.ResourceList, 4)
for _, pod := range pods {
podMemoryRequest := int64(0)
qosClass := v1qos.GetPodQOS(pod)
if qosClass == v1.PodQOSBestEffort {
// requests are not set for Best Effort pods
continue
}
req := resource.PodRequests(pod, resource.PodResourcesOptions{Reuse: reuseReqs})
if request, found := req[v1.ResourceMemory]; found {
podMemoryRequest += request.Value()
}
qosMemoryRequests[qosClass] += podMemoryRequest
}
return qosMemoryRequests
}
// setMemoryReserve sums the memory requests of all pods in a QOS class,
// calculates the QOS class memory limits, and sets those limits in the
// CgroupConfig for each QOS class.
func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*CgroupConfig, percentReserve int64) {
qosMemoryRequests := m.getQoSMemoryRequests()
resources := m.getNodeAllocatable()
allocatableResource, ok := resources[v1.ResourceMemory]
if !ok {
klog.V(2).InfoS("Allocatable memory value could not be determined, not setting QoS memory limits")
return
}
allocatable := allocatableResource.Value()
if allocatable == 0 {
klog.V(2).InfoS("Allocatable memory reported as 0, might be in standalone mode, not setting QoS memory limits")
return
}
for qos, limits := range qosMemoryRequests {
klog.V(2).InfoS("QoS pod memory limit", "qos", qos, "limits", limits, "percentReserve", percentReserve)
}
// Calculate QOS memory limits
burstableLimit := allocatable - (qosMemoryRequests[v1.PodQOSGuaranteed] * percentReserve / 100)
bestEffortLimit := burstableLimit - (qosMemoryRequests[v1.PodQOSBurstable] * percentReserve / 100)
configs[v1.PodQOSBurstable].ResourceParameters.Memory = &burstableLimit
configs[v1.PodQOSBestEffort].ResourceParameters.Memory = &bestEffortLimit
}
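// Illustrative arithmetic (hypothetical values): with allocatable = 10Gi,
// guaranteed requests = 4Gi, burstable requests = 2Gi and percentReserve = 50:
//
//	burstableLimit  = 10Gi - 4Gi*50/100 = 8Gi
//	bestEffortLimit =  8Gi - 2Gi*50/100 = 7Gi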
// retrySetMemoryReserve checks for any QoS cgroups over the limit
// that was attempted to be set in the first Update() and adjusts
// their memory limit to the usage to prevent further growth.
func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSClass]*CgroupConfig, percentReserve int64) {
// Unreclaimable memory usage may have already exceeded the desired limit.
// Attempt to set the limit near the current usage to put pressure
// on the cgroup and prevent further growth.
for qos, config := range configs {
usage, err := m.cgroupManager.MemoryUsage(config.Name)
if err != nil {
klog.V(2).InfoS("Failed to get resource stats", "err", err)
return
}
// Because there is no good way to determine if the original Update()
// on the memory resource was successful, we determine failure of the
// first attempt by checking if the usage is above the limit we attempt
// to set. If it is, we assume the first attempt to set the limit failed
// and try again setting the limit to the usage. Otherwise we leave
// the CgroupConfig as is.
if configs[qos].ResourceParameters.Memory != nil && usage > *configs[qos].ResourceParameters.Memory {
configs[qos].ResourceParameters.Memory = &usage
}
}
}
// setMemoryQoS sums the memory requests of Guaranteed and Burstable pods and
// sets the sums as memory.min in the Unified field of the corresponding CgroupConfig.
func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*CgroupConfig) {
qosMemoryRequests := m.getQoSMemoryRequests()
// Calculate the memory.min:
// for burstable(/kubepods/burstable): sum of all burstable pods
// for guaranteed(/kubepods): sum of all guaranteed and burstable pods
burstableMin := qosMemoryRequests[v1.PodQOSBurstable]
guaranteedMin := qosMemoryRequests[v1.PodQOSGuaranteed] + burstableMin
if burstableMin > 0 {
if configs[v1.PodQOSBurstable].ResourceParameters.Unified == nil {
configs[v1.PodQOSBurstable].ResourceParameters.Unified = make(map[string]string)
}
configs[v1.PodQOSBurstable].ResourceParameters.Unified[Cgroup2MemoryMin] = strconv.FormatInt(burstableMin, 10)
klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memoryMin", burstableMin)
}
if guaranteedMin > 0 {
if configs[v1.PodQOSGuaranteed].ResourceParameters.Unified == nil {
configs[v1.PodQOSGuaranteed].ResourceParameters.Unified = make(map[string]string)
}
configs[v1.PodQOSGuaranteed].ResourceParameters.Unified[Cgroup2MemoryMin] = strconv.FormatInt(guaranteedMin, 10)
klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memoryMin", guaranteedMin)
}
}
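// Illustrative arithmetic (hypothetical values): with burstable requests
// summing to 2Gi and guaranteed requests to 4Gi, memory.min becomes 2Gi on
// the burstable cgroup and 6Gi (4Gi + 2Gi) on the guaranteed (kubepods root)
// cgroup.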
func (m *qosContainerManagerImpl) UpdateCgroups() error {
m.Lock()
defer m.Unlock()
qosConfigs := map[v1.PodQOSClass]*CgroupConfig{
v1.PodQOSGuaranteed: {
Name: m.qosContainersInfo.Guaranteed,
ResourceParameters: &ResourceConfig{},
},
v1.PodQOSBurstable: {
Name: m.qosContainersInfo.Burstable,
ResourceParameters: &ResourceConfig{},
},
v1.PodQOSBestEffort: {
Name: m.qosContainersInfo.BestEffort,
ResourceParameters: &ResourceConfig{},
},
}
// update the qos level cgroup settings for cpu shares
if err := m.setCPUCgroupConfig(qosConfigs); err != nil {
return err
}
// update the qos level cgroup settings for huge pages (ensure they remain unbounded)
if err := m.setHugePagesConfig(qosConfigs); err != nil {
return err
}
// update the qos level cgroup v2 settings for memory qos if the feature is enabled
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
libcontainercgroups.IsCgroup2UnifiedMode() {
m.setMemoryQoS(qosConfigs)
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.QOSReserved) {
for resource, percentReserve := range m.qosReserved {
switch resource {
case v1.ResourceMemory:
m.setMemoryReserve(qosConfigs, percentReserve)
}
}
updateSuccess := true
for _, config := range qosConfigs {
err := m.cgroupManager.Update(config)
if err != nil {
updateSuccess = false
}
}
if updateSuccess {
klog.V(4).InfoS("Updated QoS cgroup configuration")
return nil
}
// If the resource can adjust the ResourceConfig to increase likelihood of
// success, call the adjustment function here. Otherwise, the Update() will
// be called again with the same values.
for resource, percentReserve := range m.qosReserved {
switch resource {
case v1.ResourceMemory:
m.retrySetMemoryReserve(qosConfigs, percentReserve)
}
}
}
for _, config := range qosConfigs {
err := m.cgroupManager.Update(config)
if err != nil {
klog.ErrorS(err, "Failed to update QoS cgroup configuration")
return err
}
}
klog.V(4).InfoS("Updated QoS cgroup configuration")
return nil
}
type qosContainerManagerNoop struct {
cgroupRoot CgroupName
}
var _ QOSContainerManager = &qosContainerManagerNoop{}
func (m *qosContainerManagerNoop) GetQOSContainersInfo() QOSContainersInfo {
return QOSContainersInfo{}
}
func (m *qosContainerManagerNoop) Start(_ func() v1.ResourceList, _ ActivePodsFunc) error {
return nil
}
func (m *qosContainerManagerNoop) UpdateCgroups() error {
return nil
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
"context"
mock "github.com/stretchr/testify/mock"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/cri-api/pkg/apis"
v10 "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/resourceupdates"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// NewMockContainerManager creates a new instance of MockContainerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockContainerManager(t interface {
mock.TestingT
Cleanup(func())
}) *MockContainerManager {
mock := &MockContainerManager{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockContainerManager is an autogenerated mock type for the ContainerManager type
type MockContainerManager struct {
mock.Mock
}
type MockContainerManager_Expecter struct {
mock *mock.Mock
}
func (_m *MockContainerManager) EXPECT() *MockContainerManager_Expecter {
return &MockContainerManager_Expecter{mock: &_m.Mock}
}
// ContainerHasExclusiveCPUs provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
ret := _mock.Called(pod, container)
if len(ret) == 0 {
panic("no return value specified for ContainerHasExclusiveCPUs")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod, *v1.Container) bool); ok {
r0 = returnFunc(pod, container)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockContainerManager_ContainerHasExclusiveCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerHasExclusiveCPUs'
type MockContainerManager_ContainerHasExclusiveCPUs_Call struct {
*mock.Call
}
// ContainerHasExclusiveCPUs is a helper method to define mock.On call
// - pod *v1.Pod
// - container *v1.Container
func (_e *MockContainerManager_Expecter) ContainerHasExclusiveCPUs(pod interface{}, container interface{}) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
return &MockContainerManager_ContainerHasExclusiveCPUs_Call{Call: _e.mock.On("ContainerHasExclusiveCPUs", pod, container)}
}
func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) Run(run func(pod *v1.Pod, container *v1.Container)) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
var arg1 *v1.Container
if args[1] != nil {
arg1 = args[1].(*v1.Container)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) Return(b bool) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockContainerManager_ContainerHasExclusiveCPUs_Call) RunAndReturn(run func(pod *v1.Pod, container *v1.Container) bool) *MockContainerManager_ContainerHasExclusiveCPUs_Call {
_c.Call.Return(run)
return _c
}
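// Two illustrative ways to stub this method, assuming m is a mock built with
// NewMockContainerManager (argument matchers accept any value): Return fixes
// the result for every matching call, while RunAndReturn computes it from the
// actual arguments at call time.
//
//	m.EXPECT().ContainerHasExclusiveCPUs(mock.Anything, mock.Anything).Return(false)
//
//	m.EXPECT().ContainerHasExclusiveCPUs(mock.Anything, mock.Anything).
//		RunAndReturn(func(pod *v1.Pod, c *v1.Container) bool {
//			return pod != nil && c != nil
//		})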
// GetAllocatableCPUs provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetAllocatableCPUs() []int64 {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllocatableCPUs")
}
var r0 []int64
if returnFunc, ok := ret.Get(0).(func() []int64); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
return r0
}
// MockContainerManager_GetAllocatableCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllocatableCPUs'
type MockContainerManager_GetAllocatableCPUs_Call struct {
*mock.Call
}
// GetAllocatableCPUs is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetAllocatableCPUs() *MockContainerManager_GetAllocatableCPUs_Call {
return &MockContainerManager_GetAllocatableCPUs_Call{Call: _e.mock.On("GetAllocatableCPUs")}
}
func (_c *MockContainerManager_GetAllocatableCPUs_Call) Run(run func()) *MockContainerManager_GetAllocatableCPUs_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetAllocatableCPUs_Call) Return(int64s []int64) *MockContainerManager_GetAllocatableCPUs_Call {
_c.Call.Return(int64s)
return _c
}
func (_c *MockContainerManager_GetAllocatableCPUs_Call) RunAndReturn(run func() []int64) *MockContainerManager_GetAllocatableCPUs_Call {
_c.Call.Return(run)
return _c
}
// GetAllocatableDevices provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetAllocatableDevices() []*v10.ContainerDevices {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllocatableDevices")
}
var r0 []*v10.ContainerDevices
if returnFunc, ok := ret.Get(0).(func() []*v10.ContainerDevices); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v10.ContainerDevices)
}
}
return r0
}
// MockContainerManager_GetAllocatableDevices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllocatableDevices'
type MockContainerManager_GetAllocatableDevices_Call struct {
*mock.Call
}
// GetAllocatableDevices is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetAllocatableDevices() *MockContainerManager_GetAllocatableDevices_Call {
return &MockContainerManager_GetAllocatableDevices_Call{Call: _e.mock.On("GetAllocatableDevices")}
}
func (_c *MockContainerManager_GetAllocatableDevices_Call) Run(run func()) *MockContainerManager_GetAllocatableDevices_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetAllocatableDevices_Call) Return(containerDevicess []*v10.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
_c.Call.Return(containerDevicess)
return _c
}
func (_c *MockContainerManager_GetAllocatableDevices_Call) RunAndReturn(run func() []*v10.ContainerDevices) *MockContainerManager_GetAllocatableDevices_Call {
_c.Call.Return(run)
return _c
}
// GetAllocatableMemory provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetAllocatableMemory() []*v10.ContainerMemory {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllocatableMemory")
}
var r0 []*v10.ContainerMemory
if returnFunc, ok := ret.Get(0).(func() []*v10.ContainerMemory); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v10.ContainerMemory)
}
}
return r0
}
// MockContainerManager_GetAllocatableMemory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllocatableMemory'
type MockContainerManager_GetAllocatableMemory_Call struct {
*mock.Call
}
// GetAllocatableMemory is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetAllocatableMemory() *MockContainerManager_GetAllocatableMemory_Call {
return &MockContainerManager_GetAllocatableMemory_Call{Call: _e.mock.On("GetAllocatableMemory")}
}
func (_c *MockContainerManager_GetAllocatableMemory_Call) Run(run func()) *MockContainerManager_GetAllocatableMemory_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetAllocatableMemory_Call) Return(containerMemorys []*v10.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
_c.Call.Return(containerMemorys)
return _c
}
func (_c *MockContainerManager_GetAllocatableMemory_Call) RunAndReturn(run func() []*v10.ContainerMemory) *MockContainerManager_GetAllocatableMemory_Call {
_c.Call.Return(run)
return _c
}
// GetAllocateResourcesPodAdmitHandler provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllocateResourcesPodAdmitHandler")
}
var r0 lifecycle.PodAdmitHandler
if returnFunc, ok := ret.Get(0).(func() lifecycle.PodAdmitHandler); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(lifecycle.PodAdmitHandler)
}
}
return r0
}
// MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllocateResourcesPodAdmitHandler'
type MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call struct {
*mock.Call
}
// GetAllocateResourcesPodAdmitHandler is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetAllocateResourcesPodAdmitHandler() *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call {
return &MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call{Call: _e.mock.On("GetAllocateResourcesPodAdmitHandler")}
}
func (_c *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call) Run(run func()) *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call) Return(podAdmitHandler lifecycle.PodAdmitHandler) *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call {
_c.Call.Return(podAdmitHandler)
return _c
}
func (_c *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call) RunAndReturn(run func() lifecycle.PodAdmitHandler) *MockContainerManager_GetAllocateResourcesPodAdmitHandler_Call {
_c.Call.Return(run)
return _c
}
// GetCPUs provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetCPUs(podUID string, containerName string) []int64 {
ret := _mock.Called(podUID, containerName)
if len(ret) == 0 {
panic("no return value specified for GetCPUs")
}
var r0 []int64
if returnFunc, ok := ret.Get(0).(func(string, string) []int64); ok {
r0 = returnFunc(podUID, containerName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]int64)
}
}
return r0
}
// MockContainerManager_GetCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCPUs'
type MockContainerManager_GetCPUs_Call struct {
*mock.Call
}
// GetCPUs is a helper method to define mock.On call
// - podUID string
// - containerName string
func (_e *MockContainerManager_Expecter) GetCPUs(podUID interface{}, containerName interface{}) *MockContainerManager_GetCPUs_Call {
return &MockContainerManager_GetCPUs_Call{Call: _e.mock.On("GetCPUs", podUID, containerName)}
}
func (_c *MockContainerManager_GetCPUs_Call) Run(run func(podUID string, containerName string)) *MockContainerManager_GetCPUs_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 string
if args[1] != nil {
arg1 = args[1].(string)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_GetCPUs_Call) Return(int64s []int64) *MockContainerManager_GetCPUs_Call {
_c.Call.Return(int64s)
return _c
}
func (_c *MockContainerManager_GetCPUs_Call) RunAndReturn(run func(podUID string, containerName string) []int64) *MockContainerManager_GetCPUs_Call {
_c.Call.Return(run)
return _c
}
// GetCapacity provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
ret := _mock.Called(localStorageCapacityIsolation)
if len(ret) == 0 {
panic("no return value specified for GetCapacity")
}
var r0 v1.ResourceList
if returnFunc, ok := ret.Get(0).(func(bool) v1.ResourceList); ok {
r0 = returnFunc(localStorageCapacityIsolation)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1.ResourceList)
}
}
return r0
}
// MockContainerManager_GetCapacity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCapacity'
type MockContainerManager_GetCapacity_Call struct {
*mock.Call
}
// GetCapacity is a helper method to define mock.On call
// - localStorageCapacityIsolation bool
func (_e *MockContainerManager_Expecter) GetCapacity(localStorageCapacityIsolation interface{}) *MockContainerManager_GetCapacity_Call {
return &MockContainerManager_GetCapacity_Call{Call: _e.mock.On("GetCapacity", localStorageCapacityIsolation)}
}
func (_c *MockContainerManager_GetCapacity_Call) Run(run func(localStorageCapacityIsolation bool)) *MockContainerManager_GetCapacity_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 bool
if args[0] != nil {
arg0 = args[0].(bool)
}
run(
arg0,
)
})
return _c
}
func (_c *MockContainerManager_GetCapacity_Call) Return(resourceList v1.ResourceList) *MockContainerManager_GetCapacity_Call {
_c.Call.Return(resourceList)
return _c
}
func (_c *MockContainerManager_GetCapacity_Call) RunAndReturn(run func(localStorageCapacityIsolation bool) v1.ResourceList) *MockContainerManager_GetCapacity_Call {
_c.Call.Return(run)
return _c
}
// GetDevicePluginResourceCapacity provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetDevicePluginResourceCapacity")
}
var r0 v1.ResourceList
var r1 v1.ResourceList
var r2 []string
if returnFunc, ok := ret.Get(0).(func() (v1.ResourceList, v1.ResourceList, []string)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() v1.ResourceList); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1.ResourceList)
}
}
if returnFunc, ok := ret.Get(1).(func() v1.ResourceList); ok {
r1 = returnFunc()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(v1.ResourceList)
}
}
if returnFunc, ok := ret.Get(2).(func() []string); ok {
r2 = returnFunc()
} else {
if ret.Get(2) != nil {
r2 = ret.Get(2).([]string)
}
}
return r0, r1, r2
}
// MockContainerManager_GetDevicePluginResourceCapacity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDevicePluginResourceCapacity'
type MockContainerManager_GetDevicePluginResourceCapacity_Call struct {
*mock.Call
}
// GetDevicePluginResourceCapacity is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetDevicePluginResourceCapacity() *MockContainerManager_GetDevicePluginResourceCapacity_Call {
return &MockContainerManager_GetDevicePluginResourceCapacity_Call{Call: _e.mock.On("GetDevicePluginResourceCapacity")}
}
func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) Run(run func()) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) Return(resourceList v1.ResourceList, resourceList1 v1.ResourceList, strings []string) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
_c.Call.Return(resourceList, resourceList1, strings)
return _c
}
func (_c *MockContainerManager_GetDevicePluginResourceCapacity_Call) RunAndReturn(run func() (v1.ResourceList, v1.ResourceList, []string)) *MockContainerManager_GetDevicePluginResourceCapacity_Call {
_c.Call.Return(run)
return _c
}
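// Sketch of stubbing a three-value return, assuming m is a mock built with
// NewMockContainerManager; the quantity is hypothetical and "resource" refers
// to k8s.io/apimachinery/pkg/api/resource:
//
//	gpus := v1.ResourceList{"example.com/gpu": resource.MustParse("4")}
//	m.EXPECT().GetDevicePluginResourceCapacity().Return(gpus, gpus, nil)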
// GetDevices provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetDevices(podUID string, containerName string) []*v10.ContainerDevices {
ret := _mock.Called(podUID, containerName)
if len(ret) == 0 {
panic("no return value specified for GetDevices")
}
var r0 []*v10.ContainerDevices
if returnFunc, ok := ret.Get(0).(func(string, string) []*v10.ContainerDevices); ok {
r0 = returnFunc(podUID, containerName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v10.ContainerDevices)
}
}
return r0
}
// MockContainerManager_GetDevices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDevices'
type MockContainerManager_GetDevices_Call struct {
*mock.Call
}
// GetDevices is a helper method to define mock.On call
// - podUID string
// - containerName string
func (_e *MockContainerManager_Expecter) GetDevices(podUID interface{}, containerName interface{}) *MockContainerManager_GetDevices_Call {
return &MockContainerManager_GetDevices_Call{Call: _e.mock.On("GetDevices", podUID, containerName)}
}
func (_c *MockContainerManager_GetDevices_Call) Run(run func(podUID string, containerName string)) *MockContainerManager_GetDevices_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 string
if args[1] != nil {
arg1 = args[1].(string)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_GetDevices_Call) Return(containerDevicess []*v10.ContainerDevices) *MockContainerManager_GetDevices_Call {
_c.Call.Return(containerDevicess)
return _c
}
func (_c *MockContainerManager_GetDevices_Call) RunAndReturn(run func(podUID string, containerName string) []*v10.ContainerDevices) *MockContainerManager_GetDevices_Call {
_c.Call.Return(run)
return _c
}
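// Run is handy for observing arguments without changing the result; the
// captured slice below is purely illustrative (m is a mock built with
// NewMockContainerManager):
//
//	var seen []string
//	m.EXPECT().GetDevices(mock.Anything, mock.Anything).
//		Run(func(podUID, containerName string) {
//			seen = append(seen, podUID+"/"+containerName)
//		}).
//		Return(nil)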
// GetDynamicResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetDynamicResources(pod *v1.Pod, container *v1.Container) []*v10.DynamicResource {
ret := _mock.Called(pod, container)
if len(ret) == 0 {
panic("no return value specified for GetDynamicResources")
}
var r0 []*v10.DynamicResource
if returnFunc, ok := ret.Get(0).(func(*v1.Pod, *v1.Container) []*v10.DynamicResource); ok {
r0 = returnFunc(pod, container)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v10.DynamicResource)
}
}
return r0
}
// MockContainerManager_GetDynamicResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDynamicResources'
type MockContainerManager_GetDynamicResources_Call struct {
*mock.Call
}
// GetDynamicResources is a helper method to define mock.On call
// - pod *v1.Pod
// - container *v1.Container
func (_e *MockContainerManager_Expecter) GetDynamicResources(pod interface{}, container interface{}) *MockContainerManager_GetDynamicResources_Call {
return &MockContainerManager_GetDynamicResources_Call{Call: _e.mock.On("GetDynamicResources", pod, container)}
}
func (_c *MockContainerManager_GetDynamicResources_Call) Run(run func(pod *v1.Pod, container *v1.Container)) *MockContainerManager_GetDynamicResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
var arg1 *v1.Container
if args[1] != nil {
arg1 = args[1].(*v1.Container)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_GetDynamicResources_Call) Return(dynamicResources []*v10.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
_c.Call.Return(dynamicResources)
return _c
}
func (_c *MockContainerManager_GetDynamicResources_Call) RunAndReturn(run func(pod *v1.Pod, container *v1.Container) []*v10.DynamicResource) *MockContainerManager_GetDynamicResources_Call {
_c.Call.Return(run)
return _c
}
// GetHealthCheckers provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetHealthCheckers() []healthz.HealthChecker {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetHealthCheckers")
}
var r0 []healthz.HealthChecker
if returnFunc, ok := ret.Get(0).(func() []healthz.HealthChecker); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]healthz.HealthChecker)
}
}
return r0
}
// MockContainerManager_GetHealthCheckers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHealthCheckers'
type MockContainerManager_GetHealthCheckers_Call struct {
*mock.Call
}
// GetHealthCheckers is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetHealthCheckers() *MockContainerManager_GetHealthCheckers_Call {
return &MockContainerManager_GetHealthCheckers_Call{Call: _e.mock.On("GetHealthCheckers")}
}
func (_c *MockContainerManager_GetHealthCheckers_Call) Run(run func()) *MockContainerManager_GetHealthCheckers_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetHealthCheckers_Call) Return(healthCheckers []healthz.HealthChecker) *MockContainerManager_GetHealthCheckers_Call {
_c.Call.Return(healthCheckers)
return _c
}
func (_c *MockContainerManager_GetHealthCheckers_Call) RunAndReturn(run func() []healthz.HealthChecker) *MockContainerManager_GetHealthCheckers_Call {
_c.Call.Return(run)
return _c
}
// GetMemory provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetMemory(podUID string, containerName string) []*v10.ContainerMemory {
ret := _mock.Called(podUID, containerName)
if len(ret) == 0 {
panic("no return value specified for GetMemory")
}
var r0 []*v10.ContainerMemory
if returnFunc, ok := ret.Get(0).(func(string, string) []*v10.ContainerMemory); ok {
r0 = returnFunc(podUID, containerName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v10.ContainerMemory)
}
}
return r0
}
// MockContainerManager_GetMemory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMemory'
type MockContainerManager_GetMemory_Call struct {
*mock.Call
}
// GetMemory is a helper method to define mock.On call
// - podUID string
// - containerName string
func (_e *MockContainerManager_Expecter) GetMemory(podUID interface{}, containerName interface{}) *MockContainerManager_GetMemory_Call {
return &MockContainerManager_GetMemory_Call{Call: _e.mock.On("GetMemory", podUID, containerName)}
}
func (_c *MockContainerManager_GetMemory_Call) Run(run func(podUID string, containerName string)) *MockContainerManager_GetMemory_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 string
if args[1] != nil {
arg1 = args[1].(string)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_GetMemory_Call) Return(containerMemorys []*v10.ContainerMemory) *MockContainerManager_GetMemory_Call {
_c.Call.Return(containerMemorys)
return _c
}
func (_c *MockContainerManager_GetMemory_Call) RunAndReturn(run func(podUID string, containerName string) []*v10.ContainerMemory) *MockContainerManager_GetMemory_Call {
_c.Call.Return(run)
return _c
}
// GetMountedSubsystems provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetMountedSubsystems() *cm.CgroupSubsystems {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetMountedSubsystems")
}
var r0 *cm.CgroupSubsystems
if returnFunc, ok := ret.Get(0).(func() *cm.CgroupSubsystems); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*cm.CgroupSubsystems)
}
}
return r0
}
// MockContainerManager_GetMountedSubsystems_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMountedSubsystems'
type MockContainerManager_GetMountedSubsystems_Call struct {
*mock.Call
}
// GetMountedSubsystems is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetMountedSubsystems() *MockContainerManager_GetMountedSubsystems_Call {
return &MockContainerManager_GetMountedSubsystems_Call{Call: _e.mock.On("GetMountedSubsystems")}
}
func (_c *MockContainerManager_GetMountedSubsystems_Call) Run(run func()) *MockContainerManager_GetMountedSubsystems_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetMountedSubsystems_Call) Return(cgroupSubsystems *cm.CgroupSubsystems) *MockContainerManager_GetMountedSubsystems_Call {
_c.Call.Return(cgroupSubsystems)
return _c
}
func (_c *MockContainerManager_GetMountedSubsystems_Call) RunAndReturn(run func() *cm.CgroupSubsystems) *MockContainerManager_GetMountedSubsystems_Call {
_c.Call.Return(run)
return _c
}
// GetNodeAllocatableAbsolute provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetNodeAllocatableAbsolute() v1.ResourceList {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetNodeAllocatableAbsolute")
}
var r0 v1.ResourceList
if returnFunc, ok := ret.Get(0).(func() v1.ResourceList); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1.ResourceList)
}
}
return r0
}
// MockContainerManager_GetNodeAllocatableAbsolute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNodeAllocatableAbsolute'
type MockContainerManager_GetNodeAllocatableAbsolute_Call struct {
*mock.Call
}
// GetNodeAllocatableAbsolute is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetNodeAllocatableAbsolute() *MockContainerManager_GetNodeAllocatableAbsolute_Call {
return &MockContainerManager_GetNodeAllocatableAbsolute_Call{Call: _e.mock.On("GetNodeAllocatableAbsolute")}
}
func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) Run(run func()) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) Return(resourceList v1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
_c.Call.Return(resourceList)
return _c
}
func (_c *MockContainerManager_GetNodeAllocatableAbsolute_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_GetNodeAllocatableAbsolute_Call {
_c.Call.Return(run)
return _c
}
// GetNodeAllocatableReservation provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetNodeAllocatableReservation() v1.ResourceList {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetNodeAllocatableReservation")
}
var r0 v1.ResourceList
if returnFunc, ok := ret.Get(0).(func() v1.ResourceList); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1.ResourceList)
}
}
return r0
}
// MockContainerManager_GetNodeAllocatableReservation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNodeAllocatableReservation'
type MockContainerManager_GetNodeAllocatableReservation_Call struct {
*mock.Call
}
// GetNodeAllocatableReservation is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetNodeAllocatableReservation() *MockContainerManager_GetNodeAllocatableReservation_Call {
return &MockContainerManager_GetNodeAllocatableReservation_Call{Call: _e.mock.On("GetNodeAllocatableReservation")}
}
func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) Run(run func()) *MockContainerManager_GetNodeAllocatableReservation_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) Return(resourceList v1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
_c.Call.Return(resourceList)
return _c
}
func (_c *MockContainerManager_GetNodeAllocatableReservation_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_GetNodeAllocatableReservation_Call {
_c.Call.Return(run)
return _c
}
// GetNodeConfig provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetNodeConfig() cm.NodeConfig {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetNodeConfig")
}
var r0 cm.NodeConfig
if returnFunc, ok := ret.Get(0).(func() cm.NodeConfig); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(cm.NodeConfig)
}
return r0
}
// MockContainerManager_GetNodeConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNodeConfig'
type MockContainerManager_GetNodeConfig_Call struct {
*mock.Call
}
// GetNodeConfig is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetNodeConfig() *MockContainerManager_GetNodeConfig_Call {
return &MockContainerManager_GetNodeConfig_Call{Call: _e.mock.On("GetNodeConfig")}
}
func (_c *MockContainerManager_GetNodeConfig_Call) Run(run func()) *MockContainerManager_GetNodeConfig_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetNodeConfig_Call) Return(nodeConfig cm.NodeConfig) *MockContainerManager_GetNodeConfig_Call {
_c.Call.Return(nodeConfig)
return _c
}
func (_c *MockContainerManager_GetNodeConfig_Call) RunAndReturn(run func() cm.NodeConfig) *MockContainerManager_GetNodeConfig_Call {
_c.Call.Return(run)
return _c
}
// GetPluginRegistrationHandlers provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetPluginRegistrationHandlers() map[string]cache.PluginHandler {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetPluginRegistrationHandlers")
}
var r0 map[string]cache.PluginHandler
if returnFunc, ok := ret.Get(0).(func() map[string]cache.PluginHandler); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string]cache.PluginHandler)
}
}
return r0
}
// MockContainerManager_GetPluginRegistrationHandlers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPluginRegistrationHandlers'
type MockContainerManager_GetPluginRegistrationHandlers_Call struct {
*mock.Call
}
// GetPluginRegistrationHandlers is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetPluginRegistrationHandlers() *MockContainerManager_GetPluginRegistrationHandlers_Call {
return &MockContainerManager_GetPluginRegistrationHandlers_Call{Call: _e.mock.On("GetPluginRegistrationHandlers")}
}
func (_c *MockContainerManager_GetPluginRegistrationHandlers_Call) Run(run func()) *MockContainerManager_GetPluginRegistrationHandlers_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetPluginRegistrationHandlers_Call) Return(stringToPluginHandler map[string]cache.PluginHandler) *MockContainerManager_GetPluginRegistrationHandlers_Call {
_c.Call.Return(stringToPluginHandler)
return _c
}
func (_c *MockContainerManager_GetPluginRegistrationHandlers_Call) RunAndReturn(run func() map[string]cache.PluginHandler) *MockContainerManager_GetPluginRegistrationHandlers_Call {
_c.Call.Return(run)
return _c
}
// GetPodCgroupRoot provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetPodCgroupRoot() string {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetPodCgroupRoot")
}
var r0 string
if returnFunc, ok := ret.Get(0).(func() string); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// MockContainerManager_GetPodCgroupRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodCgroupRoot'
type MockContainerManager_GetPodCgroupRoot_Call struct {
*mock.Call
}
// GetPodCgroupRoot is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetPodCgroupRoot() *MockContainerManager_GetPodCgroupRoot_Call {
return &MockContainerManager_GetPodCgroupRoot_Call{Call: _e.mock.On("GetPodCgroupRoot")}
}
func (_c *MockContainerManager_GetPodCgroupRoot_Call) Run(run func()) *MockContainerManager_GetPodCgroupRoot_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetPodCgroupRoot_Call) Return(s string) *MockContainerManager_GetPodCgroupRoot_Call {
_c.Call.Return(s)
return _c
}
func (_c *MockContainerManager_GetPodCgroupRoot_Call) RunAndReturn(run func() string) *MockContainerManager_GetPodCgroupRoot_Call {
_c.Call.Return(run)
return _c
}
// GetQOSContainersInfo provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetQOSContainersInfo() cm.QOSContainersInfo {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetQOSContainersInfo")
}
var r0 cm.QOSContainersInfo
if returnFunc, ok := ret.Get(0).(func() cm.QOSContainersInfo); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(cm.QOSContainersInfo)
}
return r0
}
// MockContainerManager_GetQOSContainersInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetQOSContainersInfo'
type MockContainerManager_GetQOSContainersInfo_Call struct {
*mock.Call
}
// GetQOSContainersInfo is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) GetQOSContainersInfo() *MockContainerManager_GetQOSContainersInfo_Call {
return &MockContainerManager_GetQOSContainersInfo_Call{Call: _e.mock.On("GetQOSContainersInfo")}
}
func (_c *MockContainerManager_GetQOSContainersInfo_Call) Run(run func()) *MockContainerManager_GetQOSContainersInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_GetQOSContainersInfo_Call) Return(qOSContainersInfo cm.QOSContainersInfo) *MockContainerManager_GetQOSContainersInfo_Call {
_c.Call.Return(qOSContainersInfo)
return _c
}
func (_c *MockContainerManager_GetQOSContainersInfo_Call) RunAndReturn(run func() cm.QOSContainersInfo) *MockContainerManager_GetQOSContainersInfo_Call {
_c.Call.Return(run)
return _c
}
// GetResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) GetResources(ctx context.Context, pod *v1.Pod, container1 *v1.Container) (*container.RunContainerOptions, error) {
ret := _mock.Called(ctx, pod, container1)
if len(ret) == 0 {
panic("no return value specified for GetResources")
}
var r0 *container.RunContainerOptions
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.Pod, *v1.Container) (*container.RunContainerOptions, error)); ok {
return returnFunc(ctx, pod, container1)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.Pod, *v1.Container) *container.RunContainerOptions); ok {
r0 = returnFunc(ctx, pod, container1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.RunContainerOptions)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context, *v1.Pod, *v1.Container) error); ok {
r1 = returnFunc(ctx, pod, container1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockContainerManager_GetResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetResources'
type MockContainerManager_GetResources_Call struct {
*mock.Call
}
// GetResources is a helper method to define mock.On call
// - ctx context.Context
// - pod *v1.Pod
// - container1 *v1.Container
func (_e *MockContainerManager_Expecter) GetResources(ctx interface{}, pod interface{}, container1 interface{}) *MockContainerManager_GetResources_Call {
return &MockContainerManager_GetResources_Call{Call: _e.mock.On("GetResources", ctx, pod, container1)}
}
func (_c *MockContainerManager_GetResources_Call) Run(run func(ctx context.Context, pod *v1.Pod, container1 *v1.Container)) *MockContainerManager_GetResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v1.Pod
if args[1] != nil {
arg1 = args[1].(*v1.Pod)
}
var arg2 *v1.Container
if args[2] != nil {
arg2 = args[2].(*v1.Container)
}
run(
arg0,
arg1,
arg2,
)
})
return _c
}
func (_c *MockContainerManager_GetResources_Call) Return(runContainerOptions *container.RunContainerOptions, err error) *MockContainerManager_GetResources_Call {
_c.Call.Return(runContainerOptions, err)
return _c
}
func (_c *MockContainerManager_GetResources_Call) RunAndReturn(run func(ctx context.Context, pod *v1.Pod, container1 *v1.Container) (*container.RunContainerOptions, error)) *MockContainerManager_GetResources_Call {
_c.Call.Return(run)
return _c
}
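// An error-path sketch (assumes the standard errors package; the error text
// is arbitrary and m is a mock built with NewMockContainerManager):
//
//	m.EXPECT().GetResources(mock.Anything, mock.Anything, mock.Anything).
//		Return(nil, errors.New("resources unavailable"))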
// InternalContainerLifecycle provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) InternalContainerLifecycle() cm.InternalContainerLifecycle {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for InternalContainerLifecycle")
}
var r0 cm.InternalContainerLifecycle
if returnFunc, ok := ret.Get(0).(func() cm.InternalContainerLifecycle); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(cm.InternalContainerLifecycle)
}
}
return r0
}
// MockContainerManager_InternalContainerLifecycle_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InternalContainerLifecycle'
type MockContainerManager_InternalContainerLifecycle_Call struct {
*mock.Call
}
// InternalContainerLifecycle is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) InternalContainerLifecycle() *MockContainerManager_InternalContainerLifecycle_Call {
return &MockContainerManager_InternalContainerLifecycle_Call{Call: _e.mock.On("InternalContainerLifecycle")}
}
func (_c *MockContainerManager_InternalContainerLifecycle_Call) Run(run func()) *MockContainerManager_InternalContainerLifecycle_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_InternalContainerLifecycle_Call) Return(internalContainerLifecycle cm.InternalContainerLifecycle) *MockContainerManager_InternalContainerLifecycle_Call {
_c.Call.Return(internalContainerLifecycle)
return _c
}
func (_c *MockContainerManager_InternalContainerLifecycle_Call) RunAndReturn(run func() cm.InternalContainerLifecycle) *MockContainerManager_InternalContainerLifecycle_Call {
_c.Call.Return(run)
return _c
}
// NewPodContainerManager provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) NewPodContainerManager() cm.PodContainerManager {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for NewPodContainerManager")
}
var r0 cm.PodContainerManager
if returnFunc, ok := ret.Get(0).(func() cm.PodContainerManager); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(cm.PodContainerManager)
}
}
return r0
}
// MockContainerManager_NewPodContainerManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewPodContainerManager'
type MockContainerManager_NewPodContainerManager_Call struct {
*mock.Call
}
// NewPodContainerManager is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) NewPodContainerManager() *MockContainerManager_NewPodContainerManager_Call {
return &MockContainerManager_NewPodContainerManager_Call{Call: _e.mock.On("NewPodContainerManager")}
}
func (_c *MockContainerManager_NewPodContainerManager_Call) Run(run func()) *MockContainerManager_NewPodContainerManager_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_NewPodContainerManager_Call) Return(podContainerManager cm.PodContainerManager) *MockContainerManager_NewPodContainerManager_Call {
_c.Call.Return(podContainerManager)
return _c
}
func (_c *MockContainerManager_NewPodContainerManager_Call) RunAndReturn(run func() cm.PodContainerManager) *MockContainerManager_NewPodContainerManager_Call {
_c.Call.Return(run)
return _c
}
// PodHasExclusiveCPUs provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for PodHasExclusiveCPUs")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) bool); ok {
r0 = returnFunc(pod)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockContainerManager_PodHasExclusiveCPUs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PodHasExclusiveCPUs'
type MockContainerManager_PodHasExclusiveCPUs_Call struct {
*mock.Call
}
// PodHasExclusiveCPUs is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockContainerManager_Expecter) PodHasExclusiveCPUs(pod interface{}) *MockContainerManager_PodHasExclusiveCPUs_Call {
return &MockContainerManager_PodHasExclusiveCPUs_Call{Call: _e.mock.On("PodHasExclusiveCPUs", pod)}
}
func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) Run(run func(pod *v1.Pod)) *MockContainerManager_PodHasExclusiveCPUs_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) Return(b bool) *MockContainerManager_PodHasExclusiveCPUs_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockContainerManager_PodHasExclusiveCPUs_Call) RunAndReturn(run func(pod *v1.Pod) bool) *MockContainerManager_PodHasExclusiveCPUs_Call {
_c.Call.Return(run)
return _c
}
// PodMightNeedToUnprepareResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) PodMightNeedToUnprepareResources(UID types.UID) bool {
ret := _mock.Called(UID)
if len(ret) == 0 {
panic("no return value specified for PodMightNeedToUnprepareResources")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func(types.UID) bool); ok {
r0 = returnFunc(UID)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockContainerManager_PodMightNeedToUnprepareResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PodMightNeedToUnprepareResources'
type MockContainerManager_PodMightNeedToUnprepareResources_Call struct {
*mock.Call
}
// PodMightNeedToUnprepareResources is a helper method to define mock.On call
// - UID types.UID
func (_e *MockContainerManager_Expecter) PodMightNeedToUnprepareResources(UID interface{}) *MockContainerManager_PodMightNeedToUnprepareResources_Call {
return &MockContainerManager_PodMightNeedToUnprepareResources_Call{Call: _e.mock.On("PodMightNeedToUnprepareResources", UID)}
}
func (_c *MockContainerManager_PodMightNeedToUnprepareResources_Call) Run(run func(UID types.UID)) *MockContainerManager_PodMightNeedToUnprepareResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 types.UID
if args[0] != nil {
arg0 = args[0].(types.UID)
}
run(
arg0,
)
})
return _c
}
func (_c *MockContainerManager_PodMightNeedToUnprepareResources_Call) Return(b bool) *MockContainerManager_PodMightNeedToUnprepareResources_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockContainerManager_PodMightNeedToUnprepareResources_Call) RunAndReturn(run func(UID types.UID) bool) *MockContainerManager_PodMightNeedToUnprepareResources_Call {
_c.Call.Return(run)
return _c
}
// PrepareDynamicResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) PrepareDynamicResources(context1 context.Context, pod *v1.Pod) error {
ret := _mock.Called(context1, pod)
if len(ret) == 0 {
panic("no return value specified for PrepareDynamicResources")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.Pod) error); ok {
r0 = returnFunc(context1, pod)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockContainerManager_PrepareDynamicResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrepareDynamicResources'
type MockContainerManager_PrepareDynamicResources_Call struct {
*mock.Call
}
// PrepareDynamicResources is a helper method to define mock.On call
// - context1 context.Context
// - pod *v1.Pod
func (_e *MockContainerManager_Expecter) PrepareDynamicResources(context1 interface{}, pod interface{}) *MockContainerManager_PrepareDynamicResources_Call {
return &MockContainerManager_PrepareDynamicResources_Call{Call: _e.mock.On("PrepareDynamicResources", context1, pod)}
}
func (_c *MockContainerManager_PrepareDynamicResources_Call) Run(run func(context1 context.Context, pod *v1.Pod)) *MockContainerManager_PrepareDynamicResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v1.Pod
if args[1] != nil {
arg1 = args[1].(*v1.Pod)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_PrepareDynamicResources_Call) Return(err error) *MockContainerManager_PrepareDynamicResources_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockContainerManager_PrepareDynamicResources_Call) RunAndReturn(run func(context1 context.Context, pod *v1.Pod) error) *MockContainerManager_PrepareDynamicResources_Call {
_c.Call.Return(run)
return _c
}
// ShouldResetExtendedResourceCapacity provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) ShouldResetExtendedResourceCapacity() bool {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for ShouldResetExtendedResourceCapacity")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func() bool); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockContainerManager_ShouldResetExtendedResourceCapacity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShouldResetExtendedResourceCapacity'
type MockContainerManager_ShouldResetExtendedResourceCapacity_Call struct {
*mock.Call
}
// ShouldResetExtendedResourceCapacity is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) ShouldResetExtendedResourceCapacity() *MockContainerManager_ShouldResetExtendedResourceCapacity_Call {
return &MockContainerManager_ShouldResetExtendedResourceCapacity_Call{Call: _e.mock.On("ShouldResetExtendedResourceCapacity")}
}
func (_c *MockContainerManager_ShouldResetExtendedResourceCapacity_Call) Run(run func()) *MockContainerManager_ShouldResetExtendedResourceCapacity_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_ShouldResetExtendedResourceCapacity_Call) Return(b bool) *MockContainerManager_ShouldResetExtendedResourceCapacity_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockContainerManager_ShouldResetExtendedResourceCapacity_Call) RunAndReturn(run func() bool) *MockContainerManager_ShouldResetExtendedResourceCapacity_Call {
_c.Call.Return(run)
return _c
}
// Start provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) Start(context1 context.Context, node *v1.Node, activePodsFunc cm.ActivePodsFunc, getNodeFunc cm.GetNodeFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, runtimeService cri.RuntimeService, b bool) error {
ret := _mock.Called(context1, node, activePodsFunc, getNodeFunc, sourcesReady, podStatusProvider, runtimeService, b)
if len(ret) == 0 {
panic("no return value specified for Start")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.Node, cm.ActivePodsFunc, cm.GetNodeFunc, config.SourcesReady, status.PodStatusProvider, cri.RuntimeService, bool) error); ok {
r0 = returnFunc(context1, node, activePodsFunc, getNodeFunc, sourcesReady, podStatusProvider, runtimeService, b)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockContainerManager_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
type MockContainerManager_Start_Call struct {
*mock.Call
}
// Start is a helper method to define mock.On call
// - context1 context.Context
// - node *v1.Node
// - activePodsFunc cm.ActivePodsFunc
// - getNodeFunc cm.GetNodeFunc
// - sourcesReady config.SourcesReady
// - podStatusProvider status.PodStatusProvider
// - runtimeService cri.RuntimeService
// - b bool
func (_e *MockContainerManager_Expecter) Start(context1 interface{}, node interface{}, activePodsFunc interface{}, getNodeFunc interface{}, sourcesReady interface{}, podStatusProvider interface{}, runtimeService interface{}, b interface{}) *MockContainerManager_Start_Call {
return &MockContainerManager_Start_Call{Call: _e.mock.On("Start", context1, node, activePodsFunc, getNodeFunc, sourcesReady, podStatusProvider, runtimeService, b)}
}
func (_c *MockContainerManager_Start_Call) Run(run func(context1 context.Context, node *v1.Node, activePodsFunc cm.ActivePodsFunc, getNodeFunc cm.GetNodeFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, runtimeService cri.RuntimeService, b bool)) *MockContainerManager_Start_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v1.Node
if args[1] != nil {
arg1 = args[1].(*v1.Node)
}
var arg2 cm.ActivePodsFunc
if args[2] != nil {
arg2 = args[2].(cm.ActivePodsFunc)
}
var arg3 cm.GetNodeFunc
if args[3] != nil {
arg3 = args[3].(cm.GetNodeFunc)
}
var arg4 config.SourcesReady
if args[4] != nil {
arg4 = args[4].(config.SourcesReady)
}
var arg5 status.PodStatusProvider
if args[5] != nil {
arg5 = args[5].(status.PodStatusProvider)
}
var arg6 cri.RuntimeService
if args[6] != nil {
arg6 = args[6].(cri.RuntimeService)
}
var arg7 bool
if args[7] != nil {
arg7 = args[7].(bool)
}
run(
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
)
})
return _c
}
func (_c *MockContainerManager_Start_Call) Return(err error) *MockContainerManager_Start_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockContainerManager_Start_Call) RunAndReturn(run func(context1 context.Context, node *v1.Node, activePodsFunc cm.ActivePodsFunc, getNodeFunc cm.GetNodeFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, runtimeService cri.RuntimeService, b bool) error) *MockContainerManager_Start_Call {
_c.Call.Return(run)
return _c
}
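// For a method with this many parameters, matchers keep the expectation
// terse; every matcher below accepts any value (m is a mock built with
// NewMockContainerManager):
//
//	m.EXPECT().Start(mock.Anything, mock.Anything, mock.Anything, mock.Anything,
//		mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)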
// Status provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) Status() cm.Status {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Status")
}
var r0 cm.Status
if returnFunc, ok := ret.Get(0).(func() cm.Status); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(cm.Status)
}
return r0
}
// MockContainerManager_Status_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Status'
type MockContainerManager_Status_Call struct {
*mock.Call
}
// Status is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) Status() *MockContainerManager_Status_Call {
return &MockContainerManager_Status_Call{Call: _e.mock.On("Status")}
}
func (_c *MockContainerManager_Status_Call) Run(run func()) *MockContainerManager_Status_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_Status_Call) Return(status1 cm.Status) *MockContainerManager_Status_Call {
_c.Call.Return(status1)
return _c
}
func (_c *MockContainerManager_Status_Call) RunAndReturn(run func() cm.Status) *MockContainerManager_Status_Call {
_c.Call.Return(run)
return _c
}
// SystemCgroupsLimit provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) SystemCgroupsLimit() v1.ResourceList {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for SystemCgroupsLimit")
}
var r0 v1.ResourceList
if returnFunc, ok := ret.Get(0).(func() v1.ResourceList); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1.ResourceList)
}
}
return r0
}
// MockContainerManager_SystemCgroupsLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SystemCgroupsLimit'
type MockContainerManager_SystemCgroupsLimit_Call struct {
*mock.Call
}
// SystemCgroupsLimit is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) SystemCgroupsLimit() *MockContainerManager_SystemCgroupsLimit_Call {
return &MockContainerManager_SystemCgroupsLimit_Call{Call: _e.mock.On("SystemCgroupsLimit")}
}
func (_c *MockContainerManager_SystemCgroupsLimit_Call) Run(run func()) *MockContainerManager_SystemCgroupsLimit_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_SystemCgroupsLimit_Call) Return(resourceList v1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
_c.Call.Return(resourceList)
return _c
}
func (_c *MockContainerManager_SystemCgroupsLimit_Call) RunAndReturn(run func() v1.ResourceList) *MockContainerManager_SystemCgroupsLimit_Call {
_c.Call.Return(run)
return _c
}
// UnprepareDynamicResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) UnprepareDynamicResources(context1 context.Context, pod *v1.Pod) error {
ret := _mock.Called(context1, pod)
if len(ret) == 0 {
panic("no return value specified for UnprepareDynamicResources")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.Pod) error); ok {
r0 = returnFunc(context1, pod)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockContainerManager_UnprepareDynamicResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnprepareDynamicResources'
type MockContainerManager_UnprepareDynamicResources_Call struct {
*mock.Call
}
// UnprepareDynamicResources is a helper method to define mock.On call
// - context1 context.Context
// - pod *v1.Pod
func (_e *MockContainerManager_Expecter) UnprepareDynamicResources(context1 interface{}, pod interface{}) *MockContainerManager_UnprepareDynamicResources_Call {
return &MockContainerManager_UnprepareDynamicResources_Call{Call: _e.mock.On("UnprepareDynamicResources", context1, pod)}
}
func (_c *MockContainerManager_UnprepareDynamicResources_Call) Run(run func(context1 context.Context, pod *v1.Pod)) *MockContainerManager_UnprepareDynamicResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v1.Pod
if args[1] != nil {
arg1 = args[1].(*v1.Pod)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_UnprepareDynamicResources_Call) Return(err error) *MockContainerManager_UnprepareDynamicResources_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockContainerManager_UnprepareDynamicResources_Call) RunAndReturn(run func(context1 context.Context, pod *v1.Pod) error) *MockContainerManager_UnprepareDynamicResources_Call {
_c.Call.Return(run)
return _c
}
// UpdateAllocatedDevices provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) UpdateAllocatedDevices() {
_mock.Called()
}
// MockContainerManager_UpdateAllocatedDevices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAllocatedDevices'
type MockContainerManager_UpdateAllocatedDevices_Call struct {
*mock.Call
}
// UpdateAllocatedDevices is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) UpdateAllocatedDevices() *MockContainerManager_UpdateAllocatedDevices_Call {
return &MockContainerManager_UpdateAllocatedDevices_Call{Call: _e.mock.On("UpdateAllocatedDevices")}
}
func (_c *MockContainerManager_UpdateAllocatedDevices_Call) Run(run func()) *MockContainerManager_UpdateAllocatedDevices_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_UpdateAllocatedDevices_Call) Return() *MockContainerManager_UpdateAllocatedDevices_Call {
_c.Call.Return()
return _c
}
func (_c *MockContainerManager_UpdateAllocatedDevices_Call) RunAndReturn(run func()) *MockContainerManager_UpdateAllocatedDevices_Call {
_c.Run(run)
return _c
}
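// Because this method returns nothing, RunAndReturn above simply forwards to
// Run, and an expectation needs no return values; a bare Return() (or none at
// all) is enough, e.g.:
//
//	m.EXPECT().UpdateAllocatedDevices().Return()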
// UpdateAllocatedResourcesStatus provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, status1 *v1.PodStatus) {
_mock.Called(pod, status1)
}
// MockContainerManager_UpdateAllocatedResourcesStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAllocatedResourcesStatus'
type MockContainerManager_UpdateAllocatedResourcesStatus_Call struct {
*mock.Call
}
// UpdateAllocatedResourcesStatus is a helper method to define mock.On call
// - pod *v1.Pod
// - status1 *v1.PodStatus
func (_e *MockContainerManager_Expecter) UpdateAllocatedResourcesStatus(pod interface{}, status1 interface{}) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
return &MockContainerManager_UpdateAllocatedResourcesStatus_Call{Call: _e.mock.On("UpdateAllocatedResourcesStatus", pod, status1)}
}
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) Run(run func(pod *v1.Pod, status1 *v1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
var arg1 *v1.PodStatus
if args[1] != nil {
arg1 = args[1].(*v1.PodStatus)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) Return() *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
_c.Call.Return()
return _c
}
func (_c *MockContainerManager_UpdateAllocatedResourcesStatus_Call) RunAndReturn(run func(pod *v1.Pod, status1 *v1.PodStatus)) *MockContainerManager_UpdateAllocatedResourcesStatus_Call {
_c.Run(run)
return _c
}
// UpdatePluginResources provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) UpdatePluginResources(nodeInfo *framework.NodeInfo, podAdmitAttributes *lifecycle.PodAdmitAttributes) error {
ret := _mock.Called(nodeInfo, podAdmitAttributes)
if len(ret) == 0 {
panic("no return value specified for UpdatePluginResources")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(*framework.NodeInfo, *lifecycle.PodAdmitAttributes) error); ok {
r0 = returnFunc(nodeInfo, podAdmitAttributes)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockContainerManager_UpdatePluginResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePluginResources'
type MockContainerManager_UpdatePluginResources_Call struct {
*mock.Call
}
// UpdatePluginResources is a helper method to define mock.On call
// - nodeInfo *framework.NodeInfo
// - podAdmitAttributes *lifecycle.PodAdmitAttributes
func (_e *MockContainerManager_Expecter) UpdatePluginResources(nodeInfo interface{}, podAdmitAttributes interface{}) *MockContainerManager_UpdatePluginResources_Call {
return &MockContainerManager_UpdatePluginResources_Call{Call: _e.mock.On("UpdatePluginResources", nodeInfo, podAdmitAttributes)}
}
func (_c *MockContainerManager_UpdatePluginResources_Call) Run(run func(nodeInfo *framework.NodeInfo, podAdmitAttributes *lifecycle.PodAdmitAttributes)) *MockContainerManager_UpdatePluginResources_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *framework.NodeInfo
if args[0] != nil {
arg0 = args[0].(*framework.NodeInfo)
}
var arg1 *lifecycle.PodAdmitAttributes
if args[1] != nil {
arg1 = args[1].(*lifecycle.PodAdmitAttributes)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockContainerManager_UpdatePluginResources_Call) Return(err error) *MockContainerManager_UpdatePluginResources_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockContainerManager_UpdatePluginResources_Call) RunAndReturn(run func(nodeInfo *framework.NodeInfo, podAdmitAttributes *lifecycle.PodAdmitAttributes) error) *MockContainerManager_UpdatePluginResources_Call {
_c.Call.Return(run)
return _c
}
// UpdateQOSCgroups provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) UpdateQOSCgroups() error {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for UpdateQOSCgroups")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func() error); ok {
r0 = returnFunc()
} else {
r0 = ret.Error(0)
}
return r0
}
// MockContainerManager_UpdateQOSCgroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateQOSCgroups'
type MockContainerManager_UpdateQOSCgroups_Call struct {
*mock.Call
}
// UpdateQOSCgroups is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) UpdateQOSCgroups() *MockContainerManager_UpdateQOSCgroups_Call {
return &MockContainerManager_UpdateQOSCgroups_Call{Call: _e.mock.On("UpdateQOSCgroups")}
}
func (_c *MockContainerManager_UpdateQOSCgroups_Call) Run(run func()) *MockContainerManager_UpdateQOSCgroups_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_UpdateQOSCgroups_Call) Return(err error) *MockContainerManager_UpdateQOSCgroups_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockContainerManager_UpdateQOSCgroups_Call) RunAndReturn(run func() error) *MockContainerManager_UpdateQOSCgroups_Call {
_c.Call.Return(run)
return _c
}
// Updates provides a mock function for the type MockContainerManager
func (_mock *MockContainerManager) Updates() <-chan resourceupdates.Update {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Updates")
}
var r0 <-chan resourceupdates.Update
if returnFunc, ok := ret.Get(0).(func() <-chan resourceupdates.Update); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan resourceupdates.Update)
}
}
return r0
}
// MockContainerManager_Updates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Updates'
type MockContainerManager_Updates_Call struct {
*mock.Call
}
// Updates is a helper method to define mock.On call
func (_e *MockContainerManager_Expecter) Updates() *MockContainerManager_Updates_Call {
return &MockContainerManager_Updates_Call{Call: _e.mock.On("Updates")}
}
func (_c *MockContainerManager_Updates_Call) Run(run func()) *MockContainerManager_Updates_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockContainerManager_Updates_Call) Return(updateCh <-chan resourceupdates.Update) *MockContainerManager_Updates_Call {
_c.Call.Return(updateCh)
return _c
}
func (_c *MockContainerManager_Updates_Call) RunAndReturn(run func() <-chan resourceupdates.Update) *MockContainerManager_Updates_Call {
_c.Call.Return(run)
return _c
}
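// exampleMockContainerManagerUsage is an illustrative sketch added by the
// editor, not mockery output: it shows how the typed *_Call expecter helpers
// above are meant to be used from a test. The function name is hypothetical
// and nothing in this package calls it.
func exampleMockContainerManagerUsage(m *MockContainerManager) {
	// Stub UpdateQOSCgroups to succeed exactly once; the typed Return helper
	// keeps the stubbed values in sync with the interface signature.
	m.EXPECT().UpdateQOSCgroups().Return(nil).Once()
	_ = m.UpdateQOSCgroups()
}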
// NewMockPodContainerManager creates a new instance of MockPodContainerManager. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewMockPodContainerManager(t interface {
mock.TestingT
Cleanup(func())
}) *MockPodContainerManager {
mock := &MockPodContainerManager{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockPodContainerManager is an autogenerated mock type for the PodContainerManager type
type MockPodContainerManager struct {
mock.Mock
}
type MockPodContainerManager_Expecter struct {
mock *mock.Mock
}
func (_m *MockPodContainerManager) EXPECT() *MockPodContainerManager_Expecter {
return &MockPodContainerManager_Expecter{mock: &_m.Mock}
}
// Destroy provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) Destroy(name cm.CgroupName) error {
ret := _mock.Called(name)
if len(ret) == 0 {
panic("no return value specified for Destroy")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(cm.CgroupName) error); ok {
r0 = returnFunc(name)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockPodContainerManager_Destroy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Destroy'
type MockPodContainerManager_Destroy_Call struct {
*mock.Call
}
// Destroy is a helper method to define mock.On call
// - name cm.CgroupName
func (_e *MockPodContainerManager_Expecter) Destroy(name interface{}) *MockPodContainerManager_Destroy_Call {
return &MockPodContainerManager_Destroy_Call{Call: _e.mock.On("Destroy", name)}
}
func (_c *MockPodContainerManager_Destroy_Call) Run(run func(name cm.CgroupName)) *MockPodContainerManager_Destroy_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 cm.CgroupName
if args[0] != nil {
arg0 = args[0].(cm.CgroupName)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_Destroy_Call) Return(err error) *MockPodContainerManager_Destroy_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockPodContainerManager_Destroy_Call) RunAndReturn(run func(name cm.CgroupName) error) *MockPodContainerManager_Destroy_Call {
_c.Call.Return(run)
return _c
}
// EnsureExists provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) EnsureExists(pod *v1.Pod) error {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for EnsureExists")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) error); ok {
r0 = returnFunc(pod)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockPodContainerManager_EnsureExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnsureExists'
type MockPodContainerManager_EnsureExists_Call struct {
*mock.Call
}
// EnsureExists is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockPodContainerManager_Expecter) EnsureExists(pod interface{}) *MockPodContainerManager_EnsureExists_Call {
return &MockPodContainerManager_EnsureExists_Call{Call: _e.mock.On("EnsureExists", pod)}
}
func (_c *MockPodContainerManager_EnsureExists_Call) Run(run func(pod *v1.Pod)) *MockPodContainerManager_EnsureExists_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_EnsureExists_Call) Return(err error) *MockPodContainerManager_EnsureExists_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockPodContainerManager_EnsureExists_Call) RunAndReturn(run func(pod *v1.Pod) error) *MockPodContainerManager_EnsureExists_Call {
_c.Call.Return(run)
return _c
}
// Exists provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) Exists(pod *v1.Pod) bool {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for Exists")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) bool); ok {
r0 = returnFunc(pod)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockPodContainerManager_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists'
type MockPodContainerManager_Exists_Call struct {
*mock.Call
}
// Exists is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockPodContainerManager_Expecter) Exists(pod interface{}) *MockPodContainerManager_Exists_Call {
return &MockPodContainerManager_Exists_Call{Call: _e.mock.On("Exists", pod)}
}
func (_c *MockPodContainerManager_Exists_Call) Run(run func(pod *v1.Pod)) *MockPodContainerManager_Exists_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_Exists_Call) Return(b bool) *MockPodContainerManager_Exists_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockPodContainerManager_Exists_Call) RunAndReturn(run func(pod *v1.Pod) bool) *MockPodContainerManager_Exists_Call {
_c.Call.Return(run)
return _c
}
// GetAllPodsFromCgroups provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) GetAllPodsFromCgroups() (map[types.UID]cm.CgroupName, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetAllPodsFromCgroups")
}
var r0 map[types.UID]cm.CgroupName
var r1 error
if returnFunc, ok := ret.Get(0).(func() (map[types.UID]cm.CgroupName, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() map[types.UID]cm.CgroupName); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[types.UID]cm.CgroupName)
}
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockPodContainerManager_GetAllPodsFromCgroups_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllPodsFromCgroups'
type MockPodContainerManager_GetAllPodsFromCgroups_Call struct {
*mock.Call
}
// GetAllPodsFromCgroups is a helper method to define mock.On call
func (_e *MockPodContainerManager_Expecter) GetAllPodsFromCgroups() *MockPodContainerManager_GetAllPodsFromCgroups_Call {
return &MockPodContainerManager_GetAllPodsFromCgroups_Call{Call: _e.mock.On("GetAllPodsFromCgroups")}
}
func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) Run(run func()) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) Return(uIDToCgroupName map[types.UID]cm.CgroupName, err error) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
_c.Call.Return(uIDToCgroupName, err)
return _c
}
func (_c *MockPodContainerManager_GetAllPodsFromCgroups_Call) RunAndReturn(run func() (map[types.UID]cm.CgroupName, error)) *MockPodContainerManager_GetAllPodsFromCgroups_Call {
_c.Call.Return(run)
return _c
}
// GetPodCgroupConfig provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) GetPodCgroupConfig(pod *v1.Pod, resource v1.ResourceName) (*cm.ResourceConfig, error) {
ret := _mock.Called(pod, resource)
if len(ret) == 0 {
panic("no return value specified for GetPodCgroupConfig")
}
var r0 *cm.ResourceConfig
var r1 error
if returnFunc, ok := ret.Get(0).(func(*v1.Pod, v1.ResourceName) (*cm.ResourceConfig, error)); ok {
return returnFunc(pod, resource)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod, v1.ResourceName) *cm.ResourceConfig); ok {
r0 = returnFunc(pod, resource)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*cm.ResourceConfig)
}
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod, v1.ResourceName) error); ok {
r1 = returnFunc(pod, resource)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockPodContainerManager_GetPodCgroupConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodCgroupConfig'
type MockPodContainerManager_GetPodCgroupConfig_Call struct {
*mock.Call
}
// GetPodCgroupConfig is a helper method to define mock.On call
// - pod *v1.Pod
// - resource v1.ResourceName
func (_e *MockPodContainerManager_Expecter) GetPodCgroupConfig(pod interface{}, resource interface{}) *MockPodContainerManager_GetPodCgroupConfig_Call {
return &MockPodContainerManager_GetPodCgroupConfig_Call{Call: _e.mock.On("GetPodCgroupConfig", pod, resource)}
}
func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) Run(run func(pod *v1.Pod, resource v1.ResourceName)) *MockPodContainerManager_GetPodCgroupConfig_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
var arg1 v1.ResourceName
if args[1] != nil {
arg1 = args[1].(v1.ResourceName)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) Return(resourceConfig *cm.ResourceConfig, err error) *MockPodContainerManager_GetPodCgroupConfig_Call {
_c.Call.Return(resourceConfig, err)
return _c
}
func (_c *MockPodContainerManager_GetPodCgroupConfig_Call) RunAndReturn(run func(pod *v1.Pod, resource v1.ResourceName) (*cm.ResourceConfig, error)) *MockPodContainerManager_GetPodCgroupConfig_Call {
_c.Call.Return(run)
return _c
}
// GetPodCgroupMemoryUsage provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) GetPodCgroupMemoryUsage(pod *v1.Pod) (uint64, error) {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for GetPodCgroupMemoryUsage")
}
var r0 uint64
var r1 error
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) (uint64, error)); ok {
return returnFunc(pod)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) uint64); ok {
r0 = returnFunc(pod)
} else {
r0 = ret.Get(0).(uint64)
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod) error); ok {
r1 = returnFunc(pod)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockPodContainerManager_GetPodCgroupMemoryUsage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodCgroupMemoryUsage'
type MockPodContainerManager_GetPodCgroupMemoryUsage_Call struct {
*mock.Call
}
// GetPodCgroupMemoryUsage is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockPodContainerManager_Expecter) GetPodCgroupMemoryUsage(pod interface{}) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
return &MockPodContainerManager_GetPodCgroupMemoryUsage_Call{Call: _e.mock.On("GetPodCgroupMemoryUsage", pod)}
}
func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) Run(run func(pod *v1.Pod)) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) Return(v uint64, err error) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
_c.Call.Return(v, err)
return _c
}
func (_c *MockPodContainerManager_GetPodCgroupMemoryUsage_Call) RunAndReturn(run func(pod *v1.Pod) (uint64, error)) *MockPodContainerManager_GetPodCgroupMemoryUsage_Call {
_c.Call.Return(run)
return _c
}
// GetPodContainerName provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) GetPodContainerName(pod *v1.Pod) (cm.CgroupName, string) {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for GetPodContainerName")
}
var r0 cm.CgroupName
var r1 string
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) (cm.CgroupName, string)); ok {
return returnFunc(pod)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) cm.CgroupName); ok {
r0 = returnFunc(pod)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(cm.CgroupName)
}
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod) string); ok {
r1 = returnFunc(pod)
} else {
r1 = ret.Get(1).(string)
}
return r0, r1
}
// MockPodContainerManager_GetPodContainerName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodContainerName'
type MockPodContainerManager_GetPodContainerName_Call struct {
*mock.Call
}
// GetPodContainerName is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockPodContainerManager_Expecter) GetPodContainerName(pod interface{}) *MockPodContainerManager_GetPodContainerName_Call {
return &MockPodContainerManager_GetPodContainerName_Call{Call: _e.mock.On("GetPodContainerName", pod)}
}
func (_c *MockPodContainerManager_GetPodContainerName_Call) Run(run func(pod *v1.Pod)) *MockPodContainerManager_GetPodContainerName_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_GetPodContainerName_Call) Return(cgroupName cm.CgroupName, s string) *MockPodContainerManager_GetPodContainerName_Call {
_c.Call.Return(cgroupName, s)
return _c
}
func (_c *MockPodContainerManager_GetPodContainerName_Call) RunAndReturn(run func(pod *v1.Pod) (cm.CgroupName, string)) *MockPodContainerManager_GetPodContainerName_Call {
_c.Call.Return(run)
return _c
}
// IsPodCgroup provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) IsPodCgroup(cgroupfs string) (bool, types.UID) {
ret := _mock.Called(cgroupfs)
if len(ret) == 0 {
panic("no return value specified for IsPodCgroup")
}
var r0 bool
var r1 types.UID
if returnFunc, ok := ret.Get(0).(func(string) (bool, types.UID)); ok {
return returnFunc(cgroupfs)
}
if returnFunc, ok := ret.Get(0).(func(string) bool); ok {
r0 = returnFunc(cgroupfs)
} else {
r0 = ret.Get(0).(bool)
}
if returnFunc, ok := ret.Get(1).(func(string) types.UID); ok {
r1 = returnFunc(cgroupfs)
} else {
r1 = ret.Get(1).(types.UID)
}
return r0, r1
}
// MockPodContainerManager_IsPodCgroup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsPodCgroup'
type MockPodContainerManager_IsPodCgroup_Call struct {
*mock.Call
}
// IsPodCgroup is a helper method to define mock.On call
// - cgroupfs string
func (_e *MockPodContainerManager_Expecter) IsPodCgroup(cgroupfs interface{}) *MockPodContainerManager_IsPodCgroup_Call {
return &MockPodContainerManager_IsPodCgroup_Call{Call: _e.mock.On("IsPodCgroup", cgroupfs)}
}
func (_c *MockPodContainerManager_IsPodCgroup_Call) Run(run func(cgroupfs string)) *MockPodContainerManager_IsPodCgroup_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_IsPodCgroup_Call) Return(b bool, uID types.UID) *MockPodContainerManager_IsPodCgroup_Call {
_c.Call.Return(b, uID)
return _c
}
func (_c *MockPodContainerManager_IsPodCgroup_Call) RunAndReturn(run func(cgroupfs string) (bool, types.UID)) *MockPodContainerManager_IsPodCgroup_Call {
_c.Call.Return(run)
return _c
}
// ReduceCPULimits provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) ReduceCPULimits(name cm.CgroupName) error {
ret := _mock.Called(name)
if len(ret) == 0 {
panic("no return value specified for ReduceCPULimits")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(cm.CgroupName) error); ok {
r0 = returnFunc(name)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockPodContainerManager_ReduceCPULimits_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReduceCPULimits'
type MockPodContainerManager_ReduceCPULimits_Call struct {
*mock.Call
}
// ReduceCPULimits is a helper method to define mock.On call
// - name cm.CgroupName
func (_e *MockPodContainerManager_Expecter) ReduceCPULimits(name interface{}) *MockPodContainerManager_ReduceCPULimits_Call {
return &MockPodContainerManager_ReduceCPULimits_Call{Call: _e.mock.On("ReduceCPULimits", name)}
}
func (_c *MockPodContainerManager_ReduceCPULimits_Call) Run(run func(name cm.CgroupName)) *MockPodContainerManager_ReduceCPULimits_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 cm.CgroupName
if args[0] != nil {
arg0 = args[0].(cm.CgroupName)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodContainerManager_ReduceCPULimits_Call) Return(err error) *MockPodContainerManager_ReduceCPULimits_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockPodContainerManager_ReduceCPULimits_Call) RunAndReturn(run func(name cm.CgroupName) error) *MockPodContainerManager_ReduceCPULimits_Call {
_c.Call.Return(run)
return _c
}
// SetPodCgroupConfig provides a mock function for the type MockPodContainerManager
func (_mock *MockPodContainerManager) SetPodCgroupConfig(pod *v1.Pod, resourceConfig *cm.ResourceConfig) error {
ret := _mock.Called(pod, resourceConfig)
if len(ret) == 0 {
panic("no return value specified for SetPodCgroupConfig")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(*v1.Pod, *cm.ResourceConfig) error); ok {
r0 = returnFunc(pod, resourceConfig)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockPodContainerManager_SetPodCgroupConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPodCgroupConfig'
type MockPodContainerManager_SetPodCgroupConfig_Call struct {
*mock.Call
}
// SetPodCgroupConfig is a helper method to define mock.On call
// - pod *v1.Pod
// - resourceConfig *cm.ResourceConfig
func (_e *MockPodContainerManager_Expecter) SetPodCgroupConfig(pod interface{}, resourceConfig interface{}) *MockPodContainerManager_SetPodCgroupConfig_Call {
return &MockPodContainerManager_SetPodCgroupConfig_Call{Call: _e.mock.On("SetPodCgroupConfig", pod, resourceConfig)}
}
func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) Run(run func(pod *v1.Pod, resourceConfig *cm.ResourceConfig)) *MockPodContainerManager_SetPodCgroupConfig_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
var arg1 *cm.ResourceConfig
if args[1] != nil {
arg1 = args[1].(*cm.ResourceConfig)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) Return(err error) *MockPodContainerManager_SetPodCgroupConfig_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockPodContainerManager_SetPodCgroupConfig_Call) RunAndReturn(run func(pod *v1.Pod, resourceConfig *cm.ResourceConfig) error) *MockPodContainerManager_SetPodCgroupConfig_Call {
_c.Call.Return(run)
return _c
}
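// exampleMockPodContainerManagerUsage is an illustrative sketch added by the
// editor, not mockery output: it shows the constructor plus expecter flow.
// The function name is hypothetical and nothing in this package calls it.
func exampleMockPodContainerManagerUsage(t interface {
	mock.TestingT
	Cleanup(func())
}) {
	m := NewMockPodContainerManager(t)
	// Expect one EnsureExists call for any *v1.Pod and stub it to succeed;
	// the Cleanup registered by the constructor asserts the expectation.
	m.EXPECT().EnsureExists(mock.Anything).Return(nil).Once()
	_ = m.EnsureExists(&v1.Pod{})
}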
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bitmask
import (
"fmt"
"math/bits"
"strconv"
)
// BitMask interface allows hint providers to create BitMasks for TopologyHints
type BitMask interface {
Add(bits ...int) error
Remove(bits ...int) error
And(masks ...BitMask)
Or(masks ...BitMask)
Clear()
Fill()
IsEqual(mask BitMask) bool
IsEmpty() bool
IsSet(bit int) bool
AnySet(bits []int) bool
IsNarrowerThan(mask BitMask) bool
IsLessThan(mask BitMask) bool
IsGreaterThan(mask BitMask) bool
String() string
Count() int
GetBits() []int
}
type bitMask uint64
// NewEmptyBitMask creates a new, empty BitMask
func NewEmptyBitMask() BitMask {
s := bitMask(0)
return &s
}
// NewBitMask creates a new BitMask
func NewBitMask(bits ...int) (BitMask, error) {
s := bitMask(0)
err := (&s).Add(bits...)
if err != nil {
return nil, err
}
return &s, nil
}
// Add adds the bits with topology affinity to the BitMask
func (s *bitMask) Add(bits ...int) error {
mask := *s
for _, i := range bits {
if i < 0 || i >= 64 {
return fmt.Errorf("bit number must be in range 0-63")
}
mask |= 1 << uint64(i)
}
*s = mask
return nil
}
// Remove removes specified bits from BitMask
func (s *bitMask) Remove(bits ...int) error {
mask := *s
for _, i := range bits {
if i < 0 || i >= 64 {
return fmt.Errorf("bit number must be in range 0-63")
}
mask &^= 1 << uint64(i)
}
*s = mask
return nil
}
// And performs a bitwise-and of this mask with each of the given masks
func (s *bitMask) And(masks ...BitMask) {
for _, m := range masks {
*s &= *m.(*bitMask)
}
}
// Or performs a bitwise-or of this mask with each of the given masks
func (s *bitMask) Or(masks ...BitMask) {
for _, m := range masks {
*s |= *m.(*bitMask)
}
}
// Clear resets all bits in mask to zero
func (s *bitMask) Clear() {
*s = 0
}
// Fill sets all bits in mask to one
func (s *bitMask) Fill() {
*s = bitMask(^uint64(0))
}
// IsEmpty checks mask to see if all bits are zero
func (s *bitMask) IsEmpty() bool {
return *s == 0
}
// IsSet checks whether the given bit in the mask is set to one
func (s *bitMask) IsSet(bit int) bool {
if bit < 0 || bit >= 64 {
return false
}
return (*s & (1 << uint64(bit))) > 0
}
// AnySet checks whether any of the provided bits in the mask are set to one
func (s *bitMask) AnySet(bits []int) bool {
for _, b := range bits {
if s.IsSet(b) {
return true
}
}
return false
}
// IsEqual checks if masks are equal
func (s *bitMask) IsEqual(mask BitMask) bool {
return *s == *mask.(*bitMask)
}
// IsNarrowerThan checks if one mask is narrower than another.
//
// A mask is said to be "narrower" than another if it has fewer bits set. If the
// same number of bits are set in both masks, then the mask with more
// lower-numbered bits set wins out.
func (s *bitMask) IsNarrowerThan(mask BitMask) bool {
if s.Count() == mask.Count() {
return s.IsLessThan(mask)
}
return s.Count() < mask.Count()
}
// IsLessThan returns true if this mask's numeric value is less than the given mask's, i.e. it has more lower-numbered bits set.
func (s *bitMask) IsLessThan(mask BitMask) bool {
return *s < *mask.(*bitMask)
}
// IsGreaterThan returns true if this mask's numeric value is greater than the given mask's, i.e. it has more higher-numbered bits set.
func (s *bitMask) IsGreaterThan(mask BitMask) bool {
return *s > *mask.(*bitMask)
}
// String converts mask to string
func (s *bitMask) String() string {
grouping := 2
for shift := 64 - grouping; shift > 0; shift -= grouping {
if *s > (1 << uint(shift)) {
return fmt.Sprintf("%0"+strconv.Itoa(shift+grouping)+"b", *s)
}
}
return fmt.Sprintf("%0"+strconv.Itoa(grouping)+"b", *s)
}
// Count counts number of bits in mask set to one
func (s *bitMask) Count() int {
return bits.OnesCount64(uint64(*s))
}
// GetBits returns the numbers of all bits set to one in the mask
func (s *bitMask) GetBits() []int {
var bits []int
for i := uint64(0); i < 64; i++ {
if (*s & (1 << i)) > 0 {
bits = append(bits, int(i))
}
}
return bits
}
// And is a package level implementation of 'and' between first and masks
func And(first BitMask, masks ...BitMask) BitMask {
s := *first.(*bitMask)
s.And(masks...)
return &s
}
// Or is a package level implementation of 'or' between first and masks
func Or(first BitMask, masks ...BitMask) BitMask {
s := *first.(*bitMask)
s.Or(masks...)
return &s
}
// IterateBitMasks iterates all possible masks from a list of bits,
// issuing a callback on each mask.
func IterateBitMasks(bits []int, callback func(BitMask)) {
var iterate func(bits, accum []int, size int)
iterate = func(bits, accum []int, size int) {
if len(accum) == size {
mask, _ := NewBitMask(accum...)
callback(mask)
return
}
for i := range bits {
iterate(bits[i+1:], append(accum, bits[i]), size)
}
}
for i := 1; i <= len(bits); i++ {
iterate(bits, []int{}, i)
}
}
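// exampleBitMaskUsage is an illustrative sketch added by the editor, not part
// of the original file: it exercises the BitMask API defined above. The
// function name is hypothetical and nothing in this package calls it.
func exampleBitMaskUsage() {
	m1, _ := NewBitMask(0, 1) // bits 0 and 1 set
	m2, _ := NewBitMask(1, 2) // bits 1 and 2 set
	anded := And(m1, m2)      // only bit 1 survives the bitwise-and
	ored := Or(m1, m2)        // bits 0, 1 and 2 set
	fmt.Printf("and=%v or=%v count=%d\n", anded.GetBits(), ored.GetBits(), ored.Count())
	// Enumerate every non-empty mask drawable from bits {0, 1}:
	// {0}, {1}, then {0, 1}.
	IterateBitMasks([]int{0, 1}, func(m BitMask) {
		fmt.Println(m.GetBits())
	})
}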
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/admission"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
type fakeManager struct {
hint *TopologyHint
policy Policy
}
// NewFakeManager returns an instance of FakeManager
func NewFakeManager() Manager {
klog.InfoS("NewFakeManager")
return &fakeManager{}
}
// NewFakeManagerWithHint returns an instance of fake topology manager with specified topology hints
func NewFakeManagerWithHint(hint *TopologyHint) Manager {
klog.InfoS("NewFakeManagerWithHint")
return &fakeManager{
hint: hint,
policy: NewNonePolicy(),
}
}
// NewFakeManagerWithPolicy returns an instance of fake topology manager with specified policy
func NewFakeManagerWithPolicy(policy Policy) Manager {
klog.InfoS("NewFakeManagerWithPolicy", "policy", policy.Name())
return &fakeManager{
policy: policy,
}
}
func (m *fakeManager) GetAffinity(podUID string, containerName string) TopologyHint {
klog.InfoS("GetAffinity", "podUID", podUID, "containerName", containerName)
if m.hint == nil {
return TopologyHint{}
}
return *m.hint
}
func (m *fakeManager) GetPolicy() Policy {
return m.policy
}
func (m *fakeManager) AddHintProvider(h HintProvider) {
klog.InfoS("AddHintProvider", "hintProvider", h)
}
func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
}
func (m *fakeManager) RemoveContainer(containerID string) error {
klog.InfoS("RemoveContainer", "containerID", containerID)
return nil
}
func (m *fakeManager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
klog.InfoS("Topology Admit Handler")
return admission.GetPodAdmitResult(nil)
}
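// exampleFakeManagerForTests is an illustrative sketch added by the editor:
// tests can pin the affinity returned by GetAffinity by constructing the fake
// with a fixed hint. The function name is hypothetical.
func exampleFakeManagerForTests() Manager {
	return NewFakeManagerWithHint(&TopologyHint{Preferred: true})
}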
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"fmt"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
type NUMADistances map[int][]uint64
type NUMAInfo struct {
Nodes []int
NUMADistances NUMADistances
}
func NewNUMAInfo(topology []cadvisorapi.Node, opts PolicyOptions) (*NUMAInfo, error) {
var numaNodes []int
distances := map[int][]uint64{}
for _, node := range topology {
numaNodes = append(numaNodes, node.Id)
var nodeDistance []uint64
if opts.PreferClosestNUMA {
nodeDistance = node.Distances
if nodeDistance == nil {
return nil, fmt.Errorf("error getting NUMA distances from cadvisor")
}
}
distances[node.Id] = nodeDistance
}
numaInfo := &NUMAInfo{
Nodes: numaNodes,
NUMADistances: distances,
}
return numaInfo, nil
}
func (n *NUMAInfo) Narrowest(m1 bitmask.BitMask, m2 bitmask.BitMask) bitmask.BitMask {
if m1.IsNarrowerThan(m2) {
return m1
}
return m2
}
func (n *NUMAInfo) Closest(m1 bitmask.BitMask, m2 bitmask.BitMask) bitmask.BitMask {
// If the two bitmasks don't have the same number of bits set, choose the narrower one.
if m1.Count() != m2.Count() {
return n.Narrowest(m1, m2)
}
m1Distance := n.NUMADistances.CalculateAverageFor(m1)
m2Distance := n.NUMADistances.CalculateAverageFor(m2)
// If the average distance is the same, take the bitmask with more lower-numbered bits set.
if m1Distance == m2Distance {
if m1.IsLessThan(m2) {
return m1
}
return m2
}
// Otherwise, return the bitmask with the shortest average distance between NUMA nodes.
if m1Distance < m2Distance {
return m1
}
return m2
}
func (n NUMAInfo) DefaultAffinityMask() bitmask.BitMask {
defaultAffinity, _ := bitmask.NewBitMask(n.Nodes...)
return defaultAffinity
}
func (d NUMADistances) CalculateAverageFor(bm bitmask.BitMask) float64 {
// This should never happen, but just in case make sure we do not divide by zero.
if bm.Count() == 0 {
return 0
}
var count float64 = 0
var sum float64 = 0
for _, node1 := range bm.GetBits() {
for _, node2 := range bm.GetBits() {
sum += float64(d[node1][node2])
count++
}
}
return sum / count
}
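// exampleClosest is an illustrative sketch added by the editor: with a toy
// four-node distance matrix, Closest prefers the equally sized mask whose
// NUMA nodes are closer together on average. The function name and the
// distance values are hypothetical.
func exampleClosest() {
	info := &NUMAInfo{
		Nodes: []int{0, 1, 2, 3},
		NUMADistances: NUMADistances{
			0: {10, 11, 20, 20},
			1: {11, 10, 20, 20},
			2: {20, 20, 10, 11},
			3: {20, 20, 11, 10},
		},
	}
	m1, _ := bitmask.NewBitMask(0, 3) // average distance (10+20+20+10)/4 = 15
	m2, _ := bitmask.NewBitMask(2, 3) // average distance (10+11+11+10)/4 = 10.5
	closest := info.Closest(m1, m2)   // same bit count, so the shorter average wins
	fmt.Println(closest.GetBits())    // [2 3]
}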
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
// Policy interface for Topology Manager Pod Admit Result
type Policy interface {
// Returns Policy Name
Name() string
// Returns a merged TopologyHint based on input from hint providers
// and a Pod Admit Handler Response based on hints and policy type
Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool)
}
// IsAlignmentGuaranteed returns true if the given policy guarantees that the compute
// resources will either be allocated within a NUMA boundary or the allocation will fail outright.
func IsAlignmentGuaranteed(p Policy) bool {
// We are abusing the name, but at the moment this matches the policy name almost 1:1,
// so we are not adding new fields for now.
return p.Name() == PolicySingleNumaNode
}
// mergePermutation merges a permutation of TopologyHints into a single hint by performing
// a bitwise-and of their affinity masks. The merged hint is preferred only if all hints
// in the permutation are preferred.
func mergePermutation(defaultAffinity bitmask.BitMask, permutation []TopologyHint) TopologyHint {
// Get the NUMANodeAffinity from each hint in the permutation and see if any
// of them encode unpreferred allocations.
preferred := true
var numaAffinities []bitmask.BitMask
for _, hint := range permutation {
// Only consider hints that have an actual NUMANodeAffinity set.
if hint.NUMANodeAffinity != nil {
numaAffinities = append(numaAffinities, hint.NUMANodeAffinity)
// Only mark preferred if all affinities are equal.
if !hint.NUMANodeAffinity.IsEqual(numaAffinities[0]) {
preferred = false
}
}
// Only mark preferred if all affinities are preferred.
if !hint.Preferred {
preferred = false
}
}
// Merge the affinities using a bitwise-and operation.
mergedAffinity := bitmask.And(defaultAffinity, numaAffinities...)
// Build a mergedHint from the merged affinity mask, setting preferred as
// appropriate based on the logic above.
return TopologyHint{mergedAffinity, preferred}
}
func filterProvidersHints(providersHints []map[string][]TopologyHint) [][]TopologyHint {
// Loop through all hint providers and save an accumulated list of the
// hints returned by each hint provider. If no hints are provided, assume
// that provider has no preference for topology-aware allocation.
var allProviderHints [][]TopologyHint
for _, hints := range providersHints {
// If hints is nil, insert a single, preferred any-numa hint into allProviderHints.
if len(hints) == 0 {
klog.InfoS("Hint Provider has no preference for NUMA affinity with any resource")
allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
continue
}
// Otherwise, accumulate the hints for each resource type into allProviderHints.
for resource := range hints {
if hints[resource] == nil {
klog.InfoS("Hint Provider has no preference for NUMA affinity with resource", "resource", resource)
allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
continue
}
if len(hints[resource]) == 0 {
klog.InfoS("Hint Provider has no possible NUMA affinities for resource", "resource", resource)
allProviderHints = append(allProviderHints, []TopologyHint{{nil, false}})
continue
}
allProviderHints = append(allProviderHints, hints[resource])
}
}
return allProviderHints
}
func narrowestHint(hints []TopologyHint) *TopologyHint {
if len(hints) == 0 {
return nil
}
var narrowestHint *TopologyHint
for i := range hints {
if hints[i].NUMANodeAffinity == nil {
continue
}
if narrowestHint == nil {
narrowestHint = &hints[i]
}
if hints[i].NUMANodeAffinity.IsNarrowerThan(narrowestHint.NUMANodeAffinity) {
narrowestHint = &hints[i]
}
}
return narrowestHint
}
func maxOfMinAffinityCounts(filteredHints [][]TopologyHint) int {
maxOfMinCount := 0
for _, resourceHints := range filteredHints {
narrowestHint := narrowestHint(resourceHints)
if narrowestHint == nil {
continue
}
if narrowestHint.NUMANodeAffinity.Count() > maxOfMinCount {
maxOfMinCount = narrowestHint.NUMANodeAffinity.Count()
}
}
return maxOfMinCount
}
type HintMerger struct {
NUMAInfo *NUMAInfo
Hints [][]TopologyHint
// Set bestNonPreferredAffinityCount to help decide which affinity mask is
// preferred amongst all non-preferred hints. We calculate this value as
// the maximum of the minimum affinity counts supplied for any given hint
// provider. In other words, prefer a hint that has an affinity mask that
// includes all of the NUMA nodes from the provider that requires the most
// NUMA nodes to satisfy its allocation.
BestNonPreferredAffinityCount int
CompareNUMAAffinityMasks func(candidate *TopologyHint, current *TopologyHint) (best *TopologyHint)
}
func NewHintMerger(numaInfo *NUMAInfo, hints [][]TopologyHint, policyName string, opts PolicyOptions) HintMerger {
compareNumaAffinityMasks := func(current, candidate *TopologyHint) *TopologyHint {
// If current and candidate bitmasks are the same, prefer current hint.
if candidate.NUMANodeAffinity.IsEqual(current.NUMANodeAffinity) {
return current
}
// Otherwise compare the hints, based on the policy options provided
var best bitmask.BitMask
if (policyName != PolicySingleNumaNode) && opts.PreferClosestNUMA {
best = numaInfo.Closest(current.NUMANodeAffinity, candidate.NUMANodeAffinity)
} else {
best = numaInfo.Narrowest(current.NUMANodeAffinity, candidate.NUMANodeAffinity)
}
if best.IsEqual(current.NUMANodeAffinity) {
return current
}
return candidate
}
merger := HintMerger{
NUMAInfo: numaInfo,
Hints: hints,
BestNonPreferredAffinityCount: maxOfMinAffinityCounts(hints),
CompareNUMAAffinityMasks: compareNumaAffinityMasks,
}
return merger
}
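// exampleHintMerge is an illustrative sketch added by the editor: it merges
// the hints of two providers under the best-effort comparison rules above.
// The function name is hypothetical and nothing in this package calls it.
func exampleHintMerge(numaInfo *NUMAInfo) TopologyHint {
	maskA, _ := bitmask.NewBitMask(0)
	maskB, _ := bitmask.NewBitMask(0, 1)
	hints := [][]TopologyHint{
		{{NUMANodeAffinity: maskA, Preferred: true}},
		{{NUMANodeAffinity: maskB, Preferred: false}},
	}
	merger := NewHintMerger(numaInfo, hints, PolicyBestEffort, PolicyOptions{})
	// Assuming numaInfo contains node 0, the single permutation merges to
	// affinity {0} and is not preferred (one input hint is non-preferred).
	return merger.Merge()
}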
func (m HintMerger) compare(current *TopologyHint, candidate *TopologyHint) *TopologyHint {
// Only consider candidates whose NUMANodeAffinity has at least one bit
// set to replace the current bestHint.
if candidate.NUMANodeAffinity.Count() == 0 {
return current
}
// If no current bestHint is set, return the candidate as the bestHint.
if current == nil {
return candidate
}
// If the current bestHint is non-preferred and the candidate hint is
// preferred, always choose the preferred hint over the non-preferred one.
if !current.Preferred && candidate.Preferred {
return candidate
}
// If the current bestHint is preferred and the candidate hint is
// non-preferred, never update the bestHint, regardless of how
// the candidate hint's affinity mask compares to the current
// hint's affinity mask.
if current.Preferred && !candidate.Preferred {
return current
}
// If the current bestHint and the candidate hint are both preferred,
// then only update if the candidate has a fitter NUMANodeAffinity.
if current.Preferred && candidate.Preferred {
return m.CompareNUMAAffinityMasks(current, candidate)
}
// The only case left is if the current best bestHint and the candidate
// hint are both non-preferred. In this case, try and find a hint whose
// affinity count is as close to (but not higher than) the
// bestNonPreferredAffinityCount as possible. To do this we need to
// consider the following cases and react accordingly:
//
// 1. current.NUMANodeAffinity.Count() > bestNonPreferredAffinityCount
// 2. current.NUMANodeAffinity.Count() == bestNonPreferredAffinityCount
// 3. current.NUMANodeAffinity.Count() < bestNonPreferredAffinityCount
//
// For case (1), the current bestHint is larger than the
// bestNonPreferredAffinityCount, so updating to a fitter mergedHint
// is preferred over staying where we are.
//
// For case (2), the current bestHint is equal to the
// bestNonPreferredAffinityCount, so we would like to stick with what
// we have *unless* the candidate hint is also equal to
// bestNonPreferredAffinityCount and it is fitter.
//
// For case (3), the current bestHint is less than
// bestNonPreferredAffinityCount, so we would like to creep back up to
// bestNonPreferredAffinityCount as close as we can. There are three
// cases to consider here:
//
// 3a. candidate.NUMANodeAffinity.Count() > bestNonPreferredAffinityCount
// 3b. candidate.NUMANodeAffinity.Count() == bestNonPreferredAffinityCount
// 3c. candidate.NUMANodeAffinity.Count() < bestNonPreferredAffinityCount
//
// For case (3a), we just want to stick with the current bestHint
// because choosing a new hint that is greater than
// bestNonPreferredAffinityCount would be counter-productive.
//
// For case (3b), we want to immediately update bestHint to the
// candidate hint, making it now equal to bestNonPreferredAffinityCount.
//
// For case (3c), we know that *both* the current bestHint and the
// candidate hint are less than bestNonPreferredAffinityCount, so we
// want to choose one that brings us back up as close to
// bestNonPreferredAffinityCount as possible. There are three cases to
// consider here:
//
// 3ca. candidate.NUMANodeAffinity.Count() > current.NUMANodeAffinity.Count()
// 3cb. candidate.NUMANodeAffinity.Count() < current.NUMANodeAffinity.Count()
// 3cc. candidate.NUMANodeAffinity.Count() == current.NUMANodeAffinity.Count()
//
// For case (3ca), we want to immediately update bestHint to the
// candidate hint because that will bring us closer to the (higher)
// value of bestNonPreferredAffinityCount.
//
// For case (3cb), we want to stick with the current bestHint because
// choosing the candidate hint would strictly move us further away from
// the bestNonPreferredAffinityCount.
//
// Finally, for case (3cc), we know that the current bestHint and the
// candidate hint are equal, so we simply choose the fitter of the 2.
// Case 1
if current.NUMANodeAffinity.Count() > m.BestNonPreferredAffinityCount {
return m.CompareNUMAAffinityMasks(current, candidate)
}
// Case 2
if current.NUMANodeAffinity.Count() == m.BestNonPreferredAffinityCount {
if candidate.NUMANodeAffinity.Count() != m.BestNonPreferredAffinityCount {
return current
}
return m.CompareNUMAAffinityMasks(current, candidate)
}
// Case 3a
if candidate.NUMANodeAffinity.Count() > m.BestNonPreferredAffinityCount {
return current
}
// Case 3b
if candidate.NUMANodeAffinity.Count() == m.BestNonPreferredAffinityCount {
return candidate
}
// Case 3ca
if candidate.NUMANodeAffinity.Count() > current.NUMANodeAffinity.Count() {
return candidate
}
// Case 3cb
if candidate.NUMANodeAffinity.Count() < current.NUMANodeAffinity.Count() {
return current
}
// Case 3cc
return m.CompareNUMAAffinityMasks(current, candidate)
}
func (m HintMerger) Merge() TopologyHint {
defaultAffinity := m.NUMAInfo.DefaultAffinityMask()
var bestHint *TopologyHint
iterateAllProviderTopologyHints(m.Hints, func(permutation []TopologyHint) {
// Get the NUMANodeAffinity from each hint in the permutation and see if any
// of them encode unpreferred allocations.
mergedHint := mergePermutation(defaultAffinity, permutation)
// Compare the current bestHint with the candidate mergedHint and
// update bestHint if appropriate.
bestHint = m.compare(bestHint, &mergedHint)
})
if bestHint == nil {
bestHint = &TopologyHint{defaultAffinity, false}
}
return *bestHint
}
// Iterate over all permutations of hints in 'allProviderHints [][]TopologyHint'.
//
// This procedure is implemented as a recursive function over the set of hints
// in 'allProviderHints[i]'. It applies the function 'callback' to each
// permutation as it is found. It is the equivalent of:
//
// for i := 0; i < len(providerHints[0]); i++
//
// for j := 0; j < len(providerHints[1]); j++
// for k := 0; k < len(providerHints[2]); k++
// ...
// for z := 0; z < len(providerHints[-1]); z++
// permutation := []TopologyHint{
// providerHints[0][i],
// providerHints[1][j],
// providerHints[2][k],
// ...
// providerHints[-1][z]
// }
// callback(permutation)
func iterateAllProviderTopologyHints(allProviderHints [][]TopologyHint, callback func([]TopologyHint)) {
// Internal helper function to accumulate the permutation before calling the callback.
var iterate func(i int, accum []TopologyHint)
iterate = func(i int, accum []TopologyHint) {
// Base case: we have looped through all providers and have a full permutation.
if i == len(allProviderHints) {
callback(accum)
return
}
// Loop through all hints for provider 'i', and recurse to build the
// permutation of this hint with all hints from providers 'i+1' onward.
for j := range allProviderHints[i] {
iterate(i+1, append(accum, allProviderHints[i][j]))
}
}
iterate(0, []TopologyHint{})
}
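// exampleIteratePermutations is an illustrative sketch added by the editor:
// with providers holding two and one hints respectively, the callback fires
// twice, once per permutation. The function name is hypothetical.
func exampleIteratePermutations() {
	a := []TopologyHint{{nil, true}, {nil, false}}
	b := []TopologyHint{{nil, true}}
	iterateAllProviderTopologyHints([][]TopologyHint{a, b}, func(p []TopologyHint) {
		klog.InfoS("visited permutation", "length", len(p))
	})
}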
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
type bestEffortPolicy struct {
// numaInfo represents list of NUMA Nodes available on the underlying machine and distances between them
numaInfo *NUMAInfo
opts PolicyOptions
}
var _ Policy = &bestEffortPolicy{}
// PolicyBestEffort policy name.
const PolicyBestEffort string = "best-effort"
// NewBestEffortPolicy returns best-effort policy.
func NewBestEffortPolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
return &bestEffortPolicy{numaInfo: numaInfo, opts: opts}
}
func (p *bestEffortPolicy) Name() string {
return PolicyBestEffort
}
func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) bool {
return true
}
func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
filteredHints := filterProvidersHints(providersHints)
merger := NewHintMerger(p.numaInfo, filteredHints, p.Name(), p.opts)
bestHint := merger.Merge()
admit := p.canAdmitPodResult(&bestHint)
return bestHint, admit
}
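// exampleBestEffortAdmit is an illustrative sketch added by the editor: the
// best-effort policy always admits, even when the merged hint is not
// preferred. The function name is hypothetical.
func exampleBestEffortAdmit(numaInfo *NUMAInfo) bool {
	p := NewBestEffortPolicy(numaInfo, PolicyOptions{})
	_, admit := p.Merge(nil) // no provider hints at all
	return admit             // always true for best-effort
}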
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
type nonePolicy struct{}
var _ Policy = &nonePolicy{}
// PolicyNone policy name.
const PolicyNone string = "none"
// NewNonePolicy returns none policy.
func NewNonePolicy() Policy {
return &nonePolicy{}
}
func (p *nonePolicy) Name() string {
return PolicyNone
}
func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) bool {
return true
}
func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
return TopologyHint{}, p.canAdmitPodResult(nil)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"fmt"
"strconv"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
kubefeatures "k8s.io/kubernetes/pkg/features"
)
const (
PreferClosestNUMANodes string = "prefer-closest-numa-nodes"
MaxAllowableNUMANodes string = "max-allowable-numa-nodes"
)
var (
alphaOptions = sets.New[string]()
betaOptions = sets.New[string](
MaxAllowableNUMANodes,
)
stableOptions = sets.New[string](
PreferClosestNUMANodes,
)
)
func CheckPolicyOptionAvailable(option string) error {
if !alphaOptions.Has(option) && !betaOptions.Has(option) && !stableOptions.Has(option) {
return fmt.Errorf("unknown Topology Manager Policy option: %q", option)
}
if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyAlphaOptions) {
return fmt.Errorf("topology manager policy alpha-level options not enabled, but option %q provided", option)
}
if betaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyBetaOptions) {
return fmt.Errorf("topology manager policy beta-level options not enabled, but option %q provided", option)
}
return nil
}
type PolicyOptions struct {
PreferClosestNUMA bool
MaxAllowableNUMANodes int
}
func NewPolicyOptions(policyOptions map[string]string) (PolicyOptions, error) {
opts := PolicyOptions{
// Set MaxAllowableNUMANodes to the default. This will be overwritten
// if the user has specified a policy option for MaxAllowableNUMANodes.
MaxAllowableNUMANodes: defaultMaxAllowableNUMANodes,
}
for name, value := range policyOptions {
if err := CheckPolicyOptionAvailable(name); err != nil {
return opts, err
}
switch name {
case PreferClosestNUMANodes:
optValue, err := strconv.ParseBool(value)
if err != nil {
return opts, fmt.Errorf("bad value for option %q: %w", name, err)
}
opts.PreferClosestNUMA = optValue
case MaxAllowableNUMANodes:
optValue, err := strconv.Atoi(value)
if err != nil {
return opts, fmt.Errorf("unable to convert policy option to integer %q: %w", name, err)
}
if optValue < defaultMaxAllowableNUMANodes {
return opts, fmt.Errorf("the minimum value of %q should not be less than %v", name, defaultMaxAllowableNUMANodes)
}
if optValue > defaultMaxAllowableNUMANodes {
klog.InfoS("WARNING: the value of max-allowable-numa-nodes is more than the default recommended value", "max-allowable-numa-nodes", optValue, "defaultMaxAllowableNUMANodes", defaultMaxAllowableNUMANodes)
}
opts.MaxAllowableNUMANodes = optValue
default:
// This should never be reached; unknown options are detected above,
// but we keep it as a further safety net.
return opts, fmt.Errorf("unsupported topologymanager option: %q (%s)", name, value)
}
}
return opts, nil
}
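// examplePolicyOptions is an illustrative sketch added by the editor: it
// parses a kubelet-style option map into PolicyOptions. The function name is
// hypothetical and nothing in this package calls it.
func examplePolicyOptions() {
	opts, err := NewPolicyOptions(map[string]string{
		PreferClosestNUMANodes: "true",
	})
	if err != nil {
		klog.ErrorS(err, "parsing topology manager policy options")
		return
	}
	klog.InfoS("parsed topology manager options",
		"preferClosestNUMA", opts.PreferClosestNUMA,
		"maxAllowableNUMANodes", opts.MaxAllowableNUMANodes)
}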
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
type restrictedPolicy struct {
bestEffortPolicy
}
var _ Policy = &restrictedPolicy{}
// PolicyRestricted policy name.
const PolicyRestricted string = "restricted"
// NewRestrictedPolicy returns restricted policy.
func NewRestrictedPolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
return &restrictedPolicy{bestEffortPolicy{numaInfo: numaInfo, opts: opts}}
}
func (p *restrictedPolicy) Name() string {
return PolicyRestricted
}
func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) bool {
return hint.Preferred
}
func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
filteredHints := filterProvidersHints(providersHints)
merger := NewHintMerger(p.numaInfo, filteredHints, p.Name(), p.opts)
bestHint := merger.Merge()
admit := p.canAdmitPodResult(&bestHint)
return bestHint, admit
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
type singleNumaNodePolicy struct {
// numaInfo represents list of NUMA Nodes available on the underlying machine and distances between them
numaInfo *NUMAInfo
opts PolicyOptions
}
var _ Policy = &singleNumaNodePolicy{}
// PolicySingleNumaNode policy name.
const PolicySingleNumaNode string = "single-numa-node"
// NewSingleNumaNodePolicy returns single-numa-node policy.
func NewSingleNumaNodePolicy(numaInfo *NUMAInfo, opts PolicyOptions) Policy {
return &singleNumaNodePolicy{numaInfo: numaInfo, opts: opts}
}
func (p *singleNumaNodePolicy) Name() string {
return PolicySingleNumaNode
}
func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) bool {
return hint.Preferred
}
// filterSingleNumaHints keeps preferred hints whose bitmasks have exactly one
// bit set, as well as preferred "don't care" hints (nil affinity).
func filterSingleNumaHints(allResourcesHints [][]TopologyHint) [][]TopologyHint {
var filteredResourcesHints [][]TopologyHint
for _, oneResourceHints := range allResourcesHints {
var filtered []TopologyHint
for _, hint := range oneResourceHints {
if hint.NUMANodeAffinity == nil && hint.Preferred {
filtered = append(filtered, hint)
}
if hint.NUMANodeAffinity != nil && hint.NUMANodeAffinity.Count() == 1 && hint.Preferred {
filtered = append(filtered, hint)
}
}
filteredResourcesHints = append(filteredResourcesHints, filtered)
}
return filteredResourcesHints
}
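// exampleFilterSingleNuma is an illustrative sketch added by the editor: a
// preferred "don't care" hint survives the filter while a non-preferred one
// is dropped. The function name is hypothetical.
func exampleFilterSingleNuma() [][]TopologyHint {
	in := [][]TopologyHint{{{nil, true}, {nil, false}}}
	return filterSingleNumaHints(in) // [[{<nil> true}]]
}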
func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
filteredHints := filterProvidersHints(providersHints)
// Filter to only include don't cares and hints with a single NUMA node.
singleNumaHints := filterSingleNumaHints(filteredHints)
merger := NewHintMerger(p.numaInfo, singleNumaHints, p.Name(), p.opts)
bestHint := merger.Merge()
if bestHint.NUMANodeAffinity.IsEqual(p.numaInfo.DefaultAffinityMask()) {
bestHint = TopologyHint{nil, bestHint.Preferred}
}
admit := p.canAdmitPodResult(&bestHint)
return bestHint, admit
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"sync"
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/admission"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
const (
// containerTopologyScope specifies the TopologyManagerScope per container.
containerTopologyScope = "container"
// podTopologyScope specifies the TopologyManagerScope per pod.
podTopologyScope = "pod"
// noneTopologyScope specifies the TopologyManagerScope when topologyPolicyName is none.
noneTopologyScope = "none"
)
type podTopologyHints map[string]map[string]TopologyHint
// Scope interface for Topology Manager
type Scope interface {
Name() string
GetPolicy() Policy
Admit(pod *v1.Pod) lifecycle.PodAdmitResult
// AddHintProvider adds a hint provider to the manager to indicate the
// hint provider wants to be consulted when making topology hints
AddHintProvider(h HintProvider)
// AddContainer adds pod to Manager for tracking
AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
// RemoveContainer removes pod from Manager tracking
RemoveContainer(containerID string) error
// Store is the interface for storing pod topology hints
Store
}
type scope struct {
mutex sync.Mutex
name string
// Mapping of each pod's containers to their TopologyHints,
// indexed by pod UID and container name.
podTopologyHints podTopologyHints
// The list of components registered with the Manager
hintProviders []HintProvider
// Topology Manager Policy
policy Policy
// Mapping of (pod UID, container name) to container ID, used when adding/removing pods from the podTopologyHints mapping
podMap containermap.ContainerMap
}
func (s *scope) Name() string {
return s.name
}
func (s *scope) getTopologyHints(podUID string, containerName string) TopologyHint {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.podTopologyHints[podUID][containerName]
}
func (s *scope) setTopologyHints(podUID string, containerName string, th TopologyHint) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.podTopologyHints[podUID] == nil {
s.podTopologyHints[podUID] = make(map[string]TopologyHint)
}
s.podTopologyHints[podUID][containerName] = th
}
func (s *scope) GetAffinity(podUID string, containerName string) TopologyHint {
return s.getTopologyHints(podUID, containerName)
}
func (s *scope) GetPolicy() Policy {
return s.policy
}
func (s *scope) AddHintProvider(h HintProvider) {
s.hintProviders = append(s.hintProviders, h)
}
// It would be better to implement this function in topologymanager instead of scope,
// but the topologymanager does not track the mapping anymore.
func (s *scope) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.podMap.Add(string(pod.UID), container.Name, containerID)
}
// It would be better to implement this function in topologymanager instead of scope,
// but the topologymanager does not track the mapping anymore.
func (s *scope) RemoveContainer(containerID string) error {
s.mutex.Lock()
defer s.mutex.Unlock()
klog.InfoS("RemoveContainer", "containerID", containerID)
// Get the podUID and containerName associated with the containerID to be removed and remove it
podUIDString, containerName, err := s.podMap.GetContainerRef(containerID)
if err != nil {
return nil
}
s.podMap.RemoveByContainerID(containerID)
// In cases where a container has been restarted, it's possible that the same podUID and
// containerName are already associated with a *different* containerID now. Only remove
// the TopologyHints associated with that podUID and containerName if this is not true
if _, err := s.podMap.GetContainerID(podUIDString, containerName); err != nil {
delete(s.podTopologyHints[podUIDString], containerName)
if len(s.podTopologyHints[podUIDString]) == 0 {
delete(s.podTopologyHints, podUIDString)
}
}
return nil
}
func (s *scope) admitPolicyNone(pod *v1.Pod) lifecycle.PodAdmitResult {
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
err := s.allocateAlignedResources(pod, &container)
if err != nil {
return admission.GetPodAdmitResult(err)
}
}
return admission.GetPodAdmitResult(nil)
}
// It would be better to implement this function in topologymanager instead of scope,
// but topologymanager does not track the providers anymore
func (s *scope) allocateAlignedResources(pod *v1.Pod, container *v1.Container) error {
for _, provider := range s.hintProviders {
err := provider.Allocate(pod, container)
if err != nil {
return err
}
}
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/admission"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
type containerScope struct {
scope
}
// Ensure containerScope implements Scope interface
var _ Scope = &containerScope{}
// NewContainerScope returns a container scope.
func NewContainerScope(policy Policy) Scope {
return &containerScope{
scope{
name: containerTopologyScope,
podTopologyHints: podTopologyHints{},
policy: policy,
podMap: containermap.NewContainerMap(),
},
}
}
func (s *containerScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
bestHint, admit := s.calculateAffinity(pod, &container)
klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
if !admit {
if IsAlignmentGuaranteed(s.policy) {
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
}
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
return admission.GetPodAdmitResult(&TopologyAffinityError{})
}
klog.InfoS("Topology Affinity", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
s.setTopologyHints(string(pod.UID), container.Name, bestHint)
err := s.allocateAlignedResources(pod, &container)
if err != nil {
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
return admission.GetPodAdmitResult(err)
}
if IsAlignmentGuaranteed(s.policy) {
klog.V(4).InfoS("Resource alignment at container scope guaranteed", "pod", klog.KObj(pod))
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
}
}
return admission.GetPodAdmitResult(nil)
}
func (s *containerScope) accumulateProvidersHints(pod *v1.Pod, container *v1.Container) []map[string][]TopologyHint {
var providersHints []map[string][]TopologyHint
for _, provider := range s.hintProviders {
// Get the TopologyHints for a Container from a provider.
hints := provider.GetTopologyHints(pod, container)
providersHints = append(providersHints, hints)
klog.InfoS("TopologyHints", "hints", hints, "pod", klog.KObj(pod), "containerName", container.Name)
}
return providersHints
}
func (s *containerScope) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) {
providersHints := s.accumulateProvidersHints(pod, container)
bestHint, admit := s.policy.Merge(providersHints)
klog.InfoS("ContainerTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
return bestHint, admit
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
type noneScope struct {
scope
}
// Ensure noneScope implements Scope interface
var _ Scope = &noneScope{}
// NewNoneScope returns a none scope.
func NewNoneScope() Scope {
return &noneScope{
scope{
name: noneTopologyScope,
podTopologyHints: podTopologyHints{},
policy: NewNonePolicy(),
podMap: containermap.NewContainerMap(),
},
}
}
func (s *noneScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
return s.admitPolicyNone(pod)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/admission"
"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
type podScope struct {
scope
}
// Ensure podScope implements Scope interface
var _ Scope = &podScope{}
// NewPodScope returns a pod scope.
func NewPodScope(policy Policy) Scope {
return &podScope{
scope{
name: podTopologyScope,
podTopologyHints: podTopologyHints{},
policy: policy,
podMap: containermap.NewContainerMap(),
},
}
}
func (s *podScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
bestHint, admit := s.calculateAffinity(pod)
klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
if !admit {
if IsAlignmentGuaranteed(s.policy) {
// increment only if we know we allocate aligned resources.
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
}
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
return admission.GetPodAdmitResult(&TopologyAffinityError{})
}
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
klog.InfoS("Topology Affinity", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
s.setTopologyHints(string(pod.UID), container.Name, bestHint)
err := s.allocateAlignedResources(pod, &container)
if err != nil {
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
return admission.GetPodAdmitResult(err)
}
}
if IsAlignmentGuaranteed(s.policy) {
// increment only if we know we allocate aligned resources.
klog.V(4).InfoS("Resource alignment at pod scope guaranteed", "pod", klog.KObj(pod))
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
}
return admission.GetPodAdmitResult(nil)
}
func (s *podScope) accumulateProvidersHints(pod *v1.Pod) []map[string][]TopologyHint {
var providersHints []map[string][]TopologyHint
for _, provider := range s.hintProviders {
// Get the TopologyHints for a Pod from a provider.
hints := provider.GetPodTopologyHints(pod)
providersHints = append(providersHints, hints)
klog.InfoS("TopologyHints", "hints", hints, "pod", klog.KObj(pod))
}
return providersHints
}
func (s *podScope) calculateAffinity(pod *v1.Pod) (TopologyHint, bool) {
providersHints := s.accumulateProvidersHints(pod)
bestHint, admit := s.policy.Merge(providersHints)
klog.InfoS("PodTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
return bestHint, admit
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topologymanager
import (
"fmt"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
const (
// defaultMaxAllowableNUMANodes specifies the maximum number of NUMA Nodes that
// the TopologyManager supports on the underlying machine.
//
// At present, having more than this number of NUMA Nodes will result in a
// state explosion when trying to enumerate possible NUMAAffinity masks and
// generate hints for them. As such, if more NUMA Nodes than this are
// present on a machine and the TopologyManager is enabled, an error will
// be returned and the TopologyManager will not be loaded.
defaultMaxAllowableNUMANodes = 8
// ErrorTopologyAffinity represents the type for a TopologyAffinityError
ErrorTopologyAffinity = "TopologyAffinityError"
)
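// To put the limit above in perspective: with N NUMA nodes there are
// 2^N - 1 non-empty NUMAAffinity masks to enumerate per hint provider,
// so the default of 8 nodes already yields 255 candidate masks, and each
// additional node doubles that count.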
// TopologyAffinityError represents a resource alignment error
type TopologyAffinityError struct{}
func (e TopologyAffinityError) Error() string {
return "Resources cannot be allocated with Topology locality"
}
func (e TopologyAffinityError) Type() string {
return ErrorTopologyAffinity
}
// Manager interface provides methods for Kubelet to manage pod topology hints
type Manager interface {
// PodAdmitHandler is implemented by Manager
lifecycle.PodAdmitHandler
// AddHintProvider adds a hint provider to the manager to indicate that the
// hint provider wants to be consulted when making topology hints
AddHintProvider(HintProvider)
// AddContainer adds a pod to the Manager for tracking
AddContainer(pod *v1.Pod, container *v1.Container, containerID string)
// RemoveContainer removes a pod from the Manager's tracking
RemoveContainer(containerID string) error
// Store is the interface for storing pod topology hints
Store
}
type manager struct {
// Topology Manager Scope
scope Scope
}
// HintProvider is an interface for components that want to collaborate to
// achieve globally optimal concrete resource alignment with respect to
// NUMA locality.
type HintProvider interface {
// GetTopologyHints returns a map of resource names to a list of possible
// concrete resource allocations in terms of NUMA locality hints. Each hint
// is optionally marked "preferred" and indicates the set of NUMA nodes
// involved in the hypothetical allocation. The topology manager calls
// this function for each hint provider, and merges the hints to produce
// a consensus "best" hint. The hint providers may subsequently query the
// topology manager to influence actual resource assignment.
GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint
// GetPodTopologyHints returns a map of resource names to a list of possible
// concrete resource allocations per Pod in terms of NUMA locality hints.
GetPodTopologyHints(pod *v1.Pod) map[string][]TopologyHint
// Allocate triggers resource allocation to occur on the HintProvider after
// all hints have been gathered and the aggregated Hint is available via a
// call to Store.GetAffinity().
Allocate(pod *v1.Pod, container *v1.Container) error
}
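// The sketch below is a minimal, hypothetical HintProvider, not part of the
// kubelet: it always offers a single preferred hint pinned to NUMA node 0 for
// an assumed resource name "example.com/device", and exists only to
// illustrate the shape of the interface above.
type exampleHintProvider struct {
	store Store
}

var _ HintProvider = &exampleHintProvider{}

func (p *exampleHintProvider) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint {
	mask, _ := bitmask.NewBitMask(0) // affinity to NUMA node 0 only
	return map[string][]TopologyHint{
		"example.com/device": {{NUMANodeAffinity: mask, Preferred: true}},
	}
}

func (p *exampleHintProvider) GetPodTopologyHints(pod *v1.Pod) map[string][]TopologyHint {
	mask, _ := bitmask.NewBitMask(0)
	return map[string][]TopologyHint{
		"example.com/device": {{NUMANodeAffinity: mask, Preferred: true}},
	}
}

func (p *exampleHintProvider) Allocate(pod *v1.Pod, container *v1.Container) error {
	// Once admission has merged all provider hints, the result is available
	// through the Store interface.
	affinity := p.store.GetAffinity(string(pod.UID), container.Name)
	klog.V(4).InfoS("example provider allocating", "affinity", affinity.NUMANodeAffinity)
	return nil
}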
// Store interface is to allow Hint Providers to retrieve pod affinity
type Store interface {
GetAffinity(podUID string, containerName string) TopologyHint
GetPolicy() Policy
}
// TopologyHint is a struct containing the NUMANodeAffinity for a Container
type TopologyHint struct {
NUMANodeAffinity bitmask.BitMask
// Preferred is set to true when the NUMANodeAffinity encodes a preferred
// allocation for the Container. It is set to false otherwise.
Preferred bool
}
// IsEqual checks if TopologyHint are equal
func (th *TopologyHint) IsEqual(topologyHint TopologyHint) bool {
if th.Preferred == topologyHint.Preferred {
if th.NUMANodeAffinity == nil || topologyHint.NUMANodeAffinity == nil {
return th.NUMANodeAffinity == topologyHint.NUMANodeAffinity
}
return th.NUMANodeAffinity.IsEqual(topologyHint.NUMANodeAffinity)
}
return false
}
// LessThan checks if TopologyHint `a` is less than TopologyHint `b`:
// either `a` is a preferred hint and `b` is not,
// or `a`'s NUMANodeAffinity attribute is narrower than `b`'s.
func (th *TopologyHint) LessThan(other TopologyHint) bool {
if th.Preferred != other.Preferred {
return th.Preferred
}
return th.NUMANodeAffinity.IsNarrowerThan(other.NUMANodeAffinity)
}
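// The sketch below is a hypothetical illustration of the ordering above and
// is not used by the manager itself: a preferred hint sorts before a
// non-preferred one, and among equally preferred hints the narrower
// NUMANodeAffinity wins.
func exampleHintOrdering() bool {
	narrow, _ := bitmask.NewBitMask(0)  // affinity {0}
	wide, _ := bitmask.NewBitMask(0, 1) // affinity {0, 1}
	a := TopologyHint{NUMANodeAffinity: narrow, Preferred: true}
	b := TopologyHint{NUMANodeAffinity: wide, Preferred: true}
	// Both hints are preferred, but a spans fewer NUMA nodes than b,
	// so a.LessThan(b) returns true.
	return a.LessThan(b)
}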
var _ Manager = &manager{}
// NewManager creates a new TopologyManager based on provided policy and scope
func NewManager(topology []cadvisorapi.Node, topologyPolicyName string, topologyScopeName string, topologyPolicyOptions map[string]string) (Manager, error) {
// When policy is none, the scope is not relevant, so we can short circuit here.
if topologyPolicyName == PolicyNone {
klog.InfoS("Creating topology manager with none policy")
return &manager{scope: NewNoneScope()}, nil
}
opts, err := NewPolicyOptions(topologyPolicyOptions)
if err != nil {
return nil, err
}
klog.InfoS("Creating topology manager with policy per scope", "topologyPolicyName", topologyPolicyName, "topologyScopeName", topologyScopeName, "topologyPolicyOptions", opts)
numaInfo, err := NewNUMAInfo(topology, opts)
if err != nil {
return nil, fmt.Errorf("cannot discover NUMA topology: %w", err)
}
if topologyPolicyName != PolicyNone && len(numaInfo.Nodes) > opts.MaxAllowableNUMANodes {
return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", opts.MaxAllowableNUMANodes)
}
var policy Policy
switch topologyPolicyName {
case PolicyBestEffort:
policy = NewBestEffortPolicy(numaInfo, opts)
case PolicyRestricted:
policy = NewRestrictedPolicy(numaInfo, opts)
case PolicySingleNumaNode:
policy = NewSingleNumaNodePolicy(numaInfo, opts)
default:
return nil, fmt.Errorf("unknown policy: \"%s\"", topologyPolicyName)
}
var scope Scope
switch topologyScopeName {
case containerTopologyScope:
scope = NewContainerScope(policy)
case podTopologyScope:
scope = NewPodScope(policy)
default:
return nil, fmt.Errorf("unknown scope: \"%s\"", topologyScopeName)
}
manager := &manager{
scope: scope,
}
manager.initializeMetrics()
return manager, nil
}
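// A hedged usage sketch for NewManager; the topology slice normally comes
// from cAdvisor's MachineInfo, and cpuManager stands in for any HintProvider
// implementation (both are assumptions here, not part of this file):
//
//	var nodes []cadvisorapi.Node // e.g. machineInfo.Topology
//	mgr, err := NewManager(nodes, PolicyBestEffort, containerTopologyScope, nil)
//	if err != nil {
//		// handle the error, e.g. too many NUMA nodes
//	}
//	mgr.AddHintProvider(cpuManager)
//	// During pod admission the kubelet then calls:
//	// result := mgr.Admit(&lifecycle.PodAdmitAttributes{Pod: pod})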
func (m *manager) initializeMetrics() {
// ensure the values exist
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
}
func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint {
return m.scope.GetAffinity(podUID, containerName)
}
func (m *manager) GetPolicy() Policy {
return m.scope.GetPolicy()
}
func (m *manager) AddHintProvider(h HintProvider) {
m.scope.AddHintProvider(h)
}
func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
m.scope.AddContainer(pod, container, containerID)
}
func (m *manager) RemoveContainer(containerID string) error {
return m.scope.RemoveContainer(containerID)
}
func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
klog.V(4).InfoS("Topology manager admission check", "pod", klog.KObj(attrs.Pod))
metrics.TopologyManagerAdmissionRequestsTotal.Inc()
startTime := time.Now()
podAdmitResult := m.scope.Admit(attrs.Pod)
metrics.TopologyManagerAdmissionDuration.Observe(float64(time.Since(startTime).Milliseconds()))
klog.V(4).InfoS("Pod Admit Result", "Message", podAdmitResult.Message, "pod", klog.KObj(attrs.Pod))
return podAdmitResult
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"path/filepath"
libcontainerutils "k8s.io/kubernetes/third_party/forked/libcontainer/utils"
libcontainercgroups "github.com/opencontainers/cgroups"
)
const (
// CgroupRoot is the base path where cgroups are mounted
CgroupRoot = "/sys/fs/cgroup"
)
// GetPids gets pids of the desired cgroup
// Forked from opencontainers/runc/libcontainer/cgroup/fs.Manager.GetPids()
func GetPids(cgroupPath string) ([]int, error) {
dir := ""
if libcontainercgroups.IsCgroup2UnifiedMode() {
path, err := filepath.Rel("/", cgroupPath)
if err != nil {
return nil, err
}
dir = filepath.Join(CgroupRoot, path)
} else {
var err error
dir, err = getCgroupV1Path(cgroupPath)
if err != nil {
return nil, err
}
}
return libcontainercgroups.GetPids(dir)
}
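// A hedged usage sketch: given a kubelet-style cgroup path, GetPids resolves
// it under /sys/fs/cgroup on cgroup v2 (or via the "devices" hierarchy on
// cgroup v1) and lists the member pids. The path below is illustrative only:
//
//	pids, err := GetPids("/kubepods/burstable")
//	if err != nil {
//		// handle lookup failure
//	}
//	for _, pid := range pids {
//		fmt.Println(pid)
//	}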
// getCgroupV1Path gets the file path to the "devices" subsystem of the desired cgroup.
// cgroupPath is the path in the cgroup hierarchy.
func getCgroupV1Path(cgroupPath string) (string, error) {
cgroupPath = libcontainerutils.CleanPath(cgroupPath)
mnt, root, err := libcontainercgroups.FindCgroupMountpointAndRoot(cgroupPath, "devices")
// If the subsystem is not mounted, there is no point in building the path.
if err != nil {
return "", err
}
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
if filepath.IsAbs(cgroupPath) {
// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
return filepath.Join(root, mnt, cgroupPath), nil
}
parentPath, err := getCgroupV1ParentPath(mnt, root)
if err != nil {
return "", err
}
return filepath.Join(parentPath, cgroupPath), nil
}
// getCgroupV1ParentPath gets the parent filepath to this cgroup, for resolving relative cgroup paths.
func getCgroupV1ParentPath(mountpoint, root string) (string, error) {
// Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
// process could be in a container and share a pid namespace with the host, so
// /proc/1/cgroup could point to a whole other world of cgroups.
initPath, err := libcontainercgroups.GetOwnCgroup("devices")
if err != nil {
return "", err
}
// This is needed for nested containers, because in /proc/self/cgroup we
// see paths from the host, which don't exist in the container.
relDir, err := filepath.Rel(root, initPath)
if err != nil {
return "", err
}
return filepath.Join(mountpoint, relDir), nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
// WaitForAPIServerSyncPeriod is the period between checks for the node list/watch initial sync
const WaitForAPIServerSyncPeriod = 1 * time.Second
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(logger klog.Logger, c clientset.Interface, nodeName types.NodeName, nodeHasSynced func() bool, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector("spec.nodeName", string(nodeName)))
// The Reflector responsible for watching pods at the apiserver should be run only after
// the node sync with the apiserver has completed.
logger.Info("Waiting for node sync before watching apiserver pods")
go func() {
for {
if nodeHasSynced() {
logger.V(4).Info("node sync completed")
break
}
time.Sleep(WaitForAPIServerSyncPeriod)
logger.V(4).Info("node sync has not completed yet")
}
logger.Info("Watching apiserver")
newSourceApiserverFromLW(lw, updates)
}()
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
send := func(objs []interface{}) {
var pods []*v1.Pod
for _, o := range objs {
pods = append(pods, o.(*v1.Pod))
}
updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource}
}
r := cache.NewReflector(lw, &v1.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0)
go r.Run(wait.NeverStop)
}
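// The function below is a minimal, hypothetical sketch of draining the
// updates channel fed by this source; in the real kubelet the channel is
// consumed through PodConfig, so this exists only to illustrate the payload
// type.
func exampleConsumeUpdates(logger klog.Logger, updates <-chan interface{}) {
	for u := range updates {
		// Every value sent by this source is a kubetypes.PodUpdate carrying
		// the full SET of pods assigned to this node.
		podUpdate := u.(kubetypes.PodUpdate)
		logger.V(4).Info("received pod update", "op", podUpdate.Op, "source", podUpdate.Source, "pods", len(podUpdate.Pods))
	}
}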
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
utilfeature "k8s.io/apiserver/pkg/util/feature"
podutil "k8s.io/kubernetes/pkg/api/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/features"
// TODO: remove this import if
// api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String() is changed
// to "v1"?
"k8s.io/kubernetes/pkg/api/legacyscheme"
// Ensure that core apis are installed
_ "k8s.io/kubernetes/pkg/apis/core/install"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/apis/core/validation"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/hash"
"k8s.io/klog/v2"
)
const (
maxConfigLength = 10 * 1 << 20 // 10MB
)
// generatePodName generates a pod name that is unique among nodes by appending the nodeName.
func generatePodName(name string, nodeName types.NodeName) string {
return fmt.Sprintf("%s-%s", name, strings.ToLower(string(nodeName)))
}
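// For example (illustrative values only): generatePodName("nginx", "Node-1")
// returns "nginx-node-1", so static pods created from identical manifests on
// different nodes get distinct names in the apiserver.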
func applyDefaults(logger klog.Logger, pod *api.Pod, source string, isFile bool, nodeName types.NodeName) error {
if len(pod.UID) == 0 {
hasher := md5.New()
hash.DeepHashObject(hasher, pod)
// DeepHashObject resets the hash, so we should write the pod source
// information AFTER it.
if isFile {
fmt.Fprintf(hasher, "host:%s", nodeName)
fmt.Fprintf(hasher, "file:%s", source)
} else {
fmt.Fprintf(hasher, "url:%s", source)
}
pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
logger.V(5).Info("Generated UID", "pod", klog.KObj(pod), "podUID", pod.UID, "source", source)
}
pod.Name = generatePodName(pod.Name, nodeName)
logger.V(5).Info("Generated pod name", "pod", klog.KObj(pod), "podUID", pod.UID, "source", source)
if pod.Namespace == "" {
pod.Namespace = metav1.NamespaceDefault
}
logger.V(5).Info("Set namespace for pod", "pod", klog.KObj(pod), "source", source)
// Set the Host field to indicate this pod is scheduled on the current node.
pod.Spec.NodeName = string(nodeName)
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
// The generated UID is the hash of the file.
pod.Annotations[kubetypes.ConfigHashAnnotationKey] = string(pod.UID)
if isFile {
// Applying the default Taint tolerations to static pods,
// so they are not evicted when there are node problems.
helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
Operator: "Exists",
Effect: api.TaintEffectNoExecute,
})
}
// Set the default status to pending.
pod.Status.Phase = api.PodPending
return nil
}
type defaultFunc func(logger klog.Logger, pod *api.Pod) error
// A static pod tried to use a ClusterTrustBundle projected volume source.
var ErrStaticPodTriedToUseClusterTrustBundle = errors.New("static pods may not use ClusterTrustBundle projected volume sources")
// A static pod tried to use a resource claim.
var ErrStaticPodTriedToUseResourceClaims = errors.New("static pods may not use ResourceClaims")
// tryDecodeSinglePod takes data and tries to extract valid Pod config information from it.
func tryDecodeSinglePod(logger klog.Logger, data []byte, defaultFn defaultFunc) (parsed bool, pod *v1.Pod, err error) {
// JSON is valid YAML, so this should work for everything.
json, err := utilyaml.ToJSON(data)
if err != nil {
return false, nil, err
}
obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), json)
if err != nil {
return false, pod, err
}
newPod, ok := obj.(*api.Pod)
// Check whether the object could be converted to a single pod.
if !ok {
return false, pod, fmt.Errorf("invalid pod: %#v", obj)
}
if newPod.Name == "" {
return true, pod, fmt.Errorf("invalid pod: name is needed for the pod")
}
// Apply default values and validate the pod.
if err = defaultFn(logger, newPod); err != nil {
return true, pod, err
}
if errs := validation.ValidatePodCreate(newPod, validation.PodValidationOptions{}); len(errs) > 0 {
return true, pod, fmt.Errorf("invalid pod: %v", errs)
}
v1Pod := &v1.Pod{}
if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil {
logger.Error(err, "Pod failed to convert to v1", "pod", klog.KObj(newPod))
return true, nil, err
}
if utilfeature.DefaultFeatureGate.Enabled(features.PreventStaticPodAPIReferences) {
// Check if pod has references to API objects
_, resource, err := podutil.HasAPIObjectReference(newPod)
if err != nil {
return true, nil, err
}
if resource != "" {
return true, nil, fmt.Errorf("static pods may not reference %s", resource)
}
} else {
// TODO: Remove this else block once the PreventStaticPodAPIReferences gate is GA
for _, v := range v1Pod.Spec.Volumes {
if v.Projected == nil {
continue
}
for _, s := range v.Projected.Sources {
if s.ClusterTrustBundle != nil {
return true, nil, ErrStaticPodTriedToUseClusterTrustBundle
}
}
}
if len(v1Pod.Spec.ResourceClaims) > 0 {
return true, nil, ErrStaticPodTriedToUseResourceClaims
}
}
return true, v1Pod, nil
}
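// The function below is a hedged sketch of how a caller drives
// tryDecodeSinglePod; the manifest, source path, and pod contents are
// illustrative only.
func exampleDecodeStaticPod(logger klog.Logger, nodeName types.NodeName) (*v1.Pod, error) {
	manifest := []byte(`
apiVersion: v1
kind: Pod
metadata:
  name: static-web
spec:
  containers:
  - name: web
    image: nginx
`)
	// Reuse the package's defaulting logic, marking the pod as file-sourced.
	defaultFn := func(logger klog.Logger, pod *api.Pod) error {
		return applyDefaults(logger, pod, "/etc/kubernetes/manifests/static-web.yaml", true, nodeName)
	}
	parsed, pod, err := tryDecodeSinglePod(logger, manifest, defaultFn)
	if !parsed || err != nil {
		return nil, err
	}
	return pod, nil
}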
func tryDecodePodList(logger klog.Logger, data []byte, defaultFn defaultFunc) (parsed bool, pods v1.PodList, err error) {
obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data)
if err != nil {
return false, pods, err
}
newPods, ok := obj.(*api.PodList)
// Check whether the object could be converted to a list of pods.
if !ok {
err = fmt.Errorf("invalid pods list: %#v", obj)
return false, pods, err
}
// Apply default values and validate pods.
for i := range newPods.Items {
newPod := &newPods.Items[i]
if newPod.Name == "" {
return true, pods, fmt.Errorf("invalid pod: name is needed for the pod")
}
if err = defaultFn(logger, newPod); err != nil {
return true, pods, err
}
if errs := validation.ValidatePodCreate(newPod, validation.PodValidationOptions{}); len(errs) > 0 {
err = fmt.Errorf("invalid pod: %v", errs)
return true, pods, err
}
if utilfeature.DefaultFeatureGate.Enabled(features.PreventStaticPodAPIReferences) {
// Check if pod has references to API objects
_, resource, err := podutil.HasAPIObjectReference(newPod)
if err != nil {
return true, pods, err
}
if resource != "" {
return true, pods, fmt.Errorf("static pods may not reference %s", resource)
}
}
}
v1Pods := &v1.PodList{}
if err := k8s_api_v1.Convert_core_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil {
return true, pods, err
}
return true, *v1Pods, err
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
"fmt"
"reflect"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
// PodConfigNotificationMode describes how changes are sent to the update channel.
type PodConfigNotificationMode int
const (
// PodConfigNotificationUnknown is the default value for
// PodConfigNotificationMode when uninitialized.
PodConfigNotificationUnknown PodConfigNotificationMode = iota
// PodConfigNotificationSnapshot delivers the full configuration as a SET whenever
// any change occurs.
PodConfigNotificationSnapshot
// PodConfigNotificationSnapshotAndUpdates delivers an UPDATE and DELETE message whenever pods are
// changed, and a SET message if there are any additions or removals.
PodConfigNotificationSnapshotAndUpdates
// PodConfigNotificationIncremental delivers ADD, UPDATE, DELETE, REMOVE, RECONCILE to the update channel.
PodConfigNotificationIncremental
)
type podStartupSLIObserver interface {
ObservedPodOnWatch(pod *v1.Pod, when time.Time)
}
// PodConfig is a configuration mux that merges many sources of pod configuration into a single
// consistent structure, and then delivers incremental change notifications to listeners
// in order.
type PodConfig struct {
pods *podStorage
mux *mux
// the channel of denormalized changes passed to listeners
updates chan kubetypes.PodUpdate
// contains the list of all configured sources
sourcesLock sync.Mutex
sources sets.Set[string]
}
// NewPodConfig creates an object that can merge many configuration sources into a stream
// of normalized updates to a pod configuration.
func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder, startupSLIObserver podStartupSLIObserver) *PodConfig {
updates := make(chan kubetypes.PodUpdate, 50)
storage := newPodStorage(updates, mode, recorder, startupSLIObserver)
podConfig := &PodConfig{
pods: storage,
mux: newMux(storage),
updates: updates,
sources: sets.Set[string]{},
}
return podConfig
}
// Channel creates or returns a config source channel. The channel
// only accepts PodUpdates
func (c *PodConfig) Channel(ctx context.Context, source string) chan<- interface{} {
c.sourcesLock.Lock()
defer c.sourcesLock.Unlock()
c.sources.Insert(source)
return c.mux.ChannelWithContext(ctx, source)
}
// SeenAllSources returns true if seenSources contains all sources in the
// config, and also this config has received a SET message from each source.
func (c *PodConfig) SeenAllSources(seenSources sets.Set[string]) bool {
if c.pods == nil {
return false
}
// Use klog.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate logger when refactoring this function to accept a logger parameter.
logger := klog.TODO()
c.sourcesLock.Lock()
defer c.sourcesLock.Unlock()
logger.V(5).Info("Looking for sources, have seen", "sources", sets.List(c.sources), "seenSources", seenSources)
return seenSources.HasAll(sets.List(c.sources)...) && c.pods.seenSources(sets.List(c.sources)...)
}
// Updates returns a channel of updates to the configuration, properly denormalized.
func (c *PodConfig) Updates() <-chan kubetypes.PodUpdate {
return c.updates
}
// Sync requests the full configuration be delivered to the update channel.
func (c *PodConfig) Sync() {
c.pods.sync()
}
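// A hedged wiring sketch; recorder, sliObserver, logger, ctx, and nodeName
// are assumed to be supplied by the caller, as they are in the kubelet:
//
//	cfg := NewPodConfig(PodConfigNotificationIncremental, recorder, sliObserver)
//	NewSourceFile(logger, "/etc/kubernetes/manifests", nodeName, 20*time.Second, cfg.Channel(ctx, kubetypes.FileSource))
//	for update := range cfg.Updates() {
//		// handle each kubetypes.PodUpdate in the order delivered
//	}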
// podStorage manages the current pod state at any point in time and ensures updates
// to the channel are delivered in order. Note that this object is an in-memory source of
// "truth" and on creation contains zero entries. Once all previously read sources are
// available, then this object should be considered authoritative.
type podStorage struct {
podLock sync.RWMutex
// map of source name to pod uid to pod reference
pods map[string]map[types.UID]*v1.Pod
mode PodConfigNotificationMode
// ensures that updates are delivered in strict order
// on the updates channel
updateLock sync.Mutex
updates chan<- kubetypes.PodUpdate
// contains the set of all sources that have sent at least one SET
sourcesSeenLock sync.RWMutex
sourcesSeen sets.Set[string]
// the EventRecorder to use
recorder record.EventRecorder
startupSLIObserver podStartupSLIObserver
}
// TODO: PodConfigNotificationMode could be handled by a listener to the updates channel
// in the future, especially with multiple listeners.
// TODO: allow initialization of the current state of the store with snapshotted version.
func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder, startupSLIObserver podStartupSLIObserver) *podStorage {
return &podStorage{
pods: make(map[string]map[types.UID]*v1.Pod),
mode: mode,
updates: updates,
sourcesSeen: sets.Set[string]{},
recorder: recorder,
startupSLIObserver: startupSLIObserver,
}
}
// Merge normalizes a set of incoming changes from different sources into a map of all Pods
// and ensures that redundant changes are filtered out, and then pushes zero or more minimal
// updates onto the update channel. Ensures that updates are delivered in order.
func (s *podStorage) Merge(ctx context.Context, source string, change interface{}) error {
s.updateLock.Lock()
defer s.updateLock.Unlock()
seenBefore := s.sourcesSeen.Has(source)
adds, updates, deletes, removes, reconciles := s.merge(ctx, source, change)
firstSet := !seenBefore && s.sourcesSeen.Has(source)
// deliver update notifications
switch s.mode {
case PodConfigNotificationIncremental:
if len(removes.Pods) > 0 {
s.updates <- *removes
}
if len(adds.Pods) > 0 {
s.updates <- *adds
}
if len(updates.Pods) > 0 {
s.updates <- *updates
}
if len(deletes.Pods) > 0 {
s.updates <- *deletes
}
if firstSet && len(adds.Pods) == 0 && len(updates.Pods) == 0 && len(deletes.Pods) == 0 {
// Send an empty update when first seeing the source and there are
// no ADD or UPDATE or DELETE pods from the source. This signals kubelet that
// the source is ready.
s.updates <- *adds
}
// Only add reconcile support here, because kubelet doesn't support Snapshot updates yet.
if len(reconciles.Pods) > 0 {
s.updates <- *reconciles
}
case PodConfigNotificationSnapshotAndUpdates:
if len(removes.Pods) > 0 || len(adds.Pods) > 0 || firstSet {
s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source}
}
if len(updates.Pods) > 0 {
s.updates <- *updates
}
if len(deletes.Pods) > 0 {
s.updates <- *deletes
}
case PodConfigNotificationSnapshot:
if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 || len(removes.Pods) > 0 || firstSet {
s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source}
}
case PodConfigNotificationUnknown:
fallthrough
default:
panic(fmt.Sprintf("unsupported PodConfigNotificationMode: %#v", s.mode))
}
return nil
}
func (s *podStorage) merge(ctx context.Context, source string, change interface{}) (adds, updates, deletes, removes, reconciles *kubetypes.PodUpdate) {
s.podLock.Lock()
defer s.podLock.Unlock()
logger := klog.FromContext(ctx)
addPods := []*v1.Pod{}
updatePods := []*v1.Pod{}
deletePods := []*v1.Pod{}
removePods := []*v1.Pod{}
reconcilePods := []*v1.Pod{}
pods := s.pods[source]
if pods == nil {
pods = make(map[types.UID]*v1.Pod)
}
// updatePodsFunc is the local function that updates the pod cache *oldPods* with the new pods *newPods*.
// After the update, each new pod is stored in the pod cache *pods*.
// Notice that *pods* and *oldPods* could be the same cache.
updatePodsFunc := func(newPods []*v1.Pod, oldPods, pods map[types.UID]*v1.Pod) {
filtered := filterInvalidPods(logger, newPods, source, s.recorder)
for _, ref := range filtered {
// Annotate the pod with the source before any comparison.
if ref.Annotations == nil {
ref.Annotations = make(map[string]string)
}
ref.Annotations[kubetypes.ConfigSourceAnnotationKey] = source
// ignore static pods
if !kubetypes.IsStaticPod(ref) {
s.startupSLIObserver.ObservedPodOnWatch(ref, time.Now())
}
if existing, found := oldPods[ref.UID]; found {
pods[ref.UID] = existing
needUpdate, needReconcile, needGracefulDelete := checkAndUpdatePod(existing, ref)
if needUpdate {
updatePods = append(updatePods, existing)
} else if needReconcile {
reconcilePods = append(reconcilePods, existing)
} else if needGracefulDelete {
deletePods = append(deletePods, existing)
}
continue
}
recordFirstSeenTime(logger, ref)
pods[ref.UID] = ref
addPods = append(addPods, ref)
}
}
update := change.(kubetypes.PodUpdate)
switch update.Op {
case kubetypes.ADD, kubetypes.UPDATE, kubetypes.DELETE:
if update.Op == kubetypes.ADD {
logger.V(4).Info("Adding new pods from source", "source", source, "pods", klog.KObjSlice(update.Pods))
} else if update.Op == kubetypes.DELETE {
logger.V(4).Info("Gracefully deleting pods from source", "source", source, "pods", klog.KObjSlice(update.Pods))
} else {
logger.V(4).Info("Updating pods from source", "source", source, "pods", klog.KObjSlice(update.Pods))
}
updatePodsFunc(update.Pods, pods, pods)
case kubetypes.REMOVE:
logger.V(4).Info("Removing pods from source", "source", source, "pods", klog.KObjSlice(update.Pods))
for _, value := range update.Pods {
if existing, found := pods[value.UID]; found {
// this is a delete
delete(pods, value.UID)
removePods = append(removePods, existing)
continue
}
// this is a no-op
}
case kubetypes.SET:
logger.V(4).Info("Setting pods for source", "source", source)
s.markSourceSet(source)
// Clear the old map entries by just creating a new map
oldPods := pods
pods = make(map[types.UID]*v1.Pod)
updatePodsFunc(update.Pods, oldPods, pods)
for uid, existing := range oldPods {
if _, found := pods[uid]; !found {
// this is a delete
removePods = append(removePods, existing)
}
}
default:
logger.Info("Received invalid update type", "type", update)
}
s.pods[source] = pods
adds = &kubetypes.PodUpdate{Op: kubetypes.ADD, Pods: copyPods(addPods), Source: source}
updates = &kubetypes.PodUpdate{Op: kubetypes.UPDATE, Pods: copyPods(updatePods), Source: source}
deletes = &kubetypes.PodUpdate{Op: kubetypes.DELETE, Pods: copyPods(deletePods), Source: source}
removes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Pods: copyPods(removePods), Source: source}
reconciles = &kubetypes.PodUpdate{Op: kubetypes.RECONCILE, Pods: copyPods(reconcilePods), Source: source}
return adds, updates, deletes, removes, reconciles
}
func (s *podStorage) markSourceSet(source string) {
s.sourcesSeenLock.Lock()
defer s.sourcesSeenLock.Unlock()
s.sourcesSeen.Insert(source)
}
func (s *podStorage) seenSources(sources ...string) bool {
s.sourcesSeenLock.RLock()
defer s.sourcesSeenLock.RUnlock()
return s.sourcesSeen.HasAll(sources...)
}
func filterInvalidPods(logger klog.Logger, pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) {
names := sets.Set[string]{}
for i, pod := range pods {
// Pods from each source are assumed to have passed validation individually.
// This function only checks if there is any naming conflict.
name := kubecontainer.GetPodFullName(pod)
if names.Has(name) {
logger.Info("Pod failed validation due to duplicate pod name, ignoring", "index", i, "pod", klog.KObj(pod), "source", source)
recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s due to duplicate pod name %q, ignoring", format.Pod(pod), source, pod.Name)
continue
} else {
names.Insert(name)
}
filtered = append(filtered, pod)
}
return
}
// Annotations that the kubelet adds to the pod.
var localAnnotations = []string{
kubetypes.ConfigSourceAnnotationKey,
kubetypes.ConfigMirrorAnnotationKey,
kubetypes.ConfigFirstSeenAnnotationKey,
}
func isLocalAnnotationKey(key string) bool {
for _, localKey := range localAnnotations {
if key == localKey {
return true
}
}
return false
}
// isAnnotationMapEqual returns true if the existing annotation Map is equal to candidate except
// for local annotations.
func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
if candidateMap == nil {
candidateMap = make(map[string]string)
}
for k, v := range candidateMap {
if isLocalAnnotationKey(k) {
continue
}
if existingValue, ok := existingMap[k]; ok && existingValue == v {
continue
}
return false
}
for k := range existingMap {
if isLocalAnnotationKey(k) {
continue
}
// stale entry in existing map.
if _, exists := candidateMap[k]; !exists {
return false
}
}
return true
}
// recordFirstSeenTime records the first seen time of this pod.
func recordFirstSeenTime(logger klog.Logger, pod *v1.Pod) {
logger.V(4).Info("Receiving a new pod", "pod", klog.KObj(pod))
pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
}
// updateAnnotations overwrites existing's annotations with an annotation map containing
// the api annotation map plus the locally managed annotations
func updateAnnotations(existing, ref *v1.Pod) {
annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations))
for k, v := range ref.Annotations {
annotations[k] = v
}
for _, k := range localAnnotations {
if v, ok := existing.Annotations[k]; ok {
annotations[k] = v
}
}
existing.Annotations = annotations
}
func podsDifferSemantically(existing, ref *v1.Pod) bool {
if reflect.DeepEqual(existing.Spec, ref.Spec) &&
reflect.DeepEqual(existing.Labels, ref.Labels) &&
reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) &&
reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) &&
isAnnotationMapEqual(existing.Annotations, ref.Annotations) {
return false
}
return true
}
// checkAndUpdatePod updates existing, and:
// - if ref makes a meaningful change, returns needUpdate=true
// - if ref makes a meaningful change, and this change is graceful deletion, returns needGracefulDelete=true
// - if ref makes no meaningful change, but changes the pod status, returns needReconcile=true
// - else return all false
// needUpdate, needReconcile and needGracefulDelete should never be true at the same time
func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) {
// 1. this is a reconcile
// TODO: it would be better to update the whole object and only preserve certain things
// like the source annotation or the UID (to ensure safety)
if !podsDifferSemantically(existing, ref) {
// this is not an update
// Only check reconcile when it is not an update, because if the pod is going to
// be updated, an extra reconcile is unnecessary
if !reflect.DeepEqual(existing.Status, ref.Status) {
// Pod with changed pod status needs reconcile, because kubelet should
// be the source of truth of pod status.
existing.Status = ref.Status
needReconcile = true
}
return
}
// Overwrite the first-seen time with the existing one. This is our own
// internal annotation, there is no need to update.
ref.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]
existing.Spec = ref.Spec
existing.Labels = ref.Labels
existing.DeletionTimestamp = ref.DeletionTimestamp
existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds
existing.Generation = ref.Generation
existing.Status = ref.Status
updateAnnotations(existing, ref)
// 2. this is a graceful delete
if ref.DeletionTimestamp != nil {
needGracefulDelete = true
} else {
// 3. this is an update
needUpdate = true
}
return
}
// sync sends a copy of the current state through the update channel.
func (s *podStorage) sync() {
s.updateLock.Lock()
defer s.updateLock.Unlock()
s.updates <- kubetypes.PodUpdate{Pods: s.mergedState().([]*v1.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}
}
func (s *podStorage) mergedState() interface{} {
s.podLock.RLock()
defer s.podLock.RUnlock()
pods := make([]*v1.Pod, 0)
for _, sourcePods := range s.pods {
for _, podRef := range sourcePods {
pods = append(pods, podRef.DeepCopy())
}
}
return pods
}
func copyPods(sourcePods []*v1.Pod) []*v1.Pod {
pods := []*v1.Pod{}
for _, source := range sourcePods {
// Use a deep copy here just in case
pods = append(pods, source.DeepCopy())
}
return pods
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
api "k8s.io/kubernetes/pkg/apis/core"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
utilio "k8s.io/utils/io"
)
type podEventType int
const (
podAdd podEventType = iota
podModify
podDelete
eventBufferLen = 10
)
type watchEvent struct {
fileName string
eventType podEventType
}
type sourceFile struct {
path string
nodeName types.NodeName
period time.Duration
store cache.Store
fileKeyMapping map[string]string
updates chan<- interface{}
watchEvents chan *watchEvent
}
// NewSourceFile watches a config file for changes.
func NewSourceFile(logger klog.Logger, path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
// "github.com/sigma/go-inotify" requires a path without trailing "/"
path = strings.TrimRight(path, string(os.PathSeparator))
config := newSourceFile(path, nodeName, period, updates)
logger.V(1).Info("Watching path", "path", path)
config.run(logger)
}
func newSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile {
send := func(objs []interface{}) {
var pods []*v1.Pod
for _, o := range objs {
pods = append(pods, o.(*v1.Pod))
}
updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource}
}
store := cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc)
return &sourceFile{
path: path,
nodeName: nodeName,
period: period,
store: store,
fileKeyMapping: map[string]string{},
updates: updates,
watchEvents: make(chan *watchEvent, eventBufferLen),
}
}
func (s *sourceFile) run(logger klog.Logger) {
listTicker := time.NewTicker(s.period)
go func() {
// Read path immediately to speed up startup.
if err := s.listConfig(logger); err != nil {
logger.Error(err, "Unable to read config path", "path", s.path)
}
for {
select {
case <-listTicker.C:
if err := s.listConfig(logger); err != nil {
logger.Error(err, "Unable to read config path", "path", s.path)
}
case e := <-s.watchEvents:
if err := s.consumeWatchEvent(logger, e); err != nil {
logger.Error(err, "Unable to process watch event")
}
}
}
}()
s.startWatch(logger)
}
func (s *sourceFile) applyDefaults(logger klog.Logger, pod *api.Pod, source string) error {
return applyDefaults(logger, pod, source, true, s.nodeName)
}
func (s *sourceFile) listConfig(logger klog.Logger) error {
path := s.path
statInfo, err := os.Stat(path)
if err != nil {
if !os.IsNotExist(err) {
return err
}
// Emit an update with an empty PodList to allow FileSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
return fmt.Errorf("path does not exist, ignoring")
}
switch {
case statInfo.Mode().IsDir():
pods, err := s.extractFromDir(logger, path)
if err != nil {
return err
}
if len(pods) == 0 {
// Emit an update with an empty PodList to allow FileSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource}
return nil
}
return s.replaceStore(pods...)
case statInfo.Mode().IsRegular():
pod, err := s.extractFromFile(logger, path)
if err != nil {
return err
}
return s.replaceStore(pod)
default:
return fmt.Errorf("path is not a directory or file")
}
}
// Get as many pod manifests as we can from a directory. Return an error if and only if something
// prevented us from reading anything at all. Do not return an error if only some files
// were problematic.
func (s *sourceFile) extractFromDir(logger klog.Logger, name string) ([]*v1.Pod, error) {
dirents, err := filepath.Glob(filepath.Join(name, "[^.]*"))
if err != nil {
return nil, fmt.Errorf("glob failed: %v", err)
}
pods := make([]*v1.Pod, 0, len(dirents))
if len(dirents) == 0 {
return pods, nil
}
sort.Strings(dirents)
for _, path := range dirents {
statInfo, err := os.Stat(path)
if err != nil {
logger.Error(err, "Could not get metadata", "path", path)
continue
}
switch {
case statInfo.Mode().IsDir():
logger.Error(nil, "Provided manifest path is a directory, not recursing into manifest path", "path", path)
case statInfo.Mode().IsRegular():
pod, err := s.extractFromFile(logger, path)
if err != nil {
if !os.IsNotExist(err) {
logger.Error(err, "Could not process manifest file", "path", path)
}
} else {
pods = append(pods, pod)
}
default:
logger.Error(nil, "Manifest path is not a directory or file", "path", path, "mode", statInfo.Mode())
}
}
return pods, nil
}
// extractFromFile parses a file for Pod configuration information.
func (s *sourceFile) extractFromFile(logger klog.Logger, filename string) (pod *v1.Pod, err error) {
logger.V(3).Info("Reading config file", "path", filename)
defer func() {
if err == nil && pod != nil {
objKey, keyErr := cache.MetaNamespaceKeyFunc(pod)
if keyErr != nil {
err = keyErr
return
}
s.fileKeyMapping[filename] = objKey
}
}()
file, err := os.Open(filename)
if err != nil {
return pod, err
}
defer file.Close()
data, err := utilio.ReadAtMost(file, maxConfigLength)
if err != nil {
return pod, err
}
defaultFn := func(logger klog.Logger, pod *api.Pod) error {
return s.applyDefaults(logger, pod, filename)
}
parsed, pod, podErr := tryDecodeSinglePod(logger, data, defaultFn)
if parsed {
if podErr != nil {
return pod, podErr
}
return pod, nil
}
return pod, fmt.Errorf("%v: couldn't parse as pod(%v), please check config file", filename, podErr)
}
func (s *sourceFile) replaceStore(pods ...*v1.Pod) (err error) {
objs := []interface{}{}
for _, pod := range pods {
objs = append(objs, pod)
}
return s.store.Replace(objs, "")
}
//go:build linux
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/fsnotify/fsnotify"
"k8s.io/klog/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/flowcontrol"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
retryPeriod = 1 * time.Second
maxRetryPeriod = 20 * time.Second
)
type retryableError struct {
message string
}
func (e *retryableError) Error() string {
return e.message
}
func (s *sourceFile) startWatch(logger klog.Logger) {
backOff := flowcontrol.NewBackOff(retryPeriod, maxRetryPeriod)
backOffID := "watch"
go wait.Forever(func() {
if backOff.IsInBackOffSinceUpdate(backOffID, time.Now()) {
return
}
if err := s.doWatch(logger); err != nil {
logger.Error(err, "Unable to read config path", "path", s.path)
if _, retryable := err.(*retryableError); !retryable {
backOff.Next(backOffID, time.Now())
}
}
}, retryPeriod)
}
func (s *sourceFile) doWatch(logger klog.Logger) error {
_, err := os.Stat(s.path)
if err != nil {
if !os.IsNotExist(err) {
return err
}
// Emit an update with an empty PodList to allow FileSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
return &retryableError{"path does not exist, ignoring"}
}
w, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("unable to create inotify: %v", err)
}
defer w.Close()
err = w.Add(s.path)
if err != nil {
return fmt.Errorf("unable to create inotify for path %q: %v", s.path, err)
}
for {
select {
case event := <-w.Events:
if err = s.produceWatchEvent(logger, &event); err != nil {
return fmt.Errorf("error while processing inotify event (%+v): %v", event, err)
}
case err = <-w.Errors:
return fmt.Errorf("error while watching %q: %v", s.path, err)
}
}
}
func (s *sourceFile) produceWatchEvent(logger klog.Logger, e *fsnotify.Event) error {
// Ignore files whose names start with a dot
if strings.HasPrefix(filepath.Base(e.Name), ".") {
logger.V(4).Info("Ignored pod manifest, because it starts with dots", "eventName", e.Name)
return nil
}
var eventType podEventType
switch {
case (e.Op & fsnotify.Create) > 0:
eventType = podAdd
case (e.Op & fsnotify.Write) > 0:
eventType = podModify
case (e.Op & fsnotify.Chmod) > 0:
eventType = podModify
case (e.Op & fsnotify.Remove) > 0:
eventType = podDelete
case (e.Op & fsnotify.Rename) > 0:
eventType = podDelete
default:
// Ignore the remaining event types
return nil
}
s.watchEvents <- &watchEvent{e.Name, eventType}
return nil
}
func (s *sourceFile) consumeWatchEvent(logger klog.Logger, e *watchEvent) error {
switch e.eventType {
case podAdd, podModify:
pod, err := s.extractFromFile(logger, e.fileName)
if err != nil {
return fmt.Errorf("can't process config file %q: %v", e.fileName, err)
}
return s.store.Add(pod)
case podDelete:
if objKey, keyExist := s.fileKeyMapping[e.fileName]; keyExist {
pod, podExist, err := s.store.GetByKey(objKey)
if err != nil {
return err
}
if !podExist {
return fmt.Errorf("the pod with key %s doesn't exist in cache", objKey)
}
if err = s.store.Delete(pod); err != nil {
return fmt.Errorf("failed to remove deleted pod from cache: %v", err)
}
delete(s.fileKeyMapping, e.fileName)
}
}
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
)
// ContainerRuntimeOptions defines options for the container runtime.
type ContainerRuntimeOptions struct {
// General Options.
// RuntimeCgroups that container runtime is expected to be isolated in.
RuntimeCgroups string
// Image credential provider plugin options
// ImageCredentialProviderConfigPath is the path to the credential provider plugin config file or directory.
// If a directory is specified, all .json, .yaml, or .yml files in the directory are loaded and merged
// in lexicographical order. The config file(s) specify which credential providers are enabled
// and invoked by the kubelet. The plugin config should contain information about what plugin binary
// to execute and what container images the plugin should be called for.
// +optional
ImageCredentialProviderConfigPath string
// ImageCredentialProviderBinDir is the path to the directory where credential provider plugin
// binaries exist. The name of each plugin binary is expected to match the name of the plugin
// specified in imageCredentialProviderConfigFile.
// +optional
ImageCredentialProviderBinDir string
}
// AddFlags registers the container runtime flags on the given FlagSet, according to ContainerRuntimeOptions.
func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) {
var tmp string
// General settings.
fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.")
fs.StringVar(&tmp, "pod-infra-container-image", "", "Specified image will not be pruned by the image garbage collector. CRI implementations have their own configuration to set this image.")
_ = fs.MarkDeprecated("pod-infra-container-image", "will be removed in 1.35. Image garbage collector will get sandbox image information from CRI.")
// Image credential provider settings.
fs.StringVar(&s.ImageCredentialProviderConfigPath, "image-credential-provider-config", s.ImageCredentialProviderConfigPath, "Path to a credential provider plugin config file (JSON/YAML/YML) or a directory of such files (merged in lexicographical order; non-recursive search).")
fs.StringVar(&s.ImageCredentialProviderBinDir, "image-credential-provider-bin-dir", s.ImageCredentialProviderBinDir, "The path to the directory where credential provider plugin binaries are located.")
}
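// Illustrative usage sketch: wiring these options into a pflag.FlagSet. The
// flag set name and parsed arguments are hypothetical.
//
//    fs := pflag.NewFlagSet("kubelet", pflag.ContinueOnError)
//    opts := &ContainerRuntimeOptions{}
//    opts.AddFlags(fs)
//    _ = fs.Parse([]string{"--runtime-cgroups=/runtime"})
//    fmt.Println(opts.RuntimeCgroups) // "/runtime"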
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"bytes"
"fmt"
"net/http"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
api "k8s.io/kubernetes/pkg/apis/core"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
utilio "k8s.io/utils/io"
)
type sourceURL struct {
url string
header http.Header
nodeName types.NodeName
updates chan<- interface{}
data []byte
failureLogs int
client *http.Client
}
// NewSourceURL specifies the URL from which to read pod configuration, then watches it for changes.
func NewSourceURL(logger klog.Logger, url string, header http.Header, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
config := &sourceURL{
url: url,
header: header,
nodeName: nodeName,
updates: updates,
data: nil,
// Timing out requests leads to retries. This client is only used to
// read the manifest URL passed to kubelet.
client: &http.Client{Timeout: 10 * time.Second},
}
logger.V(1).Info("Watching URL", "URL", url)
go wait.Until(func() { config.run(logger) }, period, wait.NeverStop)
}
func (s *sourceURL) run(logger klog.Logger) {
if err := s.extractFromURL(logger); err != nil {
// Don't log this multiple times per minute. The first few entries should be
// enough to get the point across.
if s.failureLogs < 3 {
logger.Info("Failed to read pods from URL", "err", err)
} else if s.failureLogs == 3 {
logger.Info("Failed to read pods from URL. Dropping verbosity of this message to V(4)", "err", err)
} else {
logger.V(4).Info("Failed to read pods from URL", "err", err)
}
s.failureLogs++
} else {
if s.failureLogs > 0 {
logger.Info("Successfully read pods from URL")
s.failureLogs = 0
}
}
}
func (s *sourceURL) applyDefaults(logger klog.Logger, pod *api.Pod) error {
return applyDefaults(logger, pod, s.url, false, s.nodeName)
}
func (s *sourceURL) extractFromURL(logger klog.Logger) error {
req, err := http.NewRequest("GET", s.url, nil)
if err != nil {
return err
}
req.Header = s.header
resp, err := s.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
data, err := utilio.ReadAtMost(resp.Body, maxConfigLength)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%v: %v", s.url, resp.Status)
}
if len(data) == 0 {
// Emit an update with an empty PodList to allow HTTPSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
return fmt.Errorf("zero-length data received from %v", s.url)
}
// Short circuit if the data has not changed since the last time it was read.
if bytes.Equal(data, s.data) {
return nil
}
s.data = data
// First, try to parse the data as a single pod.
parsed, pod, singlePodErr := tryDecodeSinglePod(logger, data, s.applyDefaults)
if parsed {
if singlePodErr != nil {
// It parsed but could not be used.
return singlePodErr
}
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
return nil
}
// That didn't work, so try a list of pods.
parsed, podList, multiPodErr := tryDecodePodList(logger, data, s.applyDefaults)
if parsed {
if multiPodErr != nil {
// It parsed but could not be used.
return multiPodErr
}
pods := make([]*v1.Pod, 0, len(podList.Items))
for i := range podList.Items {
pods = append(pods, &podList.Items[i])
}
s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
return nil
}
return fmt.Errorf("%v: received '%v', but couldn't parse as "+
"single (%v) or multiple pods (%v)",
s.url, string(data), singlePodErr, multiPodErr)
}
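// Illustrative usage sketch: starting an HTTP pod source and consuming its
// updates. The URL, node name, and period are hypothetical.
//
//    updates := make(chan interface{})
//    NewSourceURL(logger, "http://manifests.example.com/pods.yaml", http.Header{}, "node-1", 20*time.Second, updates)
//    for u := range updates {
//        podUpdate := u.(kubetypes.PodUpdate) // each update is a full SET from kubetypes.HTTPSource
//        _ = podUpdate
//    }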
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
"sync"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
type merger interface {
// Invoked when a change from a source is received. May also function as an incremental
// merger if you wish to consume changes incrementally. Must be reentrant when more than
// one source is defined.
Merge(ctx context.Context, source string, update interface{}) error
}
// mux is a class for merging configuration from multiple sources. Changes are
// pushed via channels and sent to the merge function.
type mux struct {
// merger is invoked when an update is received from a source.
merger merger
// Sources and their lock.
sourceLock sync.RWMutex
// Maps source names to channels
sources map[string]chan interface{}
}
// newMux creates a new mux that can merge changes from multiple sources.
func newMux(merger merger) *mux {
mux := &mux{
sources: make(map[string]chan interface{}),
merger: merger,
}
return mux
}
// ChannelWithContext returns a channel where a configuration source
// can send updates of new configurations. Multiple calls with the same
// source will return the same channel. This allows change- and state-based sources
// to use the same channel. Different source names, however, will be treated as a
// union.
func (m *mux) ChannelWithContext(ctx context.Context, source string) chan interface{} {
if len(source) == 0 {
panic("Channel given an empty name")
}
m.sourceLock.Lock()
defer m.sourceLock.Unlock()
channel, exists := m.sources[source]
if exists {
return channel
}
newChannel := make(chan interface{})
m.sources[source] = newChannel
go wait.Until(func() { m.listen(ctx, source, newChannel) }, 0, ctx.Done())
return newChannel
}
func (m *mux) listen(ctx context.Context, source string, listenChannel <-chan interface{}) {
logger := klog.FromContext(ctx)
for update := range listenChannel {
if err := m.merger.Merge(ctx, source, update); err != nil {
logger.Info("failed merging update", "err", err)
}
}
}
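// Illustrative sketch of a merger implementation and mux usage; the merger type
// and channel payload are hypothetical.
//
//    type logMerger struct{}
//
//    func (logMerger) Merge(ctx context.Context, source string, update interface{}) error {
//        klog.FromContext(ctx).Info("Merged update", "source", source)
//        return nil
//    }
//
//    m := newMux(logMerger{})
//    fileCh := m.ChannelWithContext(ctx, "file") // repeated calls with "file" return the same channel
//    fileCh <- somePodUpdate                     // delivered to logMerger.Merge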
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config implements the pod configuration readers.
package config
import (
"sync"
"k8s.io/apimachinery/pkg/util/sets"
)
// SourcesReadyFn is a function that returns true if the specified sources have been seen.
type SourcesReadyFn func(sourcesSeen sets.Set[string]) bool
// SourcesReady tracks the set of configured sources seen by the kubelet.
type SourcesReady interface {
// AddSource adds the specified source to the set of sources managed.
AddSource(source string)
// AllReady returns true if the currently configured sources have all been seen.
AllReady() bool
}
// NewSourcesReady returns a SourcesReady with the specified function.
func NewSourcesReady(sourcesReadyFn SourcesReadyFn) SourcesReady {
return &sourcesImpl{
sourcesSeen: sets.New[string](),
sourcesReadyFn: sourcesReadyFn,
}
}
// sourcesImpl implements SourcesReady. It is thread-safe.
type sourcesImpl struct {
// lock protects access to sources seen.
lock sync.RWMutex
// set of sources seen.
sourcesSeen sets.Set[string]
// sourcesReady is a function that evaluates if the sources are ready.
sourcesReadyFn SourcesReadyFn
}
// AddSource adds the specified source to the set of sources managed.
func (s *sourcesImpl) AddSource(source string) {
s.lock.Lock()
defer s.lock.Unlock()
s.sourcesSeen.Insert(source)
}
// AllReady returns true if each configured source is ready.
func (s *sourcesImpl) AllReady() bool {
s.lock.RLock()
defer s.lock.RUnlock()
return s.sourcesReadyFn(s.sourcesSeen)
}
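// Illustrative usage sketch: a readiness function that requires a fixed set of
// sources. The source names are hypothetical.
//
//    required := sets.New[string]("file", "http")
//    ready := NewSourcesReady(func(seen sets.Set[string]) bool {
//        return seen.HasAll(sets.List(required)...)
//    })
//    ready.AddSource("file")
//    fmt.Println(ready.AllReady()) // false; "http" has not been seen yet
//    ready.AddSource("http")
//    fmt.Println(ready.AllReady()) // true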
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import (
"context"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
// The Manager interface provides methods for the kubelet to manage ConfigMaps.
type Manager interface {
// Get configmap by configmap namespace and name.
GetConfigMap(namespace, name string) (*v1.ConfigMap, error)
// WARNING: Register/UnregisterPod functions should be efficient,
// i.e. should not block on network operations.
// RegisterPod registers all configmaps from a given pod.
RegisterPod(pod *v1.Pod)
// UnregisterPod unregisters configmaps from a given pod that are not
// used by any other registered pod.
UnregisterPod(pod *v1.Pod)
}
// simpleConfigMapManager implements ConfigMap Manager interface with
// simple operations to apiserver.
type simpleConfigMapManager struct {
kubeClient clientset.Interface
}
// NewSimpleConfigMapManager creates a new ConfigMapManager instance.
func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager {
return &simpleConfigMapManager{kubeClient: kubeClient}
}
func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) {
}
func (s *simpleConfigMapManager) UnregisterPod(pod *v1.Pod) {
}
// configMapManager keeps a cache of all configmaps necessary
// for registered pods. Different implementations of the store
// may result in different semantics for freshness of configmaps
// (e.g. ttl-based implementation vs watch-based implementation).
type configMapManager struct {
manager manager.Manager
}
func (c *configMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
object, err := c.manager.GetObject(namespace, name)
if err != nil {
return nil, err
}
if configmap, ok := object.(*v1.ConfigMap); ok {
return configmap, nil
}
return nil, fmt.Errorf("unexpected object type: %v", object)
}
func (c *configMapManager) RegisterPod(pod *v1.Pod) {
c.manager.RegisterPod(pod)
}
func (c *configMapManager) UnregisterPod(pod *v1.Pod) {
c.manager.UnregisterPod(pod)
}
func getConfigMapNames(pod *v1.Pod) sets.Set[string] {
result := sets.New[string]()
podutil.VisitPodConfigmapNames(pod, func(name string) bool {
result.Insert(name)
return true
})
return result
}
const (
defaultTTL = time.Minute
)
// NewCachingConfigMapManager creates a manager that keeps a cache of all configmaps
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, the cached versions of all configmaps
// are invalidated
// - every GetObject() call tries to fetch the value from the local cache; if it is
// not there, invalidated, or too old, we fetch it from the apiserver and refresh the
// value in the cache; otherwise it is just fetched from the cache
func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager {
getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
return kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts)
}
configMapStore := manager.NewObjectStore(getConfigMap, clock.RealClock{}, getTTL, defaultTTL)
return &configMapManager{
manager: manager.NewCacheBasedManager(configMapStore, getConfigMapNames),
}
}
// NewWatchingConfigMapManager creates a manager that keeps a cache of all configmaps
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, we start individual watches for all
// referenced objects that aren't referenced from other registered pods
// - every GetObject() returns a value from local cache propagated via watches
func NewWatchingConfigMapManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager {
listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) {
return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts)
}
watchConfigMap := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
return kubeClient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), opts)
}
newConfigMap := func() runtime.Object {
return &v1.ConfigMap{}
}
isImmutable := func(object runtime.Object) bool {
if configMap, ok := object.(*v1.ConfigMap); ok {
return configMap.Immutable != nil && *configMap.Immutable
}
return false
}
gr := corev1.Resource("configmap")
return &configMapManager{
manager: manager.NewWatchBasedManager(listConfigMap, watchConfigMap, newConfigMap, isImmutable, gr, resyncInterval, getConfigMapNames),
}
}
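// Illustrative sketch of choosing between the caching and watching managers.
// The client, flag, and interval are hypothetical, and the TTL function shown
// (assuming manager.GetObjectTTLFunc has the form func() (time.Duration, bool))
// simply defers to defaultTTL.
//
//    var cm Manager
//    if useWatch {
//        cm = NewWatchingConfigMapManager(kubeClient, time.Minute)
//    } else {
//        cm = NewCachingConfigMapManager(kubeClient, func() (time.Duration, bool) { return 0, false })
//    }
//    cm.RegisterPod(pod) // must be cheap; no network calls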
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configmap
import v1 "k8s.io/api/core/v1"
// fakeManager implements the Manager interface for testing purposes.
type fakeManager struct {
}
// NewFakeManager creates an empty/fake ConfigMap manager.
func NewFakeManager() Manager {
return &fakeManager{}
}
func (s *fakeManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
return nil, nil
}
func (s *fakeManager) RegisterPod(pod *v1.Pod) {
}
func (s *fakeManager) UnregisterPod(pod *v1.Pod) {
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
// Cache stores the PodStatus for the pods. It represents *all* the visible
// pods/containers in the container runtime. All cache entries are at least as
// new or newer than the global timestamp (set by UpdateTime()), while
// individual entries may be slightly newer than the global timestamp. If a pod
// has no states known by the runtime, Cache returns an empty PodStatus object
// with ID populated.
//
// Cache provides two methods to retrieve the PodStatus: the non-blocking Get()
// and the blocking GetNewerThan() method. The component responsible for
// populating the cache is expected to call Delete() to explicitly free the
// cache entries.
type Cache interface {
Get(types.UID) (*PodStatus, error)
// Set updates the cache by setting the PodStatus for the pod only
// if the data is newer than the cache based on the provided
// timestamp. Returns whether the cache was updated.
Set(types.UID, *PodStatus, error, time.Time) (updated bool)
// GetNewerThan is a blocking call that only returns the status
// when it is newer than the given time.
GetNewerThan(types.UID, time.Time) (*PodStatus, error)
Delete(types.UID)
UpdateTime(time.Time)
}
type data struct {
// Status of the pod.
status *PodStatus
// Error encountered when trying to inspect the pod.
err error
// Time when the data was last modified.
modified time.Time
}
type subRecord struct {
time time.Time
ch chan *data
}
// cache implements Cache.
type cache struct {
// Lock which guards all internal data structures.
lock sync.RWMutex
// Map that stores the pod statuses.
pods map[types.UID]*data
// A global timestamp represents how fresh the cached data is. All
// cache content is at least as new as this timestamp. Note that the
// timestamp is nil after initialization, and will only become non-nil when
// it is ready to serve the cached statuses.
timestamp *time.Time
// Map that stores the subscriber records.
subscribers map[types.UID][]*subRecord
}
// NewCache creates a pod cache.
func NewCache() Cache {
return &cache{pods: map[types.UID]*data{}, subscribers: map[types.UID][]*subRecord{}}
}
// Get returns the PodStatus for the pod; callers are expected not to
// modify the objects returned.
func (c *cache) Get(id types.UID) (*PodStatus, error) {
c.lock.RLock()
defer c.lock.RUnlock()
d := c.get(id)
return d.status, d.err
}
func (c *cache) GetNewerThan(id types.UID, minTime time.Time) (*PodStatus, error) {
ch := c.subscribe(id, minTime)
d := <-ch
return d.status, d.err
}
// Set sets the PodStatus for the pod only if the data is newer than the cache
func (c *cache) Set(id types.UID, status *PodStatus, err error, timestamp time.Time) (updated bool) {
c.lock.Lock()
defer c.lock.Unlock()
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
// Set the value in the cache only if it's not present already
// or the timestamp in the cache is older than the current update timestamp
if cachedVal, ok := c.pods[id]; ok && cachedVal.modified.After(timestamp) {
return false
}
}
c.pods[id] = &data{status: status, err: err, modified: timestamp}
c.notify(id, timestamp)
return true
}
// Delete removes the entry of the pod.
func (c *cache) Delete(id types.UID) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.pods, id)
}
// UpdateTime modifies the global timestamp of the cache and notifies
// subscribers if needed.
func (c *cache) UpdateTime(timestamp time.Time) {
c.lock.Lock()
defer c.lock.Unlock()
c.timestamp = ×tamp
// Notify all the subscribers if the condition is met.
for id := range c.subscribers {
c.notify(id, *c.timestamp)
}
}
func makeDefaultData(id types.UID) *data {
return &data{status: &PodStatus{ID: id}, err: nil}
}
func (c *cache) get(id types.UID) *data {
d, ok := c.pods[id]
if !ok {
// Cache should store *all* pod/container information known by the
// container runtime. A cache miss indicates that there are no states
// regarding the pod last time we queried the container runtime.
// What this *really* means is that there are no visible pod/containers
// associated with this pod. Simply return a default (mostly empty)
// PodStatus to reflect this.
return makeDefaultData(id)
}
return d
}
// getIfNewerThan returns the data if it is newer than the given time.
// Otherwise, it returns nil. The caller should acquire the lock.
func (c *cache) getIfNewerThan(id types.UID, minTime time.Time) *data {
d, ok := c.pods[id]
globalTimestampIsNewer := (c.timestamp != nil && c.timestamp.After(minTime))
if !ok && globalTimestampIsNewer {
// Status is not cached, but the global timestamp is newer than
// minTime, return the default status.
return makeDefaultData(id)
}
if ok && (d.modified.After(minTime) || globalTimestampIsNewer) {
// Status is cached, return status if either of the following is true.
// * status was modified after minTime
// * the global timestamp of the cache is newer than minTime.
return d
}
// The pod status is not ready.
return nil
}
// notify sends notifications for pod with the given id, if the requirements
// are met. Note that the caller should acquire the lock.
func (c *cache) notify(id types.UID, timestamp time.Time) {
list, ok := c.subscribers[id]
if !ok {
// No one to notify.
return
}
newList := []*subRecord{}
for i, r := range list {
if timestamp.Before(r.time) {
// Doesn't meet the time requirement; keep the record.
newList = append(newList, list[i])
continue
}
r.ch <- c.get(id)
close(r.ch)
}
if len(newList) == 0 {
delete(c.subscribers, id)
} else {
c.subscribers[id] = newList
}
}
func (c *cache) subscribe(id types.UID, timestamp time.Time) chan *data {
ch := make(chan *data, 1)
c.lock.Lock()
defer c.lock.Unlock()
d := c.getIfNewerThan(id, timestamp)
if d != nil {
// If the cache entry is ready, send the data and return immediately.
ch <- d
return ch
}
// Add the subscription record.
c.subscribers[id] = append(c.subscribers[id], &subRecord{time: timestamp, ch: ch})
return ch
}
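// Illustrative usage sketch of the cache contract; UIDs and timestamps are
// hypothetical.
//
//    c := NewCache()
//    c.Set("uid-1", &PodStatus{ID: "uid-1"}, nil, time.Now())
//    status, err := c.Get("uid-1") // non-blocking read
//    _, _ = status, err
//    go func() {
//        newer, _ := c.GetNewerThan("uid-1", time.Now()) // blocks until newer data arrives
//        _ = newer
//    }()
//    c.UpdateTime(time.Now()) // bumping the global timestamp unblocks the subscriber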
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"context"
"fmt"
"time"
"k8s.io/klog/v2"
)
// GCPolicy specifies a policy for garbage collecting containers.
type GCPolicy struct {
// Minimum age at which a container can be garbage collected, zero for no limit.
MinAge time.Duration
// Max number of dead containers any single pod (UID, container name) pair is
// allowed to have, less than zero for no limit.
MaxPerPodContainer int
// Max number of total dead containers, less than zero for no limit.
MaxContainers int
}
// GC manages garbage collection of dead containers.
//
// Implementation is thread-compatible.
type GC interface {
// Garbage collect containers.
GarbageCollect(ctx context.Context) error
// DeleteAllUnusedContainers deletes all unused containers, including containers belonging to pods that are terminated but not deleted.
DeleteAllUnusedContainers(ctx context.Context) error
}
// SourcesReadyProvider knows how to determine if configuration sources are ready
type SourcesReadyProvider interface {
// AllReady returns true if the currently configured sources have all been seen.
AllReady() bool
}
// TODO(vmarmol): Preferentially remove pod infra containers.
type realContainerGC struct {
// Container runtime
runtime Runtime
// Policy for garbage collection.
policy GCPolicy
// sourcesReadyProvider provides the readiness of kubelet configuration sources.
sourcesReadyProvider SourcesReadyProvider
}
// NewContainerGC creates a new instance of GC with the specified policy.
func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider SourcesReadyProvider) (GC, error) {
if policy.MinAge < 0 {
return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge)
}
return &realContainerGC{
runtime: runtime,
policy: policy,
sourcesReadyProvider: sourcesReadyProvider,
}, nil
}
func (cgc *realContainerGC) GarbageCollect(ctx context.Context) error {
return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
}
func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error {
klog.FromContext(ctx).Info("Attempting to delete unused containers")
return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"context"
"encoding/json"
"fmt"
"hash/fnv"
"strings"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
sc "k8s.io/kubernetes/pkg/securitycontext"
hashutil "k8s.io/kubernetes/pkg/util/hash"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
utilsnet "k8s.io/utils/net"
)
// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
Run(ctx context.Context, containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error)
}
// RuntimeHelper wraps the kubelet to give the container runtime access to
// necessary information such as the RunContainerOptions, DNS settings, and host IP.
type RuntimeHelper interface {
GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, imageVolumes ImageVolumes) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
// of a pod.
GetPodCgroupParent(pod *v1.Pod) string
GetPodDir(podUID types.UID) string
GeneratePodHostNameAndDomain(pod *v1.Pod) (hostname string, hostDomain string, err error)
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
// GetOrCreateUserNamespaceMappings returns the configuration for the sandbox user namespace
GetOrCreateUserNamespaceMappings(pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error)
// PrepareDynamicResources prepares resources for a pod.
PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error
// UnprepareDynamicResources unprepares resources for a pod.
UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error
// SetPodWatchCondition flags a pod to be inspected until the condition is met.
SetPodWatchCondition(types.UID, string, func(*PodStatus) bool)
// PodCPUAndMemoryStats reads the latest CPU & memory usage stats.
PodCPUAndMemoryStats(context.Context, *v1.Pod, *PodStatus) (*statsapi.PodStats, error)
}
// ShouldContainerBeRestarted checks whether a container needs to be restarted.
// TODO(yifan): Think about how to refactor this.
func ShouldContainerBeRestarted(logger klog.Logger, container *v1.Container, pod *v1.Pod, podStatus *PodStatus) bool {
// Once a pod has been marked deleted, it should not be restarted
if pod.DeletionTimestamp != nil {
return false
}
// Get latest container status.
status := podStatus.FindContainerStatusByName(container.Name)
// If the container was never started before, we should start it.
// NOTE(random-liu): If all historical containers were GC'd, we'll also return true here.
if status == nil {
return true
}
// Check whether container is running
if status.State == ContainerStateRunning {
return false
}
// Always restart container in the unknown, or in the created state.
if status.State == ContainerStateUnknown || status.State == ContainerStateCreated {
return true
}
// Check RestartPolicy for dead container
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
return podutil.ContainerShouldRestart(*container, pod.Spec, int32(status.ExitCode))
}
if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
logger.V(4).Info("Already ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
return false
}
if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
// Check the exit code.
if status.ExitCode == 0 {
logger.V(4).Info("Already successfully ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
return false
}
}
return true
}
// HashContainer returns the hash of the container. It is used to compare
// the running container with its desired spec.
// Note: remember to update hashValues in container_hash_test.go as well.
func HashContainer(container *v1.Container) uint64 {
hash := fnv.New32a()
containerJSON, _ := json.Marshal(pickFieldsToHash(container))
hashutil.DeepHashObject(hash, containerJSON)
return uint64(hash.Sum32())
}
// pickFieldsToHash picks the fields that affect the running status of the container for hashing;
// currently this set contains only `image` and `name`.
// Note: this list must be updated if ever kubelet wants to allow mutations to other fields.
func pickFieldsToHash(container *v1.Container) map[string]string {
retval := map[string]string{
"name": container.Name,
"image": container.Image,
}
return retval
}
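// Illustrative sketch: only name and image feed the hash, so changing the image
// changes the hash while other spec mutations do not. Values are hypothetical.
//
//    c := &v1.Container{Name: "app", Image: "nginx:1.25"}
//    h1 := HashContainer(c)
//    c.Image = "nginx:1.26"
//    h2 := HashContainer(c) // h1 != h2: the image participates in the hash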
// envVarsToMap constructs a map of environment name to value from a slice
// of env vars.
func envVarsToMap(envs []EnvVar) map[string]string {
result := map[string]string{}
for _, env := range envs {
result[env.Name] = env.Value
}
return result
}
// v1EnvVarsToMap constructs a map of environment name to value from a slice
// of env vars.
func v1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
result := map[string]string{}
for _, env := range envs {
result[env.Name] = env.Value
}
return result
}
// ExpandContainerCommandOnlyStatic substitutes only static environment variable values from the
// container environment definitions. This does *not* include valueFrom substitutions. Note that any
// unbound variables will be left unexpanded rather than substituted with an empty string, e.g. "echo $(MISSING)" => "echo $(MISSING)".
// TODO: callers should use ExpandContainerCommandAndArgs with a fully resolved list of environment.
func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVar) (command []string) {
mapping := expansion.MappingFuncFor(v1EnvVarsToMap(envs))
if len(containerCommand) != 0 {
for _, cmd := range containerCommand {
command = append(command, expansion.Expand(cmd, mapping))
}
}
return command
}
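// Illustrative usage sketch of static-only expansion; the variables are
// hypothetical.
//
//    envs := []v1.EnvVar{{Name: "PORT", Value: "8080"}}
//    cmd := ExpandContainerCommandOnlyStatic([]string{"serve", "--port=$(PORT)", "$(MISSING)"}, envs)
//    // cmd == ["serve", "--port=8080", "$(MISSING)"]; unbound names are left untouched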
// ExpandContainerVolumeMounts expands the subpath of the given VolumeMount by replacing variable references with the values of given EnvVar.
func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) {
envmap := envVarsToMap(envs)
missingKeys := sets.New[string]()
expanded := expansion.Expand(mount.SubPathExpr, func(key string) string {
value, ok := envmap[key]
if !ok || len(value) == 0 {
missingKeys.Insert(key)
}
return value
})
if len(missingKeys) > 0 {
return "", fmt.Errorf("missing value for %s", strings.Join(sets.List(missingKeys), ", "))
}
return expanded, nil
}
// ExpandContainerCommandAndArgs expands the given Container's command and args by replacing variable references with the values of the given EnvVars.
func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) {
mapping := expansion.MappingFuncFor(envVarsToMap(envs))
if len(container.Command) != 0 {
for _, cmd := range container.Command {
command = append(command, expansion.Expand(cmd, mapping))
}
}
if len(container.Args) != 0 {
for _, arg := range container.Args {
args = append(args, expansion.Expand(arg, mapping))
}
}
return command, args
}
// FilterEventRecorder creates an event recorder to record object's event except implicitly required container's, like infra container.
func FilterEventRecorder(recorder record.EventRecorder) record.EventRecorder {
return &innerEventRecorder{
recorder: recorder,
}
}
type innerEventRecorder struct {
recorder record.EventRecorder
}
func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*v1.ObjectReference, bool) {
if ref, ok := object.(*v1.ObjectReference); ok {
// this check is needed AFTER the cast. See https://github.com/kubernetes/kubernetes/issues/95552
if ref == nil {
return nil, false
}
if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) {
return ref, true
}
}
return nil, false
}
func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.Event(ref, eventtype, reason, message)
}
}
func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...)
}
}
func (irecorder *innerEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
if ref, ok := irecorder.shouldRecordEvent(object); ok {
irecorder.recorder.AnnotatedEventf(ref, annotations, eventtype, reason, messageFmt, args...)
}
}
// IsHostNetworkPod returns whether host networking is requested for the given Pod.
// Pod must not be nil.
func IsHostNetworkPod(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
// ConvertPodStatusToRunningPod returns a Pod given a PodStatus and a container runtime name.
// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon
func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod {
runningPod := Pod{
ID: podStatus.ID,
Name: podStatus.Name,
Namespace: podStatus.Namespace,
}
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.State != ContainerStateRunning {
continue
}
container := &Container{
ID: containerStatus.ID,
Name: containerStatus.Name,
Image: containerStatus.Image,
ImageID: containerStatus.ImageID,
ImageRef: containerStatus.ImageRef,
ImageRuntimeHandler: containerStatus.ImageRuntimeHandler,
Hash: containerStatus.Hash,
State: containerStatus.State,
}
runningPod.Containers = append(runningPod.Containers, container)
}
// Populate sandboxes in kubecontainer.Pod
for _, sandbox := range podStatus.SandboxStatuses {
runningPod.Sandboxes = append(runningPod.Sandboxes, &Container{
ID: ContainerID{Type: runtimeName, ID: sandbox.Id},
State: SandboxToContainerState(sandbox.State),
})
}
return runningPod
}
// SandboxToContainerState converts runtimeapi.PodSandboxState to
// kubecontainer.State.
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func SandboxToContainerState(state runtimeapi.PodSandboxState) State {
switch state {
case runtimeapi.PodSandboxState_SANDBOX_READY:
return ContainerStateRunning
case runtimeapi.PodSandboxState_SANDBOX_NOTREADY:
return ContainerStateExited
}
return ContainerStateUnknown
}
// GetContainerSpec gets the container spec by containerName.
func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
var containerSpec *v1.Container
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
if containerName == c.Name {
containerSpec = c
return false
}
return true
})
return containerSpec
}
// HasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func HasPrivilegedContainer(pod *v1.Pod) bool {
var hasPrivileged bool
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
if c.SecurityContext != nil && c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged {
hasPrivileged = true
return false
}
return true
})
return hasPrivileged
}
// HasWindowsHostProcessContainer returns true if any of the containers in a pod are HostProcess containers.
func HasWindowsHostProcessContainer(pod *v1.Pod) bool {
var hasHostProcess bool
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
if sc.HasWindowsHostProcessRequest(pod, c) {
hasHostProcess = true
return false
}
return true
})
return hasHostProcess
}
// AllContainersAreWindowsHostProcess returns true if all containers in a pod are HostProcess containers.
func AllContainersAreWindowsHostProcess(pod *v1.Pod) bool {
allHostProcess := true
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
if !sc.HasWindowsHostProcessRequest(pod, c) {
allHostProcess = false
return false
}
return true
})
return allHostProcess
}
// MakePortMappings creates internal port mappings from API port mappings.
func MakePortMappings(logger klog.Logger, container *v1.Container) (ports []PortMapping) {
names := make(map[string]struct{})
for _, p := range container.Ports {
pm := PortMapping{
HostPort: int(p.HostPort),
ContainerPort: int(p.ContainerPort),
Protocol: p.Protocol,
HostIP: p.HostIP,
}
// We need to determine the address family this entry applies to. We do this to ensure
// duplicate containerPort / protocol rules work across different address families.
// https://github.com/kubernetes/kubernetes/issues/82373
family := "any"
if p.HostIP != "" {
if utilsnet.IsIPv6String(p.HostIP) {
family = "v6"
} else {
family = "v4"
}
}
var name = p.Name
if name == "" {
name = fmt.Sprintf("%s-%s-%s:%d:%d", family, p.Protocol, p.HostIP, p.ContainerPort, p.HostPort)
}
// Protect against a port name being used more than once in a container.
if _, ok := names[name]; ok {
logger.Info("Port name conflicted, it is defined more than once", "portName", name)
continue
}
ports = append(ports, pm)
names[name] = struct{}{}
}
return
}
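// Illustrative sketch of the synthesized uniqueness key for an unnamed port;
// the values are hypothetical.
//
//    p := v1.ContainerPort{Protocol: v1.ProtocolTCP, ContainerPort: 80, HostPort: 8080}
//    // With no name and no HostIP, the generated key is "any-TCP-:80:8080", so the
//    // same containerPort/protocol pair can still be mapped per address family.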
// HasAnyRegularContainerStarted returns true if any regular container has
// started, which indicates all init containers have been initialized.
// Deprecated: This function is not accurate when its pod sandbox is recreated.
// Use HasAnyActiveRegularContainerStarted instead.
func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStatus) bool {
if len(statuses) == 0 {
return false
}
containerNames := make(map[string]struct{})
for _, c := range spec.Containers {
containerNames[c.Name] = struct{}{}
}
for _, status := range statuses {
if _, ok := containerNames[status.Name]; !ok {
continue
}
if status.State.Running != nil || status.State.Terminated != nil {
return true
}
}
return false
}
// HasAnyActiveRegularContainerStarted returns true if any regular container of
// the current pod sandbox has started, which indicates all init containers
// have been initialized.
func HasAnyActiveRegularContainerStarted(spec *v1.PodSpec, podStatus *PodStatus) bool {
if podStatus == nil {
return false
}
containerNames := sets.New[string]()
for _, c := range spec.Containers {
containerNames.Insert(c.Name)
}
for _, status := range podStatus.ActiveContainerStatuses {
if !containerNames.Has(status.Name) {
continue
}
return true
}
return false
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"os"
"path/filepath"
"time"
)
// OSInterface collects system level operations that need to be mocked out
// during tests.
type OSInterface interface {
MkdirAll(path string, perm os.FileMode) error
Symlink(oldname string, newname string) error
Stat(path string) (os.FileInfo, error)
Remove(path string) error
RemoveAll(path string) error
Create(path string) (*os.File, error)
Chmod(path string, perm os.FileMode) error
Hostname() (name string, err error)
Chtimes(path string, atime time.Time, mtime time.Time) error
Pipe() (r *os.File, w *os.File, err error)
ReadDir(dirname string) ([]os.DirEntry, error)
Glob(pattern string) ([]string, error)
Open(name string) (*os.File, error)
OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
Rename(oldpath, newpath string) error
}
// RealOS is used to dispatch the real system level operations.
type RealOS struct{}
// MkdirAll will call os.MkdirAll to create a directory.
func (RealOS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// Symlink will call os.Symlink to create a symbolic link.
func (RealOS) Symlink(oldname string, newname string) error {
return os.Symlink(oldname, newname)
}
// Stat will call os.Stat to get the FileInfo for a given path
func (RealOS) Stat(path string) (os.FileInfo, error) {
return os.Stat(path)
}
// Remove will call os.Remove to remove the path.
func (RealOS) Remove(path string) error {
return os.Remove(path)
}
// RemoveAll will call os.RemoveAll to remove the path and its children.
func (RealOS) RemoveAll(path string) error {
return os.RemoveAll(path)
}
// Create will call os.Create to create and return a file
// at path.
func (RealOS) Create(path string) (*os.File, error) {
return os.Create(path)
}
// Chmod will change the permissions on the specified path or return
// an error.
func (RealOS) Chmod(path string, perm os.FileMode) error {
return os.Chmod(path, perm)
}
// Hostname will call os.Hostname to return the hostname.
func (RealOS) Hostname() (name string, err error) {
return os.Hostname()
}
// Chtimes will call os.Chtimes to change the atime and mtime of the path
func (RealOS) Chtimes(path string, atime time.Time, mtime time.Time) error {
return os.Chtimes(path, atime, mtime)
}
// Pipe will call os.Pipe to return a connected pair of pipe.
func (RealOS) Pipe() (r *os.File, w *os.File, err error) {
return os.Pipe()
}
// ReadDir will call os.ReadDir to return the files under the directory.
func (RealOS) ReadDir(dirname string) ([]os.DirEntry, error) {
return os.ReadDir(dirname)
}
// Glob will call filepath.Glob to return the names of all files matching
// pattern.
func (RealOS) Glob(pattern string) ([]string, error) {
return filepath.Glob(pattern)
}
// Open will call os.Open to return the file.
func (RealOS) Open(name string) (*os.File, error) {
return os.Open(name)
}
// OpenFile will call os.OpenFile to return the file.
func (RealOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(name, flag, perm)
}
// Rename will call os.Rename to rename a file.
func (RealOS) Rename(oldpath, newpath string) error {
return os.Rename(oldpath, newpath)
}
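// Illustrative sketch of why this indirection exists: tests can embed RealOS and
// override a single call. The type and error below are hypothetical.
//
//    type fullDiskOS struct{ RealOS }
//
//    func (fullDiskOS) MkdirAll(path string, perm os.FileMode) error {
//        return errors.New("disk full")
//    }
//
//    var _ OSInterface = fullDiskOS{} // still satisfies the interface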
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"fmt"
v1 "k8s.io/api/core/v1"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// ImplicitContainerPrefix is a container name prefix that indicates that a container was started implicitly (like the pod infra container).
var ImplicitContainerPrefix = "implicitly required container "
// GenerateContainerRef returns an *v1.ObjectReference which references the given container
// within the given pod. Returns an error if the reference can't be constructed or the
// container doesn't actually belong to the pod.
func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectReference, error) {
fieldPath, err := fieldPath(pod, container)
if err != nil {
// TODO: figure out intelligent way to refer to containers that we implicitly
// start (like the pod infra container). This is not a good way, ugh.
fieldPath = ImplicitContainerPrefix + container.Name
}
ref, err := ref.GetPartialReference(legacyscheme.Scheme, pod, fieldPath)
if err != nil {
return nil, err
}
return ref, nil
}
// fieldPath returns a fieldPath locating container within pod.
// Returns an error if the container isn't part of the pod.
func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) {
for i := range pod.Spec.Containers {
here := &pod.Spec.Containers[i]
if here.Name == container.Name {
if here.Name == "" {
return fmt.Sprintf("spec.containers[%d]", i), nil
}
return fmt.Sprintf("spec.containers{%s}", here.Name), nil
}
}
for i := range pod.Spec.InitContainers {
here := &pod.Spec.InitContainers[i]
if here.Name == container.Name {
if here.Name == "" {
return fmt.Sprintf("spec.initContainers[%d]", i), nil
}
return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil
}
}
for i := range pod.Spec.EphemeralContainers {
here := &pod.Spec.EphemeralContainers[i]
if here.Name == container.Name {
if here.Name == "" {
return fmt.Sprintf("spec.ephemeralContainers[%d]", i), nil
}
return fmt.Sprintf("spec.ephemeralContainers{%s}", here.Name), nil
}
}
return "", fmt.Errorf("container %q not found in pod %s/%s", container.Name, pod.Namespace, pod.Name)
}
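// Illustrative sketch of the field paths produced above; the pod and container
// names are hypothetical.
//
//    pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "app"}}}}
//    fp, _ := fieldPath(pod, &pod.Spec.Containers[0])
//    fmt.Println(fp) // "spec.containers{app}"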
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package container
import (
"context"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/credentialprovider"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/volume"
)
// The Version interface allows consuming runtime versions: comparing them and formatting them as strings.
type Version interface {
// Compare compares two versions of the runtime. On success it returns -1
// if the version is less than the other, 1 if it is greater than the other,
// or 0 if they are equal.
Compare(other string) (int, error)
// String returns a string that represents the version.
String() string
}
// ImageSpec is an internal representation of an image. Currently, it wraps the
// value of a Container's Image field, but in the future it will include more detailed
// information about the different image types.
type ImageSpec struct {
// ID of the image.
Image string
// Runtime handler used to pull this image
RuntimeHandler string
// The annotations for the image.
// This should be passed to CRI during image pulls and returned when images are listed.
Annotations []Annotation
}
// ImageStats contains statistics about all the images currently available.
type ImageStats struct {
// Total amount of storage consumed by existing images.
TotalStorageBytes uint64
}
// Runtime interface defines the interfaces that should be implemented
// by a container runtime.
// Thread safety is required from implementations of this interface.
type Runtime interface {
// Type returns the type of the container runtime.
Type() string
// Version returns the version information of the container runtime.
Version(ctx context.Context) (Version, error)
// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.
// This may be different from the runtime engine's version.
// TODO(random-liu): We should fold this into Version()
APIVersion() (Version, error)
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
Status(ctx context.Context) (*RuntimeStatus, error)
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
GetPods(ctx context.Context, all bool) ([]*Pod, error)
// GarbageCollect removes dead containers using the specified container gc policy
// If allSourcesReady is not true, it means that kubelet doesn't have the
// complete list of pods from all available sources (e.g., apiserver, http,
// file). In this case, garbage collector should refrain itself from aggressive
// behavior such as removing all containers of unrecognized pods (yet).
// If evictNonDeletedPods is set to true, containers and sandboxes belonging to pods
// that are terminated, but not deleted will be evicted. Otherwise, only deleted pods
// will be GC'd.
// TODO: Revisit this method and make it cleaner.
GarbageCollect(ctx context.Context, gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
// SyncPod syncs the running pod into the desired pod.
SyncPod(ctx context.Context, pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// TODO(random-liu): Return PodSyncResult in KillPod.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
KillPod(ctx context.Context, pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*PodStatus, error)
// TODO(vmarmol): Unify pod and containerID args.
// GetContainerLogs returns logs of a specific container. By
// default, it returns a snapshot of the container log. Set 'follow' to true to
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
// "100" or "all") to tail the log.
GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error)
// DeleteContainer deletes a container. If the container is still running, an error is returned.
DeleteContainer(ctx context.Context, containerID ContainerID) error
// ImageService provides access to image-related methods.
ImageService
// UpdatePodCIDR sends a new podCIDR to the runtime.
// This method just proxies a new runtimeConfig with the updated
// CIDR value down to the runtime shim.
UpdatePodCIDR(ctx context.Context, podCIDR string) error
// CheckpointContainer tells the runtime to checkpoint a container
// and store the resulting archive to the checkpoint directory.
CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error
// Generate pod status from the CRI event
GeneratePodStatus(event *runtimeapi.ContainerEventResponse) *PodStatus
// ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics.
// This list should be static at startup: either the client and server restart together when
// adding or removing metrics descriptors, or they should not change.
// Put differently, if ListPodSandboxMetrics references a name that is not described in the initial
// ListMetricDescriptors call, then the metric will not be broadcast.
ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error)
// ListPodSandboxMetrics retrieves the metrics for all pod sandboxes.
ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error)
// GetContainerStatus returns the status for the container.
GetContainerStatus(ctx context.Context, id ContainerID) (*Status, error)
// GetContainerSwapBehavior reports whether a container could be swappable.
// This is used to decide whether to handle InPlacePodVerticalScaling for containers.
GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) kubelettypes.SwapBehavior
}
// StreamingRuntime is the interface implemented by runtimes that handle the serving of the
// streaming calls (exec/attach/port-forward) themselves. In this case, Kubelet should redirect to
// the runtime server.
type StreamingRuntime interface {
GetExec(ctx context.Context, id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetAttach(ctx context.Context, id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error)
GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error)
}
// The ImageService interface allows working with the image service.
type ImageService interface {
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
// It returns a reference (digest or ID) to the pulled image and the credentials
// that were used to pull the image. If the returned credentials are nil, the
// pull was anonymous.
PullImage(ctx context.Context, image ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error)
// GetImageRef gets the reference (digest or ID) of the image which has already been in
// the local storage. It returns ("", nil) if the image isn't in the local storage.
GetImageRef(ctx context.Context, image ImageSpec) (string, error)
// ListImages gets all images currently on the machine.
ListImages(ctx context.Context) ([]Image, error)
// RemoveImage removes the specified image.
RemoveImage(ctx context.Context, image ImageSpec) error
// ImageStats returns Image statistics.
ImageStats(ctx context.Context) (*ImageStats, error)
// ImageFsInfo returns a list of file systems for containers/images
ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error)
// GetImageSize returns the size of the image
GetImageSize(ctx context.Context, image ImageSpec) (uint64, error)
}
// The Attacher interface allows attaching to a container.
type Attacher interface {
AttachContainer(ctx context.Context, id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
}
// CommandRunner is the interface for running commands in a container.
type CommandRunner interface {
// RunInContainer synchronously executes the command in the container, and returns the output.
// If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
RunInContainer(ctx context.Context, id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
}
// Pod is a group of containers.
type Pod struct {
// The ID of the pod, which can be used to retrieve a particular pod
// from the pod list returned by GetPods().
ID types.UID
// The name and namespace of the pod, which are human-readable.
Name string
Namespace string
// Creation timestamp of the pod in nanoseconds.
CreatedAt uint64
// List of containers that belong to this pod. It may contain only
// running containers, or a mix of running and dead ones (when GetPods(true) is used).
Containers []*Container
// List of sandboxes associated with this pod. The sandboxes are converted
// to Container temporarily to avoid substantial changes to other
// components. This is only populated by kuberuntime.
// TODO: use the runtimeApi.PodSandbox type directly.
Sandboxes []*Container
}
// PodPair contains both runtime#Pod and api#Pod
type PodPair struct {
// APIPod is the v1.Pod
APIPod *v1.Pod
// RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod
RunningPod *Pod
}
// ContainerID is a type that identifies a container.
type ContainerID struct {
// The type of the container runtime. e.g. 'docker'.
Type string
// The identification of the container, which is consumable by
// the underlying container runtime. (Note that the container
// runtime interface still takes the whole struct as input).
ID string
}
// BuildContainerID returns the ContainerID given type and id.
func BuildContainerID(typ, ID string) ContainerID {
return ContainerID{Type: typ, ID: ID}
}
// ParseContainerID is a convenience method for creating a ContainerID from an ID string.
func ParseContainerID(containerID string) ContainerID {
var id ContainerID
if err := id.ParseString(containerID); err != nil {
// Use klog.TODO() because we currently do not have a proper logger to pass in.
// This should be replaced with an appropriate logger when refactoring this function to accept a logger parameter.
logger := klog.TODO()
logger.Error(err, "Parsing containerID failed")
}
return id
}
// ParseString converts given string into ContainerID
func (c *ContainerID) ParseString(data string) error {
// Trim the quotes and split the type and ID.
parts := strings.Split(strings.Trim(data, "\""), "://")
if len(parts) != 2 {
return fmt.Errorf("invalid container ID: %q", data)
}
c.Type, c.ID = parts[0], parts[1]
return nil
}
func (c *ContainerID) String() string {
return fmt.Sprintf("%s://%s", c.Type, c.ID)
}
// IsEmpty returns whether given ContainerID is empty.
func (c *ContainerID) IsEmpty() bool {
return *c == ContainerID{}
}
// MarshalJSON formats a given ContainerID into a byte array.
func (c *ContainerID) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("%q", c.String())), nil
}
// UnmarshalJSON parses ContainerID from a given array of bytes.
func (c *ContainerID) UnmarshalJSON(data []byte) error {
return c.ParseString(string(data))
}
// State represents the state of a container
type State string
const (
// ContainerStateCreated indicates a container that has been created (e.g. with docker create) but not started.
ContainerStateCreated State = "created"
// ContainerStateRunning indicates a currently running container.
ContainerStateRunning State = "running"
// ContainerStateExited indicates a container that ran and completed ("stopped" in other contexts, although a created container is technically also "stopped").
ContainerStateExited State = "exited"
// ContainerStateUnknown encompasses all the states that we currently don't care about (like restarting, paused, dead).
ContainerStateUnknown State = "unknown"
)
// ContainerReasonStatusUnknown indicates that the status of the container cannot be determined.
const ContainerReasonStatusUnknown string = "ContainerStatusUnknown"
// Container provides the runtime information for a container, such as ID, hash,
// state of the container.
type Container struct {
// The ID of the container, used by the container runtime to identify
// a container.
ID ContainerID
// The name of the container, which should be the same as specified by
// v1.Container.
Name string
// The image name of the container; this also includes the tag of the image.
// The expected form is "NAME:TAG".
Image string
// The ID of the image used by the container.
ImageID string
// The digested reference of the image used by the container.
ImageRef string
// Runtime handler used to pull the image if any.
ImageRuntimeHandler string
// Hash of the container, used for comparison. Optional for containers
// not managed by kubelet.
Hash uint64
// State is the state of the container.
State State
}
// PodStatus represents the status of the pod and its containers.
// v1.PodStatus can be derived from examining PodStatus and v1.Pod.
type PodStatus struct {
// ID of the pod.
ID types.UID
// Name of the pod.
Name string
// Namespace of the pod.
Namespace string
// All IPs assigned to this pod
IPs []string
// Status of containers in the pod.
ContainerStatuses []*Status
// Statuses of containers of the active sandbox in the pod.
ActiveContainerStatuses []*Status
// Status of the pod sandbox.
// Only populated by kuberuntime for now; other runtimes may keep it nil.
SandboxStatuses []*runtimeapi.PodSandboxStatus
// Timestamp at which container and pod statuses were recorded
TimeStamp time.Time
}
// ContainerResources represents the Resources allocated to the running container.
type ContainerResources struct {
// CPU capacity reserved for the container
CPURequest *resource.Quantity
// CPU limit enforced on the container
CPULimit *resource.Quantity
// Memory capacity reserved for the container
MemoryRequest *resource.Quantity
// Memory limit enforced on the container
MemoryLimit *resource.Quantity
}
// Status represents the status of a container.
//
// Status does not contain VolumeMap because CRI API is unaware of volume names.
type Status struct {
// ID of the container.
ID ContainerID
// Name of the container.
Name string
// Status of the container.
State State
// Creation time of the container.
CreatedAt time.Time
// Start time of the container.
StartedAt time.Time
// Finish time of the container.
FinishedAt time.Time
// Exit code of the container.
ExitCode int
// Name of the image; this also includes the tag of the image.
// The expected form is "NAME:TAG".
Image string
// ID of the image.
ImageID string
// The digested reference of the image used by the container.
ImageRef string
// Runtime handler used to pull the image if any.
ImageRuntimeHandler string
// Hash of the container, used for comparison.
Hash uint64
// Number of times that the container has been restarted.
RestartCount int
// A string that explains why the container is in this state.
Reason string
// Message written by the container before exiting (stored in
// TerminationMessagePath).
Message string
// CPU and memory resources for this container
Resources *ContainerResources
// User identity information of the first process of this container
User *ContainerUser
// Mounts are the volume mounts of the container
Mounts []Mount
// StopSignal is used to show the container's effective stop signal in the Status
StopSignal *v1.Signal
}
// ContainerUser represents user identity information
type ContainerUser struct {
// Linux holds user identity information of the first process of the containers in Linux.
// Note that this field cannot be set when spec.os.name is windows.
Linux *LinuxContainerUser
// Windows holds user identity information of the first process of the containers in Windows
// This is just reserved for future use.
// Windows *WindowsContainerUser
}
// LinuxContainerUser represents user identity information in Linux containers
type LinuxContainerUser struct {
// UID is the primary uid of the first process in the container
UID int64
// GID is the primary gid of the first process in the container
GID int64
// SupplementalGroups are the supplemental groups attached to the first process in the container
SupplementalGroups []int64
}
// FindContainerStatusByName returns container status in the pod status with the given name.
// When there are multiple containers' statuses with the same name, the first match will be returned.
func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Status {
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.Name == containerName {
return containerStatus
}
}
return nil
}
// GetRunningContainerStatuses returns the statuses of all the running containers in a pod
func (podStatus *PodStatus) GetRunningContainerStatuses() []*Status {
runningContainerStatuses := []*Status{}
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.State == ContainerStateRunning {
runningContainerStatuses = append(runningContainerStatuses, containerStatus)
}
}
return runningContainerStatuses
}
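// examplePodStatusQueries is an illustrative sketch, not part of the original
// source: it looks up one container status by name and then lists all running
// containers, using the two query helpers above.
func examplePodStatusQueries(podStatus *PodStatus) {
if status := podStatus.FindContainerStatusByName("nginx"); status != nil {
fmt.Println("nginx state:", status.State)
}
for _, status := range podStatus.GetRunningContainerStatuses() {
fmt.Println("running:", status.Name)
}
}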
// Image contains basic information about a container image.
type Image struct {
// ID of the image.
ID string
// Other names by which this image is known.
RepoTags []string
// Digests by which this image is known.
RepoDigests []string
// The size of the image in bytes.
Size int64
// ImageSpec for the image which include annotations.
Spec ImageSpec
// Pin for preventing garbage collection
Pinned bool
}
// EnvVar represents an environment variable.
type EnvVar struct {
Name string
Value string
}
// Annotation represents an annotation.
type Annotation struct {
Name string
Value string
}
// Mount represents a volume mount.
type Mount struct {
// Name of the volume mount.
// TODO(yifan): Remove this field, as this is not representing the unique name of the mount,
// but the volume name only.
Name string
// Path of the mount within the container.
ContainerPath string
// Path of the mount on the host.
HostPath string
// Whether the mount is read-only.
ReadOnly bool
// Whether the mount is recursive read-only.
// Must not be true if ReadOnly is false.
RecursiveReadOnly bool
// Whether the mount needs SELinux relabeling
SELinuxRelabel bool
// Requested propagation mode
Propagation runtimeapi.MountPropagation
// Image is set if an OCI volume, referenced by image ID or digest, should get mounted (special case).
Image *runtimeapi.ImageSpec
// ImageSubPath is set if an image volume sub path should get mounted. This
// field is only required if the above Image is set.
ImageSubPath string
}
// ImageVolumes is a map of image specs by volume name.
type ImageVolumes = map[string]*runtimeapi.ImageSpec
// PortMapping contains information about the port mapping.
type PortMapping struct {
// Protocol of the port mapping.
Protocol v1.Protocol
// The port number within the container.
ContainerPort int
// The port number on the host.
HostPort int
// The host IP.
HostIP string
}
// DeviceInfo contains information about the device.
type DeviceInfo struct {
// Path on host for mapping
PathOnHost string
// Path in Container to map
PathInContainer string
// Cgroup permissions
Permissions string
}
// CDIDevice contains information about a CDI device
type CDIDevice struct {
// Name is a fully qualified device name according to
// https://github.com/cncf-tags/container-device-interface/blob/e66544063aa7760c4ea6330ce9e6c757f8e61df2/README.md?plain=1#L9-L15
Name string
}
// RunContainerOptions specifies the options necessary for running containers
type RunContainerOptions struct {
// The environment variables list.
Envs []EnvVar
// The mounts for the containers.
Mounts []Mount
// The host devices mapped into the containers.
Devices []DeviceInfo
// The CDI devices for the container
CDIDevices []CDIDevice
// The annotations for the container
// These annotations are generated by other components (i.e.,
// not users). Currently, only device plugins populate the annotations.
Annotations []Annotation
// If the container has specified the TerminationMessagePath, then
// this directory will be used to create and mount the log file to
// container.TerminationMessagePath
PodContainerDir string
// The type of container rootfs
ReadOnly bool
}
// VolumeInfo contains information about the volume.
type VolumeInfo struct {
// Mounter is the volume's mounter
Mounter volume.Mounter
// BlockVolumeMapper is the Block volume's mapper
BlockVolumeMapper volume.BlockVolumeMapper
// SELinuxLabeled indicates whether this volume has had the
// pod's SELinux label applied to it or not
SELinuxLabeled bool
// Whether the volume permission is set to read-only or not
// This value is passed from volume.spec
ReadOnly bool
// Inner volume spec name, which is the PV name if used, otherwise
// it is the same as the outer volume spec name.
InnerVolumeSpecName string
}
// VolumeMap represents the map of volumes.
type VolumeMap map[string]VolumeInfo
// RuntimeConditionType is the type of required runtime conditions.
type RuntimeConditionType string
const (
// RuntimeReady means the runtime is up and ready to accept basic containers.
RuntimeReady RuntimeConditionType = "RuntimeReady"
// NetworkReady means the runtime network is up and ready to accept containers which require network.
NetworkReady RuntimeConditionType = "NetworkReady"
)
// RuntimeStatus contains the status of the runtime.
type RuntimeStatus struct {
// Conditions is an array of current observed runtime conditions.
Conditions []RuntimeCondition
// Handlers is an array of current available handlers
Handlers []RuntimeHandler
// Features is the set of features implemented by the runtime
Features *RuntimeFeatures
}
// GetRuntimeCondition gets a specified runtime condition from the runtime status.
func (r *RuntimeStatus) GetRuntimeCondition(t RuntimeConditionType) *RuntimeCondition {
for i := range r.Conditions {
c := &r.Conditions[i]
if c.Type == t {
return c
}
}
return nil
}
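// exampleRuntimeReady is an illustrative sketch, not part of the original
// source: GetRuntimeCondition returns nil for a condition the runtime never
// reported, so both the nil check and the Status field matter.
func exampleRuntimeReady(status *RuntimeStatus) bool {
condition := status.GetRuntimeCondition(RuntimeReady)
return condition != nil && condition.Status
}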
// String formats the runtime status into a human-readable string.
func (r *RuntimeStatus) String() string {
var ss []string
var sh []string
for _, c := range r.Conditions {
ss = append(ss, c.String())
}
for _, h := range r.Handlers {
sh = append(sh, h.String())
}
return fmt.Sprintf("Runtime Conditions: %s; Handlers: %s, Features: %s", strings.Join(ss, ", "), strings.Join(sh, ", "), r.Features.String())
}
// RuntimeHandler contains condition information for the runtime handler.
type RuntimeHandler struct {
// Name is the handler name.
Name string
// SupportsRecursiveReadOnlyMounts is true if the handler has support for
// recursive read-only mounts.
SupportsRecursiveReadOnlyMounts bool
// SupportsUserNamespaces is true if the handler has support for
// user namespaces.
SupportsUserNamespaces bool
}
// String formats the runtime handler into a human-readable string.
func (h *RuntimeHandler) String() string {
return fmt.Sprintf("Name=%s SupportsRecursiveReadOnlyMounts: %v SupportsUserNamespaces: %v",
h.Name, h.SupportsRecursiveReadOnlyMounts, h.SupportsUserNamespaces)
}
// RuntimeCondition contains condition information for the runtime.
type RuntimeCondition struct {
// Type of runtime condition.
Type RuntimeConditionType
// Status of the condition, one of true/false.
Status bool
// Reason is brief reason for the condition's last transition.
Reason string
// Message is human readable message indicating details about last transition.
Message string
}
// String formats the runtime condition into a human-readable string.
func (c *RuntimeCondition) String() string {
return fmt.Sprintf("%s=%t reason:%s message:%s", c.Type, c.Status, c.Reason, c.Message)
}
// RuntimeFeatures contains the set of features implemented by the runtime
type RuntimeFeatures struct {
SupplementalGroupsPolicy bool
}
// String formats the runtime features into a human-readable string.
func (f *RuntimeFeatures) String() string {
if f == nil {
return "nil"
}
return fmt.Sprintf("SupplementalGroupsPolicy: %v", f.SupplementalGroupsPolicy)
}
// Pods represents the list of pods
type Pods []*Pod
// FindPodByID finds and returns a pod in the pod list by UID. It will return an empty pod
// if not found.
func (p Pods) FindPodByID(podUID types.UID) Pod {
for i := range p {
if p[i].ID == podUID {
return *p[i]
}
}
return Pod{}
}
// FindPodByFullName finds and returns a pod in the pod list by the full name.
// It will return an empty pod if not found.
func (p Pods) FindPodByFullName(podFullName string) Pod {
for i := range p {
if BuildPodFullName(p[i].Name, p[i].Namespace) == podFullName {
return *p[i]
}
}
return Pod{}
}
// FindPod combines FindPodByID and FindPodByFullName; it finds and returns a pod in the
// pod list either by the full name or the pod UID. It will return an empty pod
// if not found.
func (p Pods) FindPod(podFullName string, podUID types.UID) Pod {
if len(podFullName) > 0 {
return p.FindPodByFullName(podFullName)
}
return p.FindPodByID(podUID)
}
// FindContainerByName returns a container in the pod with the given name.
// When there are multiple containers with the same name, the first match will
// be returned.
func (p *Pod) FindContainerByName(containerName string) *Container {
for _, c := range p.Containers {
if c.Name == containerName {
return c
}
}
return nil
}
// FindContainerByID returns a container in the pod with the given ContainerID.
func (p *Pod) FindContainerByID(id ContainerID) *Container {
for _, c := range p.Containers {
if c.ID == id {
return c
}
}
return nil
}
// FindSandboxByID returns a sandbox in the pod with the given ContainerID.
func (p *Pod) FindSandboxByID(id ContainerID) *Container {
for _, c := range p.Sandboxes {
if c.ID == id {
return c
}
}
return nil
}
// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no
// corresponding field in Pod, that field will not be populated.
func (p *Pod) ToAPIPod() *v1.Pod {
var pod v1.Pod
pod.UID = p.ID
pod.Name = p.Name
pod.Namespace = p.Namespace
for _, c := range p.Containers {
var container v1.Container
container.Name = c.Name
container.Image = c.Image
pod.Spec.Containers = append(pod.Spec.Containers, container)
}
return &pod
}
// IsEmpty returns true if the pod is empty.
func (p *Pod) IsEmpty() bool {
return reflect.DeepEqual(p, &Pod{})
}
// GetPodFullName returns a name that uniquely identifies a pod.
func GetPodFullName(pod *v1.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format.
return pod.Name + "_" + pod.Namespace
}
// BuildPodFullName builds the pod full name from pod name and namespace.
func BuildPodFullName(name, namespace string) string {
return name + "_" + namespace
}
// ParsePodFullName parses the pod full name.
func ParsePodFullName(podFullName string) (string, string, error) {
parts := strings.Split(podFullName, "_")
if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
return "", "", fmt.Errorf("failed to parse the pod full name %q", podFullName)
}
return parts[0], parts[1], nil
}
// Option is a functional option type for Runtime, useful for
// completely optional settings.
type Option func(Runtime)
// SortContainerStatusesByCreationTime sorts the container statuses by creation time.
type SortContainerStatusesByCreationTime []*Status
func (s SortContainerStatusesByCreationTime) Len() int { return len(s) }
func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortContainerStatusesByCreationTime) Less(i, j int) bool {
return s[i].CreatedAt.Before(s[j].CreatedAt)
}
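// exampleOldestFirst is an illustrative sketch, not part of the original
// source: the helper type satisfies sort.Interface, so callers typically run
// sort.Sort(SortContainerStatusesByCreationTime(statuses)). Here the Less
// method is exercised directly to avoid assuming extra imports.
func exampleOldestFirst(a, b *Status) bool {
statuses := SortContainerStatusesByCreationTime{a, b}
return statuses.Less(0, 1) // true when a was created before b
}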
const (
// MaxPodTerminationMessageLogLength is the maximum bytes any one pod may have written
// as termination message output across all containers. Containers will be evenly truncated
// until output is below this limit.
MaxPodTerminationMessageLogLength = 1024 * 12
// MaxContainerTerminationMessageLength is the upper bound any one container may write to
// its termination message path. Contents above this length will be truncated.
MaxContainerTerminationMessageLength = 1024 * 4
// MaxContainerTerminationMessageLogLength is the maximum bytes any one container will
// have written to its termination message when the message is read from the logs.
MaxContainerTerminationMessageLogLength = 1024 * 2
// MaxContainerTerminationMessageLogLines is the maximum number of previous lines of
// log output that the termination message can contain.
MaxContainerTerminationMessageLogLines = 80
)
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package container
import (
"context"
"sync"
"time"
)
// RuntimeCache is an interface for obtaining cached pods.
type RuntimeCache interface {
GetPods(context.Context) ([]*Pod, error)
ForceUpdateIfOlder(context.Context, time.Time) error
}
type podsGetter interface {
GetPods(context.Context, bool) ([]*Pod, error)
}
// NewRuntimeCache creates a container runtime cache.
func NewRuntimeCache(getter podsGetter, cachePeriod time.Duration) (RuntimeCache, error) {
return &runtimeCache{
getter: getter,
cachePeriod: cachePeriod,
}, nil
}
// runtimeCache caches a list of pods. It records a timestamp (cacheTime) right
// before updating the pods, so the timestamp is at most as new as the pods
// (and can be slightly older). The timestamp always moves forward. Callers are
// expected not to modify the pods returned from GetPods.
type runtimeCache struct {
sync.Mutex
// The underlying container runtime used to update the cache.
getter podsGetter
// The interval after which the cache should be refreshed.
cachePeriod time.Duration
// Last time when cache was updated.
cacheTime time.Time
// The content of the cache.
pods []*Pod
}
// GetPods returns the cached pods if they are not outdated; otherwise, it
// retrieves the latest pods and returns them.
func (r *runtimeCache) GetPods(ctx context.Context) ([]*Pod, error) {
r.Lock()
defer r.Unlock()
if time.Since(r.cacheTime) > r.cachePeriod {
if err := r.updateCache(ctx); err != nil {
return nil, err
}
}
return r.pods, nil
}
func (r *runtimeCache) ForceUpdateIfOlder(ctx context.Context, minExpectedCacheTime time.Time) error {
r.Lock()
defer r.Unlock()
if r.cacheTime.Before(minExpectedCacheTime) {
return r.updateCache(ctx)
}
return nil
}
func (r *runtimeCache) updateCache(ctx context.Context) error {
pods, timestamp, err := r.getPodsWithTimestamp(ctx)
if err != nil {
return err
}
r.pods, r.cacheTime = pods, timestamp
return nil
}
// getPodsWithTimestamp records a timestamp and retrieves pods from the getter.
func (r *runtimeCache) getPodsWithTimestamp(ctx context.Context) ([]*Pod, time.Time, error) {
// Always record the timestamp before getting the pods to avoid stale pods.
timestamp := time.Now()
pods, err := r.getter.GetPods(ctx, false)
return pods, timestamp, err
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import "context"
// TestRuntimeCache embeds runtimeCache with some additional methods for testing.
// It must be declared in the container package to have visibility to runtimeCache.
// It cannot be in a "..._test.go" file because runtime_cache_test.go needs cross-package visibility to it.
// (cross-package declarations in test files cannot be used from dot imports if this package is vendored)
type TestRuntimeCache struct {
runtimeCache
}
// UpdateCacheWithLock updates the cache while holding the lock.
func (r *TestRuntimeCache) UpdateCacheWithLock(ctx context.Context) error {
r.Lock()
defer r.Unlock()
return r.updateCache(ctx)
}
// GetCachedPods returns the cached pods.
func (r *TestRuntimeCache) GetCachedPods() []*Pod {
r.Lock()
defer r.Unlock()
return r.pods
}
// NewTestRuntimeCache creates a new instance of TestRuntimeCache.
func NewTestRuntimeCache(getter podsGetter) *TestRuntimeCache {
return &TestRuntimeCache{
runtimeCache: runtimeCache{
getter: getter,
},
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"errors"
"fmt"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
// TODO(random-liu): We need to better organize runtime errors for introspection.
// ErrCrashLoopBackOff is returned when a container has terminated and the kubelet is backing off restarting it.
var ErrCrashLoopBackOff = errors.New("CrashLoopBackOff")
var (
// ErrContainerNotFound is returned when a container with the given name is
// not found in the given pod, amongst those managed by the kubelet.
ErrContainerNotFound = errors.New("no matching container")
)
var (
// ErrRunContainer is returned when the runtime failed to start any of the pod's containers.
ErrRunContainer = errors.New("RunContainerError")
// ErrKillContainer is returned when the runtime failed to kill any of the pod's containers.
ErrKillContainer = errors.New("KillContainerError")
// ErrCreatePodSandbox is returned when the runtime failed to create a sandbox for the pod.
ErrCreatePodSandbox = errors.New("CreatePodSandboxError")
// ErrConfigPodSandbox is returned when the runtime failed to get the pod sandbox config from the pod.
ErrConfigPodSandbox = errors.New("ConfigPodSandboxError")
// ErrKillPodSandbox is returned when the runtime failed to stop the pod's sandbox.
ErrKillPodSandbox = errors.New("KillPodSandboxError")
// ErrResizePodInPlace is returned when the runtime failed to resize a pod in place.
ErrResizePodInPlace = errors.New("ResizePodInPlaceError")
)
// SyncAction indicates the different kinds of actions in SyncPod() and KillPod(). Currently there are only actions
// for starting/killing containers and setting up/tearing down the network.
type SyncAction string
const (
// StartContainer action
StartContainer SyncAction = "StartContainer"
// KillContainer action
KillContainer SyncAction = "KillContainer"
// SetupNetwork action
SetupNetwork SyncAction = "SetupNetwork"
// TeardownNetwork action
TeardownNetwork SyncAction = "TeardownNetwork"
// InitContainer action
InitContainer SyncAction = "InitContainer"
// CreatePodSandbox action
CreatePodSandbox SyncAction = "CreatePodSandbox"
// ConfigPodSandbox action
ConfigPodSandbox SyncAction = "ConfigPodSandbox"
// KillPodSandbox action
KillPodSandbox SyncAction = "KillPodSandbox"
// ResizePodInPlace action is included whenever any containers in the pod are resized without restart
ResizePodInPlace SyncAction = "ResizePodInPlace"
)
// SyncResult is the result of a sync action.
type SyncResult struct {
// The associated action of the result
Action SyncAction
// The target of the action. Currently the target can only be:
// * Container: Target should be the container name
// * Network: Target is unused for now; we just set it to the pod full name
Target interface{}
// Brief error reason
Error error
// Human readable error reason
Message string
}
// NewSyncResult generates a new SyncResult with the given Action and Target
func NewSyncResult(action SyncAction, target interface{}) *SyncResult {
return &SyncResult{Action: action, Target: target}
}
// Fail fails the SyncResult with the given error and message
func (r *SyncResult) Fail(err error, msg string) {
r.Error, r.Message = err, msg
}
// PodSyncResult is the summary result of SyncPod() and KillPod()
type PodSyncResult struct {
// Result of different sync actions
SyncResults []*SyncResult
// Error encountered in SyncPod() and KillPod() that is not already included in SyncResults
SyncError error
}
// AddSyncResult adds one or more SyncResults to the current PodSyncResult
func (p *PodSyncResult) AddSyncResult(result ...*SyncResult) {
p.SyncResults = append(p.SyncResults, result...)
}
// AddPodSyncResult merges a PodSyncResult into the current one
func (p *PodSyncResult) AddPodSyncResult(result PodSyncResult) {
p.AddSyncResult(result.SyncResults...)
p.SyncError = result.SyncError
}
// Fail fails the PodSyncResult with an error that occurred in SyncPod() or KillPod() itself
func (p *PodSyncResult) Fail(err error) {
p.SyncError = err
}
// Error returns an error summarizing all the errors in PodSyncResult
func (p *PodSyncResult) Error() error {
errlist := []error{}
if p.SyncError != nil {
errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v", p.SyncError))
}
for _, result := range p.SyncResults {
if result.Error != nil {
errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q", result.Action, result.Target,
result.Error, result.Message))
}
}
return utilerrors.NewAggregate(errlist)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/container"
)
type fakeCache struct {
runtime container.Runtime
}
func NewFakeCache(runtime container.Runtime) container.Cache {
return &fakeCache{runtime: runtime}
}
func (c *fakeCache) Get(id types.UID) (*container.PodStatus, error) {
return c.runtime.GetPodStatus(context.TODO(), id, "", "")
}
func (c *fakeCache) GetNewerThan(id types.UID, minTime time.Time) (*container.PodStatus, error) {
return c.Get(id)
}
func (c *fakeCache) Set(id types.UID, status *container.PodStatus, err error, timestamp time.Time) (updated bool) {
return true
}
func (c *fakeCache) Delete(id types.UID) {
}
func (c *fakeCache) UpdateTime(_ time.Time) {
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// FakeReadyProvider implements a fake ready provider
type FakeReadyProvider struct {
kubecontainer.SourcesReadyProvider
}
// AllReady reports to the caller that the fake provider is ready.
func (frp *FakeReadyProvider) AllReady() bool {
return true
}
// NewFakeReadyProvider creates a FakeReadyProvider object
func NewFakeReadyProvider() kubecontainer.SourcesReadyProvider {
return &FakeReadyProvider{}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"io"
"net/url"
"reflect"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/volume"
)
type TB interface {
Errorf(format string, args ...any)
}
type FakePod struct {
Pod *kubecontainer.Pod
NetnsPath string
}
// FakeRuntime is a fake container runtime for testing.
type FakeRuntime struct {
sync.Mutex
CalledFunctions []string
PodList []*FakePod
AllPodList []*FakePod
ImageList []kubecontainer.Image
ImageFsStats []*runtimeapi.FilesystemUsage
ContainerFsStats []*runtimeapi.FilesystemUsage
APIPodStatus v1.PodStatus
PodStatus kubecontainer.PodStatus
StartedPods []string
KilledPods []string
StartedContainers []string
KilledContainers []string
RuntimeStatus *kubecontainer.RuntimeStatus
VersionInfo string
APIVersionInfo string
RuntimeType string
SyncResults *kubecontainer.PodSyncResult
Err error
InspectErr error
StatusErr error
// If BlockImagePulls is true, then all PullImage() calls will be blocked until
// UnblockImagePulls() is called. This is used to simulate image pull latency
// from the container runtime.
BlockImagePulls bool
imagePullTokenBucket chan bool
SwapBehavior map[string]kubetypes.SwapBehavior
T TB
}
const FakeHost = "localhost:12345"
type FakeStreamingRuntime struct {
*FakeRuntime
}
var _ kubecontainer.StreamingRuntime = &FakeStreamingRuntime{}
// FakeRuntime should implement Runtime.
var _ kubecontainer.Runtime = &FakeRuntime{}
type FakeVersion struct {
Version string
}
func (fv *FakeVersion) String() string {
return fv.Version
}
func (fv *FakeVersion) Compare(other string) (int, error) {
result := 0
if fv.Version > other {
result = 1
} else if fv.Version < other {
result = -1
}
return result, nil
}
type podsGetter interface {
GetPods(context.Context, bool) ([]*kubecontainer.Pod, error)
}
type FakeRuntimeCache struct {
getter podsGetter
}
func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache {
return &FakeRuntimeCache{getter}
}
func (f *FakeRuntimeCache) GetPods(ctx context.Context) ([]*kubecontainer.Pod, error) {
return f.getter.GetPods(ctx, false)
}
func (f *FakeRuntimeCache) ForceUpdateIfOlder(context.Context, time.Time) error {
return nil
}
// UpdatePodCIDR fulfills the CRI interface.
func (f *FakeRuntime) UpdatePodCIDR(_ context.Context, c string) error {
return nil
}
func (f *FakeRuntime) assertList(expect []string, test []string) bool {
if !reflect.DeepEqual(expect, test) {
f.T.Errorf("AssertList: expected %#v, got %#v", expect, test)
return false
}
return true
}
// AssertCalls tests whether the invoked functions are as expected.
func (f *FakeRuntime) AssertCalls(calls []string) bool {
f.Lock()
defer f.Unlock()
return f.assertList(calls, f.CalledFunctions)
}
// AssertCallCounts checks whether a given function was called the expected number of times
func (f *FakeRuntime) AssertCallCounts(funcName string, expectedCount int) bool {
f.Lock()
defer f.Unlock()
actualCount := 0
for _, c := range f.CalledFunctions {
if funcName == c {
actualCount += 1
}
}
if expectedCount != actualCount {
f.T.Errorf("AssertCallCounts: expected %s to be called %d times, but was actually called %d times.", funcName, expectedCount, actualCount)
return false
}
return true
}
func (f *FakeRuntime) AssertStartedPods(pods []string) bool {
f.Lock()
defer f.Unlock()
return f.assertList(pods, f.StartedPods)
}
func (f *FakeRuntime) AssertKilledPods(pods []string) bool {
f.Lock()
defer f.Unlock()
return f.assertList(pods, f.KilledPods)
}
func (f *FakeRuntime) AssertStartedContainers(containers []string) bool {
f.Lock()
defer f.Unlock()
return f.assertList(containers, f.StartedContainers)
}
func (f *FakeRuntime) AssertKilledContainers(containers []string) bool {
f.Lock()
defer f.Unlock()
return f.assertList(containers, f.KilledContainers)
}
func (f *FakeRuntime) Type() string {
return f.RuntimeType
}
func (f *FakeRuntime) Version(_ context.Context) (kubecontainer.Version, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "Version")
return &FakeVersion{Version: f.VersionInfo}, f.Err
}
func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "APIVersion")
return &FakeVersion{Version: f.APIVersionInfo}, f.Err
}
func (f *FakeRuntime) Status(_ context.Context) (*kubecontainer.RuntimeStatus, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "Status")
return f.RuntimeStatus, f.StatusErr
}
func (f *FakeRuntime) GetPods(_ context.Context, all bool) ([]*kubecontainer.Pod, error) {
f.Lock()
defer f.Unlock()
var pods []*kubecontainer.Pod
f.CalledFunctions = append(f.CalledFunctions, "GetPods")
if all {
for _, fakePod := range f.AllPodList {
pods = append(pods, fakePod.Pod)
}
} else {
for _, fakePod := range f.PodList {
pods = append(pods, fakePod.Pod)
}
}
return pods, f.Err
}
func (f *FakeRuntime) SyncPod(_ context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "SyncPod")
f.StartedPods = append(f.StartedPods, string(pod.UID))
for _, c := range pod.Spec.Containers {
f.StartedContainers = append(f.StartedContainers, c.Name)
}
if f.SyncResults != nil {
return *f.SyncResults
}
// TODO(random-liu): Add SyncResult for starting and killing containers
if f.Err != nil {
result.Fail(f.Err)
}
return
}
func (f *FakeRuntime) KillPod(_ context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "KillPod")
f.KilledPods = append(f.KilledPods, string(runningPod.ID))
for _, c := range runningPod.Containers {
f.KilledContainers = append(f.KilledContainers, c.Name)
}
return f.Err
}
func (f *FakeRuntime) RunContainerInPod(container v1.Container, pod *v1.Pod, volumeMap map[string]volume.VolumePlugin) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "RunContainerInPod")
f.StartedContainers = append(f.StartedContainers, container.Name)
// Only add the container to the pod spec if it is not already present.
for _, c := range pod.Spec.Containers {
if c.Name == container.Name { // Container already in the pod.
return f.Err
}
}
pod.Spec.Containers = append(pod.Spec.Containers, container)
return f.Err
}
}
func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "KillContainerInPod")
f.KilledContainers = append(f.KilledContainers, container.Name)
return f.Err
}
func (f *FakeRuntime) GeneratePodStatus(event *runtimeapi.ContainerEventResponse) *kubecontainer.PodStatus {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GeneratePodStatus")
status := f.PodStatus
return &status
}
func (f *FakeRuntime) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetPodStatus")
status := f.PodStatus
return &status, f.Err
}
func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetContainerLogs")
return f.Err
}
func (f *FakeRuntime) PullImage(ctx context.Context, image kubecontainer.ImageSpec, creds []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error) {
f.Lock()
f.CalledFunctions = append(f.CalledFunctions, "PullImage")
if f.Err == nil {
i := kubecontainer.Image{
ID: image.Image,
Spec: image,
}
f.ImageList = append(f.ImageList, i)
}
// If credentials were supplied for the pull, return at least the first in the list.
var retCreds *credentialprovider.TrackedAuthConfig = nil
if len(creds) > 0 {
retCreds = &creds[0]
}
if !f.BlockImagePulls {
f.Unlock()
return image.Image, retCreds, f.Err
}
retErr := f.Err
if f.imagePullTokenBucket == nil {
f.imagePullTokenBucket = make(chan bool, 1)
}
// Unlock before waiting for UnblockImagePulls calls, to avoid deadlock.
f.Unlock()
select {
case <-ctx.Done():
case <-f.imagePullTokenBucket:
}
return image.Image, retCreds, retErr
}
// UnblockImagePulls unblocks a certain number of image pulls, if BlockImagePulls is true.
func (f *FakeRuntime) UnblockImagePulls(count int) {
if f.imagePullTokenBucket != nil {
for i := 0; i < count; i++ {
select {
case f.imagePullTokenBucket <- true:
default:
}
}
}
}
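// exampleBlockedImagePull is an illustrative sketch, not part of the original
// source: with BlockImagePulls set, PullImage parks until the context is
// canceled (or UnblockImagePulls releases it), simulating pull latency.
func exampleBlockedImagePull(f *FakeRuntime) {
f.BlockImagePulls = true
ctx, cancel := context.WithCancel(context.Background())
done := make(chan struct{})
go func() {
defer close(done)
_, _, _ = f.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
}()
// Cancellation releases the blocked pull regardless of goroutine ordering,
// because a closed Done channel stays closed.
cancel()
<-done
}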
func (f *FakeRuntime) GetImageRef(_ context.Context, image kubecontainer.ImageSpec) (string, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetImageRef")
for _, i := range f.ImageList {
if i.ID == image.Image {
return i.ID, nil
}
}
return "", f.InspectErr
}
func (f *FakeRuntime) GetImageSize(_ context.Context, image kubecontainer.ImageSpec) (uint64, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetImageSize")
return 0, f.Err
}
func (f *FakeRuntime) ListImages(_ context.Context) ([]kubecontainer.Image, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ListImages")
return snapshot(f.ImageList), f.Err
}
func snapshot(imageList []kubecontainer.Image) []kubecontainer.Image {
result := make([]kubecontainer.Image, len(imageList))
copy(result, imageList)
return result
}
func (f *FakeRuntime) RemoveImage(_ context.Context, image kubecontainer.ImageSpec) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "RemoveImage")
index := 0
for i := range f.ImageList {
if f.ImageList[i].ID == image.Image {
index = i
break
}
}
f.ImageList = append(f.ImageList[:index], f.ImageList[index+1:]...)
return f.Err
}
func (f *FakeRuntime) GarbageCollect(_ context.Context, gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GarbageCollect")
return f.Err
}
func (f *FakeRuntime) DeleteContainer(_ context.Context, containerID kubecontainer.ContainerID) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "DeleteContainer")
return f.Err
}
func (f *FakeRuntime) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "CheckpointContainer")
return f.Err
}
func (f *FakeRuntime) ListMetricDescriptors(_ context.Context) ([]*runtimeapi.MetricDescriptor, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ListMetricDescriptors")
return nil, f.Err
}
func (f *FakeRuntime) ListPodSandboxMetrics(_ context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ListPodSandboxMetrics")
return nil, f.Err
}
// SetContainerFsStats sets the containerFsStats for dependency injection.
func (f *FakeRuntime) SetContainerFsStats(val []*runtimeapi.FilesystemUsage) {
f.ContainerFsStats = val
}
// SetImageFsStats sets the ImageFsStats for dependency injection.
func (f *FakeRuntime) SetImageFsStats(val []*runtimeapi.FilesystemUsage) {
f.ImageFsStats = val
}
func (f *FakeRuntime) ImageStats(_ context.Context) (*kubecontainer.ImageStats, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ImageStats")
return nil, f.Err
}
// ImageFsInfo returns an ImageFsInfoResponse based on the dependency-injected values of ImageFsStats
// and ContainerFsStats.
func (f *FakeRuntime) ImageFsInfo(_ context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ImageFsInfo")
resp := &runtimeapi.ImageFsInfoResponse{
ImageFilesystems: f.ImageFsStats,
ContainerFilesystems: f.ContainerFsStats,
}
return resp, f.Err
}
func (f *FakeStreamingRuntime) GetExec(_ context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetExec")
return &url.URL{Host: FakeHost}, f.Err
}
func (f *FakeStreamingRuntime) GetAttach(_ context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetAttach")
return &url.URL{Host: FakeHost}, f.Err
}
func (f *FakeStreamingRuntime) GetPortForward(_ context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetPortForward")
return &url.URL{Host: FakeHost}, f.Err
}
type FakeContainerCommandRunner struct {
// what to return
Stdout string
Err error
// actual values when invoked
ContainerID kubecontainer.ContainerID
Cmd []string
}
var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{}
func (f *FakeContainerCommandRunner) RunInContainer(_ context.Context, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
// record invoked values
f.ContainerID = containerID
f.Cmd = cmd
return []byte(f.Stdout), f.Err
}
func (f *FakeRuntime) GetContainerStatus(_ context.Context, _ kubecontainer.ContainerID) (status *kubecontainer.Status, err error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "GetContainerStatus")
return nil, f.Err
}
func (f *FakeRuntime) GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) kubetypes.SwapBehavior {
if f.SwapBehavior != nil && f.SwapBehavior[container.Name] != "" {
return f.SwapBehavior[container.Name]
}
return kubetypes.NoSwap
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// FakeRuntimeHelper implements the RuntimeHelper interface for testing purposes.
type FakeRuntimeHelper struct {
DNSServers []string
DNSSearches []string
DNSOptions []string
HostName string
HostDomain string
PodContainerDir string
RuntimeHandlers map[string]kubecontainer.RuntimeHandler
Err error
PodStats map[kubetypes.UID]*statsapi.PodStats
}
func (f *FakeRuntimeHelper) GenerateRunContainerOptions(_ context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (*kubecontainer.RunContainerOptions, func(), error) {
var opts kubecontainer.RunContainerOptions
if len(container.TerminationMessagePath) != 0 {
opts.PodContainerDir = f.PodContainerDir
}
return &opts, nil, nil
}
func (f *FakeRuntimeHelper) GetPodCgroupParent(pod *v1.Pod) string {
return ""
}
func (f *FakeRuntimeHelper) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
return &runtimeapi.DNSConfig{
Servers: f.DNSServers,
Searches: f.DNSSearches,
Options: f.DNSOptions}, f.Err
}
// This is not used by the docker runtime.
func (f *FakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
return f.HostName, f.HostDomain, f.Err
}
func (f *FakeRuntimeHelper) GetPodDir(podUID kubetypes.UID) string {
return "/poddir/" + string(podUID)
}
func (f *FakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return nil
}
func (f *FakeRuntimeHelper) GetOrCreateUserNamespaceMappings(pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error) {
featureEnabled := utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport)
if pod == nil || pod.Spec.HostUsers == nil {
return nil, nil
}
// pod.Spec.HostUsers is set to true/false
if !featureEnabled {
return nil, fmt.Errorf("the feature gate %q is disabled: can't set spec.HostUsers", features.UserNamespacesSupport)
}
if *pod.Spec.HostUsers {
return nil, nil
}
// From here onwards, hostUsers=false and the feature gate is enabled.
// if the pod requested a user namespace and the runtime doesn't support user namespaces then return an error.
if h, ok := f.RuntimeHandlers[runtimeHandler]; !ok {
return nil, fmt.Errorf("RuntimeClass handler %q not found", runtimeHandler)
} else if !h.SupportsUserNamespaces {
return nil, fmt.Errorf("RuntimeClass handler %q does not support user namespaces", runtimeHandler)
}
ids := &runtimeapi.IDMapping{
HostId: 65536,
ContainerId: 0,
Length: 65536,
}
return &runtimeapi.UserNamespace{
Mode: runtimeapi.NamespaceMode_POD,
Uids: []*runtimeapi.IDMapping{ids},
Gids: []*runtimeapi.IDMapping{ids},
}, nil
}
func (f *FakeRuntimeHelper) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return nil
}
func (f *FakeRuntimeHelper) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return nil
}
func (f *FakeRuntimeHelper) SetPodWatchCondition(_ kubetypes.UID, _ string, _ func(*kubecontainer.PodStatus) bool) {
// Not implemented.
}
func (f *FakeRuntimeHelper) PodCPUAndMemoryStats(_ context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus) (*statsapi.PodStats, error) {
if stats, ok := f.PodStats[pod.UID]; ok {
return stats, nil
}
return nil, fmt.Errorf("stats for pod %q not found", pod.UID)
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
"io/fs"
mock "github.com/stretchr/testify/mock"
)
// NewMockDirEntry creates a new instance of MockDirEntry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockDirEntry(t interface {
mock.TestingT
Cleanup(func())
}) *MockDirEntry {
mock := &MockDirEntry{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockDirEntry is an autogenerated mock type for the DirEntry type
type MockDirEntry struct {
mock.Mock
}
type MockDirEntry_Expecter struct {
mock *mock.Mock
}
func (_m *MockDirEntry) EXPECT() *MockDirEntry_Expecter {
return &MockDirEntry_Expecter{mock: &_m.Mock}
}
// Info provides a mock function for the type MockDirEntry
func (_mock *MockDirEntry) Info() (fs.FileInfo, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Info")
}
var r0 fs.FileInfo
var r1 error
if returnFunc, ok := ret.Get(0).(func() (fs.FileInfo, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() fs.FileInfo); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(fs.FileInfo)
}
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockDirEntry_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info'
type MockDirEntry_Info_Call struct {
*mock.Call
}
// Info is a helper method to define mock.On call
func (_e *MockDirEntry_Expecter) Info() *MockDirEntry_Info_Call {
return &MockDirEntry_Info_Call{Call: _e.mock.On("Info")}
}
func (_c *MockDirEntry_Info_Call) Run(run func()) *MockDirEntry_Info_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockDirEntry_Info_Call) Return(fileInfo fs.FileInfo, err error) *MockDirEntry_Info_Call {
_c.Call.Return(fileInfo, err)
return _c
}
func (_c *MockDirEntry_Info_Call) RunAndReturn(run func() (fs.FileInfo, error)) *MockDirEntry_Info_Call {
_c.Call.Return(run)
return _c
}
// IsDir provides a mock function for the type MockDirEntry
func (_mock *MockDirEntry) IsDir() bool {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for IsDir")
}
var r0 bool
if returnFunc, ok := ret.Get(0).(func() bool); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// MockDirEntry_IsDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDir'
type MockDirEntry_IsDir_Call struct {
*mock.Call
}
// IsDir is a helper method to define mock.On call
func (_e *MockDirEntry_Expecter) IsDir() *MockDirEntry_IsDir_Call {
return &MockDirEntry_IsDir_Call{Call: _e.mock.On("IsDir")}
}
func (_c *MockDirEntry_IsDir_Call) Run(run func()) *MockDirEntry_IsDir_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockDirEntry_IsDir_Call) Return(b bool) *MockDirEntry_IsDir_Call {
_c.Call.Return(b)
return _c
}
func (_c *MockDirEntry_IsDir_Call) RunAndReturn(run func() bool) *MockDirEntry_IsDir_Call {
_c.Call.Return(run)
return _c
}
// Name provides a mock function for the type MockDirEntry
func (_mock *MockDirEntry) Name() string {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Name")
}
var r0 string
if returnFunc, ok := ret.Get(0).(func() string); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// MockDirEntry_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'
type MockDirEntry_Name_Call struct {
*mock.Call
}
// Name is a helper method to define mock.On call
func (_e *MockDirEntry_Expecter) Name() *MockDirEntry_Name_Call {
return &MockDirEntry_Name_Call{Call: _e.mock.On("Name")}
}
func (_c *MockDirEntry_Name_Call) Run(run func()) *MockDirEntry_Name_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockDirEntry_Name_Call) Return(s string) *MockDirEntry_Name_Call {
_c.Call.Return(s)
return _c
}
func (_c *MockDirEntry_Name_Call) RunAndReturn(run func() string) *MockDirEntry_Name_Call {
_c.Call.Return(run)
return _c
}
// Type provides a mock function for the type MockDirEntry
func (_mock *MockDirEntry) Type() fs.FileMode {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Type")
}
var r0 fs.FileMode
if returnFunc, ok := ret.Get(0).(func() fs.FileMode); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(fs.FileMode)
}
return r0
}
// MockDirEntry_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type'
type MockDirEntry_Type_Call struct {
*mock.Call
}
// Type is a helper method to define mock.On call
func (_e *MockDirEntry_Expecter) Type() *MockDirEntry_Type_Call {
return &MockDirEntry_Type_Call{Call: _e.mock.On("Type")}
}
func (_c *MockDirEntry_Type_Call) Run(run func()) *MockDirEntry_Type_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockDirEntry_Type_Call) Return(fileMode fs.FileMode) *MockDirEntry_Type_Call {
_c.Call.Return(fileMode)
return _c
}
func (_c *MockDirEntry_Type_Call) RunAndReturn(run func() fs.FileMode) *MockDirEntry_Type_Call {
_c.Call.Return(run)
return _c
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
"context"
"io"
"time"
mock "github.com/stretchr/testify/mock"
v10 "k8s.io/api/core/v1"
types0 "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
)
// NewMockRuntime creates a new instance of MockRuntime. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewMockRuntime(t interface {
mock.TestingT
Cleanup(func())
}) *MockRuntime {
mock := &MockRuntime{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
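// A minimal usage sketch (kept as a comment, since this file is generated):
// the constructor registers cleanup-time expectation checks, so a test
// typically just does:
//
//	rt := NewMockRuntime(t)
//	rt.EXPECT().Type().Return("fakeRuntime")
//	name := rt.Type() // "fakeRuntime"; unmet expectations fail at cleanup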
// MockRuntime is an autogenerated mock type for the Runtime type
type MockRuntime struct {
mock.Mock
}
type MockRuntime_Expecter struct {
mock *mock.Mock
}
func (_m *MockRuntime) EXPECT() *MockRuntime_Expecter {
return &MockRuntime_Expecter{mock: &_m.Mock}
}
// APIVersion provides a mock function for the type MockRuntime
func (_mock *MockRuntime) APIVersion() (container.Version, error) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for APIVersion")
}
var r0 container.Version
var r1 error
if returnFunc, ok := ret.Get(0).(func() (container.Version, error)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() container.Version); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(container.Version)
}
}
if returnFunc, ok := ret.Get(1).(func() error); ok {
r1 = returnFunc()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_APIVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'APIVersion'
type MockRuntime_APIVersion_Call struct {
*mock.Call
}
// APIVersion is a helper method to define mock.On call
func (_e *MockRuntime_Expecter) APIVersion() *MockRuntime_APIVersion_Call {
return &MockRuntime_APIVersion_Call{Call: _e.mock.On("APIVersion")}
}
func (_c *MockRuntime_APIVersion_Call) Run(run func()) *MockRuntime_APIVersion_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockRuntime_APIVersion_Call) Return(version container.Version, err error) *MockRuntime_APIVersion_Call {
_c.Call.Return(version, err)
return _c
}
func (_c *MockRuntime_APIVersion_Call) RunAndReturn(run func() (container.Version, error)) *MockRuntime_APIVersion_Call {
_c.Call.Return(run)
return _c
}
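// Return and RunAndReturn differ only in how the values are produced: Return
// fixes them up front, while RunAndReturn computes them on each call. A
// hedged sketch, assuming a *MockRuntime named rt and an errors import:
//
//	rt.EXPECT().APIVersion().Return(nil, nil)
//	rt.EXPECT().APIVersion().RunAndReturn(func() (container.Version, error) {
//		return nil, errors.New("runtime not ready")
//	})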
// CheckpointContainer provides a mock function for the type MockRuntime
func (_mock *MockRuntime) CheckpointContainer(ctx context.Context, options *v1.CheckpointContainerRequest) error {
ret := _mock.Called(ctx, options)
if len(ret) == 0 {
panic("no return value specified for CheckpointContainer")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v1.CheckpointContainerRequest) error); ok {
r0 = returnFunc(ctx, options)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_CheckpointContainer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckpointContainer'
type MockRuntime_CheckpointContainer_Call struct {
*mock.Call
}
// CheckpointContainer is a helper method to define mock.On call
// - ctx context.Context
// - options *v1.CheckpointContainerRequest
func (_e *MockRuntime_Expecter) CheckpointContainer(ctx interface{}, options interface{}) *MockRuntime_CheckpointContainer_Call {
return &MockRuntime_CheckpointContainer_Call{Call: _e.mock.On("CheckpointContainer", ctx, options)}
}
func (_c *MockRuntime_CheckpointContainer_Call) Run(run func(ctx context.Context, options *v1.CheckpointContainerRequest)) *MockRuntime_CheckpointContainer_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v1.CheckpointContainerRequest
if args[1] != nil {
arg1 = args[1].(*v1.CheckpointContainerRequest)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_CheckpointContainer_Call) Return(err error) *MockRuntime_CheckpointContainer_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_CheckpointContainer_Call) RunAndReturn(run func(ctx context.Context, options *v1.CheckpointContainerRequest) error) *MockRuntime_CheckpointContainer_Call {
_c.Call.Return(run)
return _c
}
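// The expecter methods take interface{} arguments, so testify matchers such
// as mock.Anything can stand in for concrete values. A sketch, assuming a
// *MockRuntime named rt and a context.Context named ctx:
//
//	rt.EXPECT().CheckpointContainer(mock.Anything, mock.Anything).Return(nil)
//	err := rt.CheckpointContainer(ctx, &v1.CheckpointContainerRequest{})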
// DeleteContainer provides a mock function for the type MockRuntime
func (_mock *MockRuntime) DeleteContainer(ctx context.Context, containerID container.ContainerID) error {
ret := _mock.Called(ctx, containerID)
if len(ret) == 0 {
panic("no return value specified for DeleteContainer")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ContainerID) error); ok {
r0 = returnFunc(ctx, containerID)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_DeleteContainer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteContainer'
type MockRuntime_DeleteContainer_Call struct {
*mock.Call
}
// DeleteContainer is a helper method to define mock.On call
// - ctx context.Context
// - containerID container.ContainerID
func (_e *MockRuntime_Expecter) DeleteContainer(ctx interface{}, containerID interface{}) *MockRuntime_DeleteContainer_Call {
return &MockRuntime_DeleteContainer_Call{Call: _e.mock.On("DeleteContainer", ctx, containerID)}
}
func (_c *MockRuntime_DeleteContainer_Call) Run(run func(ctx context.Context, containerID container.ContainerID)) *MockRuntime_DeleteContainer_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ContainerID
if args[1] != nil {
arg1 = args[1].(container.ContainerID)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_DeleteContainer_Call) Return(err error) *MockRuntime_DeleteContainer_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_DeleteContainer_Call) RunAndReturn(run func(ctx context.Context, containerID container.ContainerID) error) *MockRuntime_DeleteContainer_Call {
_c.Call.Return(run)
return _c
}
// GarbageCollect provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GarbageCollect(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
ret := _mock.Called(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
if len(ret) == 0 {
panic("no return value specified for GarbageCollect")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.GCPolicy, bool, bool) error); ok {
r0 = returnFunc(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_GarbageCollect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GarbageCollect'
type MockRuntime_GarbageCollect_Call struct {
*mock.Call
}
// GarbageCollect is a helper method to define mock.On call
// - ctx context.Context
// - gcPolicy container.GCPolicy
// - allSourcesReady bool
// - evictNonDeletedPods bool
func (_e *MockRuntime_Expecter) GarbageCollect(ctx interface{}, gcPolicy interface{}, allSourcesReady interface{}, evictNonDeletedPods interface{}) *MockRuntime_GarbageCollect_Call {
return &MockRuntime_GarbageCollect_Call{Call: _e.mock.On("GarbageCollect", ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)}
}
func (_c *MockRuntime_GarbageCollect_Call) Run(run func(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool)) *MockRuntime_GarbageCollect_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.GCPolicy
if args[1] != nil {
arg1 = args[1].(container.GCPolicy)
}
var arg2 bool
if args[2] != nil {
arg2 = args[2].(bool)
}
var arg3 bool
if args[3] != nil {
arg3 = args[3].(bool)
}
run(
arg0,
arg1,
arg2,
arg3,
)
})
return _c
}
func (_c *MockRuntime_GarbageCollect_Call) Return(err error) *MockRuntime_GarbageCollect_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_GarbageCollect_Call) RunAndReturn(run func(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error) *MockRuntime_GarbageCollect_Call {
_c.Call.Return(run)
return _c
}
// GeneratePodStatus provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GeneratePodStatus(event *v1.ContainerEventResponse) *container.PodStatus {
ret := _mock.Called(event)
if len(ret) == 0 {
panic("no return value specified for GeneratePodStatus")
}
var r0 *container.PodStatus
if returnFunc, ok := ret.Get(0).(func(*v1.ContainerEventResponse) *container.PodStatus); ok {
r0 = returnFunc(event)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.PodStatus)
}
}
return r0
}
// MockRuntime_GeneratePodStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GeneratePodStatus'
type MockRuntime_GeneratePodStatus_Call struct {
*mock.Call
}
// GeneratePodStatus is a helper method to define mock.On call
// - event *v1.ContainerEventResponse
func (_e *MockRuntime_Expecter) GeneratePodStatus(event interface{}) *MockRuntime_GeneratePodStatus_Call {
return &MockRuntime_GeneratePodStatus_Call{Call: _e.mock.On("GeneratePodStatus", event)}
}
func (_c *MockRuntime_GeneratePodStatus_Call) Run(run func(event *v1.ContainerEventResponse)) *MockRuntime_GeneratePodStatus_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.ContainerEventResponse
if args[0] != nil {
arg0 = args[0].(*v1.ContainerEventResponse)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_GeneratePodStatus_Call) Return(podStatus *container.PodStatus) *MockRuntime_GeneratePodStatus_Call {
_c.Call.Return(podStatus)
return _c
}
func (_c *MockRuntime_GeneratePodStatus_Call) RunAndReturn(run func(event *v1.ContainerEventResponse) *container.PodStatus) *MockRuntime_GeneratePodStatus_Call {
_c.Call.Return(run)
return _c
}
// GetContainerLogs provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetContainerLogs(ctx context.Context, pod *v10.Pod, containerID container.ContainerID, logOptions *v10.PodLogOptions, stdout io.Writer, stderr io.Writer) error {
ret := _mock.Called(ctx, pod, containerID, logOptions, stdout, stderr)
if len(ret) == 0 {
panic("no return value specified for GetContainerLogs")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v10.Pod, container.ContainerID, *v10.PodLogOptions, io.Writer, io.Writer) error); ok {
r0 = returnFunc(ctx, pod, containerID, logOptions, stdout, stderr)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_GetContainerLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetContainerLogs'
type MockRuntime_GetContainerLogs_Call struct {
*mock.Call
}
// GetContainerLogs is a helper method to define mock.On call
// - ctx context.Context
// - pod *v10.Pod
// - containerID container.ContainerID
// - logOptions *v10.PodLogOptions
// - stdout io.Writer
// - stderr io.Writer
func (_e *MockRuntime_Expecter) GetContainerLogs(ctx interface{}, pod interface{}, containerID interface{}, logOptions interface{}, stdout interface{}, stderr interface{}) *MockRuntime_GetContainerLogs_Call {
return &MockRuntime_GetContainerLogs_Call{Call: _e.mock.On("GetContainerLogs", ctx, pod, containerID, logOptions, stdout, stderr)}
}
func (_c *MockRuntime_GetContainerLogs_Call) Run(run func(ctx context.Context, pod *v10.Pod, containerID container.ContainerID, logOptions *v10.PodLogOptions, stdout io.Writer, stderr io.Writer)) *MockRuntime_GetContainerLogs_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v10.Pod
if args[1] != nil {
arg1 = args[1].(*v10.Pod)
}
var arg2 container.ContainerID
if args[2] != nil {
arg2 = args[2].(container.ContainerID)
}
var arg3 *v10.PodLogOptions
if args[3] != nil {
arg3 = args[3].(*v10.PodLogOptions)
}
var arg4 io.Writer
if args[4] != nil {
arg4 = args[4].(io.Writer)
}
var arg5 io.Writer
if args[5] != nil {
arg5 = args[5].(io.Writer)
}
run(
arg0,
arg1,
arg2,
arg3,
arg4,
arg5,
)
})
return _c
}
func (_c *MockRuntime_GetContainerLogs_Call) Return(err error) *MockRuntime_GetContainerLogs_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_GetContainerLogs_Call) RunAndReturn(run func(ctx context.Context, pod *v10.Pod, containerID container.ContainerID, logOptions *v10.PodLogOptions, stdout io.Writer, stderr io.Writer) error) *MockRuntime_GetContainerLogs_Call {
_c.Call.Return(run)
return _c
}
// GetContainerStatus provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetContainerStatus(ctx context.Context, id container.ContainerID) (*container.Status, error) {
ret := _mock.Called(ctx, id)
if len(ret) == 0 {
panic("no return value specified for GetContainerStatus")
}
var r0 *container.Status
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ContainerID) (*container.Status, error)); ok {
return returnFunc(ctx, id)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ContainerID) *container.Status); ok {
r0 = returnFunc(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.Status)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context, container.ContainerID) error); ok {
r1 = returnFunc(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_GetContainerStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetContainerStatus'
type MockRuntime_GetContainerStatus_Call struct {
*mock.Call
}
// GetContainerStatus is a helper method to define mock.On call
// - ctx context.Context
// - id container.ContainerID
func (_e *MockRuntime_Expecter) GetContainerStatus(ctx interface{}, id interface{}) *MockRuntime_GetContainerStatus_Call {
return &MockRuntime_GetContainerStatus_Call{Call: _e.mock.On("GetContainerStatus", ctx, id)}
}
func (_c *MockRuntime_GetContainerStatus_Call) Run(run func(ctx context.Context, id container.ContainerID)) *MockRuntime_GetContainerStatus_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ContainerID
if args[1] != nil {
arg1 = args[1].(container.ContainerID)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_GetContainerStatus_Call) Return(status *container.Status, err error) *MockRuntime_GetContainerStatus_Call {
_c.Call.Return(status, err)
return _c
}
func (_c *MockRuntime_GetContainerStatus_Call) RunAndReturn(run func(ctx context.Context, id container.ContainerID) (*container.Status, error)) *MockRuntime_GetContainerStatus_Call {
_c.Call.Return(run)
return _c
}
// GetContainerSwapBehavior provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetContainerSwapBehavior(pod *v10.Pod, container1 *v10.Container) types.SwapBehavior {
ret := _mock.Called(pod, container1)
if len(ret) == 0 {
panic("no return value specified for GetContainerSwapBehavior")
}
var r0 types.SwapBehavior
if returnFunc, ok := ret.Get(0).(func(*v10.Pod, *v10.Container) types.SwapBehavior); ok {
r0 = returnFunc(pod, container1)
} else {
r0 = ret.Get(0).(types.SwapBehavior)
}
return r0
}
// MockRuntime_GetContainerSwapBehavior_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetContainerSwapBehavior'
type MockRuntime_GetContainerSwapBehavior_Call struct {
*mock.Call
}
// GetContainerSwapBehavior is a helper method to define mock.On call
// - pod *v10.Pod
// - container1 *v10.Container
func (_e *MockRuntime_Expecter) GetContainerSwapBehavior(pod interface{}, container1 interface{}) *MockRuntime_GetContainerSwapBehavior_Call {
return &MockRuntime_GetContainerSwapBehavior_Call{Call: _e.mock.On("GetContainerSwapBehavior", pod, container1)}
}
func (_c *MockRuntime_GetContainerSwapBehavior_Call) Run(run func(pod *v10.Pod, container1 *v10.Container)) *MockRuntime_GetContainerSwapBehavior_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v10.Pod
if args[0] != nil {
arg0 = args[0].(*v10.Pod)
}
var arg1 *v10.Container
if args[1] != nil {
arg1 = args[1].(*v10.Container)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_GetContainerSwapBehavior_Call) Return(swapBehavior types.SwapBehavior) *MockRuntime_GetContainerSwapBehavior_Call {
_c.Call.Return(swapBehavior)
return _c
}
func (_c *MockRuntime_GetContainerSwapBehavior_Call) RunAndReturn(run func(pod *v10.Pod, container1 *v10.Container) types.SwapBehavior) *MockRuntime_GetContainerSwapBehavior_Call {
_c.Call.Return(run)
return _c
}
// GetImageRef provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) {
ret := _mock.Called(ctx, image)
if len(ret) == 0 {
panic("no return value specified for GetImageRef")
}
var r0 string
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec) (string, error)); ok {
return returnFunc(ctx, image)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec) string); ok {
r0 = returnFunc(ctx, image)
} else {
r0 = ret.Get(0).(string)
}
if returnFunc, ok := ret.Get(1).(func(context.Context, container.ImageSpec) error); ok {
r1 = returnFunc(ctx, image)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_GetImageRef_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetImageRef'
type MockRuntime_GetImageRef_Call struct {
*mock.Call
}
// GetImageRef is a helper method to define mock.On call
// - ctx context.Context
// - image container.ImageSpec
func (_e *MockRuntime_Expecter) GetImageRef(ctx interface{}, image interface{}) *MockRuntime_GetImageRef_Call {
return &MockRuntime_GetImageRef_Call{Call: _e.mock.On("GetImageRef", ctx, image)}
}
func (_c *MockRuntime_GetImageRef_Call) Run(run func(ctx context.Context, image container.ImageSpec)) *MockRuntime_GetImageRef_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ImageSpec
if args[1] != nil {
arg1 = args[1].(container.ImageSpec)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_GetImageRef_Call) Return(s string, err error) *MockRuntime_GetImageRef_Call {
_c.Call.Return(s, err)
return _c
}
func (_c *MockRuntime_GetImageRef_Call) RunAndReturn(run func(ctx context.Context, image container.ImageSpec) (string, error)) *MockRuntime_GetImageRef_Call {
_c.Call.Return(run)
return _c
}
// GetImageSize provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetImageSize(ctx context.Context, image container.ImageSpec) (uint64, error) {
ret := _mock.Called(ctx, image)
if len(ret) == 0 {
panic("no return value specified for GetImageSize")
}
var r0 uint64
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec) (uint64, error)); ok {
return returnFunc(ctx, image)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec) uint64); ok {
r0 = returnFunc(ctx, image)
} else {
r0 = ret.Get(0).(uint64)
}
if returnFunc, ok := ret.Get(1).(func(context.Context, container.ImageSpec) error); ok {
r1 = returnFunc(ctx, image)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_GetImageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetImageSize'
type MockRuntime_GetImageSize_Call struct {
*mock.Call
}
// GetImageSize is a helper method to define mock.On call
// - ctx context.Context
// - image container.ImageSpec
func (_e *MockRuntime_Expecter) GetImageSize(ctx interface{}, image interface{}) *MockRuntime_GetImageSize_Call {
return &MockRuntime_GetImageSize_Call{Call: _e.mock.On("GetImageSize", ctx, image)}
}
func (_c *MockRuntime_GetImageSize_Call) Run(run func(ctx context.Context, image container.ImageSpec)) *MockRuntime_GetImageSize_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ImageSpec
if args[1] != nil {
arg1 = args[1].(container.ImageSpec)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_GetImageSize_Call) Return(v uint64, err error) *MockRuntime_GetImageSize_Call {
_c.Call.Return(v, err)
return _c
}
func (_c *MockRuntime_GetImageSize_Call) RunAndReturn(run func(ctx context.Context, image container.ImageSpec) (uint64, error)) *MockRuntime_GetImageSize_Call {
_c.Call.Return(run)
return _c
}
// GetPodStatus provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetPodStatus(ctx context.Context, uid types0.UID, name string, namespace string) (*container.PodStatus, error) {
ret := _mock.Called(ctx, uid, name, namespace)
if len(ret) == 0 {
panic("no return value specified for GetPodStatus")
}
var r0 *container.PodStatus
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, types0.UID, string, string) (*container.PodStatus, error)); ok {
return returnFunc(ctx, uid, name, namespace)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, types0.UID, string, string) *container.PodStatus); ok {
r0 = returnFunc(ctx, uid, name, namespace)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.PodStatus)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context, types0.UID, string, string) error); ok {
r1 = returnFunc(ctx, uid, name, namespace)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_GetPodStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodStatus'
type MockRuntime_GetPodStatus_Call struct {
*mock.Call
}
// GetPodStatus is a helper method to define mock.On call
// - ctx context.Context
// - uid types0.UID
// - name string
// - namespace string
func (_e *MockRuntime_Expecter) GetPodStatus(ctx interface{}, uid interface{}, name interface{}, namespace interface{}) *MockRuntime_GetPodStatus_Call {
return &MockRuntime_GetPodStatus_Call{Call: _e.mock.On("GetPodStatus", ctx, uid, name, namespace)}
}
func (_c *MockRuntime_GetPodStatus_Call) Run(run func(ctx context.Context, uid types0.UID, name string, namespace string)) *MockRuntime_GetPodStatus_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 types0.UID
if args[1] != nil {
arg1 = args[1].(types0.UID)
}
var arg2 string
if args[2] != nil {
arg2 = args[2].(string)
}
var arg3 string
if args[3] != nil {
arg3 = args[3].(string)
}
run(
arg0,
arg1,
arg2,
arg3,
)
})
return _c
}
func (_c *MockRuntime_GetPodStatus_Call) Return(podStatus *container.PodStatus, err error) *MockRuntime_GetPodStatus_Call {
_c.Call.Return(podStatus, err)
return _c
}
func (_c *MockRuntime_GetPodStatus_Call) RunAndReturn(run func(ctx context.Context, uid types0.UID, name string, namespace string) (*container.PodStatus, error)) *MockRuntime_GetPodStatus_Call {
_c.Call.Return(run)
return _c
}
// GetPods provides a mock function for the type MockRuntime
func (_mock *MockRuntime) GetPods(ctx context.Context, all bool) ([]*container.Pod, error) {
ret := _mock.Called(ctx, all)
if len(ret) == 0 {
panic("no return value specified for GetPods")
}
var r0 []*container.Pod
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context, bool) ([]*container.Pod, error)); ok {
return returnFunc(ctx, all)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, bool) []*container.Pod); ok {
r0 = returnFunc(ctx, all)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*container.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context, bool) error); ok {
r1 = returnFunc(ctx, all)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_GetPods_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPods'
type MockRuntime_GetPods_Call struct {
*mock.Call
}
// GetPods is a helper method to define mock.On call
// - ctx context.Context
// - all bool
func (_e *MockRuntime_Expecter) GetPods(ctx interface{}, all interface{}) *MockRuntime_GetPods_Call {
return &MockRuntime_GetPods_Call{Call: _e.mock.On("GetPods", ctx, all)}
}
func (_c *MockRuntime_GetPods_Call) Run(run func(ctx context.Context, all bool)) *MockRuntime_GetPods_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 bool
if args[1] != nil {
arg1 = args[1].(bool)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_GetPods_Call) Return(pods []*container.Pod, err error) *MockRuntime_GetPods_Call {
_c.Call.Return(pods, err)
return _c
}
func (_c *MockRuntime_GetPods_Call) RunAndReturn(run func(ctx context.Context, all bool) ([]*container.Pod, error)) *MockRuntime_GetPods_Call {
_c.Call.Return(run)
return _c
}
// ImageFsInfo provides a mock function for the type MockRuntime
func (_mock *MockRuntime) ImageFsInfo(ctx context.Context) (*v1.ImageFsInfoResponse, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ImageFsInfo")
}
var r0 *v1.ImageFsInfoResponse
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (*v1.ImageFsInfoResponse, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) *v1.ImageFsInfoResponse); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.ImageFsInfoResponse)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_ImageFsInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImageFsInfo'
type MockRuntime_ImageFsInfo_Call struct {
*mock.Call
}
// ImageFsInfo is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) ImageFsInfo(ctx interface{}) *MockRuntime_ImageFsInfo_Call {
return &MockRuntime_ImageFsInfo_Call{Call: _e.mock.On("ImageFsInfo", ctx)}
}
func (_c *MockRuntime_ImageFsInfo_Call) Run(run func(ctx context.Context)) *MockRuntime_ImageFsInfo_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_ImageFsInfo_Call) Return(imageFsInfoResponse *v1.ImageFsInfoResponse, err error) *MockRuntime_ImageFsInfo_Call {
_c.Call.Return(imageFsInfoResponse, err)
return _c
}
func (_c *MockRuntime_ImageFsInfo_Call) RunAndReturn(run func(ctx context.Context) (*v1.ImageFsInfoResponse, error)) *MockRuntime_ImageFsInfo_Call {
_c.Call.Return(run)
return _c
}
// ImageStats provides a mock function for the type MockRuntime
func (_mock *MockRuntime) ImageStats(ctx context.Context) (*container.ImageStats, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ImageStats")
}
var r0 *container.ImageStats
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (*container.ImageStats, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) *container.ImageStats); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.ImageStats)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_ImageStats_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImageStats'
type MockRuntime_ImageStats_Call struct {
*mock.Call
}
// ImageStats is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) ImageStats(ctx interface{}) *MockRuntime_ImageStats_Call {
return &MockRuntime_ImageStats_Call{Call: _e.mock.On("ImageStats", ctx)}
}
func (_c *MockRuntime_ImageStats_Call) Run(run func(ctx context.Context)) *MockRuntime_ImageStats_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_ImageStats_Call) Return(imageStats *container.ImageStats, err error) *MockRuntime_ImageStats_Call {
_c.Call.Return(imageStats, err)
return _c
}
func (_c *MockRuntime_ImageStats_Call) RunAndReturn(run func(ctx context.Context) (*container.ImageStats, error)) *MockRuntime_ImageStats_Call {
_c.Call.Return(run)
return _c
}
// KillPod provides a mock function for the type MockRuntime
func (_mock *MockRuntime) KillPod(ctx context.Context, pod *v10.Pod, runningPod container.Pod, gracePeriodOverride *int64) error {
ret := _mock.Called(ctx, pod, runningPod, gracePeriodOverride)
if len(ret) == 0 {
panic("no return value specified for KillPod")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, *v10.Pod, container.Pod, *int64) error); ok {
r0 = returnFunc(ctx, pod, runningPod, gracePeriodOverride)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_KillPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KillPod'
type MockRuntime_KillPod_Call struct {
*mock.Call
}
// KillPod is a helper method to define mock.On call
// - ctx context.Context
// - pod *v10.Pod
// - runningPod container.Pod
// - gracePeriodOverride *int64
func (_e *MockRuntime_Expecter) KillPod(ctx interface{}, pod interface{}, runningPod interface{}, gracePeriodOverride interface{}) *MockRuntime_KillPod_Call {
return &MockRuntime_KillPod_Call{Call: _e.mock.On("KillPod", ctx, pod, runningPod, gracePeriodOverride)}
}
func (_c *MockRuntime_KillPod_Call) Run(run func(ctx context.Context, pod *v10.Pod, runningPod container.Pod, gracePeriodOverride *int64)) *MockRuntime_KillPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v10.Pod
if args[1] != nil {
arg1 = args[1].(*v10.Pod)
}
var arg2 container.Pod
if args[2] != nil {
arg2 = args[2].(container.Pod)
}
var arg3 *int64
if args[3] != nil {
arg3 = args[3].(*int64)
}
run(
arg0,
arg1,
arg2,
arg3,
)
})
return _c
}
func (_c *MockRuntime_KillPod_Call) Return(err error) *MockRuntime_KillPod_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_KillPod_Call) RunAndReturn(run func(ctx context.Context, pod *v10.Pod, runningPod container.Pod, gracePeriodOverride *int64) error) *MockRuntime_KillPod_Call {
_c.Call.Return(run)
return _c
}
// ListImages provides a mock function for the type MockRuntime
func (_mock *MockRuntime) ListImages(ctx context.Context) ([]container.Image, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ListImages")
}
var r0 []container.Image
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) ([]container.Image, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) []container.Image); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]container.Image)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_ListImages_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListImages'
type MockRuntime_ListImages_Call struct {
*mock.Call
}
// ListImages is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) ListImages(ctx interface{}) *MockRuntime_ListImages_Call {
return &MockRuntime_ListImages_Call{Call: _e.mock.On("ListImages", ctx)}
}
func (_c *MockRuntime_ListImages_Call) Run(run func(ctx context.Context)) *MockRuntime_ListImages_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_ListImages_Call) Return(images []container.Image, err error) *MockRuntime_ListImages_Call {
_c.Call.Return(images, err)
return _c
}
func (_c *MockRuntime_ListImages_Call) RunAndReturn(run func(ctx context.Context) ([]container.Image, error)) *MockRuntime_ListImages_Call {
_c.Call.Return(run)
return _c
}
// ListMetricDescriptors provides a mock function for the type MockRuntime
func (_mock *MockRuntime) ListMetricDescriptors(ctx context.Context) ([]*v1.MetricDescriptor, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ListMetricDescriptors")
}
var r0 []*v1.MetricDescriptor
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) ([]*v1.MetricDescriptor, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) []*v1.MetricDescriptor); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1.MetricDescriptor)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_ListMetricDescriptors_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListMetricDescriptors'
type MockRuntime_ListMetricDescriptors_Call struct {
*mock.Call
}
// ListMetricDescriptors is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) ListMetricDescriptors(ctx interface{}) *MockRuntime_ListMetricDescriptors_Call {
return &MockRuntime_ListMetricDescriptors_Call{Call: _e.mock.On("ListMetricDescriptors", ctx)}
}
func (_c *MockRuntime_ListMetricDescriptors_Call) Run(run func(ctx context.Context)) *MockRuntime_ListMetricDescriptors_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_ListMetricDescriptors_Call) Return(metricDescriptors []*v1.MetricDescriptor, err error) *MockRuntime_ListMetricDescriptors_Call {
_c.Call.Return(metricDescriptors, err)
return _c
}
func (_c *MockRuntime_ListMetricDescriptors_Call) RunAndReturn(run func(ctx context.Context) ([]*v1.MetricDescriptor, error)) *MockRuntime_ListMetricDescriptors_Call {
_c.Call.Return(run)
return _c
}
// ListPodSandboxMetrics provides a mock function for the type MockRuntime
func (_mock *MockRuntime) ListPodSandboxMetrics(ctx context.Context) ([]*v1.PodSandboxMetrics, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ListPodSandboxMetrics")
}
var r0 []*v1.PodSandboxMetrics
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) ([]*v1.PodSandboxMetrics, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) []*v1.PodSandboxMetrics); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1.PodSandboxMetrics)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_ListPodSandboxMetrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPodSandboxMetrics'
type MockRuntime_ListPodSandboxMetrics_Call struct {
*mock.Call
}
// ListPodSandboxMetrics is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) ListPodSandboxMetrics(ctx interface{}) *MockRuntime_ListPodSandboxMetrics_Call {
return &MockRuntime_ListPodSandboxMetrics_Call{Call: _e.mock.On("ListPodSandboxMetrics", ctx)}
}
func (_c *MockRuntime_ListPodSandboxMetrics_Call) Run(run func(ctx context.Context)) *MockRuntime_ListPodSandboxMetrics_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_ListPodSandboxMetrics_Call) Return(podSandboxMetrics []*v1.PodSandboxMetrics, err error) *MockRuntime_ListPodSandboxMetrics_Call {
_c.Call.Return(podSandboxMetrics, err)
return _c
}
func (_c *MockRuntime_ListPodSandboxMetrics_Call) RunAndReturn(run func(ctx context.Context) ([]*v1.PodSandboxMetrics, error)) *MockRuntime_ListPodSandboxMetrics_Call {
_c.Call.Return(run)
return _c
}
// PullImage provides a mock function for the type MockRuntime
func (_mock *MockRuntime) PullImage(ctx context.Context, image container.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *v1.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error) {
ret := _mock.Called(ctx, image, credentials, podSandboxConfig)
if len(ret) == 0 {
panic("no return value specified for PullImage")
}
var r0 string
var r1 *credentialprovider.TrackedAuthConfig
var r2 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec, []credentialprovider.TrackedAuthConfig, *v1.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error)); ok {
return returnFunc(ctx, image, credentials, podSandboxConfig)
}
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec, []credentialprovider.TrackedAuthConfig, *v1.PodSandboxConfig) string); ok {
r0 = returnFunc(ctx, image, credentials, podSandboxConfig)
} else {
r0 = ret.Get(0).(string)
}
if returnFunc, ok := ret.Get(1).(func(context.Context, container.ImageSpec, []credentialprovider.TrackedAuthConfig, *v1.PodSandboxConfig) *credentialprovider.TrackedAuthConfig); ok {
r1 = returnFunc(ctx, image, credentials, podSandboxConfig)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*credentialprovider.TrackedAuthConfig)
}
}
if returnFunc, ok := ret.Get(2).(func(context.Context, container.ImageSpec, []credentialprovider.TrackedAuthConfig, *v1.PodSandboxConfig) error); ok {
r2 = returnFunc(ctx, image, credentials, podSandboxConfig)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// MockRuntime_PullImage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PullImage'
type MockRuntime_PullImage_Call struct {
*mock.Call
}
// PullImage is a helper method to define mock.On call
// - ctx context.Context
// - image container.ImageSpec
// - credentials []credentialprovider.TrackedAuthConfig
// - podSandboxConfig *v1.PodSandboxConfig
func (_e *MockRuntime_Expecter) PullImage(ctx interface{}, image interface{}, credentials interface{}, podSandboxConfig interface{}) *MockRuntime_PullImage_Call {
return &MockRuntime_PullImage_Call{Call: _e.mock.On("PullImage", ctx, image, credentials, podSandboxConfig)}
}
func (_c *MockRuntime_PullImage_Call) Run(run func(ctx context.Context, image container.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *v1.PodSandboxConfig)) *MockRuntime_PullImage_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ImageSpec
if args[1] != nil {
arg1 = args[1].(container.ImageSpec)
}
var arg2 []credentialprovider.TrackedAuthConfig
if args[2] != nil {
arg2 = args[2].([]credentialprovider.TrackedAuthConfig)
}
var arg3 *v1.PodSandboxConfig
if args[3] != nil {
arg3 = args[3].(*v1.PodSandboxConfig)
}
run(
arg0,
arg1,
arg2,
arg3,
)
})
return _c
}
func (_c *MockRuntime_PullImage_Call) Return(s string, trackedAuthConfig *credentialprovider.TrackedAuthConfig, err error) *MockRuntime_PullImage_Call {
_c.Call.Return(s, trackedAuthConfig, err)
return _c
}
func (_c *MockRuntime_PullImage_Call) RunAndReturn(run func(ctx context.Context, image container.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *v1.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error)) *MockRuntime_PullImage_Call {
_c.Call.Return(run)
return _c
}
// RemoveImage provides a mock function for the type MockRuntime
func (_mock *MockRuntime) RemoveImage(ctx context.Context, image container.ImageSpec) error {
ret := _mock.Called(ctx, image)
if len(ret) == 0 {
panic("no return value specified for RemoveImage")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, container.ImageSpec) error); ok {
r0 = returnFunc(ctx, image)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_RemoveImage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveImage'
type MockRuntime_RemoveImage_Call struct {
*mock.Call
}
// RemoveImage is a helper method to define mock.On call
// - ctx context.Context
// - image container.ImageSpec
func (_e *MockRuntime_Expecter) RemoveImage(ctx interface{}, image interface{}) *MockRuntime_RemoveImage_Call {
return &MockRuntime_RemoveImage_Call{Call: _e.mock.On("RemoveImage", ctx, image)}
}
func (_c *MockRuntime_RemoveImage_Call) Run(run func(ctx context.Context, image container.ImageSpec)) *MockRuntime_RemoveImage_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 container.ImageSpec
if args[1] != nil {
arg1 = args[1].(container.ImageSpec)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_RemoveImage_Call) Return(err error) *MockRuntime_RemoveImage_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_RemoveImage_Call) RunAndReturn(run func(ctx context.Context, image container.ImageSpec) error) *MockRuntime_RemoveImage_Call {
_c.Call.Return(run)
return _c
}
// Status provides a mock function for the type MockRuntime
func (_mock *MockRuntime) Status(ctx context.Context) (*container.RuntimeStatus, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Status")
}
var r0 *container.RuntimeStatus
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (*container.RuntimeStatus, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) *container.RuntimeStatus); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*container.RuntimeStatus)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_Status_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Status'
type MockRuntime_Status_Call struct {
*mock.Call
}
// Status is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) Status(ctx interface{}) *MockRuntime_Status_Call {
return &MockRuntime_Status_Call{Call: _e.mock.On("Status", ctx)}
}
func (_c *MockRuntime_Status_Call) Run(run func(ctx context.Context)) *MockRuntime_Status_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_Status_Call) Return(runtimeStatus *container.RuntimeStatus, err error) *MockRuntime_Status_Call {
_c.Call.Return(runtimeStatus, err)
return _c
}
func (_c *MockRuntime_Status_Call) RunAndReturn(run func(ctx context.Context) (*container.RuntimeStatus, error)) *MockRuntime_Status_Call {
_c.Call.Return(run)
return _c
}
// SyncPod provides a mock function for the type MockRuntime
func (_mock *MockRuntime) SyncPod(ctx context.Context, pod *v10.Pod, podStatus *container.PodStatus, pullSecrets []v10.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult {
ret := _mock.Called(ctx, pod, podStatus, pullSecrets, backOff)
if len(ret) == 0 {
panic("no return value specified for SyncPod")
}
var r0 container.PodSyncResult
if returnFunc, ok := ret.Get(0).(func(context.Context, *v10.Pod, *container.PodStatus, []v10.Secret, *flowcontrol.Backoff) container.PodSyncResult); ok {
r0 = returnFunc(ctx, pod, podStatus, pullSecrets, backOff)
} else {
r0 = ret.Get(0).(container.PodSyncResult)
}
return r0
}
// MockRuntime_SyncPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncPod'
type MockRuntime_SyncPod_Call struct {
*mock.Call
}
// SyncPod is a helper method to define mock.On call
// - ctx context.Context
// - pod *v10.Pod
// - podStatus *container.PodStatus
// - pullSecrets []v10.Secret
// - backOff *flowcontrol.Backoff
func (_e *MockRuntime_Expecter) SyncPod(ctx interface{}, pod interface{}, podStatus interface{}, pullSecrets interface{}, backOff interface{}) *MockRuntime_SyncPod_Call {
return &MockRuntime_SyncPod_Call{Call: _e.mock.On("SyncPod", ctx, pod, podStatus, pullSecrets, backOff)}
}
func (_c *MockRuntime_SyncPod_Call) Run(run func(ctx context.Context, pod *v10.Pod, podStatus *container.PodStatus, pullSecrets []v10.Secret, backOff *flowcontrol.Backoff)) *MockRuntime_SyncPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 *v10.Pod
if args[1] != nil {
arg1 = args[1].(*v10.Pod)
}
var arg2 *container.PodStatus
if args[2] != nil {
arg2 = args[2].(*container.PodStatus)
}
var arg3 []v10.Secret
if args[3] != nil {
arg3 = args[3].([]v10.Secret)
}
var arg4 *flowcontrol.Backoff
if args[4] != nil {
arg4 = args[4].(*flowcontrol.Backoff)
}
run(
arg0,
arg1,
arg2,
arg3,
arg4,
)
})
return _c
}
func (_c *MockRuntime_SyncPod_Call) Return(podSyncResult container.PodSyncResult) *MockRuntime_SyncPod_Call {
_c.Call.Return(podSyncResult)
return _c
}
func (_c *MockRuntime_SyncPod_Call) RunAndReturn(run func(ctx context.Context, pod *v10.Pod, podStatus *container.PodStatus, pullSecrets []v10.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult) *MockRuntime_SyncPod_Call {
_c.Call.Return(run)
return _c
}
// Type provides a mock function for the type MockRuntime
func (_mock *MockRuntime) Type() string {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for Type")
}
var r0 string
if returnFunc, ok := ret.Get(0).(func() string); ok {
r0 = returnFunc()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// MockRuntime_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type'
type MockRuntime_Type_Call struct {
*mock.Call
}
// Type is a helper method to define mock.On call
func (_e *MockRuntime_Expecter) Type() *MockRuntime_Type_Call {
return &MockRuntime_Type_Call{Call: _e.mock.On("Type")}
}
func (_c *MockRuntime_Type_Call) Run(run func()) *MockRuntime_Type_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockRuntime_Type_Call) Return(s string) *MockRuntime_Type_Call {
_c.Call.Return(s)
return _c
}
func (_c *MockRuntime_Type_Call) RunAndReturn(run func() string) *MockRuntime_Type_Call {
_c.Call.Return(run)
return _c
}
// UpdatePodCIDR provides a mock function for the type MockRuntime
func (_mock *MockRuntime) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
ret := _mock.Called(ctx, podCIDR)
if len(ret) == 0 {
panic("no return value specified for UpdatePodCIDR")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = returnFunc(ctx, podCIDR)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntime_UpdatePodCIDR_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePodCIDR'
type MockRuntime_UpdatePodCIDR_Call struct {
*mock.Call
}
// UpdatePodCIDR is a helper method to define mock.On call
// - ctx context.Context
// - podCIDR string
func (_e *MockRuntime_Expecter) UpdatePodCIDR(ctx interface{}, podCIDR interface{}) *MockRuntime_UpdatePodCIDR_Call {
return &MockRuntime_UpdatePodCIDR_Call{Call: _e.mock.On("UpdatePodCIDR", ctx, podCIDR)}
}
func (_c *MockRuntime_UpdatePodCIDR_Call) Run(run func(ctx context.Context, podCIDR string)) *MockRuntime_UpdatePodCIDR_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 string
if args[1] != nil {
arg1 = args[1].(string)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntime_UpdatePodCIDR_Call) Return(err error) *MockRuntime_UpdatePodCIDR_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntime_UpdatePodCIDR_Call) RunAndReturn(run func(ctx context.Context, podCIDR string) error) *MockRuntime_UpdatePodCIDR_Call {
_c.Call.Return(run)
return _c
}
// Version provides a mock function for the type MockRuntime
func (_mock *MockRuntime) Version(ctx context.Context) (container.Version, error) {
ret := _mock.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for Version")
}
var r0 container.Version
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) (container.Version, error)); ok {
return returnFunc(ctx)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) container.Version); ok {
r0 = returnFunc(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(container.Version)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntime_Version_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Version'
type MockRuntime_Version_Call struct {
*mock.Call
}
// Version is a helper method to define mock.On call
// - ctx context.Context
func (_e *MockRuntime_Expecter) Version(ctx interface{}) *MockRuntime_Version_Call {
return &MockRuntime_Version_Call{Call: _e.mock.On("Version", ctx)}
}
func (_c *MockRuntime_Version_Call) Run(run func(ctx context.Context)) *MockRuntime_Version_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntime_Version_Call) Return(version container.Version, err error) *MockRuntime_Version_Call {
_c.Call.Return(version, err)
return _c
}
func (_c *MockRuntime_Version_Call) RunAndReturn(run func(ctx context.Context) (container.Version, error)) *MockRuntime_Version_Call {
_c.Call.Return(run)
return _c
}
// NewMockRuntimeCache creates a new instance of MockRuntimeCache. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewMockRuntimeCache(t interface {
mock.TestingT
Cleanup(func())
}) *MockRuntimeCache {
mock := &MockRuntimeCache{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockRuntimeCache is an autogenerated mock type for the RuntimeCache type
type MockRuntimeCache struct {
mock.Mock
}
type MockRuntimeCache_Expecter struct {
mock *mock.Mock
}
func (_m *MockRuntimeCache) EXPECT() *MockRuntimeCache_Expecter {
return &MockRuntimeCache_Expecter{mock: &_m.Mock}
}
// ForceUpdateIfOlder provides a mock function for the type MockRuntimeCache
func (_mock *MockRuntimeCache) ForceUpdateIfOlder(context1 context.Context, time1 time.Time) error {
ret := _mock.Called(context1, time1)
if len(ret) == 0 {
panic("no return value specified for ForceUpdateIfOlder")
}
var r0 error
if returnFunc, ok := ret.Get(0).(func(context.Context, time.Time) error); ok {
r0 = returnFunc(context1, time1)
} else {
r0 = ret.Error(0)
}
return r0
}
// MockRuntimeCache_ForceUpdateIfOlder_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceUpdateIfOlder'
type MockRuntimeCache_ForceUpdateIfOlder_Call struct {
*mock.Call
}
// ForceUpdateIfOlder is a helper method to define mock.On call
// - context1 context.Context
// - time1 time.Time
func (_e *MockRuntimeCache_Expecter) ForceUpdateIfOlder(context1 interface{}, time1 interface{}) *MockRuntimeCache_ForceUpdateIfOlder_Call {
return &MockRuntimeCache_ForceUpdateIfOlder_Call{Call: _e.mock.On("ForceUpdateIfOlder", context1, time1)}
}
func (_c *MockRuntimeCache_ForceUpdateIfOlder_Call) Run(run func(context1 context.Context, time1 time.Time)) *MockRuntimeCache_ForceUpdateIfOlder_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
var arg1 time.Time
if args[1] != nil {
arg1 = args[1].(time.Time)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockRuntimeCache_ForceUpdateIfOlder_Call) Return(err error) *MockRuntimeCache_ForceUpdateIfOlder_Call {
_c.Call.Return(err)
return _c
}
func (_c *MockRuntimeCache_ForceUpdateIfOlder_Call) RunAndReturn(run func(context1 context.Context, time1 time.Time) error) *MockRuntimeCache_ForceUpdateIfOlder_Call {
_c.Call.Return(run)
return _c
}
// GetPods provides a mock function for the type MockRuntimeCache
func (_mock *MockRuntimeCache) GetPods(context1 context.Context) ([]*container.Pod, error) {
ret := _mock.Called(context1)
if len(ret) == 0 {
panic("no return value specified for GetPods")
}
var r0 []*container.Pod
var r1 error
if returnFunc, ok := ret.Get(0).(func(context.Context) ([]*container.Pod, error)); ok {
return returnFunc(context1)
}
if returnFunc, ok := ret.Get(0).(func(context.Context) []*container.Pod); ok {
r0 = returnFunc(context1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*container.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = returnFunc(context1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MockRuntimeCache_GetPods_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPods'
type MockRuntimeCache_GetPods_Call struct {
*mock.Call
}
// GetPods is a helper method to define mock.On call
// - context1 context.Context
func (_e *MockRuntimeCache_Expecter) GetPods(context1 interface{}) *MockRuntimeCache_GetPods_Call {
return &MockRuntimeCache_GetPods_Call{Call: _e.mock.On("GetPods", context1)}
}
func (_c *MockRuntimeCache_GetPods_Call) Run(run func(context1 context.Context)) *MockRuntimeCache_GetPods_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 context.Context
if args[0] != nil {
arg0 = args[0].(context.Context)
}
run(
arg0,
)
})
return _c
}
func (_c *MockRuntimeCache_GetPods_Call) Return(pods []*container.Pod, err error) *MockRuntimeCache_GetPods_Call {
_c.Call.Return(pods, err)
return _c
}
func (_c *MockRuntimeCache_GetPods_Call) RunAndReturn(run func(context1 context.Context) ([]*container.Pod, error)) *MockRuntimeCache_GetPods_Call {
_c.Call.Return(run)
return _c
}
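// An illustrative sketch of stubbing the cache in a test, assuming a
// *testing.T named t:
//
//	cache := NewMockRuntimeCache(t)
//	cache.EXPECT().GetPods(mock.Anything).Return([]*container.Pod{}, nil)
//	pods, err := cache.GetPods(context.Background())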
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"errors"
"os"
"sync"
"time"
)
// FakeOS mocks out certain OS calls to avoid perturbing the filesystem.
// If a member of the form `*Fn` is set, that function will be called in place
// of the real call.
type FakeOS struct {
StatFn func(string) (os.FileInfo, error)
ReadDirFn func(string) ([]os.DirEntry, error)
MkdirAllFn func(string, os.FileMode) error
SymlinkFn func(string, string) error
GlobFn func(string, string) bool
HostName string
Removes []string
Files map[string][]*os.FileInfo
FilesLock sync.RWMutex
}
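// A hedged sketch of how the hooks compose: setting a *Fn member overrides
// just that call, while everything else keeps its trivial fake behavior.
//
//	fakeOS := &FakeOS{
//		StatFn: func(path string) (os.FileInfo, error) {
//			return nil, os.ErrNotExist // pretend nothing exists
//		},
//	}
//	_, err := fakeOS.Stat("/etc/kubernetes") // err == os.ErrNotExist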
// MkdirAll is a fake call that invokes MkdirAllFn if set and just returns nil otherwise.
func (f *FakeOS) MkdirAll(path string, perm os.FileMode) error {
if f.MkdirAllFn != nil {
return f.MkdirAllFn(path, perm)
}
return nil
}
// Symlink is a fake call that invokes SymlinkFn if set and just returns nil otherwise.
func (f *FakeOS) Symlink(oldname string, newname string) error {
if f.SymlinkFn != nil {
return f.SymlinkFn(oldname, newname)
}
return nil
}
// Stat is a fake call that invokes StatFn if set and returns an error otherwise.
func (f *FakeOS) Stat(path string) (os.FileInfo, error) {
if f.StatFn != nil {
return f.StatFn(path)
}
return nil, errors.New("unimplemented testing mock")
}
// Remove is a fake call that records the removed path and returns nil.
func (f *FakeOS) Remove(path string) error {
f.Removes = append(f.Removes, path)
return nil
}
// RemoveAll is a fake call that records the removed path and just returns nil.
func (f *FakeOS) RemoveAll(path string) error {
f.Removes = append(f.Removes, path)
return nil
}
// Create is a fake call that records a virtual file and returns a nil *os.File.
func (f *FakeOS) Create(path string) (*os.File, error) {
f.FilesLock.Lock()
defer f.FilesLock.Unlock()
if f.Files == nil {
f.Files = make(map[string][]*os.FileInfo)
}
f.Files[path] = []*os.FileInfo{}
return nil, nil
}
// Chmod is a fake call that returns nil.
func (*FakeOS) Chmod(path string, perm os.FileMode) error {
return nil
}
// Hostname is a fake call that returns the configured HostName and a nil error.
func (f *FakeOS) Hostname() (name string, err error) {
return f.HostName, nil
}
// Chtimes is a fake call that returns nil.
func (*FakeOS) Chtimes(path string, atime time.Time, mtime time.Time) error {
return nil
}
// Pipe is a fake call that returns nil files and a nil error.
func (*FakeOS) Pipe() (r *os.File, w *os.File, err error) {
return nil, nil, nil
}
// ReadDir is a fake call that returns the files under the directory via ReadDirFn if set, and nil otherwise.
func (f *FakeOS) ReadDir(dirname string) ([]os.DirEntry, error) {
if f.ReadDirFn != nil {
return f.ReadDirFn(dirname)
}
return nil, nil
}
// Glob is a fake call that returns the list of virtual files matching the pattern via GlobFn, if set; otherwise it returns nil.
func (f *FakeOS) Glob(pattern string) ([]string, error) {
if f.GlobFn != nil {
var res []string
f.FilesLock.RLock()
defer f.FilesLock.RUnlock()
for k := range f.Files {
if f.GlobFn(pattern, k) {
res = append(res, k)
}
}
return res, nil
}
return nil, nil
}
// Open is a fake call that returns nil.
func (*FakeOS) Open(name string) (*os.File, error) {
return nil, nil
}
// OpenFile is a fake call that returns nil.
func (*FakeOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return nil, nil
}
// Rename is a fake call that returns nil.
func (*FakeOS) Rename(oldpath, newpath string) error {
return nil
}
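// Illustrative usage (a sketch, not part of the original file): tests can
// inject behavior through the *Fn hooks and assert on recorded state such as
// Removes.
//
//	fakeOS := &FakeOS{
//		StatFn: func(path string) (os.FileInfo, error) {
//			return nil, os.ErrNotExist // simulate a missing path
//		},
//	}
//	_, err := fakeOS.Stat("/some/path") // err is os.ErrNotExist
//	_ = fakeOS.Remove("/some/path")     // fakeOS.Removes now contains "/some/path"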
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envvars
import (
"fmt"
"net"
"strconv"
"strings"
"k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
// FromServices builds the environment variables that a container is started
// with, telling the container where to find the services it may need. The
// services are provided as an argument.
func FromServices(services []*v1.Service) []v1.EnvVar {
var result []v1.EnvVar
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
// the services passed to this method should be pre-filtered
// only services that have the cluster IP set should be included here
if !v1helper.IsServiceIPSet(service) {
continue
}
// Host
name := makeEnvVariableName(service.Name) + "_SERVICE_HOST"
result = append(result, v1.EnvVar{Name: name, Value: service.Spec.ClusterIP})
// First port - give it the backwards-compatible name
name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
result = append(result, v1.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))})
// All named ports (only the first may be unnamed, checked in validation)
for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i]
if sp.Name != "" {
pn := name + "_" + makeEnvVariableName(sp.Name)
result = append(result, v1.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))})
}
}
// Docker-compatible vars.
result = append(result, makeLinkVariables(service)...)
}
return result
}
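// Worked example (a sketch): a Service named "redis-primary" with ClusterIP
// 10.0.0.11 and a single TCP port 6379 yields, via FromServices:
//
//	REDIS_PRIMARY_SERVICE_HOST=10.0.0.11
//	REDIS_PRIMARY_SERVICE_PORT=6379
//	REDIS_PRIMARY_PORT=tcp://10.0.0.11:6379
//	REDIS_PRIMARY_PORT_6379_TCP=tcp://10.0.0.11:6379
//	REDIS_PRIMARY_PORT_6379_TCP_PROTO=tcp
//	REDIS_PRIMARY_PORT_6379_TCP_PORT=6379
//	REDIS_PRIMARY_PORT_6379_TCP_ADDR=10.0.0.11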
func makeEnvVariableName(str string) string {
// TODO: If we simplify to "all names are DNS1123Subdomains" this
// will need two tweaks:
// 1) Handle leading digits
// 2) Handle dots
return strings.ToUpper(strings.Replace(str, "-", "_", -1))
}
func makeLinkVariables(service *v1.Service) []v1.EnvVar {
prefix := makeEnvVariableName(service.Name)
all := []v1.EnvVar{}
for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i]
protocol := string(v1.ProtocolTCP)
if sp.Protocol != "" {
protocol = string(sp.Protocol)
}
hostPort := net.JoinHostPort(service.Spec.ClusterIP, strconv.Itoa(int(sp.Port)))
if i == 0 {
// Docker special-cases the first port.
all = append(all, v1.EnvVar{
Name: prefix + "_PORT",
Value: fmt.Sprintf("%s://%s", strings.ToLower(protocol), hostPort),
})
}
portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol))
all = append(all, []v1.EnvVar{
{
Name: portPrefix,
Value: fmt.Sprintf("%s://%s", strings.ToLower(protocol), hostPort),
},
{
Name: portPrefix + "_PROTO",
Value: strings.ToLower(protocol),
},
{
Name: portPrefix + "_PORT",
Value: strconv.Itoa(int(sp.Port)),
},
{
Name: portPrefix + "_ADDR",
Value: service.Spec.ClusterIP,
},
}...)
}
return all
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"time"
"k8s.io/apimachinery/pkg/api/resource"
)
// Signal defines a signal that can trigger eviction of pods on a node.
type Signal string
const (
// SignalMemoryAvailable is memory available (i.e. capacity - workingSet), in bytes.
SignalMemoryAvailable Signal = "memory.available"
// SignalNodeFsAvailable is the amount of storage available on the filesystem that the kubelet uses for volumes, daemon logs, etc.
SignalNodeFsAvailable Signal = "nodefs.available"
// SignalNodeFsInodesFree is the number of inodes available on the filesystem that the kubelet uses for volumes, daemon logs, etc.
SignalNodeFsInodesFree Signal = "nodefs.inodesFree"
// SignalImageFsAvailable is the amount of storage available on the filesystem that the container runtime uses for storing image layers.
// If the container filesystem and image filesystem are not separate,
// then imagefs can store both image layers and writable layers.
SignalImageFsAvailable Signal = "imagefs.available"
// SignalImageFsInodesFree is the number of inodes available on the filesystem that the container runtime uses for storing image layers.
// If the container filesystem and image filesystem are not separate,
// then imagefs can store both image layers and writable layers.
SignalImageFsInodesFree Signal = "imagefs.inodesFree"
// SignalContainerFsAvailable is the amount of storage available on the filesystem that the container runtime uses for container writable layers.
// In case of a single filesystem, containerfs=nodefs.
// In case of an image filesystem, containerfs=imagefs.
// We will override user settings and set to either imagefs or nodefs depending on configuration.
SignalContainerFsAvailable Signal = "containerfs.available"
// SignalContainerFsInodesFree is the number of inodes available on the filesystem that the container runtime uses for container writable layers.
// In case of a single filesystem, containerfs=nodefs.
// In case of an image filesystem, containerfs=imagefs.
// We will override user settings and set to either imagefs or nodefs depending on configuration.
SignalContainerFsInodesFree Signal = "containerfs.inodesFree"
// SignalAllocatableMemoryAvailable is the amount of memory available for pod allocation (i.e. allocatable - workingSet (of pods)), in bytes.
SignalAllocatableMemoryAvailable Signal = "allocatableMemory.available"
// SignalPIDAvailable is the number of PIDs available for pod allocation.
SignalPIDAvailable Signal = "pid.available"
)
// ThresholdOperator is the operator used to express a Threshold.
type ThresholdOperator string
const (
// OpLessThan is the operator that expresses a less than operator.
OpLessThan ThresholdOperator = "LessThan"
)
// OpForSignal maps Signals to ThresholdOperators.
// Today, the only supported operator is "LessThan". This may change in the future,
// for example if "consumed" (as opposed to "available") type signals are added.
// In both cases the directionality of the threshold is implicit to the signal type
// (for a given signal, the decision to evict will be made when crossing the threshold
// from either above or below, never both). There is thus no reason to expose the
// operator in the Kubelet's public API. Instead, we internally map signal types to operators.
var OpForSignal = map[Signal]ThresholdOperator{
SignalMemoryAvailable: OpLessThan,
SignalNodeFsAvailable: OpLessThan,
SignalNodeFsInodesFree: OpLessThan,
SignalImageFsAvailable: OpLessThan,
SignalImageFsInodesFree: OpLessThan,
SignalContainerFsAvailable: OpLessThan,
SignalContainerFsInodesFree: OpLessThan,
SignalAllocatableMemoryAvailable: OpLessThan,
SignalPIDAvailable: OpLessThan,
}
// ThresholdValue is a value holder that abstracts a literal versus a percentage-based quantity.
type ThresholdValue struct {
// The following fields are exclusive. Only the topmost non-zero field is used.
// Quantity is a quantity associated with the signal that is evaluated against the specified operator.
Quantity *resource.Quantity
// Percentage represents the usage percentage over the total resource that is evaluated against the specified operator.
Percentage float32
}
// Threshold defines a metric for when eviction should occur.
type Threshold struct {
// Signal defines the entity that was measured.
Signal Signal
// Operator represents a relationship of a signal to a value.
Operator ThresholdOperator
// Value is the threshold the resource is evaluated against.
Value ThresholdValue
// GracePeriod represents the amount of time that a threshold must be met before eviction is triggered.
GracePeriod time.Duration
// MinReclaim represents the minimum amount of resource to reclaim if the threshold is met.
MinReclaim *ThresholdValue
}
// GetThresholdQuantity returns the expected quantity value for a ThresholdValue, resolving a percentage against the given capacity.
func GetThresholdQuantity(value ThresholdValue, capacity *resource.Quantity) *resource.Quantity {
if value.Quantity != nil {
res := value.Quantity.DeepCopy()
return &res
}
return resource.NewQuantity(int64(float64(capacity.Value())*float64(value.Percentage)), resource.BinarySI)
}
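// Worked example (a sketch): with a percentage threshold the quantity is
// derived from capacity; with an explicit Quantity a deep copy is returned.
//
//	capacity := resource.MustParse("10Gi")
//	v := ThresholdValue{Percentage: 0.1}
//	q := GetThresholdQuantity(v, &capacity) // approximately 1Gi (10% of capacity)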
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"context"
"fmt"
"runtime"
"sort"
"sync"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/utils/clock"
resourcehelper "k8s.io/component-helpers/resource"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const (
podCleanupTimeout = 30 * time.Second
podCleanupPollFreq = time.Second
)
const (
// signalEphemeralContainerFsLimit is the signal for a container exceeding its ephemeral storage limit
signalEphemeralContainerFsLimit string = "ephemeralcontainerfs.limit"
// signalEphemeralPodFsLimit is the signal for a pod exceeding its ephemeral storage limit
signalEphemeralPodFsLimit string = "ephemeralpodfs.limit"
// signalEmptyDirFsLimit is the signal for an emptyDir volume exceeding its size limit
signalEmptyDirFsLimit string = "emptydirfs.limit"
// immediateEvictionGracePeriodSeconds is how long we give pods to shut down when we
// need to evict them quickly due to resource pressure
immediateEvictionGracePeriodSeconds = 1
)
// managerImpl implements Manager
type managerImpl struct {
// used to track time
clock clock.WithTicker
// config is how the manager is configured
config Config
// the function to invoke to kill a pod
killPodFunc KillPodFunc
// the interface that knows how to do image gc
imageGC ImageGC
// the interface that knows how to do container gc
containerGC ContainerGC
// protects access to internal state
sync.RWMutex
// node conditions are the set of conditions present
nodeConditions []v1.NodeConditionType
// captures when a node condition was last observed based on a threshold being met
nodeConditionsLastObservedAt nodeConditionsObservedAt
// nodeRef is a reference to the node
nodeRef *v1.ObjectReference
// used to record events about the node
recorder record.EventRecorder
// used to measure usage stats on system
summaryProvider stats.SummaryProvider
// records when a threshold was first observed
thresholdsFirstObservedAt thresholdsObservedAt
// records the set of thresholds that have been met (including grace period) but not yet resolved
thresholdsMet []evictionapi.Threshold
// signalToRankFunc maps a resource to ranking function for that resource.
signalToRankFunc map[evictionapi.Signal]rankFunc
// signalToNodeReclaimFuncs maps a resource to an ordered list of functions that know how to reclaim that resource.
signalToNodeReclaimFuncs map[evictionapi.Signal]nodeReclaimFuncs
// last observations from synchronize
lastObservations signalObservations
// dedicatedImageFs indicates if imagefs is on a separate device from the rootfs
dedicatedImageFs *bool
// splitContainerImageFs indicates if containerfs is on a separate device from imagefs
splitContainerImageFs *bool
// thresholdNotifiers is a list of memory threshold notifiers which each notify for a memory eviction threshold
thresholdNotifiers []ThresholdNotifier
// thresholdsLastUpdated is the last time the thresholdNotifiers were updated.
thresholdsLastUpdated time.Time
// whether can support local storage capacity isolation
localStorageCapacityIsolation bool
}
// ensure it implements the required interface
var _ Manager = &managerImpl{}
// NewManager returns a configured Manager and an associated admission handler to enforce eviction configuration.
func NewManager(
summaryProvider stats.SummaryProvider,
config Config,
killPodFunc KillPodFunc,
imageGC ImageGC,
containerGC ContainerGC,
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
clock clock.WithTicker,
localStorageCapacityIsolation bool,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,
killPodFunc: killPodFunc,
imageGC: imageGC,
containerGC: containerGC,
config: config,
recorder: recorder,
summaryProvider: summaryProvider,
nodeRef: nodeRef,
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
thresholdsFirstObservedAt: thresholdsObservedAt{},
dedicatedImageFs: nil,
splitContainerImageFs: nil,
thresholdNotifiers: []ThresholdNotifier{},
localStorageCapacityIsolation: localStorageCapacityIsolation,
}
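// the same managerImpl serves both as the eviction Manager and as the pod admission handler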
return manager, manager
}
// Admit rejects a pod if it is not safe to admit for node stability.
func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
m.RLock()
defer m.RUnlock()
if len(m.nodeConditions) == 0 {
return lifecycle.PodAdmitResult{Admit: true}
}
// Admit Critical pods even under resource pressure since they are required for system stability.
// https://github.com/kubernetes/kubernetes/issues/40573 has more details.
if kubelettypes.IsCriticalPod(attrs.Pod) {
return lifecycle.PodAdmitResult{Admit: true}
}
// Conditions other than memory pressure reject all pods
nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1
if nodeOnlyHasMemoryPressureCondition {
notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod)
if notBestEffort {
return lifecycle.PodAdmitResult{Admit: true}
}
// When node has memory pressure, check BestEffort Pod's toleration:
// admit it if tolerates memory pressure taint, fail for other tolerations, e.g. DiskPressure.
if corev1helpers.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
Key: v1.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
}) {
return lifecycle.PodAdmitResult{Admit: true}
}
}
return lifecycle.PodAdmitResult{
Admit: false,
Reason: Reason,
Message: fmt.Sprintf(nodeConditionMessageFmt, m.nodeConditions),
}
}
// Start starts the control loop to observe and respond to low compute resources.
func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, monitoringInterval time.Duration) {
thresholdHandler := func(message string) {
klog.InfoS(message)
m.synchronize(diskInfoProvider, podFunc)
}
klog.InfoS("Eviction manager: starting control loop")
if m.config.KernelMemcgNotification || runtime.GOOS == "windows" {
for _, threshold := range m.config.Thresholds {
if threshold.Signal == evictionapi.SignalMemoryAvailable || threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable {
notifier, err := NewMemoryThresholdNotifier(threshold, m.config.PodCgroupRoot, &CgroupNotifierFactory{}, thresholdHandler)
if err != nil {
klog.InfoS("Eviction manager: failed to create memory threshold notifier", "err", err)
} else {
go notifier.Start()
m.thresholdNotifiers = append(m.thresholdNotifiers, notifier)
}
}
}
}
// start the eviction manager monitoring
go func() {
for {
evictedPods, err := m.synchronize(diskInfoProvider, podFunc)
if evictedPods != nil && err == nil {
klog.InfoS("Eviction manager: pods evicted, waiting for pod to be cleaned up", "pods", klog.KObjSlice(evictedPods))
m.waitForPodsCleanup(podCleanedUpFunc, evictedPods)
} else {
if err != nil {
klog.ErrorS(err, "Eviction manager: failed to synchronize")
}
time.Sleep(monitoringInterval)
}
}
}()
}
// IsUnderMemoryPressure returns true if the node is under memory pressure.
func (m *managerImpl) IsUnderMemoryPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure)
}
// IsUnderDiskPressure returns true if the node is under disk pressure.
func (m *managerImpl) IsUnderDiskPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodeDiskPressure)
}
// IsUnderPIDPressure returns true if the node is under PID pressure.
func (m *managerImpl) IsUnderPIDPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodePIDPressure)
}
// synchronize is the main control loop that enforces eviction thresholds.
// Returns the pods that were killed, or nil if no pods were killed.
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) ([]*v1.Pod, error) {
ctx := context.Background()
// if we have nothing to do, just return
thresholds := m.config.Thresholds
if len(thresholds) == 0 && !m.localStorageCapacityIsolation {
return nil, nil
}
klog.V(3).InfoS("Eviction manager: synchronize housekeeping")
// build the ranking functions (if not yet known)
// TODO: have a function in cadvisor that lets us know if global housekeeping has completed
if m.dedicatedImageFs == nil {
hasImageFs, imageFsErr := diskInfoProvider.HasDedicatedImageFs(ctx)
if imageFsErr != nil {
// TODO: This should be refactored to log an error and retry HasDedicatedImageFs.
// If we have a transient error, this will never be retried and we will not set eviction signals.
klog.ErrorS(imageFsErr, "Eviction manager: failed to get HasDedicatedImageFs")
return nil, fmt.Errorf("eviction manager: failed to get HasDedicatedImageFs: %w", imageFsErr)
}
m.dedicatedImageFs = &hasImageFs
splitContainerImageFs, splitErr := diskInfoProvider.HasDedicatedContainerFs(ctx)
if splitErr != nil {
// A common error case is when there is no split filesystem:
// finding the split filesystem label fails, and we want to ignore those errors
klog.ErrorS(splitErr, "eviction manager: failed to check if we have separate container filesystem. Ignoring.")
}
// If we have a split filesystem but the feature gate is turned off,
// we should return an error, since this is a bad state.
if !utilfeature.DefaultFeatureGate.Enabled(features.KubeletSeparateDiskGC) && splitContainerImageFs {
splitDiskError := fmt.Errorf("KubeletSeparateDiskGC is turned off but we still have a split filesystem")
return nil, splitDiskError
}
thresholds, err := UpdateContainerFsThresholds(m.config.Thresholds, hasImageFs, splitContainerImageFs)
m.config.Thresholds = thresholds
if err != nil {
klog.ErrorS(err, "eviction manager: found conflicting containerfs eviction. Ignoring.")
}
m.splitContainerImageFs = &splitContainerImageFs
m.signalToRankFunc = buildSignalToRankFunc(hasImageFs, splitContainerImageFs)
m.signalToNodeReclaimFuncs = buildSignalToNodeReclaimFuncs(m.imageGC, m.containerGC, hasImageFs, splitContainerImageFs)
}
klog.V(3).InfoS("FileSystem detection", "DedicatedImageFs", m.dedicatedImageFs, "SplitImageFs", m.splitContainerImageFs)
activePods := podFunc()
updateStats := true
summary, err := m.summaryProvider.Get(ctx, updateStats)
if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats")
return nil, nil
}
if m.clock.Since(m.thresholdsLastUpdated) > notifierRefreshInterval {
m.thresholdsLastUpdated = m.clock.Now()
for _, notifier := range m.thresholdNotifiers {
if err := notifier.UpdateThreshold(summary); err != nil {
klog.InfoS("Eviction manager: failed to update notifier", "notifier", notifier.Description(), "err", err)
}
}
}
// make observations and get a function to derive pod usage stats relative to those observations.
observations, statsFunc := makeSignalObservations(summary)
debugLogObservations("observations", observations)
// determine the set of thresholds met independent of grace period
thresholds = thresholdsMet(thresholds, observations, false)
debugLogThresholdsWithObservation("thresholds - ignoring grace period", thresholds, observations)
// determine the set of thresholds previously met that have not yet satisfied the associated min-reclaim
if len(m.thresholdsMet) > 0 {
thresholdsNotYetResolved := thresholdsMet(m.thresholdsMet, observations, true)
thresholds = mergeThresholds(thresholds, thresholdsNotYetResolved)
}
debugLogThresholdsWithObservation("thresholds - reclaim not satisfied", thresholds, observations)
// track when a threshold was first observed
now := m.clock.Now()
thresholdsFirstObservedAt := thresholdsFirstObservedAt(thresholds, m.thresholdsFirstObservedAt, now)
// the set of node conditions that are triggered by currently observed thresholds
nodeConditions := nodeConditions(thresholds)
if len(nodeConditions) > 0 {
klog.V(3).InfoS("Eviction manager: node conditions - observed", "nodeCondition", nodeConditions)
}
// track when a node condition was last observed
nodeConditionsLastObservedAt := nodeConditionsLastObservedAt(nodeConditions, m.nodeConditionsLastObservedAt, now)
// node conditions report true if they have been observed within the transition period window
nodeConditions = nodeConditionsObservedSince(nodeConditionsLastObservedAt, m.config.PressureTransitionPeriod, now)
if len(nodeConditions) > 0 {
klog.V(3).InfoS("Eviction manager: node conditions - transition period not met", "nodeCondition", nodeConditions)
}
// determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met)
thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now)
debugLogThresholdsWithObservation("thresholds - grace periods satisfied", thresholds, observations)
// update internal state
m.Lock()
m.nodeConditions = nodeConditions
m.thresholdsFirstObservedAt = thresholdsFirstObservedAt
m.nodeConditionsLastObservedAt = nodeConditionsLastObservedAt
m.thresholdsMet = thresholds
// determine the set of thresholds whose stats have been updated since the last sync
thresholds = thresholdsUpdatedStats(thresholds, observations, m.lastObservations)
debugLogThresholdsWithObservation("thresholds - updated stats", thresholds, observations)
m.lastObservations = observations
m.Unlock()
// evict pods if there is a resource usage violation from local volume temporary storage
// If eviction happens in localStorageEviction function, skip the rest of eviction action
if m.localStorageCapacityIsolation {
if evictedPods := m.localStorageEviction(activePods, statsFunc); len(evictedPods) > 0 {
return evictedPods, nil
}
}
if len(thresholds) == 0 {
klog.V(3).InfoS("Eviction manager: no resources are starved")
return nil, nil
}
// rank the thresholds by eviction priority
sort.Sort(byEvictionPriority(thresholds))
thresholdToReclaim, resourceToReclaim, foundAny := getReclaimableThreshold(thresholds)
if !foundAny {
return nil, nil
}
klog.InfoS("Eviction manager: attempting to reclaim", "resourceName", resourceToReclaim)
// record an event about the resources we are now attempting to reclaim via eviction
m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
// check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods.
if m.reclaimNodeLevelResources(ctx, thresholdToReclaim.Signal, resourceToReclaim) {
klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim)
return nil, nil
}
klog.InfoS("Eviction manager: must evict pod(s) to reclaim", "resourceName", resourceToReclaim)
// rank the pods for eviction
rank, ok := m.signalToRankFunc[thresholdToReclaim.Signal]
if !ok {
klog.ErrorS(nil, "Eviction manager: no ranking function for signal", "threshold", thresholdToReclaim.Signal)
return nil, nil
}
// the only viable candidates for eviction are pods that have something running.
if len(activePods) == 0 {
klog.ErrorS(nil, "Eviction manager: eviction thresholds have been met, but no pods are active to evict")
return nil, nil
}
// rank the running pods for eviction for the specified resource
rank(activePods, statsFunc)
klog.InfoS("Eviction manager: pods ranked for eviction", "pods", klog.KObjSlice(activePods))
// record the age of metrics for met thresholds that we are using for evictions.
for _, t := range thresholds {
timeObserved := observations[t.Signal].time
if !timeObserved.IsZero() {
metrics.EvictionStatsAge.WithLabelValues(string(t.Signal)).Observe(metrics.SinceInSeconds(timeObserved.Time))
}
}
// we kill at most a single pod during each eviction interval
for i := range activePods {
pod := activePods[i]
gracePeriodOverride := int64(immediateEvictionGracePeriodSeconds)
if !isHardEvictionThreshold(thresholdToReclaim) {
gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
if pod.Spec.TerminationGracePeriodSeconds != nil {
gracePeriodOverride = min(m.config.MaxPodGracePeriodSeconds, *pod.Spec.TerminationGracePeriodSeconds)
}
}
message, annotations := evictionMessage(resourceToReclaim, pod, statsFunc, thresholds, observations)
condition := &v1.PodCondition{
Type: v1.DisruptionTarget,
ObservedGeneration: pod.Generation,
Status: v1.ConditionTrue,
Reason: v1.PodReasonTerminationByKubelet,
Message: message,
}
if m.evictPod(pod, gracePeriodOverride, message, annotations, condition) {
metrics.Evictions.WithLabelValues(string(thresholdToReclaim.Signal)).Inc()
return []*v1.Pod{pod}, nil
}
}
klog.InfoS("Eviction manager: unable to evict any pods from the node")
return nil, nil
}
func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods []*v1.Pod) {
timeout := m.clock.NewTimer(podCleanupTimeout)
defer timeout.Stop()
ticker := m.clock.NewTicker(podCleanupPollFreq)
defer ticker.Stop()
for {
select {
case <-timeout.C():
klog.InfoS("Eviction manager: timed out waiting for pods to be cleaned up", "pods", klog.KObjSlice(pods))
return
case <-ticker.C():
for i, pod := range pods {
if !podCleanedUpFunc(pod) {
break
}
if i == len(pods)-1 {
klog.InfoS("Eviction manager: pods successfully cleaned up", "pods", klog.KObjSlice(pods))
return
}
}
}
}
}
// reclaimNodeLevelResources attempts to reclaim node level resources. returns true if thresholds were satisfied and no pod eviction is required.
func (m *managerImpl) reclaimNodeLevelResources(ctx context.Context, signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool {
nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim]
for _, nodeReclaimFunc := range nodeReclaimFuncs {
// attempt to reclaim the pressured resource.
if err := nodeReclaimFunc(ctx); err != nil {
klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err)
}
}
if len(nodeReclaimFuncs) > 0 {
summary, err := m.summaryProvider.Get(ctx, true)
if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim")
return false
}
// make observations and get a function to derive pod usage stats relative to those observations.
observations, _ := makeSignalObservations(summary)
debugLogObservations("observations after resource reclaim", observations)
// evaluate all thresholds independently of their grace period to see if with
// the new observations, we think we have met min reclaim goals
thresholds := thresholdsMet(m.config.Thresholds, observations, true)
debugLogThresholdsWithObservation("thresholds after resource reclaim - ignoring grace period", thresholds, observations)
if len(thresholds) == 0 {
return true
}
}
return false
}
// localStorageEviction checks the EmptyDir volume usage for each pod and determines whether it exceeds the specified limit and needs
// to be evicted. It also checks every container in the pod: if a container's overlay usage exceeds the limit, the pod is evicted too.
func (m *managerImpl) localStorageEviction(pods []*v1.Pod, statsFunc statsFunc) []*v1.Pod {
evicted := []*v1.Pod{}
for _, pod := range pods {
podStats, ok := statsFunc(pod)
if !ok {
continue
}
if m.emptyDirLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
continue
}
if m.podEphemeralStorageLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
continue
}
if m.containerEphemeralStorageLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
}
}
return evicted
}
func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
podVolumeUsed := make(map[string]*resource.Quantity)
for _, volume := range podStats.VolumeStats {
podVolumeUsed[volume.Name] = resource.NewQuantity(int64(*volume.UsedBytes), resource.BinarySI)
}
for i := range pod.Spec.Volumes {
source := &pod.Spec.Volumes[i].VolumeSource
if source.EmptyDir != nil {
size := source.EmptyDir.SizeLimit
used := podVolumeUsed[pod.Spec.Volumes[i].Name]
if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 {
// the emptyDir usage exceeds the size limit, evict the pod
if m.evictPod(pod, immediateEvictionGracePeriodSeconds, fmt.Sprintf(emptyDirMessageFmt, pod.Spec.Volumes[i].Name, size.String()), nil, nil) {
metrics.Evictions.WithLabelValues(signalEmptyDirFsLimit).Inc()
return true
}
return false
}
}
}
return false
}
func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
podLimits := resourcehelper.PodLimits(pod, resourcehelper.PodResourcesOptions{})
_, found := podLimits[v1.ResourceEphemeralStorage]
if !found {
return false
}
// pod stats api summarizes ephemeral storage usage (container, emptyDir, host[etc-hosts, logs])
podEphemeralStorageTotalUsage := &resource.Quantity{}
if podStats.EphemeralStorage != nil && podStats.EphemeralStorage.UsedBytes != nil {
podEphemeralStorageTotalUsage = resource.NewQuantity(int64(*podStats.EphemeralStorage.UsedBytes), resource.BinarySI)
}
podEphemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
if podEphemeralStorageTotalUsage.Cmp(podEphemeralStorageLimit) > 0 {
// the pod's total usage exceeds the total size limit of its containers, so evict the pod
message := fmt.Sprintf(podEphemeralStorageMessageFmt, podEphemeralStorageLimit.String())
if m.evictPod(pod, immediateEvictionGracePeriodSeconds, message, nil, nil) {
metrics.Evictions.WithLabelValues(signalEphemeralPodFsLimit).Inc()
return true
}
return false
}
return false
}
func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
thresholdsMap := make(map[string]*resource.Quantity)
for _, container := range pod.Spec.Containers {
ephemeralLimit := container.Resources.Limits.StorageEphemeral()
if ephemeralLimit != nil && ephemeralLimit.Value() != 0 {
thresholdsMap[container.Name] = ephemeralLimit
}
}
for _, containerStat := range podStats.Containers {
containerUsed := diskUsage(containerStat.Logs)
if !*m.dedicatedImageFs {
containerUsed.Add(*diskUsage(containerStat.Rootfs))
}
if ephemeralStorageThreshold, ok := thresholdsMap[containerStat.Name]; ok {
if ephemeralStorageThreshold.Cmp(*containerUsed) < 0 {
if m.evictPod(pod, immediateEvictionGracePeriodSeconds, fmt.Sprintf(containerEphemeralStorageMessageFmt, containerStat.Name, ephemeralStorageThreshold.String()), nil, nil) {
metrics.Evictions.WithLabelValues(signalEphemeralContainerFsLimit).Inc()
return true
}
return false
}
}
}
return false
}
func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg string, annotations map[string]string, condition *v1.PodCondition) bool {
// Do not evict pods that are marked critical. Static pods are not
// re-admitted after evictions.
// https://github.com/kubernetes/kubernetes/issues/40573 has more details.
if kubelettypes.IsCriticalPod(pod) {
klog.ErrorS(nil, "Eviction manager: cannot evict a critical pod", "pod", klog.KObj(pod))
return false
}
// record that we are evicting the pod
m.recorder.AnnotatedEventf(pod, annotations, v1.EventTypeWarning, Reason, evictMsg)
// this is a blocking call and should only return when the pod and its containers are killed.
klog.V(3).InfoS("Evicting pod", "pod", klog.KObj(pod), "podUID", pod.UID, "message", evictMsg)
err := m.killPodFunc(pod, true, &gracePeriodOverride, func(status *v1.PodStatus) {
status.Phase = v1.PodFailed
status.Reason = Reason
status.Message = evictMsg
if condition != nil {
condition.ObservedGeneration = podutil.CalculatePodConditionObservedGeneration(status, pod.Generation, v1.DisruptionTarget)
podutil.UpdatePodCondition(status, condition)
}
})
if err != nil {
klog.ErrorS(err, "Eviction manager: pod failed to evict", "pod", klog.KObj(pod))
} else {
klog.InfoS("Eviction manager: pod is evicted successfully", "pod", klog.KObj(pod))
}
return true
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"errors"
"fmt"
"sort"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
"k8s.io/kubernetes/pkg/features"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
volumeutils "k8s.io/kubernetes/pkg/volume/util"
)
const (
unsupportedEvictionSignal = "unsupported eviction signal %v"
// Reason is the reason reported back in status.
Reason = "Evicted"
// nodeLowMessageFmt is the message for evictions due to resource pressure.
nodeLowMessageFmt = "The node was low on resource: %v. "
// nodeConditionMessageFmt is the message for evictions due to a node condition.
nodeConditionMessageFmt = "The node had condition: %v. "
// containerMessageFmt provides additional information for containers exceeding requests
containerMessageFmt = "Container %s was using %s, request is %s, has larger consumption of %v. "
// podMessageFmt provides additional information for pods exceeding requests
podMessageFmt = "Pod %s was using %s, request is %s, has larger consumption of %v. "
// containerEphemeralStorageMessageFmt provides additional information for containers which have exceeded their ES limit
containerEphemeralStorageMessageFmt = "Container %s exceeded its local ephemeral storage limit %q. "
// podEphemeralStorageMessageFmt provides additional information for pods which have exceeded their ES limit
podEphemeralStorageMessageFmt = "Pod ephemeral local storage usage exceeds the total limit of containers %s. "
// emptyDirMessageFmt provides additional information for empty-dir volumes which have exceeded their size limit
emptyDirMessageFmt = "Usage of EmptyDir volume %q exceeds the limit %q. "
// inodes, number. internal to this module, used to account for local disk inode consumption.
resourceInodes v1.ResourceName = "inodes"
// resourcePids, number. internal to this module, used to account for local pid consumption.
resourcePids v1.ResourceName = "pids"
// OffendingContainersKey is the key in eviction event annotations for the list of container names which exceeded their requests
OffendingContainersKey = "offending_containers"
// OffendingContainersUsageKey is the key in eviction event annotations for the list of usage of containers which exceeded their requests
OffendingContainersUsageKey = "offending_containers_usage"
// OffendingPodKey is the key in eviction event annotations for the pod name which exceeded its requests
OffendingPodKey = "offending_pod"
// OffendingPodUsageKey is the key in eviction event annotations for the pod usage which exceeded its requests
OffendingPodUsageKey = "offending_pod_usage"
// StarvedResourceKey is the key for the starved resource in eviction event annotations
StarvedResourceKey = "starved_resource"
// thresholdMetMessageFmt is the message fragment reporting the threshold quantity that was met and the amount available.
thresholdMetMessageFmt = "Threshold quantity: %v, available: %v. "
)
var (
// signalToNodeCondition maps a signal to the node condition to report if threshold is met.
signalToNodeCondition map[evictionapi.Signal]v1.NodeConditionType
// signalToResource maps a Signal to its associated Resource.
signalToResource map[evictionapi.Signal]v1.ResourceName
)
func init() {
// map eviction signals to node conditions
signalToNodeCondition = map[evictionapi.Signal]v1.NodeConditionType{}
signalToNodeCondition[evictionapi.SignalMemoryAvailable] = v1.NodeMemoryPressure
signalToNodeCondition[evictionapi.SignalAllocatableMemoryAvailable] = v1.NodeMemoryPressure
signalToNodeCondition[evictionapi.SignalImageFsAvailable] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalContainerFsAvailable] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalNodeFsAvailable] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalImageFsInodesFree] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalNodeFsInodesFree] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalContainerFsInodesFree] = v1.NodeDiskPressure
signalToNodeCondition[evictionapi.SignalPIDAvailable] = v1.NodePIDPressure
// map signals to resources (and vice-versa)
signalToResource = map[evictionapi.Signal]v1.ResourceName{}
signalToResource[evictionapi.SignalMemoryAvailable] = v1.ResourceMemory
signalToResource[evictionapi.SignalAllocatableMemoryAvailable] = v1.ResourceMemory
signalToResource[evictionapi.SignalImageFsAvailable] = v1.ResourceEphemeralStorage
signalToResource[evictionapi.SignalImageFsInodesFree] = resourceInodes
signalToResource[evictionapi.SignalContainerFsAvailable] = v1.ResourceEphemeralStorage
signalToResource[evictionapi.SignalContainerFsInodesFree] = resourceInodes
signalToResource[evictionapi.SignalNodeFsAvailable] = v1.ResourceEphemeralStorage
signalToResource[evictionapi.SignalNodeFsInodesFree] = resourceInodes
signalToResource[evictionapi.SignalPIDAvailable] = resourcePids
}
// validSignal returns true if the signal is supported.
func validSignal(signal evictionapi.Signal) bool {
_, found := signalToResource[signal]
return found
}
// getReclaimableThreshold finds the threshold and resource to reclaim
func getReclaimableThreshold(thresholds []evictionapi.Threshold) (evictionapi.Threshold, v1.ResourceName, bool) {
for _, thresholdToReclaim := range thresholds {
if resourceToReclaim, ok := signalToResource[thresholdToReclaim.Signal]; ok {
return thresholdToReclaim, resourceToReclaim, true
}
klog.V(3).InfoS("Eviction manager: threshold was crossed, but reclaim is not implemented for this threshold.", "threshold", thresholdToReclaim.Signal)
}
return evictionapi.Threshold{}, "", false
}
// ParseThresholdConfig parses the flags for thresholds.
func ParseThresholdConfig(allocatableConfig []string, evictionHard, evictionSoft, evictionSoftGracePeriod, evictionMinimumReclaim map[string]string) ([]evictionapi.Threshold, error) {
results := []evictionapi.Threshold{}
hardThresholds, err := parseThresholdStatements(evictionHard)
if err != nil {
return nil, err
}
results = append(results, hardThresholds...)
softThresholds, err := parseThresholdStatements(evictionSoft)
if err != nil {
return nil, err
}
gracePeriods, err := parseGracePeriods(evictionSoftGracePeriod)
if err != nil {
return nil, err
}
minReclaims, err := parseMinimumReclaims(evictionMinimumReclaim)
if err != nil {
return nil, err
}
for i := range softThresholds {
signal := softThresholds[i].Signal
period, found := gracePeriods[signal]
if !found {
return nil, fmt.Errorf("grace period must be specified for the soft eviction threshold %v", signal)
}
softThresholds[i].GracePeriod = period
}
results = append(results, softThresholds...)
for i := range results {
if minReclaim, ok := minReclaims[results[i].Signal]; ok {
results[i].MinReclaim = &minReclaim
}
}
for _, key := range allocatableConfig {
if key == kubetypes.NodeAllocatableEnforcementKey {
results = addAllocatableThresholds(results)
break
}
}
return results, nil
}
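// Illustrative usage (a sketch with made-up flag values): one hard memory
// threshold plus one soft nodefs threshold with its grace period and a
// minimum reclaim amount.
//
//	thresholds, err := ParseThresholdConfig(
//		nil, // allocatableConfig
//		map[string]string{"memory.available": "100Mi"}, // evictionHard
//		map[string]string{"nodefs.available": "10%"},   // evictionSoft
//		map[string]string{"nodefs.available": "1m30s"}, // evictionSoftGracePeriod
//		map[string]string{"nodefs.available": "500Mi"}, // evictionMinimumReclaim
//	)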
func addAllocatableThresholds(thresholds []evictionapi.Threshold) []evictionapi.Threshold {
additionalThresholds := []evictionapi.Threshold{}
for _, threshold := range thresholds {
if threshold.Signal == evictionapi.SignalMemoryAvailable && isHardEvictionThreshold(threshold) {
// Copy the SignalMemoryAvailable to SignalAllocatableMemoryAvailable
additionalThresholds = append(additionalThresholds, evictionapi.Threshold{
Signal: evictionapi.SignalAllocatableMemoryAvailable,
Operator: threshold.Operator,
Value: threshold.Value,
MinReclaim: threshold.MinReclaim,
})
}
}
return append(append([]evictionapi.Threshold{}, thresholds...), additionalThresholds...)
}
// UpdateContainerFsThresholds adds containerfs eviction hard/soft settings
// based on the container runtime's filesystem configuration.
// Any containerfs thresholds parsed from the evictionHard and evictionSoft limits are overridden.
// If there is a single filesystem, the containerfs settings are the same as nodefs.
// If there is a separate image filesystem for both containers and images, the containerfs settings are the same as imagefs.
func UpdateContainerFsThresholds(thresholds []evictionapi.Threshold, imageFs, separateContainerImageFs bool) ([]evictionapi.Threshold, error) {
hardNodeFsDisk := evictionapi.Threshold{}
softNodeFsDisk := evictionapi.Threshold{}
hardNodeINodeDisk := evictionapi.Threshold{}
softNodeINodeDisk := evictionapi.Threshold{}
hardImageFsDisk := evictionapi.Threshold{}
softImageFsDisk := evictionapi.Threshold{}
hardImageINodeDisk := evictionapi.Threshold{}
softImageINodeDisk := evictionapi.Threshold{}
hardContainerFsDisk := -1
softContainerFsDisk := -1
hardContainerFsINodes := -1
softContainerFsINodes := -1
// Find the imagefs and nodefs thresholds
var err error
for idx, threshold := range thresholds {
if threshold.Signal == evictionapi.SignalImageFsAvailable && isHardEvictionThreshold(threshold) {
hardImageFsDisk = threshold
}
if threshold.Signal == evictionapi.SignalImageFsAvailable && !isHardEvictionThreshold(threshold) {
softImageFsDisk = threshold
}
if threshold.Signal == evictionapi.SignalImageFsInodesFree && isHardEvictionThreshold(threshold) {
hardImageINodeDisk = threshold
}
if threshold.Signal == evictionapi.SignalImageFsInodesFree && !isHardEvictionThreshold(threshold) {
softImageINodeDisk = threshold
}
if threshold.Signal == evictionapi.SignalNodeFsAvailable && isHardEvictionThreshold(threshold) {
hardNodeFsDisk = threshold
}
if threshold.Signal == evictionapi.SignalNodeFsAvailable && !isHardEvictionThreshold(threshold) {
softNodeFsDisk = threshold
}
if threshold.Signal == evictionapi.SignalNodeFsInodesFree && isHardEvictionThreshold(threshold) {
hardNodeINodeDisk = threshold
}
if threshold.Signal == evictionapi.SignalNodeFsInodesFree && !isHardEvictionThreshold(threshold) {
softNodeINodeDisk = threshold
}
// We log a warning and override the settings.
// This is safe because we do not support a separate container filesystem,
// so the containerfs limits must match either nodefs or imagefs.
if threshold.Signal == evictionapi.SignalContainerFsAvailable && isHardEvictionThreshold(threshold) {
err = errors.Join(fmt.Errorf("found containerfs.available for hard eviction. ignoring"))
hardContainerFsDisk = idx
}
if threshold.Signal == evictionapi.SignalContainerFsAvailable && !isHardEvictionThreshold(threshold) {
err = errors.Join(fmt.Errorf("found containerfs.available for soft eviction. ignoring"))
softContainerFsDisk = idx
}
if threshold.Signal == evictionapi.SignalContainerFsInodesFree && isHardEvictionThreshold(threshold) {
err = errors.Join(fmt.Errorf("found containerfs.inodesFree for hard eviction. ignoring"))
hardContainerFsINodes = idx
}
if threshold.Signal == evictionapi.SignalContainerFsInodesFree && !isHardEvictionThreshold(threshold) {
err = errors.Join(fmt.Errorf("found containerfs.inodesFree for soft eviction. ignoring"))
softContainerFsINodes = idx
}
}
// Either split disk case (containerfs=nodefs) or single filesystem
if (imageFs && separateContainerImageFs) || (!imageFs && !separateContainerImageFs) {
if hardContainerFsDisk != -1 {
thresholds[hardContainerFsDisk] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable, Operator: hardNodeFsDisk.Operator, Value: hardNodeFsDisk.Value, MinReclaim: hardNodeFsDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable,
Operator: hardNodeFsDisk.Operator,
Value: hardNodeFsDisk.Value,
MinReclaim: hardNodeFsDisk.MinReclaim,
})
}
if softContainerFsDisk != -1 {
thresholds[softContainerFsDisk] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable, GracePeriod: softNodeFsDisk.GracePeriod, Operator: softNodeFsDisk.Operator, Value: softNodeFsDisk.Value, MinReclaim: softNodeFsDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable,
Operator: softNodeFsDisk.Operator,
Value: softNodeFsDisk.Value,
MinReclaim: softNodeFsDisk.MinReclaim,
GracePeriod: softNodeFsDisk.GracePeriod,
})
}
if hardContainerFsINodes != -1 {
thresholds[hardContainerFsINodes] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree, Operator: hardNodeINodeDisk.Operator, Value: hardNodeINodeDisk.Value, MinReclaim: hardNodeINodeDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree,
Operator: hardNodeINodeDisk.Operator,
Value: hardNodeINodeDisk.Value,
MinReclaim: hardNodeINodeDisk.MinReclaim,
})
}
if softContainerFsINodes != -1 {
thresholds[softContainerFsINodes] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree, GracePeriod: softNodeINodeDisk.GracePeriod, Operator: softNodeINodeDisk.Operator, Value: softNodeINodeDisk.Value, MinReclaim: softNodeINodeDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree,
Operator: softNodeINodeDisk.Operator,
Value: softNodeINodeDisk.Value,
MinReclaim: softNodeINodeDisk.MinReclaim,
GracePeriod: softNodeINodeDisk.GracePeriod,
})
}
}
// Separate image filesystem case
if imageFs && !separateContainerImageFs {
if hardContainerFsDisk != -1 {
thresholds[hardContainerFsDisk] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable, Operator: hardImageFsDisk.Operator, Value: hardImageFsDisk.Value, MinReclaim: hardImageFsDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable,
Operator: hardImageFsDisk.Operator,
Value: hardImageFsDisk.Value,
MinReclaim: hardImageFsDisk.MinReclaim,
})
}
if softContainerFsDisk != -1 {
thresholds[softContainerFsDisk] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable, GracePeriod: softImageFsDisk.GracePeriod, Operator: softImageFsDisk.Operator, Value: softImageFsDisk.Value, MinReclaim: softImageFsDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsAvailable,
Operator: softImageFsDisk.Operator,
Value: softImageFsDisk.Value,
MinReclaim: softImageFsDisk.MinReclaim,
GracePeriod: softImageFsDisk.GracePeriod,
})
}
if hardContainerFsINodes != -1 {
thresholds[hardContainerFsINodes] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree, Operator: hardImageINodeDisk.Operator, Value: hardImageINodeDisk.Value, MinReclaim: hardImageINodeDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree,
Operator: hardImageINodeDisk.Operator,
Value: hardImageINodeDisk.Value,
MinReclaim: hardImageINodeDisk.MinReclaim,
})
}
if softContainerFsINodes != -1 {
thresholds[softContainerFsINodes] = evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree, GracePeriod: softImageINodeDisk.GracePeriod, Operator: softImageINodeDisk.Operator, Value: softImageINodeDisk.Value, MinReclaim: softImageINodeDisk.MinReclaim,
}
} else {
thresholds = append(thresholds, evictionapi.Threshold{
Signal: evictionapi.SignalContainerFsInodesFree,
Operator: softImageINodeDisk.Operator,
Value: softImageINodeDisk.Value,
MinReclaim: softImageINodeDisk.MinReclaim,
GracePeriod: softImageINodeDisk.GracePeriod,
})
}
}
return thresholds, err
}
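// Worked example (a sketch): with imageFs=true and separateContainerImageFs=false
// (a dedicated image filesystem that also holds container writable layers), the
// containerfs.available and containerfs.inodesFree thresholds are set to mirror
// the imagefs thresholds; with a single filesystem they mirror nodefs instead.
//
//	updated, err := UpdateContainerFsThresholds(thresholds, true, false)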
// parseThresholdStatements parses the input statements into a list of Threshold objects.
func parseThresholdStatements(statements map[string]string) ([]evictionapi.Threshold, error) {
if len(statements) == 0 {
return nil, nil
}
results := []evictionapi.Threshold{}
for signal, val := range statements {
result, err := parseThresholdStatement(evictionapi.Signal(signal), val)
if err != nil {
return nil, err
}
if result != nil {
results = append(results, *result)
}
}
return results, nil
}
// parseThresholdStatement parses a threshold statement and returns a threshold,
// or nil if the threshold should be ignored.
func parseThresholdStatement(signal evictionapi.Signal, val string) (*evictionapi.Threshold, error) {
if !validSignal(signal) {
return nil, fmt.Errorf(unsupportedEvictionSignal, signal)
}
operator := evictionapi.OpForSignal[signal]
if strings.HasSuffix(val, "%") {
// ignore 0% and 100%
if val == "0%" || val == "100%" {
return nil, nil
}
percentage, err := parsePercentage(val)
if err != nil {
return nil, err
}
if percentage < 0 {
return nil, fmt.Errorf("eviction percentage threshold %v must be >= 0%%: %s", signal, val)
}
// percentage is a float and should not be greater than 1 (100%)
if percentage > 1 {
return nil, fmt.Errorf("eviction percentage threshold %v must be <= 100%%: %s", signal, val)
}
return &evictionapi.Threshold{
Signal: signal,
Operator: operator,
Value: evictionapi.ThresholdValue{
Percentage: percentage,
},
}, nil
}
quantity, err := resource.ParseQuantity(val)
if err != nil {
return nil, err
}
if quantity.Sign() < 0 || quantity.IsZero() {
return nil, fmt.Errorf("eviction threshold %v must be positive: %s", signal, &quantity)
}
return &evictionapi.Threshold{
Signal: signal,
Operator: operator,
Value: evictionapi.ThresholdValue{
Quantity: &quantity,
},
}, nil
}
// parsePercentage parses a string representing a percentage value
func parsePercentage(input string) (float32, error) {
value, err := strconv.ParseFloat(strings.TrimRight(input, "%"), 32)
if err != nil {
return 0, err
}
return float32(value) / 100, nil
}
// parseGracePeriods parses the grace period statements
func parseGracePeriods(statements map[string]string) (map[evictionapi.Signal]time.Duration, error) {
if len(statements) == 0 {
return nil, nil
}
results := map[evictionapi.Signal]time.Duration{}
for signal, val := range statements {
signal := evictionapi.Signal(signal)
if !validSignal(signal) {
return nil, fmt.Errorf(unsupportedEvictionSignal, signal)
}
gracePeriod, err := time.ParseDuration(val)
if err != nil {
return nil, err
}
if gracePeriod < 0 {
return nil, fmt.Errorf("invalid eviction grace period specified: %v, must be a positive value", val)
}
results[signal] = gracePeriod
}
return results, nil
}
// parseMinimumReclaims parses the minimum reclaim statements
func parseMinimumReclaims(statements map[string]string) (map[evictionapi.Signal]evictionapi.ThresholdValue, error) {
if len(statements) == 0 {
return nil, nil
}
results := map[evictionapi.Signal]evictionapi.ThresholdValue{}
for signal, val := range statements {
signal := evictionapi.Signal(signal)
if !validSignal(signal) {
return nil, fmt.Errorf(unsupportedEvictionSignal, signal)
}
if strings.HasSuffix(val, "%") {
percentage, err := parsePercentage(val)
if err != nil {
return nil, err
}
if percentage <= 0 {
return nil, fmt.Errorf("eviction percentage minimum reclaim %v must be positive: %s", signal, val)
}
results[signal] = evictionapi.ThresholdValue{
Percentage: percentage,
}
continue
}
quantity, err := resource.ParseQuantity(val)
if err != nil {
return nil, err
}
if quantity.Sign() < 0 {
return nil, fmt.Errorf("negative eviction minimum reclaim specified for %v", signal)
}
results[signal] = evictionapi.ThresholdValue{
Quantity: &quantity,
}
}
return results, nil
}
// diskUsage converts used bytes into a resource quantity.
func diskUsage(fsStats *statsapi.FsStats) *resource.Quantity {
if fsStats == nil || fsStats.UsedBytes == nil {
return &resource.Quantity{Format: resource.BinarySI}
}
usage := int64(*fsStats.UsedBytes)
return resource.NewQuantity(usage, resource.BinarySI)
}
// inodeUsage converts inodes consumed into a resource quantity.
func inodeUsage(fsStats *statsapi.FsStats) *resource.Quantity {
if fsStats == nil || fsStats.InodesUsed == nil {
return &resource.Quantity{Format: resource.DecimalSI}
}
usage := int64(*fsStats.InodesUsed)
return resource.NewQuantity(usage, resource.DecimalSI)
}
// memoryUsage converts working set into a resource quantity.
func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
if memStats == nil || memStats.WorkingSetBytes == nil {
return &resource.Quantity{Format: resource.BinarySI}
}
usage := int64(*memStats.WorkingSetBytes)
return resource.NewQuantity(usage, resource.BinarySI)
}
// processUsage returns the process count from process stats.
func processUsage(processStats *statsapi.ProcessStats) uint64 {
if processStats == nil || processStats.ProcessCount == nil {
return 0
}
usage := uint64(*processStats.ProcessCount)
return usage
}
// localVolumeNames returns the set of volumes for the pod that are local
// TODO: summary API should report what volumes consume local storage rather than hard-code here.
func localVolumeNames(pod *v1.Pod) []string {
result := []string{}
for _, volume := range pod.Spec.Volumes {
if volume.HostPath != nil ||
volumeutils.IsLocalEphemeralVolume(volume) {
result = append(result, volume.Name)
}
}
return result
}
// containerUsage aggregates container disk usage and inode consumption for the specified stats to measure.
func containerUsage(podStats statsapi.PodStats, statsToMeasure []fsStatsType) v1.ResourceList {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.DecimalSI}
for _, container := range podStats.Containers {
if hasFsStatsType(statsToMeasure, fsStatsRoot) {
disk.Add(*diskUsage(container.Rootfs))
inodes.Add(*inodeUsage(container.Rootfs))
}
if hasFsStatsType(statsToMeasure, fsStatsLogs) {
disk.Add(*diskUsage(container.Logs))
inodes.Add(*inodeUsage(container.Logs))
}
}
return v1.ResourceList{
v1.ResourceEphemeralStorage: disk,
resourceInodes: inodes,
}
}
// podLocalVolumeUsage aggregates pod local volumes disk usage and inode consumption for the specified stats to measure.
func podLocalVolumeUsage(volumeNames []string, podStats statsapi.PodStats) v1.ResourceList {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.DecimalSI}
for _, volumeName := range volumeNames {
for _, volumeStats := range podStats.VolumeStats {
if volumeStats.Name == volumeName {
disk.Add(*diskUsage(&volumeStats.FsStats))
inodes.Add(*inodeUsage(&volumeStats.FsStats))
break
}
}
}
return v1.ResourceList{
v1.ResourceEphemeralStorage: disk,
resourceInodes: inodes,
}
}
// podDiskUsage aggregates pod disk usage and inode consumption for the specified stats to measure.
func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.DecimalSI}
containerUsageList := containerUsage(podStats, statsToMeasure)
disk.Add(containerUsageList[v1.ResourceEphemeralStorage])
inodes.Add(containerUsageList[resourceInodes])
if hasFsStatsType(statsToMeasure, fsStatsLocalVolumeSource) {
volumeNames := localVolumeNames(pod)
podLocalVolumeUsageList := podLocalVolumeUsage(volumeNames, podStats)
disk.Add(podLocalVolumeUsageList[v1.ResourceEphemeralStorage])
inodes.Add(podLocalVolumeUsageList[resourceInodes])
}
return v1.ResourceList{
v1.ResourceEphemeralStorage: disk,
resourceInodes: inodes,
}, nil
}
// formatThreshold formats a threshold for logging.
func formatThreshold(threshold evictionapi.Threshold) string {
return fmt.Sprintf("threshold(signal=%v, operator=%v, value=%v, gracePeriod=%v)", threshold.Signal, threshold.Operator, evictionapi.ThresholdValue(threshold.Value), threshold.GracePeriod)
}
// cachedStatsFunc returns a statsFunc based on the provided pod stats.
func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc {
uid2PodStats := map[string]statsapi.PodStats{}
for i := range podStats {
uid2PodStats[podStats[i].PodRef.UID] = podStats[i]
}
return func(pod *v1.Pod) (statsapi.PodStats, bool) {
stats, found := uid2PodStats[string(pod.UID)]
return stats, found
}
}
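// Illustrative usage (a sketch, not part of the original source): build the
// lookup once per stats summary and reuse it for every pod under
// consideration.
//
//	statsFn := cachedStatsFunc(summary.Pods)
//	podStats, found := statsFn(pod) // found is false if the pod has no stats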
// Cmp compares p1 and p2 and returns:
//
// -1 if p1 < p2
// 0 if p1 == p2
// +1 if p1 > p2
type cmpFunc func(p1, p2 *v1.Pod) int
// multiSorter implements sort.Interface, sorting the pods within by the provided cmp functions.
type multiSorter struct {
pods []*v1.Pod
cmp []cmpFunc
}
// Sort sorts the argument slice according to the less functions passed to OrderedBy.
func (ms *multiSorter) Sort(pods []*v1.Pod) {
ms.pods = pods
sort.Sort(ms)
}
// orderedBy returns a multiSorter that sorts using the cmp functions, in order.
// Call its Sort method to sort the data.
func orderedBy(cmp ...cmpFunc) *multiSorter {
return &multiSorter{
cmp: cmp,
}
}
// Len is part of sort.Interface.
func (ms *multiSorter) Len() int {
return len(ms.pods)
}
// Swap is part of sort.Interface.
func (ms *multiSorter) Swap(i, j int) {
ms.pods[i], ms.pods[j] = ms.pods[j], ms.pods[i]
}
// Less is part of sort.Interface.
func (ms *multiSorter) Less(i, j int) bool {
p1, p2 := ms.pods[i], ms.pods[j]
var k int
for k = 0; k < len(ms.cmp)-1; k++ {
cmpResult := ms.cmp[k](p1, p2)
// p1 is less than p2
if cmpResult < 0 {
return true
}
// p1 is greater than p2
if cmpResult > 0 {
return false
}
// we don't know yet
}
// the last cmp func is the final decider
return ms.cmp[k](p1, p2) < 0
}
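// Illustrative usage (a sketch, not part of the original source): comparators
// are applied in order, and each later comparator only breaks ties left by the
// earlier ones. The composition below evicts pods exceeding their memory
// requests first, then lower-priority pods, then the largest memory consumers.
//
//	orderedBy(exceedMemoryRequests(statsFn), priority, memory(statsFn)).Sort(pods)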
// priority compares pods by their priority values, as resolved by corev1helpers.PodPriority.
func priority(p1, p2 *v1.Pod) int {
priority1 := corev1helpers.PodPriority(p1)
priority2 := corev1helpers.PodPriority(p2)
if priority1 == priority2 {
return 0
}
if priority1 > priority2 {
return 1
}
return -1
}
// exceedMemoryRequests compares whether or not pods' memory usage exceeds their requests
func exceedMemoryRequests(stats statsFunc) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, p1Found := stats(p1)
p2Stats, p2Found := stats(p2)
if !p1Found || !p2Found {
// prioritize evicting the pod for which no stats were found
return cmpBool(!p1Found, !p2Found)
}
p1Memory := memoryUsage(p1Stats.Memory)
p2Memory := memoryUsage(p2Stats.Memory)
p1ExceedsRequests := p1Memory.Cmp(v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)) == 1
p2ExceedsRequests := p2Memory.Cmp(v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)) == 1
// prioritize evicting the pod which exceeds its requests
return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
}
}
// memory compares pods by largest consumer of memory relative to request.
func memory(stats statsFunc) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, p1Found := stats(p1)
p2Stats, p2Found := stats(p2)
if !p1Found || !p2Found {
// prioritize evicting the pod for which no stats were found
return cmpBool(!p1Found, !p2Found)
}
// adjust p1, p2 usage relative to the request (if any)
p1Memory := memoryUsage(p1Stats.Memory)
p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)
p1Memory.Sub(p1Request)
p2Memory := memoryUsage(p2Stats.Memory)
p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)
p2Memory.Sub(p2Request)
// prioritize evicting the pod which has the larger consumption of memory
return p2Memory.Cmp(*p1Memory)
}
}
// process compares pods by the largest number of processes consumed.
func process(stats statsFunc) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, p1Found := stats(p1)
p2Stats, p2Found := stats(p2)
if !p1Found || !p2Found {
// prioritize evicting the pod for which no stats were found
return cmpBool(!p1Found, !p2Found)
}
p1Process := processUsage(p1Stats.ProcessStats)
p2Process := processUsage(p2Stats.ProcessStats)
// prioritize evicting the pod with the larger process count; the unsigned
// subtraction wraps on underflow, but converting to int yields the correct
// signed difference for realistic process counts.
return int(p2Process - p1Process)
}
}
// exceedDiskRequests compares whether or not pods' disk usage exceeds their requests
func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, p1Found := stats(p1)
p2Stats, p2Found := stats(p2)
if !p1Found || !p2Found {
// prioritize evicting the pod for which no stats were found
return cmpBool(!p1Found, !p2Found)
}
p1Usage, p1Err := podDiskUsage(p1Stats, p1, fsStatsToMeasure)
p2Usage, p2Err := podDiskUsage(p2Stats, p2, fsStatsToMeasure)
if p1Err != nil || p2Err != nil {
// prioritize evicting the pod which had an error getting stats
return cmpBool(p1Err != nil, p2Err != nil)
}
p1Disk := p1Usage[diskResource]
p2Disk := p2Usage[diskResource]
p1ExceedsRequests := p1Disk.Cmp(v1resource.GetResourceRequestQuantity(p1, diskResource)) == 1
p2ExceedsRequests := p2Disk.Cmp(v1resource.GetResourceRequestQuantity(p2, diskResource)) == 1
// prioritize evicting the pod which exceeds its requests
return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
}
}
// disk compares pods by largest consumer of disk relative to request for the specified disk resource.
func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, p1Found := stats(p1)
p2Stats, p2Found := stats(p2)
if !p1Found || !p2Found {
// prioritize evicting the pod for which no stats were found
return cmpBool(!p1Found, !p2Found)
}
p1Usage, p1Err := podDiskUsage(p1Stats, p1, fsStatsToMeasure)
p2Usage, p2Err := podDiskUsage(p2Stats, p2, fsStatsToMeasure)
if p1Err != nil || p2Err != nil {
// prioritize evicting the pod which had an error getting stats
return cmpBool(p1Err != nil, p2Err != nil)
}
// adjust p1, p2 usage relative to the request (if any)
p1Disk := p1Usage[diskResource]
p2Disk := p2Usage[diskResource]
p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceEphemeralStorage)
p1Disk.Sub(p1Request)
p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceEphemeralStorage)
p2Disk.Sub(p2Request)
// prioritize evicting the pod which has the larger consumption of disk
return p2Disk.Cmp(p1Disk)
}
}
// cmpBool compares booleans, placing true before false
func cmpBool(a, b bool) int {
if a == b {
return 0
}
if !b {
return -1
}
return 1
}
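// Truth table for cmpBool (derived from the code above): equal inputs return
// 0; (true, false) returns -1, sorting the true value first; (false, true)
// returns 1.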
// rankMemoryPressure orders the input pods for eviction in response to memory pressure.
// It ranks by whether or not the pod's usage exceeds its requests, then by priority, and
// finally by memory usage above requests.
func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) {
orderedBy(exceedMemoryRequests(stats), priority, memory(stats)).Sort(pods)
}
// rankPIDPressure orders the input pods by priority in response to PID pressure.
func rankPIDPressure(pods []*v1.Pod, stats statsFunc) {
orderedBy(priority, process(stats)).Sort(pods)
}
// rankDiskPressureFunc returns a rankFunc that measures the specified fs stats.
func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) rankFunc {
return func(pods []*v1.Pod, stats statsFunc) {
orderedBy(exceedDiskRequests(stats, fsStatsToMeasure, diskResource), priority, disk(stats, fsStatsToMeasure, diskResource)).Sort(pods)
}
}
// byEvictionPriority implements sort.Interface for []evictionapi.Threshold.
type byEvictionPriority []evictionapi.Threshold
func (a byEvictionPriority) Len() int { return len(a) }
func (a byEvictionPriority) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Less ranks memory before all other resources, and ranks thresholds with no resource to reclaim last
func (a byEvictionPriority) Less(i, j int) bool {
_, jSignalHasResource := signalToResource[a[j].Signal]
return a[i].Signal == evictionapi.SignalMemoryAvailable || a[i].Signal == evictionapi.SignalAllocatableMemoryAvailable || !jSignalHasResource
}
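// Illustrative ordering (a sketch, not part of the original source):
//
//	sort.Sort(byEvictionPriority(thresholds))
//
// places memory-available signals first, other signals that map to a
// reclaimable resource next, and thresholds with no resource to reclaim last.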
// makeSignalObservations derives observations using the specified summary provider.
func makeSignalObservations(summary *statsapi.Summary) (signalObservations, statsFunc) {
// build the function to work against for pod stats
statsFunc := cachedStatsFunc(summary.Pods)
// build an evaluation context for current eviction signals
result := signalObservations{}
memoryAvailableSignal := makeMemoryAvailableSignalObservation(summary)
if memoryAvailableSignal != nil {
result[evictionapi.SignalMemoryAvailable] = *memoryAvailableSignal
}
if allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods); err != nil {
klog.ErrorS(err, "Eviction manager: failed to construct signal", "signal", evictionapi.SignalAllocatableMemoryAvailable)
} else {
if memory := allocatableContainer.Memory; memory != nil && memory.AvailableBytes != nil && memory.WorkingSetBytes != nil {
result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{
available: resource.NewQuantity(int64(*memory.AvailableBytes), resource.BinarySI),
capacity: resource.NewQuantity(int64(*memory.AvailableBytes+*memory.WorkingSetBytes), resource.BinarySI),
time: memory.Time,
}
}
}
if nodeFs := summary.Node.Fs; nodeFs != nil {
if nodeFs.AvailableBytes != nil && nodeFs.CapacityBytes != nil {
result[evictionapi.SignalNodeFsAvailable] = signalObservation{
available: resource.NewQuantity(int64(*nodeFs.AvailableBytes), resource.BinarySI),
capacity: resource.NewQuantity(int64(*nodeFs.CapacityBytes), resource.BinarySI),
time: nodeFs.Time,
}
}
if nodeFs.InodesFree != nil && nodeFs.Inodes != nil {
result[evictionapi.SignalNodeFsInodesFree] = signalObservation{
available: resource.NewQuantity(int64(*nodeFs.InodesFree), resource.DecimalSI),
capacity: resource.NewQuantity(int64(*nodeFs.Inodes), resource.DecimalSI),
time: nodeFs.Time,
}
}
}
if summary.Node.Runtime != nil {
if imageFs := summary.Node.Runtime.ImageFs; imageFs != nil {
if imageFs.AvailableBytes != nil && imageFs.CapacityBytes != nil {
result[evictionapi.SignalImageFsAvailable] = signalObservation{
available: resource.NewQuantity(int64(*imageFs.AvailableBytes), resource.BinarySI),
capacity: resource.NewQuantity(int64(*imageFs.CapacityBytes), resource.BinarySI),
time: imageFs.Time,
}
}
if imageFs.InodesFree != nil && imageFs.Inodes != nil {
result[evictionapi.SignalImageFsInodesFree] = signalObservation{
available: resource.NewQuantity(int64(*imageFs.InodesFree), resource.DecimalSI),
capacity: resource.NewQuantity(int64(*imageFs.Inodes), resource.DecimalSI),
time: imageFs.Time,
}
}
}
if containerFs := summary.Node.Runtime.ContainerFs; containerFs != nil {
if containerFs.AvailableBytes != nil && containerFs.CapacityBytes != nil {
result[evictionapi.SignalContainerFsAvailable] = signalObservation{
available: resource.NewQuantity(int64(*containerFs.AvailableBytes), resource.BinarySI),
capacity: resource.NewQuantity(int64(*containerFs.CapacityBytes), resource.BinarySI),
time: containerFs.Time,
}
}
if containerFs.InodesFree != nil && containerFs.Inodes != nil {
result[evictionapi.SignalContainerFsInodesFree] = signalObservation{
available: resource.NewQuantity(int64(*containerFs.InodesFree), resource.DecimalSI),
capacity: resource.NewQuantity(int64(*containerFs.Inodes), resource.DecimalSI),
time: containerFs.Time,
}
}
}
}
if rlimit := summary.Node.Rlimit; rlimit != nil {
if rlimit.NumOfRunningProcesses != nil && rlimit.MaxPID != nil {
available := int64(*rlimit.MaxPID) - int64(*rlimit.NumOfRunningProcesses)
result[evictionapi.SignalPIDAvailable] = signalObservation{
available: resource.NewQuantity(available, resource.DecimalSI),
capacity: resource.NewQuantity(int64(*rlimit.MaxPID), resource.DecimalSI),
time: rlimit.Time,
}
}
}
return result, statsFunc
}
// getSysContainer returns the system container with the given name from the summary's system containers.
func getSysContainer(sysContainers []statsapi.ContainerStats, name string) (*statsapi.ContainerStats, error) {
for _, cont := range sysContainers {
if cont.Name == name {
return &cont, nil
}
}
return nil, fmt.Errorf("system container %q not found in metrics", name)
}
// thresholdsMet returns the set of thresholds that were met independent of grace period
func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObservations, enforceMinReclaim bool) []evictionapi.Threshold {
results := []evictionapi.Threshold{}
for i := range thresholds {
threshold := thresholds[i]
observed, found := observations[threshold.Signal]
if !found {
klog.InfoS("Eviction manager: no observation found for eviction signal", "signal", threshold.Signal)
continue
}
// determine if we have met the specified threshold
thresholdMet := false
quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity)
// if enforceMinReclaim is specified, we compare against value + minReclaim, so the threshold remains met until at least minReclaim beyond the threshold value has been reclaimed
if enforceMinReclaim && threshold.MinReclaim != nil {
quantity.Add(*evictionapi.GetThresholdQuantity(*threshold.MinReclaim, observed.capacity))
}
thresholdResult := quantity.Cmp(*observed.available)
switch threshold.Operator {
case evictionapi.OpLessThan:
thresholdMet = thresholdResult > 0
}
if thresholdMet {
results = append(results, threshold)
}
}
return results
}
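// Worked example (assumed numbers, not from the original source): for a
// memory.available<1Gi threshold with observed available of 800Mi, quantity
// (1Gi) compares greater than available, so the threshold is met. With
// enforceMinReclaim and a MinReclaim of 500Mi, the comparison value becomes
// 1.5Gi, so the threshold remains met until at least 1.5Gi is available again.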
// debugLogObservations logs the current signal observations at verbosity 3.
func debugLogObservations(logPrefix string, observations signalObservations) {
klogV := klog.V(3)
if !klogV.Enabled() {
return
}
for k, v := range observations {
if !v.time.IsZero() {
klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", signalToResource[k], "available", v.available, "capacity", v.capacity, "time", v.time)
} else {
klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", signalToResource[k], "available", v.available, "capacity", v.capacity)
}
}
}
// debugLogThresholdsWithObservation logs each threshold together with its current observation at verbosity 3.
func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionapi.Threshold, observations signalObservations) {
klogV := klog.V(3)
if !klogV.Enabled() {
return
}
for i := range thresholds {
threshold := thresholds[i]
observed, found := observations[threshold.Signal]
if found {
quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity)
klogV.InfoS("Eviction manager: threshold observed resource", "log", logPrefix, "signal", threshold.Signal, "resourceName", signalToResource[threshold.Signal], "quantity", quantity, "available", observed.available)
} else {
klogV.InfoS("Eviction manager: threshold had no observation", "log", logPrefix, "signal", threshold.Signal)
}
}
}
// thresholdsUpdatedStats returns the subset of thresholds whose signal observations are newer than the last observations.
func thresholdsUpdatedStats(thresholds []evictionapi.Threshold, observations, lastObservations signalObservations) []evictionapi.Threshold {
results := []evictionapi.Threshold{}
for i := range thresholds {
threshold := thresholds[i]
observed, found := observations[threshold.Signal]
if !found {
klog.InfoS("Eviction manager: no observation found for eviction signal", "signal", threshold.Signal)
continue
}
last, found := lastObservations[threshold.Signal]
if !found || observed.time.IsZero() || observed.time.After(last.time.Time) {
results = append(results, threshold)
}
}
return results
}
// thresholdsFirstObservedAt merges the input set of thresholds with the previous observation to determine when each threshold in the active set was first met.
func thresholdsFirstObservedAt(thresholds []evictionapi.Threshold, lastObservedAt thresholdsObservedAt, now time.Time) thresholdsObservedAt {
results := thresholdsObservedAt{}
for i := range thresholds {
observedAt, found := lastObservedAt[thresholds[i]]
if !found {
observedAt = now
}
results[thresholds[i]] = observedAt
}
return results
}
// thresholdsMetGracePeriod returns the set of thresholds that have satisfied their associated grace period
func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) []evictionapi.Threshold {
results := []evictionapi.Threshold{}
for threshold, at := range observedAt {
duration := now.Sub(at)
if duration < threshold.GracePeriod {
klog.V(2).InfoS("Eviction manager: eviction criteria not yet met", "threshold", formatThreshold(threshold), "duration", duration)
continue
}
results = append(results, threshold)
}
return results
}
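// Worked example (assumed numbers, not from the original source): a soft
// threshold with a 30s grace period that was first observed 10s ago has a
// duration of 10s, so it is skipped; once 30s or more have elapsed it is
// returned as having met its grace period.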
// nodeConditions returns the set of node conditions associated with a threshold
func nodeConditions(thresholds []evictionapi.Threshold) []v1.NodeConditionType {
results := []v1.NodeConditionType{}
for _, threshold := range thresholds {
if nodeCondition, found := signalToNodeCondition[threshold.Signal]; found {
if !hasNodeCondition(results, nodeCondition) {
results = append(results, nodeCondition)
}
}
}
return results
}
// nodeConditionsLastObservedAt merges the input with the previous observation to determine when a condition was most recently met.
func nodeConditionsLastObservedAt(nodeConditions []v1.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt {
results := nodeConditionsObservedAt{}
// the input conditions were observed "now"
for i := range nodeConditions {
results[nodeConditions[i]] = now
}
// the conditions that were not observed now are merged in with their old time
for key, value := range lastObservedAt {
_, found := results[key]
if !found {
results[key] = value
}
}
return results
}
// nodeConditionsObservedSince returns the set of conditions that have been observed within the specified period
func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []v1.NodeConditionType {
results := []v1.NodeConditionType{}
for nodeCondition, at := range observedAt {
duration := now.Sub(at)
if duration < period {
results = append(results, nodeCondition)
}
}
return results
}
// hasFsStatsType returns true if the fsStat is in the input list
func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool {
for _, input := range inputs {
if input == item {
return true
}
}
return false
}
// hasNodeCondition returns true if the node condition is in the input list
func hasNodeCondition(inputs []v1.NodeConditionType, item v1.NodeConditionType) bool {
for _, input := range inputs {
if input == item {
return true
}
}
return false
}
// mergeThresholds merges both threshold lists, eliminating duplicates.
func mergeThresholds(inputsA []evictionapi.Threshold, inputsB []evictionapi.Threshold) []evictionapi.Threshold {
results := inputsA
for _, threshold := range inputsB {
if !hasThreshold(results, threshold) {
results = append(results, threshold)
}
}
return results
}
// hasThreshold returns true if the threshold is in the input list
func hasThreshold(inputs []evictionapi.Threshold, item evictionapi.Threshold) bool {
for _, input := range inputs {
if input.GracePeriod == item.GracePeriod && input.Operator == item.Operator && input.Signal == item.Signal && compareThresholdValue(input.Value, item.Value) {
return true
}
}
return false
}
// compareThresholdValue returns true if the two thresholdValue objects are logically the same
func compareThresholdValue(a evictionapi.ThresholdValue, b evictionapi.ThresholdValue) bool {
if a.Quantity != nil {
if b.Quantity == nil {
return false
}
return a.Quantity.Cmp(*b.Quantity) == 0
}
if b.Quantity != nil {
return false
}
return a.Percentage == b.Percentage
}
// isHardEvictionThreshold returns true if eviction should immediately occur
func isHardEvictionThreshold(threshold evictionapi.Threshold) bool {
return threshold.GracePeriod == time.Duration(0)
}
func isAllocatableEvictionThreshold(threshold evictionapi.Threshold) bool {
return threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable
}
// buildSignalToRankFunc returns ranking functions associated with resources
func buildSignalToRankFunc(withImageFs bool, imageContainerSplitFs bool) map[evictionapi.Signal]rankFunc {
signalToRankFunc := map[evictionapi.Signal]rankFunc{
evictionapi.SignalMemoryAvailable: rankMemoryPressure,
evictionapi.SignalAllocatableMemoryAvailable: rankMemoryPressure,
evictionapi.SignalPIDAvailable: rankPIDPressure,
}
// usage of an imagefs is optional
// If we have a dedicated image filesystem (images and containers on the same
// separate disk), we treat it as a single separate imagefs.
if withImageFs && !imageContainerSplitFs {
// with an imagefs, nodefs pod rank func for eviction only includes logs and local volumes
signalToRankFunc[evictionapi.SignalNodeFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalNodeFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource}, resourceInodes)
// with an imagefs, imagefs pod rank func for eviction only includes rootfs
signalToRankFunc[evictionapi.SignalImageFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsRoot, fsStatsImages}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalImageFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsRoot, fsStatsImages}, resourceInodes)
signalToRankFunc[evictionapi.SignalContainerFsAvailable] = signalToRankFunc[evictionapi.SignalImageFsAvailable]
signalToRankFunc[evictionapi.SignalContainerFsInodesFree] = signalToRankFunc[evictionapi.SignalImageFsInodesFree]
// If both imagefs and container fs are on separate disks
// we want to track the writeable layer in containerfs signals.
} else if withImageFs && imageContainerSplitFs {
// with an imagefs, nodefs pod rank func for eviction only includes logs and local volumes
signalToRankFunc[evictionapi.SignalNodeFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource, fsStatsRoot}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalNodeFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource, fsStatsRoot}, resourceInodes)
signalToRankFunc[evictionapi.SignalContainerFsAvailable] = signalToRankFunc[evictionapi.SignalNodeFsAvailable]
signalToRankFunc[evictionapi.SignalContainerFsInodesFree] = signalToRankFunc[evictionapi.SignalNodeFsInodesFree]
// with an imagefs, containerfs pod rank func for eviction only includes rootfs
signalToRankFunc[evictionapi.SignalImageFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsImages}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalImageFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsImages}, resourceInodes)
// Otherwise the image filesystem is not on a disk separate from the node root filesystem
} else {
// without an imagefs, nodefs pod rank func for eviction looks at all fs stats.
// since imagefs and nodefs share a common device, they share common ranking functions.
signalToRankFunc[evictionapi.SignalNodeFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsImages, fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalNodeFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsImages, fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, resourceInodes)
signalToRankFunc[evictionapi.SignalImageFsAvailable] = rankDiskPressureFunc([]fsStatsType{fsStatsImages, fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, v1.ResourceEphemeralStorage)
signalToRankFunc[evictionapi.SignalImageFsInodesFree] = rankDiskPressureFunc([]fsStatsType{fsStatsImages, fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, resourceInodes)
signalToRankFunc[evictionapi.SignalContainerFsAvailable] = signalToRankFunc[evictionapi.SignalNodeFsAvailable]
signalToRankFunc[evictionapi.SignalContainerFsInodesFree] = signalToRankFunc[evictionapi.SignalNodeFsInodesFree]
}
return signalToRankFunc
}
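// Summary of the mapping above, derived from the code: with a dedicated
// imagefs (no split), nodefs signals measure logs and local volumes while
// imagefs/containerfs signals measure rootfs and images; with split image and
// container filesystems, nodefs/containerfs signals measure logs, local
// volumes and rootfs while imagefs signals measure only images; without a
// dedicated imagefs, every disk signal measures all filesystem stats.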
// PodIsEvicted returns true if the reported pod status is due to an eviction.
func PodIsEvicted(podStatus v1.PodStatus) bool {
return podStatus.Phase == v1.PodFailed && podStatus.Reason == Reason
}
// buildSignalToNodeReclaimFuncs returns reclaim functions associated with resources.
func buildSignalToNodeReclaimFuncs(imageGC ImageGC, containerGC ContainerGC, withImageFs bool, splitContainerImageFs bool) map[evictionapi.Signal]nodeReclaimFuncs {
signalToReclaimFunc := map[evictionapi.Signal]nodeReclaimFuncs{}
// usage of an imagefs is optional
if withImageFs && !splitContainerImageFs {
// with an imagefs, nodefs pressure should just delete logs
signalToReclaimFunc[evictionapi.SignalNodeFsAvailable] = nodeReclaimFuncs{}
signalToReclaimFunc[evictionapi.SignalNodeFsInodesFree] = nodeReclaimFuncs{}
// with an imagefs, imagefs pressure should delete unused images
signalToReclaimFunc[evictionapi.SignalImageFsAvailable] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalImageFsInodesFree] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalContainerFsAvailable] = signalToReclaimFunc[evictionapi.SignalImageFsAvailable]
signalToReclaimFunc[evictionapi.SignalContainerFsInodesFree] = signalToReclaimFunc[evictionapi.SignalImageFsInodesFree]
// usage of imagefs and container fs on separate disks
// containers gc on containerfs pressure
// image gc on imagefs pressure
} else if withImageFs && splitContainerImageFs {
// with an imagefs, imagefs pressure should delete unused images
signalToReclaimFunc[evictionapi.SignalImageFsAvailable] = nodeReclaimFuncs{imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalImageFsInodesFree] = nodeReclaimFuncs{imageGC.DeleteUnusedImages}
// with a split fs and imagefs, containerfs pressure should delete unused containers
signalToReclaimFunc[evictionapi.SignalNodeFsAvailable] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers}
signalToReclaimFunc[evictionapi.SignalNodeFsInodesFree] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers}
signalToReclaimFunc[evictionapi.SignalContainerFsAvailable] = signalToReclaimFunc[evictionapi.SignalNodeFsAvailable]
signalToReclaimFunc[evictionapi.SignalContainerFsInodesFree] = signalToReclaimFunc[evictionapi.SignalNodeFsInodesFree]
} else {
// without an imagefs, nodefs pressure should delete logs, and unused images
// since imagefs, containerfs and nodefs share a common device, they share common reclaim functions
signalToReclaimFunc[evictionapi.SignalNodeFsAvailable] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalNodeFsInodesFree] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalImageFsAvailable] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalImageFsInodesFree] = nodeReclaimFuncs{containerGC.DeleteAllUnusedContainers, imageGC.DeleteUnusedImages}
signalToReclaimFunc[evictionapi.SignalContainerFsAvailable] = signalToReclaimFunc[evictionapi.SignalNodeFsAvailable]
signalToReclaimFunc[evictionapi.SignalContainerFsInodesFree] = signalToReclaimFunc[evictionapi.SignalNodeFsInodesFree]
}
return signalToReclaimFunc
}
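// Summary of the mapping above, derived from the code: with a dedicated
// imagefs, nodefs pressure has no node-level reclaim while imagefs and
// containerfs pressure garbage-collect containers and then images; with split
// image and container filesystems, imagefs pressure reclaims only images and
// nodefs/containerfs pressure reclaims only containers; on a shared disk,
// every signal garbage-collects containers and then images.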
// evictionMessage constructs a useful message about why an eviction occurred, and annotations to provide metadata about the eviction
func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats statsFunc, thresholds []evictionapi.Threshold, observations signalObservations) (message string, annotations map[string]string) {
annotations = make(map[string]string)
message = fmt.Sprintf(nodeLowMessageFmt, resourceToReclaim)
quantity, available := getThresholdMetInfo(resourceToReclaim, thresholds, observations)
if quantity != nil && available != nil {
message += fmt.Sprintf(thresholdMetMessageFmt, quantity, available)
}
exceededContainers := []string{}
containerUsage := []string{}
podStats, ok := stats(pod)
if !ok {
return
}
// Pod-level resources will be included in the eviction message, along with container resources
if resourceToReclaim == v1.ResourceMemory && utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
if podRequest, ok := pod.Spec.Resources.Requests[resourceToReclaim]; ok && podStats.Memory != nil {
podUsage := memoryUsage(podStats.Memory)
message += fmt.Sprintf(podMessageFmt, pod.Name, podUsage.String(), podRequest.String(), resourceToReclaim)
annotations[OffendingPodKey] = pod.Name
annotations[OffendingPodUsageKey] = podUsage.String()
}
}
// Since the resources field cannot be specified for ephemeral containers,
// they will always be blamed for resource overuse when an eviction occurs.
// That’s why only regular, init and restartable init containers are considered
// for the eviction message.
containers := pod.Spec.Containers
if len(pod.Spec.InitContainers) != 0 {
containers = append(containers, pod.Spec.InitContainers...)
}
for _, containerStats := range podStats.Containers {
for _, container := range containers {
if container.Name == containerStats.Name {
requests := container.Resources.Requests[resourceToReclaim]
var usage *resource.Quantity
switch resourceToReclaim {
case v1.ResourceEphemeralStorage:
if containerStats.Rootfs != nil && containerStats.Rootfs.UsedBytes != nil && containerStats.Logs != nil && containerStats.Logs.UsedBytes != nil {
usage = resource.NewQuantity(int64(*containerStats.Rootfs.UsedBytes+*containerStats.Logs.UsedBytes), resource.BinarySI)
}
case v1.ResourceMemory:
if containerStats.Memory != nil && containerStats.Memory.WorkingSetBytes != nil {
usage = resource.NewQuantity(int64(*containerStats.Memory.WorkingSetBytes), resource.BinarySI)
}
}
if usage != nil && usage.Cmp(requests) > 0 {
message += fmt.Sprintf(containerMessageFmt, container.Name, usage.String(), requests.String(), resourceToReclaim)
exceededContainers = append(exceededContainers, container.Name)
containerUsage = append(containerUsage, usage.String())
}
// Found the container to compare resource usage with,
// so it's safe to break out of the containers loop here.
break
}
}
}
annotations[OffendingContainersKey] = strings.Join(exceededContainers, ",")
annotations[OffendingContainersUsageKey] = strings.Join(containerUsage, ",")
annotations[StarvedResourceKey] = string(resourceToReclaim)
return
}
// getThresholdMetInfo gets the threshold quantity and available amount for the resource resourceToReclaim
func getThresholdMetInfo(resourceToReclaim v1.ResourceName, thresholds []evictionapi.Threshold, observations signalObservations) (quantity *resource.Quantity, available *resource.Quantity) {
for i := range thresholds {
threshold := thresholds[i]
if signalToResource[threshold.Signal] == resourceToReclaim {
observed, found := observations[threshold.Signal]
if found {
quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity)
return quantity, observed.available
}
}
}
return nil, nil
}
//go:build !windows
// +build !windows
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"k8s.io/apimachinery/pkg/api/resource"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// makeMemoryAvailableSignalObservation derives the memory.available signal observation from the node summary, or returns nil if the required stats are missing.
func makeMemoryAvailableSignalObservation(summary *statsapi.Summary) *signalObservation {
if memory := summary.Node.Memory; memory != nil && memory.AvailableBytes != nil && memory.WorkingSetBytes != nil {
return &signalObservation{
available: resource.NewQuantity(int64(*memory.AvailableBytes), resource.BinarySI),
capacity: resource.NewQuantity(int64(*memory.AvailableBytes+*memory.WorkingSetBytes), resource.BinarySI),
time: memory.Time,
}
}
return nil
}
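// Worked example (assumed numbers, not from the original source): with
// AvailableBytes of 6Gi and WorkingSetBytes of 2Gi, the observation reports
// available=6Gi and capacity=8Gi, since capacity is reconstructed as
// available + working set.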
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import "time"
const (
// notifierRefreshInterval prevents constantly updating the memcg notifier
// if synchronize is run frequently.
notifierRefreshInterval = 10 * time.Second
)
// CgroupNotifierFactory knows how to make CgroupNotifiers which integrate with the kernel
type CgroupNotifierFactory struct{}
var _ NotifierFactory = &CgroupNotifierFactory{}
// NewCgroupNotifier implements the NotifierFactory interface
func (n *CgroupNotifierFactory) NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) {
return NewCgroupNotifier(path, attribute, threshold)
}
//go:build !windows
// +build !windows
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)
const (
memoryUsageAttribute = "memory.usage_in_bytes"
)
type linuxMemoryThresholdNotifier struct {
threshold evictionapi.Threshold
cgroupPath string
events chan struct{}
factory NotifierFactory
handler func(string)
notifier CgroupNotifier
}
var _ ThresholdNotifier = &linuxMemoryThresholdNotifier{}
// NewMemoryThresholdNotifier creates a ThresholdNotifier which is designed to respond to the given threshold.
// UpdateThreshold must be called once before the threshold will be active.
func NewMemoryThresholdNotifier(threshold evictionapi.Threshold, cgroupRoot string, factory NotifierFactory, handler func(string)) (ThresholdNotifier, error) {
cgroups, err := cm.GetCgroupSubsystems()
if err != nil {
return nil, err
}
cgpath, found := cgroups.MountPoints["memory"]
if !found || len(cgpath) == 0 {
return nil, fmt.Errorf("memory cgroup mount point not found")
}
if isAllocatableEvictionThreshold(threshold) {
// for allocatable thresholds, point the cgroup notifier at the allocatable cgroup
cgpath += cgroupRoot
}
return &linuxMemoryThresholdNotifier{
threshold: threshold,
cgroupPath: cgpath,
events: make(chan struct{}),
handler: handler,
factory: factory,
}, nil
}
// UpdateThreshold recomputes the memcg usage threshold from the latest summary and (re)starts the underlying cgroup notifier.
func (m *linuxMemoryThresholdNotifier) UpdateThreshold(summary *statsapi.Summary) error {
memoryStats := summary.Node.Memory
if isAllocatableEvictionThreshold(m.threshold) {
allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods)
if err != nil {
return err
}
memoryStats = allocatableContainer.Memory
}
if memoryStats == nil || memoryStats.UsageBytes == nil || memoryStats.WorkingSetBytes == nil || memoryStats.AvailableBytes == nil {
return fmt.Errorf("summary was incomplete. Expected MemoryStats and all subfields to be non-nil, but got %+v", memoryStats)
}
// Set threshold on usage to capacity - eviction_hard + inactive_file,
// since we want to be notified when working_set = capacity - eviction_hard
inactiveFile := resource.NewQuantity(int64(*memoryStats.UsageBytes-*memoryStats.WorkingSetBytes), resource.BinarySI)
capacity := resource.NewQuantity(int64(*memoryStats.AvailableBytes+*memoryStats.WorkingSetBytes), resource.BinarySI)
evictionThresholdQuantity := evictionapi.GetThresholdQuantity(m.threshold.Value, capacity)
memcgThreshold := capacity.DeepCopy()
memcgThreshold.Sub(*evictionThresholdQuantity)
memcgThreshold.Add(*inactiveFile)
klog.V(3).InfoS("Eviction manager: setting notifier to capacity", "notifier", m.Description(), "capacity", memcgThreshold.String())
if m.notifier != nil {
m.notifier.Stop()
}
newNotifier, err := m.factory.NewCgroupNotifier(m.cgroupPath, memoryUsageAttribute, memcgThreshold.Value())
if err != nil {
return err
}
m.notifier = newNotifier
go m.notifier.Start(m.events)
return nil
}
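// Worked example (assumed numbers, not from the original source): with
// UsageBytes=4.5Gi, WorkingSetBytes=4Gi and AvailableBytes=4Gi, inactive_file
// is 0.5Gi and capacity is 8Gi. For a memory.available<1Gi threshold, the
// memcg usage threshold is set to 8Gi - 1Gi + 0.5Gi = 7.5Gi, so the notifier
// fires when the working set (usage - inactive_file) reaches roughly 7Gi,
// i.e. capacity minus the eviction threshold.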
func (m *linuxMemoryThresholdNotifier) Start() {
klog.InfoS("Eviction manager: created memoryThresholdNotifier", "notifier", m.Description())
for range m.events {
m.handler(fmt.Sprintf("eviction manager: %s crossed", m.Description()))
}
}
func (m *linuxMemoryThresholdNotifier) Description() string {
var hard, allocatable string
if isHardEvictionThreshold(m.threshold) {
hard = "hard "
} else {
hard = "soft "
}
if isAllocatableEvictionThreshold(m.threshold) {
allocatable = "allocatable "
}
return fmt.Sprintf("%s%smemory eviction threshold", hard, allocatable)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"fmt"
"sync"
"time"
libcontainercgroups "github.com/opencontainers/cgroups"
"golang.org/x/sys/unix"
"k8s.io/klog/v2"
)
const (
// eventSize is the number of bytes returned by a successful read from an eventfd
// see http://man7.org/linux/man-pages/man2/eventfd.2.html for more information
eventSize = 8
// numFdEvents is the maximum number of events EpollWait can return at once;
// any additional ready events remain pending for a subsequent call.
numFdEvents = 6
)
type linuxCgroupNotifier struct {
eventfd int
epfd int
stop chan struct{}
stopLock sync.Mutex
}
var _ CgroupNotifier = &linuxCgroupNotifier{}
// NewCgroupNotifier returns a linuxCgroupNotifier, which performs cgroup control operations required
// to receive notifications from the cgroup when the threshold is crossed in either direction.
func NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) {
// cgroupv2 does not support monitoring cgroup memory thresholds using cgroup.event_control.
// In the long term, on cgroup v2 the kubelet should instead combine memory.low on the root pods cgroup with inotify notifications on memory.events and/or PSI pressure.
// For now, let's return a fake "disabled" cgroup notifier on cgroupv2.
// https://github.com/kubernetes/kubernetes/issues/106331
if libcontainercgroups.IsCgroup2UnifiedMode() {
return &disabledThresholdNotifier{}, nil
}
var watchfd, eventfd, epfd, controlfd int
var err error
watchfd, err = unix.Open(fmt.Sprintf("%s/%s", path, attribute), unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
defer unix.Close(watchfd)
controlfd, err = unix.Open(fmt.Sprintf("%s/cgroup.event_control", path), unix.O_WRONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
}
defer unix.Close(controlfd)
eventfd, err = unix.Eventfd(0, unix.EFD_CLOEXEC)
if err != nil {
return nil, err
}
if eventfd < 0 {
err = fmt.Errorf("eventfd call failed")
return nil, err
}
defer func() {
// Close eventfd if we get an error later in initialization
if err != nil {
unix.Close(eventfd)
}
}()
epfd, err = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if err != nil {
return nil, err
}
if epfd < 0 {
err = fmt.Errorf("EpollCreate1 call failed")
return nil, err
}
defer func() {
// Close epfd if we get an error later in initialization
if err != nil {
unix.Close(epfd)
}
}()
config := fmt.Sprintf("%d %d %d", eventfd, watchfd, threshold)
_, err = unix.Write(controlfd, []byte(config))
if err != nil {
return nil, err
}
return &linuxCgroupNotifier{
eventfd: eventfd,
epfd: epfd,
stop: make(chan struct{}),
}, nil
}
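// The cgroup.event_control write above follows the kernel's documented
// "<event_fd> <fd of memory.usage_in_bytes> <threshold>" format. For example,
// with assumed fd numbers, a config of "4 5 7516192768" asks the kernel to
// signal eventfd 4 when the usage watched via fd 5 crosses 7Gi.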
func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) {
err := unix.EpollCtl(n.epfd, unix.EPOLL_CTL_ADD, n.eventfd, &unix.EpollEvent{
Fd: int32(n.eventfd),
Events: unix.EPOLLIN,
})
if err != nil {
klog.InfoS("Eviction manager: error adding epoll eventfd", "err", err)
return
}
buf := make([]byte, eventSize)
for {
select {
case <-n.stop:
return
default:
}
event, err := wait(n.epfd, n.eventfd, notifierRefreshInterval)
if err != nil {
klog.InfoS("Eviction manager: error while waiting for memcg events", "err", err)
return
} else if !event {
// Timeout on wait. This is expected if the threshold was not crossed
continue
}
// Consume the event from the eventfd
_, err = unix.Read(n.eventfd, buf)
if err != nil {
klog.InfoS("Eviction manager: error reading memcg events", "err", err)
return
}
eventCh <- struct{}{}
}
}
// wait waits up to notifierRefreshInterval for an event on the Epoll FD for the
// eventfd we are concerned about. It returns an error if one occurs, and true
// if the consumer should read from the eventfd.
func wait(epfd, eventfd int, timeout time.Duration) (bool, error) {
events := make([]unix.EpollEvent, numFdEvents+1)
timeoutMS := int(timeout / time.Millisecond)
n, err := unix.EpollWait(epfd, events, timeoutMS)
if n == -1 {
if err == unix.EINTR {
// Interrupt, ignore the error
return false, nil
}
return false, err
}
if n == 0 {
// Timeout
return false, nil
}
if n > numFdEvents {
return false, fmt.Errorf("epoll_wait returned more events than we know what to do with")
}
for _, event := range events[:n] {
if event.Fd == int32(eventfd) {
if event.Events&unix.EPOLLHUP != 0 || event.Events&unix.EPOLLERR != 0 || event.Events&unix.EPOLLIN != 0 {
// EPOLLHUP: should not happen, but if it does, treat it as a wakeup.
// EPOLLERR: If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
// EPOLLIN: There is data to read.
return true, nil
}
}
}
// An event occurred that we don't care about.
return false, nil
}
func (n *linuxCgroupNotifier) Stop() {
n.stopLock.Lock()
defer n.stopLock.Unlock()
select {
case <-n.stop:
// the linuxCgroupNotifier is already stopped
return
default:
}
unix.Close(n.eventfd)
unix.Close(n.epfd)
close(n.stop)
}
// disabledThresholdNotifier is a no-op CgroupNotifier, returned on cgroup v2 where memcg threshold notifications are unsupported.
type disabledThresholdNotifier struct{}
func (*disabledThresholdNotifier) Start(_ chan<- struct{}) {}
func (*disabledThresholdNotifier) Stop() {}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"context"
"fmt"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// throttleImagePulling wraps kubecontainer.ImageService to throttle image
// pulling based on the given QPS and burst limits. If QPS is zero, defaults
// to no throttling.
func throttleImagePulling(imageService kubecontainer.ImageService, qps float32, burst int) kubecontainer.ImageService {
if qps == 0.0 {
return imageService
}
return &throttledImageService{
ImageService: imageService,
limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst),
}
}
type throttledImageService struct {
kubecontainer.ImageService
limiter flowcontrol.RateLimiter
}
func (ts throttledImageService) PullImage(ctx context.Context, image kubecontainer.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error) {
if ts.limiter.TryAccept() {
return ts.ImageService.PullImage(ctx, image, credentials, podSandboxConfig)
}
return "", nil, fmt.Errorf("pull QPS exceeded")
}
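// Illustrative usage (a sketch, not part of the original source; the rate and
// burst values are assumptions): pulls beyond the configured rate fail fast
// with "pull QPS exceeded" instead of queueing.
//
//	throttled := throttleImagePulling(imageService, 5.0, 10) // 5 pulls/s, burst of 10
//	imageRef, _, err := throttled.PullImage(ctx, spec, nil, nil)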
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"context"
goerrors "errors"
"fmt"
"math"
"sort"
"strings"
"sync"
"time"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
)
// instrumentationScope is the OpenTelemetry instrumentation scope name
const instrumentationScope = "k8s.io/kubernetes/pkg/kubelet/images"
// When the RuntimeClassInImageCriAPI feature gate is enabled, imageRecord is
// indexed by an (imageID, runtimeHandler) tuple
const imageIndexTupleFormat = "%s,%s"
// ImageGarbageCollectedTotalReason* specify the reason why an image was garbage collected
// in the `image_garbage_collected_total` metric.
const (
ImageGarbageCollectedTotalReasonAge = "age"
ImageGarbageCollectedTotalReasonSpace = "space"
)
// PostImageGCHook allows external sources to react to GC collect events.
// `remainingImages` is a list of images that were left on the system after garbage
// collection finished.
type PostImageGCHook func(ctx context.Context, remainingImages []string, gcStart time.Time)
// StatsProvider is an interface for fetching stats used during image garbage
// collection.
type StatsProvider interface {
// ImageFsStats returns the stats of the image filesystem.
ImageFsStats(ctx context.Context) (*statsapi.FsStats, *statsapi.FsStats, error)
}
// ImageGCManager is an interface for managing the lifecycle of all images.
// Implementation is thread-safe.
type ImageGCManager interface {
// Applies the garbage collection policy. Errors include being unable to free
// enough space as per the garbage collection policy.
GarbageCollect(ctx context.Context, beganGC time.Time) error
// Start async garbage collection of images.
Start(ctx context.Context)
GetImageList() ([]container.Image, error)
// Delete all unused images.
DeleteUnusedImages(ctx context.Context) error
}
// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in
// which garbage collection will be run.
type ImageGCPolicy struct {
// Any usage above this threshold will always trigger garbage collection.
// This is the highest usage we will allow.
HighThresholdPercent int
// Any usage below this threshold will never trigger garbage collection.
// This is the lowest threshold we will try to garbage collect to.
LowThresholdPercent int
// Minimum age at which an image can be garbage collected.
MinAge time.Duration
// Maximum age after which an image can be garbage collected, regardless of disk usage.
// Currently gated by MaximumImageGCAge feature gate and Kubelet configuration.
// If 0, the feature is disabled.
MaxAge time.Duration
}
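// Illustrative policy (assumed values, not from the original source): with
// HighThresholdPercent=85 and LowThresholdPercent=80, garbage collection
// triggers once image filesystem usage reaches 85% and deletes unused images
// until usage falls back to 80%.
//
//	policy := ImageGCPolicy{HighThresholdPercent: 85, LowThresholdPercent: 80, MinAge: 2 * time.Minute}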
type realImageGCManager struct {
// Container runtime
runtime container.Runtime
// Records of images and their use. Indexed by ImageId.
// If RuntimeClassInImageCriAPI feature gate is enabled, imageRecords
// are identified by a tuple of (imageId,runtimeHandler) that is passed
// from ListImages() call. If no runtimehandler is specified in response
// to ListImages() by the container runtime, only imageID will be used as
// the index of this map.
imageRecords map[string]*imageRecord
imageRecordsLock sync.Mutex
// The image garbage collection policy in use.
policy ImageGCPolicy
// statsProvider provides stats used during image garbage collection.
statsProvider StatsProvider
// Recorder for Kubernetes events.
recorder record.EventRecorder
// Reference to this node.
nodeRef *v1.ObjectReference
// imageCache is the cache of latest image list.
imageCache imageCache
postGCHooks []PostImageGCHook
// tracer for recording spans
tracer trace.Tracer
}
// imageCache caches latest result of ListImages.
type imageCache struct {
// sync.Mutex protects the image cache.
sync.Mutex
// images is the image cache.
images []container.Image
}
// set sorts the input list and updates image cache.
// 'i' takes ownership of the list; you should not reference the list again
// after calling this function.
func (i *imageCache) set(images []container.Image) {
i.Lock()
defer i.Unlock()
// The image list needs to be sorted when it gets read and used in
// setNodeStatusImages. We sort the list on write instead of on read,
// because the image cache is more often read than written
sort.Sort(sliceutils.ByImageSize(images))
i.images = images
}
// get gets image list from image cache.
// NOTE: The caller of get() should not do mutating operations on the
// returned list that could cause data race against other readers (e.g.
// in-place sorting the returned list)
func (i *imageCache) get() []container.Image {
i.Lock()
defer i.Unlock()
return i.images
}
// Information about the images we track.
type imageRecord struct {
// runtime handler used to pull this image
runtimeHandlerUsedToPullImage string
// Time when this image was first detected.
firstDetected time.Time
// Time when we last saw this image being used.
lastUsed time.Time
// Size of the image in bytes.
size int64
// Pinned status of the image
pinned bool
}
// NewImageGCManager instantiates a new ImageGCManager object.
func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, postGCHooks []PostImageGCHook, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, tracerProvider trace.TracerProvider) (ImageGCManager, error) {
// Validate policy.
if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
}
if policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {
return nil, fmt.Errorf("invalid LowThresholdPercent %d, must be in range [0-100]", policy.LowThresholdPercent)
}
if policy.LowThresholdPercent > policy.HighThresholdPercent {
return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent)
}
tracer := tracerProvider.Tracer(instrumentationScope)
im := &realImageGCManager{
runtime: runtime,
policy: policy,
imageRecords: make(map[string]*imageRecord),
statsProvider: statsProvider,
recorder: recorder,
nodeRef: nodeRef,
postGCHooks: postGCHooks,
tracer: tracer,
}
return im, nil
}
func (im *realImageGCManager) Start(ctx context.Context) {
logger := klog.FromContext(ctx)
go wait.Until(func() {
_, err := im.detectImages(ctx, time.Now())
if err != nil {
logger.Info("Failed to monitor images", "err", err)
}
}, 5*time.Minute, wait.NeverStop)
// Start a goroutine that periodically updates the image cache.
go wait.Until(func() {
images, err := im.runtime.ListImages(ctx)
if err != nil {
logger.Info("Failed to update image list", "err", err)
} else {
im.imageCache.set(images)
}
}, 30*time.Second, wait.NeverStop)
}
// GetImageList returns the cached list of images on this node.
func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
return im.imageCache.get(), nil
}
func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.Set[string], error) {
logger := klog.FromContext(ctx)
isRuntimeClassInImageCriAPIEnabled := utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI)
imagesInUse := sets.New[string]()
images, err := im.runtime.ListImages(ctx)
if err != nil {
return imagesInUse, err
}
pods, err := im.runtime.GetPods(ctx, true)
if err != nil {
return imagesInUse, err
}
// Make a set of images in use by containers.
for _, pod := range pods {
for _, container := range pod.Containers {
if err := im.handleImageVolumes(ctx, imagesInUse, container, pod, images); err != nil {
return imagesInUse, err
}
if !isRuntimeClassInImageCriAPIEnabled {
logger.V(5).Info("Container uses image", "pod", klog.KRef(pod.Namespace, pod.Name), "containerName", container.Name, "containerImage", container.Image, "imageID", container.ImageID, "imageRef", container.ImageRef)
imagesInUse.Insert(container.ImageID)
} else {
imageKey := getImageTuple(container.ImageID, container.ImageRuntimeHandler)
logger.V(5).Info("Container uses image", "pod", klog.KRef(pod.Namespace, pod.Name), "containerName", container.Name, "containerImage", container.Image, "imageID", container.ImageID, "imageRef", container.ImageRef, "imageKey", imageKey)
imagesInUse.Insert(imageKey)
}
}
}
// Add new images and record those being used.
now := time.Now()
currentImages := sets.New[string]()
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
for _, image := range images {
imageKey := image.ID
if !isRuntimeClassInImageCriAPIEnabled {
logger.V(5).Info("Adding image ID to currentImages", "imageID", imageKey)
} else {
imageKey = getImageTuple(image.ID, image.Spec.RuntimeHandler)
logger.V(5).Info("Adding image ID with runtime class to currentImages", "imageKey", imageKey, "runtimeHandler", image.Spec.RuntimeHandler)
}
currentImages.Insert(imageKey)
// New image, set it as detected now.
if _, ok := im.imageRecords[imageKey]; !ok {
logger.V(5).Info("Image ID is new", "imageID", imageKey, "runtimeHandler", image.Spec.RuntimeHandler)
im.imageRecords[imageKey] = &imageRecord{
firstDetected: detectTime,
runtimeHandlerUsedToPullImage: image.Spec.RuntimeHandler,
}
}
// Set last used time to now if the image is being used.
if isImageUsed(imageKey, imagesInUse) {
logger.V(5).Info("Setting Image ID lastUsed", "imageID", imageKey, "lastUsed", now)
im.imageRecords[imageKey].lastUsed = now
}
logger.V(5).Info("Image ID has size", "imageID", imageKey, "size", image.Size)
im.imageRecords[imageKey].size = image.Size
logger.V(5).Info("Image ID is pinned", "imageID", imageKey, "pinned", image.Pinned)
im.imageRecords[imageKey].pinned = image.Pinned
}
// Remove old images from our records.
for image := range im.imageRecords {
if !currentImages.Has(image) {
logger.V(5).Info("Image ID is no longer present; removing from imageRecords", "imageID", image)
delete(im.imageRecords, image)
}
}
return imagesInUse, nil
}
// handleImageVolumes ensures that image volumes are considered as images in use.
func (im *realImageGCManager) handleImageVolumes(ctx context.Context, imagesInUse sets.Set[string], container *container.Container, pod *container.Pod, images []container.Image) error {
logger := klog.FromContext(ctx)
if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
return nil
}
status, err := im.runtime.GetContainerStatus(ctx, container.ID)
if err != nil {
return fmt.Errorf("get container status: %w", err)
}
for _, mount := range status.Mounts {
for _, image := range images {
if mount.Image != nil && mount.Image.Image == image.ID {
logger.V(5).Info("Container uses image as mount", "pod", klog.KRef(pod.Namespace, pod.Name), "containerName", container.Name, "imageID", image.ID)
imagesInUse.Insert(image.ID)
}
}
}
return nil
}
func (im *realImageGCManager) GarbageCollect(ctx context.Context, beganGC time.Time) error {
ctx, otelSpan := im.tracer.Start(ctx, "Images/GarbageCollect")
logger := klog.FromContext(ctx)
defer otelSpan.End()
freeTime := time.Now()
images, err := im.imagesInEvictionOrder(ctx, freeTime)
if err != nil {
return err
}
images, err = im.freeOldImages(ctx, images, freeTime, beganGC)
if err != nil {
return err
}
// Get disk usage on disk holding images.
fsStats, _, err := im.statsProvider.ImageFsStats(ctx)
if err != nil {
return err
}
var capacity, available int64
if fsStats.CapacityBytes != nil {
capacity = int64(*fsStats.CapacityBytes)
}
if fsStats.AvailableBytes != nil {
available = int64(*fsStats.AvailableBytes)
}
if available > capacity {
logger.Info("Availability is larger than capacity", "available", available, "capacity", capacity)
available = capacity
}
// Check valid capacity.
if capacity == 0 {
err := goerrors.New("invalid capacity 0 on image filesystem")
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
return err
}
// If over the max threshold, free enough to place us at the lower threshold.
usagePercent := 100 - int(available*100/capacity)
if usagePercent >= im.policy.HighThresholdPercent {
amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
logger.Info("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent)
remainingImages, freed, err := im.freeSpace(ctx, amountToFree, freeTime, images)
if err != nil {
// Failed to delete images, e.g. due to a read-only filesystem.
return err
}
im.runPostGCHooks(ctx, remainingImages, freeTime)
if freed < amountToFree {
// This usually means the disk is full for reasons other than container
// images, such as logs, volumes, or other files. However, it could also
// be due to an unusually large number or size of in-use container images.
message := fmt.Sprintf("Insufficient free disk space on the node's image filesystem (%d%% of %s used). "+
"Failed to free sufficient space by deleting unused images (freed %d bytes). "+
"Investigate disk usage, as it could be used by active images, logs, volumes, or other data.",
usagePercent, formatSize(capacity), freed)
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, "%s", message)
return fmt.Errorf("%s", message)
}
}
return nil
}
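// Worked example of the threshold math above (hypothetical numbers, assuming
// HighThresholdPercent=85 and LowThresholdPercent=80): with capacity=100GiB and
// available=10GiB, usagePercent = 100 - (10*100/100) = 90, which is over the
// high threshold, so amountToFree = 100GiB*(100-80)/100 - 10GiB = 10GiB.
// Freeing that amount brings available up to 20GiB, i.e. usage drops back down
// to the 80% low threshold.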
func (im *realImageGCManager) runPostGCHooks(ctx context.Context, remainingImages []string, gcStartTime time.Time) {
for _, h := range im.postGCHooks {
h(ctx, remainingImages, gcStartTime)
}
}
func (im *realImageGCManager) freeOldImages(ctx context.Context, images []evictionInfo, freeTime, beganGC time.Time) ([]evictionInfo, error) {
if im.policy.MaxAge == 0 {
return images, nil
}
// Wait until the MaxAge has passed since the Kubelet has started,
// or else we risk prematurely garbage collecting images.
if freeTime.Sub(beganGC) <= im.policy.MaxAge {
return images, nil
}
var deletionErrors []error
logger := klog.FromContext(ctx)
remainingImages := make([]evictionInfo, 0)
for _, image := range images {
logger.V(5).Info("Evaluating image ID for possible garbage collection based on image age", "imageID", image.id)
// Evaluate whether image is older than MaxAge.
if freeTime.Sub(image.lastUsed) > im.policy.MaxAge {
if err := im.freeImage(ctx, image, ImageGarbageCollectedTotalReasonAge); err != nil {
deletionErrors = append(deletionErrors, err)
remainingImages = append(remainingImages, image)
continue
}
continue
}
remainingImages = append(remainingImages, image)
}
if len(deletionErrors) > 0 {
return remainingImages, fmt.Errorf("wanted to free images older than %v, encountered errors in image deletion: %v", im.policy.MaxAge, errors.NewAggregate(deletionErrors))
}
return remainingImages, nil
}
func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error {
logger := klog.FromContext(ctx)
logger.Info("Attempting to delete unused images")
freeTime := time.Now()
images, err := im.imagesInEvictionOrder(ctx, freeTime)
if err != nil {
return err
}
remainingImages, _, err := im.freeSpace(ctx, math.MaxInt64, freeTime, images)
if err != nil {
return err
}
im.runPostGCHooks(ctx, remainingImages, freeTime)
return nil
}
// freeSpace tries to free bytesToFree worth of images on the disk.
//
// It returns the images that remain after the cleanup, the number of bytes freed,
// and an error if one occurred. The number of bytes freed is always returned.
// Note that the error may be nil even when the number of bytes freed is less
// than bytesToFree.
func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time, images []evictionInfo) ([]string, int64, error) {
// Delete unused images until we've freed up enough space.
var deletionErrors []error
logger := klog.FromContext(ctx)
spaceFreed := int64(0)
var imagesLeft []string
for _, image := range images {
logger.V(5).Info("Evaluating image ID for possible garbage collection based on disk usage", "imageID", image.id, "runtimeHandler", image.runtimeHandlerUsedToPullImage)
// Images that are currently in use were given a newer lastUsed.
if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {
imagesLeft = append(imagesLeft, image.id)
logger.V(5).Info("Image ID was used too recently, not eligible for garbage collection", "imageID", image.id, "lastUsed", image.lastUsed, "freeTime", freeTime)
continue
}
// Avoid garbage collecting the image if it is not old enough.
// In such a case, the image may have just been pulled down and will be used by a container right away.
if freeTime.Sub(image.firstDetected) < im.policy.MinAge {
imagesLeft = append(imagesLeft, image.id)
logger.V(5).Info("Image ID's age is less than the policy's minAge, not eligible for garbage collection", "imageID", image.id, "age", freeTime.Sub(image.firstDetected), "minAge", im.policy.MinAge)
continue
}
if err := im.freeImage(ctx, image, ImageGarbageCollectedTotalReasonSpace); err != nil {
deletionErrors = append(deletionErrors, err)
imagesLeft = append(imagesLeft, image.id)
continue
}
spaceFreed += image.size
if spaceFreed >= bytesToFree {
break
}
}
if len(deletionErrors) > 0 {
return nil, spaceFreed, fmt.Errorf("wanted to free %d bytes, but freed %d bytes space with errors in image deletion: %w", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))
}
return imagesLeft, spaceFreed, nil
}
func (im *realImageGCManager) freeImage(ctx context.Context, image evictionInfo, reason string) error {
isRuntimeClassInImageCriAPIEnabled := utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI)
// Remove image. Continue despite errors.
var err error
logger := klog.FromContext(ctx)
logger.Info("Removing image to free bytes", "imageID", image.id, "size", image.size, "runtimeHandler", image.runtimeHandlerUsedToPullImage)
err = im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id, RuntimeHandler: image.runtimeHandlerUsedToPullImage})
if err != nil {
return err
}
imageKey := image.id
if isRuntimeClassInImageCriAPIEnabled {
imageKey = getImageTuple(image.id, image.runtimeHandlerUsedToPullImage)
}
im.imageRecordsLock.Lock()
delete(im.imageRecords, imageKey)
im.imageRecordsLock.Unlock()
metrics.ImageGarbageCollectedTotal.WithLabelValues(reason).Inc()
return err
}
// imagesInEvictionOrder queries all of the image records and arranges them in a slice of evictionInfo, sorted by last used time, ignoring images pinned by the runtime.
func (im *realImageGCManager) imagesInEvictionOrder(ctx context.Context, freeTime time.Time) ([]evictionInfo, error) {
isRuntimeClassInImageCriAPIEnabled := utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI)
imagesInUse, err := im.detectImages(ctx, freeTime)
if err != nil {
return nil, err
}
im.imageRecordsLock.Lock()
defer im.imageRecordsLock.Unlock()
logger := klog.FromContext(ctx)
// Get all images in eviction order.
images := make([]evictionInfo, 0, len(im.imageRecords))
for image, record := range im.imageRecords {
if isImageUsed(image, imagesInUse) {
logger.V(5).Info("Image ID is being used", "imageID", image)
continue
}
// Skip pinned images to prevent their garbage collection.
if record.pinned {
logger.V(5).Info("Image is pinned, skipping garbage collection", "imageID", image)
continue
}
if !isRuntimeClassInImageCriAPIEnabled {
images = append(images, evictionInfo{
id: image,
imageRecord: *record,
})
} else {
imageID := getImageIDFromTuple(image)
// Ensure imageID is valid; otherwise skip this record.
if imageID == "" {
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, "InvalidImageID", "Image ID is not valid, skipping, ImageID: %v", imageID)
continue
}
images = append(images, evictionInfo{
id: imageID,
imageRecord: *record,
})
}
}
sort.Sort(byLastUsedAndDetected(images))
return images, nil
}
// formatSize returns a human-readable string for a given size in bytes.
func formatSize(sizeBytes int64) string {
const (
KiB = 1024
MiB = 1024 * KiB
GiB = 1024 * MiB
TiB = 1024 * GiB
)
size := float64(sizeBytes)
switch {
case size < KiB:
return fmt.Sprintf("%d B", int64(size))
case size < MiB:
return fmt.Sprintf("%.1f KiB", size/KiB)
case size < GiB:
return fmt.Sprintf("%.1f MiB", size/MiB)
case size < TiB:
return fmt.Sprintf("%.1f GiB", size/GiB)
default:
return fmt.Sprintf("%.1f TiB", size/TiB)
}
}
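// Usage sketch for formatSize (values chosen for illustration):
//
//	formatSize(512)                    // "512 B"
//	formatSize(2 * 1024)               // "2.0 KiB"
//	formatSize(3 * 1024 * 1024)        // "3.0 MiB"
//	formatSize(5 * 1024 * 1024 * 1024) // "5.0 GiB"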
// If the RuntimeClassInImageCriAPI feature gate is enabled, imageRecords
// are identified by a tuple of (imageID, runtimeHandler) that is passed
// from the ListImages() call. If no runtimeHandler is specified in the
// response to ListImages() by the container runtime, only the imageID will
// be returned.
func getImageTuple(imageID, runtimeHandler string) string {
if runtimeHandler == "" {
return imageID
}
return fmt.Sprintf(imageIndexTupleFormat, imageID, runtimeHandler)
}
// getImageIDFromTuple extracts the imageID from an image tuple.
func getImageIDFromTuple(image string) string {
imageTuples := strings.Split(image, ",")
return imageTuples[0]
}
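// Illustration of the tuple helpers, assuming imageIndexTupleFormat joins the
// two parts with a comma, which is what getImageIDFromTuple's split on ","
// implies:
//
//	getImageTuple("sha256:abc", "")          // "sha256:abc"
//	getImageTuple("sha256:abc", "gvisor")    // "sha256:abc,gvisor"
//	getImageIDFromTuple("sha256:abc,gvisor") // "sha256:abc"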
type evictionInfo struct {
id string
imageRecord
}
type byLastUsedAndDetected []evictionInfo
func (ev byLastUsedAndDetected) Len() int { return len(ev) }
func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }
func (ev byLastUsedAndDetected) Less(i, j int) bool {
// Sort by last used, break ties by detected.
if ev[i].lastUsed.Equal(ev[j].lastUsed) {
return ev[i].firstDetected.Before(ev[j].firstDetected)
}
return ev[i].lastUsed.Before(ev[j].lastUsed)
}
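// Ordering sketch (hypothetical values; only usable within this package since
// the fields are unexported): with t0 some base time,
//
//	a := evictionInfo{id: "a", imageRecord: imageRecord{lastUsed: t0}}
//	b := evictionInfo{id: "b", imageRecord: imageRecord{lastUsed: t0.Add(time.Minute)}}
//	sort.Sort(byLastUsedAndDetected([]evictionInfo{b, a})) // order: a, b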
func isImageUsed(imageID string, imagesInUse sets.Set[string]) bool {
// Check whether the image ID is in the set of images currently in use.
return imagesInUse.Has(imageID)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"context"
"fmt"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog/v2"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
crierrors "k8s.io/cri-api/pkg/errors"
"k8s.io/kubernetes/pkg/credentialprovider"
credentialproviderplugin "k8s.io/kubernetes/pkg/credentialprovider/plugin"
credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets"
"k8s.io/kubernetes/pkg/features"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images/pullmanager"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/util/parsers"
)
type ImagePodPullingTimeRecorder interface {
RecordImageStartedPulling(podUID types.UID)
RecordImageFinishedPulling(podUID types.UID)
}
// imageManager provides the functionalities for image pulling.
type imageManager struct {
recorder record.EventRecorder
imageService kubecontainer.ImageService
imagePullManager pullmanager.ImagePullManager
backOff *flowcontrol.Backoff
prevPullErrMsg sync.Map
// It will check the presence of the image, and report the 'image pulling' and 'image pulled' events correspondingly.
puller imagePuller
nodeKeyring credentialprovider.DockerKeyring
podPullingTimeRecorder ImagePodPullingTimeRecorder
}
var _ ImageManager = &imageManager{}
// NewImageManager instantiates a new ImageManager object.
func NewImageManager(
recorder record.EventRecorder,
nodeKeyring credentialprovider.DockerKeyring,
imageService kubecontainer.ImageService,
imagePullManager pullmanager.ImagePullManager,
imageBackOff *flowcontrol.Backoff,
serialized bool,
maxParallelImagePulls *int32,
qps float32,
burst int,
podPullingTimeRecorder ImagePodPullingTimeRecorder,
) ImageManager {
imageService = throttleImagePulling(imageService, qps, burst)
var puller imagePuller
if serialized {
puller = newSerialImagePuller(imageService)
} else {
puller = newParallelImagePuller(imageService, maxParallelImagePulls)
}
return &imageManager{
recorder: recorder,
imageService: imageService,
imagePullManager: imagePullManager,
nodeKeyring: nodeKeyring,
backOff: imageBackOff,
puller: puller,
podPullingTimeRecorder: podPullingTimeRecorder,
}
}
// imagePullPrecheck inspects the pull policy and checks for image presence accordingly,
// returning (imageRef, error message, err) and logging any errors.
func (m *imageManager) imagePullPrecheck(ctx context.Context, objRef *v1.ObjectReference, logPrefix string, pullPolicy v1.PullPolicy, spec *kubecontainer.ImageSpec, requestedImage string) (imageRef string, msg string, err error) {
switch pullPolicy {
case v1.PullAlways:
return "", msg, nil
case v1.PullIfNotPresent, v1.PullNever:
imageRef, err = m.imageService.GetImageRef(ctx, *spec)
if err != nil {
msg = fmt.Sprintf("Failed to inspect image %q: %v", imageRef, err)
m.logIt(objRef, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
return "", msg, ErrImageInspect
}
}
if len(imageRef) == 0 && pullPolicy == v1.PullNever {
msg, err = m.imageNotPresentOnNeverPolicyError(logPrefix, objRef, requestedImage)
return "", msg, err
}
return imageRef, msg, nil
}
// logIt records an event using objRef and msg; if objRef is nil, it logs the message via logFn using prefix and msg.
func (m *imageManager) logIt(objRef *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if objRef != nil {
m.recorder.Event(objRef, eventtype, event, msg)
} else {
logFn(fmt.Sprint(prefix, " ", msg))
}
}
// imageNotPresentOnNeverPolicyError is a utility function that emits an event about
// an image not being present and returns the appropriate error to be passed on.
//
// Called in 2 scenarios:
// 1. the image is not present with `imagePullPolicy: Never`
// 2. the image is present but cannot be accessed with the presented set of credentials
//
// We don't want to reveal the presence of an image if it cannot be accessed, hence we
// want the same behavior in both of the above scenarios.
func (m *imageManager) imageNotPresentOnNeverPolicyError(logPrefix string, objRef *v1.ObjectReference, requestedImage string) (string, error) {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", requestedImage)
m.logIt(objRef, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, klog.Warning)
return msg, ErrImageNeverPull
}
// EnsureImageExists pulls the image for the specified pod and requestedImage, and returns
// (imageRef, error message, error).
func (m *imageManager) EnsureImageExists(ctx context.Context, objRef *v1.ObjectReference, pod *v1.Pod, requestedImage string, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig, podRuntimeHandler string, pullPolicy v1.PullPolicy) (imageRef, message string, err error) {
logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, requestedImage)
// If the image contains no tag or digest, a default tag should be applied.
image, err := applyDefaultImageTag(requestedImage)
if err != nil {
msg := fmt.Sprintf("Failed to apply default image tag %q: %v", requestedImage, err)
m.logIt(objRef, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
return "", msg, ErrInvalidImageName
}
var podAnnotations []kubecontainer.Annotation
for k, v := range pod.GetAnnotations() {
podAnnotations = append(podAnnotations, kubecontainer.Annotation{
Name: k,
Value: v,
})
}
spec := kubecontainer.ImageSpec{
Image: image,
Annotations: podAnnotations,
RuntimeHandler: podRuntimeHandler,
}
imageRef, message, err = m.imagePullPrecheck(ctx, objRef, logPrefix, pullPolicy, &spec, requestedImage)
if err != nil {
return "", message, err
}
if imageRef != "" && !utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
msg := fmt.Sprintf("Container image %q already present on machine", requestedImage)
m.logIt(objRef, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info)
return imageRef, msg, nil
}
repoToPull, _, _, err := parsers.ParseImageName(spec.Image)
if err != nil {
return "", err.Error(), err
}
// construct the dynamic keyring using the providers we have in the kubelet
var podName, podNamespace, podUID string
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) {
sandboxMetadata := podSandboxConfig.GetMetadata()
podName = sandboxMetadata.Name
podNamespace = sandboxMetadata.Namespace
podUID = sandboxMetadata.Uid
}
externalCredentialProviderKeyring := credentialproviderplugin.NewExternalCredentialProviderDockerKeyring(
podNamespace,
podName,
podUID,
pod.Spec.ServiceAccountName)
keyring, err := credentialprovidersecrets.MakeDockerKeyring(pullSecrets, credentialprovider.UnionDockerKeyring{m.nodeKeyring, externalCredentialProviderKeyring})
if err != nil {
return "", err.Error(), err
}
pullCredentials, _ := keyring.Lookup(repoToPull)
if imageRef != "" {
var imagePullSecrets []kubeletconfiginternal.ImagePullSecret
// we don't take the audience of the service account into account, so there can only
// be one imagePullServiceAccount per pod when we try to make a decision.
var imagePullServiceAccount *kubeletconfiginternal.ImagePullServiceAccount
for _, s := range pullCredentials {
if s.Source == nil {
// we're only interested in creds that are not node accessible
continue
}
switch {
case s.Source.Secret != nil:
imagePullSecrets = append(imagePullSecrets, kubeletconfiginternal.ImagePullSecret{
UID: s.Source.Secret.UID,
Name: s.Source.Secret.Name,
Namespace: s.Source.Secret.Namespace,
CredentialHash: s.AuthConfigHash,
})
case s.Source.ServiceAccount != nil && imagePullServiceAccount == nil:
imagePullServiceAccount = &kubeletconfiginternal.ImagePullServiceAccount{
UID: s.Source.ServiceAccount.UID,
Name: s.Source.ServiceAccount.Name,
Namespace: s.Source.ServiceAccount.Namespace,
}
}
}
pullRequired := m.imagePullManager.MustAttemptImagePull(ctx, requestedImage, imageRef, imagePullSecrets, imagePullServiceAccount)
if !pullRequired {
msg := fmt.Sprintf("Container image %q already present on machine and can be accessed by the pod", requestedImage)
m.logIt(objRef, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info)
return imageRef, msg, nil
}
}
if pullPolicy == v1.PullNever {
// The image is present as confirmed by imagePullPrecheck but it apparently
// wasn't accessible given the credentials check by the imagePullManager.
msg, err := m.imageNotPresentOnNeverPolicyError(logPrefix, objRef, requestedImage)
return "", msg, err
}
return m.pullImage(ctx, logPrefix, objRef, pod.UID, requestedImage, spec, pullCredentials, podSandboxConfig)
}
func (m *imageManager) pullImage(ctx context.Context, logPrefix string, objRef *v1.ObjectReference, podUID types.UID, image string, imgSpec kubecontainer.ImageSpec, pullCredentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (imageRef, message string, err error) {
var pullSucceeded bool
var finalPullCredentials *credentialprovider.TrackedAuthConfig
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
if err := m.imagePullManager.RecordPullIntent(image); err != nil {
return "", fmt.Sprintf("Failed to record image pull intent for container image %q: %v", image, err), err
}
defer func() {
if pullSucceeded {
m.imagePullManager.RecordImagePulled(ctx, image, imageRef, trackedToImagePullCreds(finalPullCredentials))
} else {
m.imagePullManager.RecordImagePullFailed(ctx, image)
}
}()
}
backOffKey := fmt.Sprintf("%s_%s", podUID, image)
if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", image)
m.logIt(objRef, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, klog.Info)
// Wrap the error from the actual pull if available.
// This information is propagated to the pod's
// .status.containerStatuses[*].state.waiting.message.
prevPullErrMsg, ok := m.prevPullErrMsg.Load(backOffKey)
if ok {
msg = fmt.Sprintf("%s: %s", msg, prevPullErrMsg)
}
return "", msg, ErrImagePullBackOff
}
// Ensure that the map cannot grow indefinitely.
m.prevPullErrMsg.Delete(backOffKey)
m.podPullingTimeRecorder.RecordImageStartedPulling(podUID)
m.logIt(objRef, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", image), klog.Info)
startTime := time.Now()
pullChan := make(chan pullResult)
m.puller.pullImage(ctx, imgSpec, pullCredentials, pullChan, podSandboxConfig)
imagePullResult := <-pullChan
if imagePullResult.err != nil {
m.logIt(objRef, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", image, imagePullResult.err), klog.Warning)
m.backOff.Next(backOffKey, m.backOff.Clock.Now())
msg, err := evalCRIPullErr(image, imagePullResult.err)
// Store the actual pull error for providing that information during
// the image pull back-off.
m.prevPullErrMsg.Store(backOffKey, fmt.Sprintf("%s: %s", err, msg))
return "", msg, err
}
m.podPullingTimeRecorder.RecordImageFinishedPulling(podUID)
imagePullDuration := time.Since(startTime).Truncate(time.Millisecond)
m.logIt(objRef, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q in %v (%v including waiting). Image size: %v bytes.",
image, imagePullResult.pullDuration.Truncate(time.Millisecond), imagePullDuration, imagePullResult.imageSize), klog.Info)
metrics.ImagePullDuration.WithLabelValues(metrics.GetImageSizeBucket(imagePullResult.imageSize)).Observe(imagePullDuration.Seconds())
m.backOff.GC()
finalPullCredentials = imagePullResult.credentialsUsed
pullSucceeded = true
return imagePullResult.imageRef, "", nil
}
func evalCRIPullErr(imgRef string, err error) (errMsg string, errRes error) {
// Error assertions via errors.Is are not supported by gRPC (remote runtime) errors right now.
// See https://github.com/grpc/grpc-go/issues/3616
if strings.HasPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()) {
errMsg = fmt.Sprintf(
"image pull failed for %s because the registry is unavailable%s",
imgRef,
// Trim the error name from the message to convert errors like:
// "RegistryUnavailable: a more detailed explanation" to:
// "...because the registry is unavailable: a more detailed explanation"
strings.TrimPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()),
)
return errMsg, crierrors.ErrRegistryUnavailable
}
if strings.HasPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()) {
errMsg = fmt.Sprintf(
"image pull failed for %s because the signature validation failed%s",
imgRef,
// Trim the error name from the message to convert errors like:
// "SignatureValidationFailed: a more detailed explanation" to:
// "...because the signature validation failed: a more detailed explanation"
strings.TrimPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()),
)
return errMsg, crierrors.ErrSignatureValidationFailed
}
// Fallback for no specific error
return err.Error(), ErrImagePull
}
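// Illustration of the mapping above (hypothetical CRI error text; the sentinel
// prefix "RegistryUnavailable" follows from the trimming logic above):
//
//	msg, res := evalCRIPullErr("busybox", errors.New("RegistryUnavailable: connection refused"))
//	// msg: "image pull failed for busybox because the registry is unavailable: connection refused"
//	// res: crierrors.ErrRegistryUnavailable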
// applyDefaultImageTag parses a docker image string, if it doesn't contain any tag or digest,
// a default tag will be applied.
func applyDefaultImageTag(image string) (string, error) {
_, tag, digest, err := parsers.ParseImageName(image)
if err != nil {
return "", err
}
if len(digest) == 0 && len(tag) > 0 && !strings.HasSuffix(image, ":"+tag) {
// we just concatenate the image name with the default tag here instead
// of using dockerref.WithTag(named, ...) because that would cause the
// image to be fully qualified as docker.io/$name if it's a short name
// (e.g. just busybox). We don't want that to happen to keep the CRI
// agnostic wrt image names and default hostnames.
image = image + ":" + tag
}
return image, nil
}
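// Illustration (a sketch assuming parsers.ParseImageName normalizes an
// untagged image to the "latest" tag, which the suffix check above relies on):
//
//	applyDefaultImageTag("busybox")      // "busybox:latest"
//	applyDefaultImageTag("busybox:1.36") // "busybox:1.36", unchanged
//
// Images pinned by digest are returned unchanged, since digest is non-empty.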
func trackedToImagePullCreds(trackedCreds *credentialprovider.TrackedAuthConfig) *kubeletconfiginternal.ImagePullCredentials {
ret := &kubeletconfiginternal.ImagePullCredentials{}
switch {
case trackedCreds == nil, trackedCreds.Source == nil:
ret.NodePodsAccessible = true
case trackedCreds.Source.Secret != nil:
sourceSecret := trackedCreds.Source.Secret
ret.KubernetesSecrets = []kubeletconfiginternal.ImagePullSecret{
{
UID: sourceSecret.UID,
Name: sourceSecret.Name,
Namespace: sourceSecret.Namespace,
CredentialHash: trackedCreds.AuthConfigHash,
},
}
case trackedCreds.Source.ServiceAccount != nil:
sourceServiceAccount := trackedCreds.Source.ServiceAccount
ret.KubernetesServiceAccounts = []kubeletconfiginternal.ImagePullServiceAccount{
{
UID: sourceServiceAccount.UID,
Name: sourceServiceAccount.Name,
Namespace: sourceServiceAccount.Namespace,
},
}
}
return ret
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type pullResult struct {
imageRef string
imageSize uint64
err error
pullDuration time.Duration
credentialsUsed *credentialprovider.TrackedAuthConfig
}
type imagePuller interface {
pullImage(context.Context, kubecontainer.ImageSpec, []credentialprovider.TrackedAuthConfig, chan<- pullResult, *runtimeapi.PodSandboxConfig)
}
var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}
type parallelImagePuller struct {
imageService kubecontainer.ImageService
tokens chan struct{}
}
func newParallelImagePuller(imageService kubecontainer.ImageService, maxParallelImagePulls *int32) imagePuller {
if maxParallelImagePulls == nil || *maxParallelImagePulls < 1 {
return &parallelImagePuller{imageService, nil}
}
return &parallelImagePuller{imageService, make(chan struct{}, *maxParallelImagePulls)}
}
func (pip *parallelImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
go func() {
if pip.tokens != nil {
pip.tokens <- struct{}{}
defer func() { <-pip.tokens }()
}
startTime := time.Now()
imageRef, creds, err := pip.imageService.PullImage(ctx, spec, credentials, podSandboxConfig)
var size uint64
if err == nil && imageRef != "" {
// Getting the image size with best effort, ignoring the error.
size, _ = pip.imageService.GetImageSize(ctx, spec)
}
pullChan <- pullResult{
imageRef: imageRef,
imageSize: size,
err: err,
pullDuration: time.Since(startTime),
credentialsUsed: creds,
}
}()
}
// Maximum number of image pull requests that can be queued.
const maxImagePullRequests = 10
type serialImagePuller struct {
imageService kubecontainer.ImageService
pullRequests chan *imagePullRequest
}
func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
imagePuller := &serialImagePuller{imageService, make(chan *imagePullRequest, maxImagePullRequests)}
go wait.Until(imagePuller.processImagePullRequests, time.Second, wait.NeverStop)
return imagePuller
}
type imagePullRequest struct {
ctx context.Context
spec kubecontainer.ImageSpec
credentials []credentialprovider.TrackedAuthConfig
pullChan chan<- pullResult
podSandboxConfig *runtimeapi.PodSandboxConfig
}
func (sip *serialImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) {
sip.pullRequests <- &imagePullRequest{
ctx: ctx,
spec: spec,
credentials: credentials,
pullChan: pullChan,
podSandboxConfig: podSandboxConfig,
}
}
func (sip *serialImagePuller) processImagePullRequests() {
for pullRequest := range sip.pullRequests {
startTime := time.Now()
imageRef, creds, err := sip.imageService.PullImage(pullRequest.ctx, pullRequest.spec, pullRequest.credentials, pullRequest.podSandboxConfig)
var size uint64
if err == nil && imageRef != "" {
// Getting the image size with best effort, ignoring the error.
size, _ = sip.imageService.GetImageSize(pullRequest.ctx, pullRequest.spec)
}
pullRequest.pullChan <- pullResult{
imageRef: imageRef,
imageSize: size,
err: err,
// Note: pullDuration includes getting the image size.
pullDuration: time.Since(startTime),
credentialsUsed: creds,
}
}
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"bytes"
"crypto/sha256"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/errors"
kubeletconfigv1alpha1 "k8s.io/kubelet/config/v1alpha1"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletconfigvint1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
)
const (
cacheFilesSHA256Prefix = "sha256-"
tmpFilesSuffix = ".tmp"
)
var _ PullRecordsAccessor = &fsPullRecordsAccessor{}
// fsPullRecordsAccessor uses the filesystem to read/write ImagePullIntent/ImagePulledRecord
// records.
type fsPullRecordsAccessor struct {
pullingDir string
pulledDir string
encoder runtime.Encoder
decoder runtime.Decoder
}
// NewFSPullRecordsAccessor returns an accessor for the ImagePullIntent/ImagePulledRecord
// records with a filesystem as the backing database.
func NewFSPullRecordsAccessor(kubeletDir string) (*fsPullRecordsAccessor, error) {
kubeletConfigEncoder, kubeletConfigDecoder, err := createKubeletConfigSchemeEncoderDecoder()
if err != nil {
return nil, err
}
accessor := &fsPullRecordsAccessor{
pullingDir: filepath.Join(kubeletDir, "image_manager", "pulling"),
pulledDir: filepath.Join(kubeletDir, "image_manager", "pulled"),
encoder: kubeletConfigEncoder,
decoder: kubeletConfigDecoder,
}
if err := os.MkdirAll(accessor.pullingDir, 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(accessor.pulledDir, 0700); err != nil {
return nil, err
}
return accessor, nil
}
func (f *fsPullRecordsAccessor) WriteImagePullIntent(image string) error {
intent := kubeletconfiginternal.ImagePullIntent{
Image: image,
}
intentBytes := bytes.NewBuffer([]byte{})
if err := f.encoder.Encode(&intent, intentBytes); err != nil {
return err
}
return writeFile(f.pullingDir, cacheFilename(image), intentBytes.Bytes())
}
func (f *fsPullRecordsAccessor) ListImagePullIntents() ([]*kubeletconfiginternal.ImagePullIntent, error) {
var intents []*kubeletconfiginternal.ImagePullIntent
// walk the pulling directory for any pull intent records
err := processDirFiles(f.pullingDir,
func(filePath string, fileContent []byte) error {
intent, err := decodeIntent(f.decoder, fileContent)
if err != nil {
return fmt.Errorf("failed to deserialize content of file %q into ImagePullIntent: %w", filePath, err)
}
intents = append(intents, intent)
return nil
})
return intents, err
}
func (f *fsPullRecordsAccessor) ImagePullIntentExists(image string) (bool, error) {
intentRecordPath := filepath.Join(f.pullingDir, cacheFilename(image))
intentBytes, err := os.ReadFile(intentRecordPath)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
intent, err := decodeIntent(f.decoder, intentBytes)
if err != nil {
return false, err
}
return intent.Image == image, nil
}
func (f *fsPullRecordsAccessor) DeleteImagePullIntent(image string) error {
err := os.Remove(filepath.Join(f.pullingDir, cacheFilename(image)))
if os.IsNotExist(err) {
return nil
}
return err
}
func (f *fsPullRecordsAccessor) GetImagePulledRecord(imageRef string) (*kubeletconfiginternal.ImagePulledRecord, bool, error) {
recordBytes, err := os.ReadFile(filepath.Join(f.pulledDir, cacheFilename(imageRef)))
if os.IsNotExist(err) {
return nil, false, nil
} else if err != nil {
return nil, false, err
}
pulledRecord, err := decodePulledRecord(f.decoder, recordBytes)
if err != nil {
return nil, true, err
}
if pulledRecord.ImageRef != imageRef {
return nil, false, nil
}
return pulledRecord, true, err
}
func (f *fsPullRecordsAccessor) ListImagePulledRecords() ([]*kubeletconfiginternal.ImagePulledRecord, error) {
var pullRecords []*kubeletconfiginternal.ImagePulledRecord
err := processDirFiles(f.pulledDir,
func(filePath string, fileContent []byte) error {
pullRecord, err := decodePulledRecord(f.decoder, fileContent)
if err != nil {
return fmt.Errorf("failed to deserialize content of file %q into ImagePulledRecord: %w", filePath, err)
}
pullRecords = append(pullRecords, pullRecord)
return nil
})
return pullRecords, err
}
func (f *fsPullRecordsAccessor) WriteImagePulledRecord(pulledRecord *kubeletconfiginternal.ImagePulledRecord) error {
recordBytes := bytes.NewBuffer([]byte{})
if err := f.encoder.Encode(pulledRecord, recordBytes); err != nil {
return fmt.Errorf("failed to serialize ImagePulledRecord: %w", err)
}
return writeFile(f.pulledDir, cacheFilename(pulledRecord.ImageRef), recordBytes.Bytes())
}
func (f *fsPullRecordsAccessor) DeleteImagePulledRecord(imageRef string) error {
err := os.Remove(filepath.Join(f.pulledDir, cacheFilename(imageRef)))
if os.IsNotExist(err) {
return nil
}
return err
}
func cacheFilename(image string) string {
return fmt.Sprintf("%s%x", cacheFilesSHA256Prefix, sha256.Sum256([]byte(image)))
}
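// The cache filename is deterministic: "sha256-" followed by the hex-encoded
// SHA-256 of the image string, e.g. (the 64-character hex digest is elided):
//
//	cacheFilename("docker.io/library/busybox:latest") // "sha256-<64 hex chars>"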
// writeFile writes `content` to the file with name `filename` in directory `dir`.
// It ensures write atomicity by first writing to a temporary file and, only after
// a successful write, moving the temp file in place of the target.
func writeFile(dir, filename string, content []byte) error {
// create the target folder if it does not exist yet
if err := os.MkdirAll(dir, 0700); err != nil {
return fmt.Errorf("failed to create directory %q: %w", dir, err)
}
targetPath := filepath.Join(dir, filename)
tmpPath := targetPath + tmpFilesSuffix
if err := os.WriteFile(tmpPath, content, 0600); err != nil {
_ = os.Remove(tmpPath) // attempt a delete in case the file was at least partially written
return fmt.Errorf("failed to create temporary file %q: %w", tmpPath, err)
}
if err := os.Rename(tmpPath, targetPath); err != nil {
_ = os.Remove(tmpPath) // attempt a cleanup
return err
}
return nil
}
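// Minimal usage sketch (mirroring WriteImagePulledRecord above): the
// write-then-rename pattern means readers observe either the old file or the
// complete new one, never a partial write, provided the temporary file and the
// target live on the same filesystem:
//
//	err := writeFile(f.pulledDir, cacheFilename(imageRef), recordBytes)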
// processDirFiles reads the files in a given directory and performs the `fileAction` action on each of them.
func processDirFiles(dirName string, fileAction func(filePath string, fileContent []byte) error) error {
var walkErrors []error
err := filepath.WalkDir(dirName, func(path string, d fs.DirEntry, err error) error {
if err != nil {
walkErrors = append(walkErrors, err)
return nil
}
if path == dirName {
return nil
}
if d.IsDir() {
return filepath.SkipDir
}
// skip files we didn't write or .tmp files
if filename := d.Name(); !strings.HasPrefix(filename, cacheFilesSHA256Prefix) || strings.HasSuffix(filename, tmpFilesSuffix) {
return nil
}
fileContent, err := os.ReadFile(path)
if err != nil {
walkErrors = append(walkErrors, fmt.Errorf("failed to read %q: %w", path, err))
return nil
}
if err := fileAction(path, fileContent); err != nil {
walkErrors = append(walkErrors, err)
return nil
}
return nil
})
if err != nil {
walkErrors = append(walkErrors, err)
}
return errors.NewAggregate(walkErrors)
}
// createKubeletConfigSchemeEncoderDecoder creates a strict-encoding encoder and
// decoder for the internal and alpha kubelet config APIs.
func createKubeletConfigSchemeEncoderDecoder() (runtime.Encoder, runtime.Decoder, error) {
const mediaType = runtime.ContentTypeJSON
scheme := runtime.NewScheme()
if err := kubeletconfigvint1alpha1.AddToScheme(scheme); err != nil {
return nil, nil, err
}
if err := kubeletconfiginternal.AddToScheme(scheme); err != nil {
return nil, nil, err
}
// use the strict scheme to fail on unknown fields
codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict)
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, nil, fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType)
}
return codecs.EncoderForVersion(info.Serializer, kubeletconfigv1alpha1.SchemeGroupVersion), codecs.UniversalDecoder(), nil
}
func decodeIntent(d runtime.Decoder, objBytes []byte) (*kubeletconfiginternal.ImagePullIntent, error) {
obj, _, err := d.Decode(objBytes, nil, nil)
if err != nil {
return nil, err
}
intentObj, ok := obj.(*kubeletconfiginternal.ImagePullIntent)
if !ok {
return nil, fmt.Errorf("failed to convert object to *ImagePullIntent: %T", obj)
}
return intentObj, nil
}
func decodePulledRecord(d runtime.Decoder, objBytes []byte) (*kubeletconfiginternal.ImagePulledRecord, error) {
obj, _, err := d.Decode(objBytes, nil, nil)
if err != nil {
return nil, err
}
pulledRecord, ok := obj.(*kubeletconfiginternal.ImagePulledRecord)
if !ok {
return nil, fmt.Errorf("failed to convert object to *ImagePulledRecord: %T", obj)
}
return pulledRecord, nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"context"
"fmt"
"slices"
"strings"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/parsers"
)
var _ ImagePullManager = &PullManager{}
// writeRecordWhileMatchingLimit is a limit at which we stop writing yet-uncached
// records that we found when we were checking if an image pull must be attempted.
// This is to prevent unbounded writes in cases of high namespace turnover.
const writeRecordWhileMatchingLimit = 100
// PullManager is an implementation of the ImagePullManager. It
// tracks images pulled by the kubelet by creating records about ongoing and
// successful pulls.
// It tracks the credentials used with each successful pull in order to be able
// to distinguish tenants requesting access to an image that exists on the kubelet's
// node.
type PullManager struct {
recordsAccessor PullRecordsAccessor
imagePolicyEnforcer ImagePullPolicyEnforcer
imageService kubecontainer.ImageService
intentAccessors *StripedLockSet // image -> sync.Mutex
intentCounters *sync.Map // image -> number of current in-flight pulls
pulledAccessors *StripedLockSet // imageRef -> sync.Mutex
}
func NewImagePullManager(ctx context.Context, recordsAccessor PullRecordsAccessor, imagePullPolicy ImagePullPolicyEnforcer, imageService kubecontainer.ImageService, lockStripesNum int32) (*PullManager, error) {
m := &PullManager{
recordsAccessor: recordsAccessor,
imagePolicyEnforcer: imagePullPolicy,
imageService: imageService,
intentAccessors: NewStripedLockSet(lockStripesNum),
intentCounters: &sync.Map{},
pulledAccessors: NewStripedLockSet(lockStripesNum),
}
m.initialize(ctx)
return m, nil
}
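// Wiring sketch (hypothetical: policyEnforcer and imageService are assumed to
// be provided elsewhere by the kubelet, and 10 is an arbitrary stripe count):
//
//	accessor, err := NewFSPullRecordsAccessor(kubeletDir)
//	if err != nil {
//		return err
//	}
//	pullManager, err := NewImagePullManager(ctx, accessor, policyEnforcer, imageService, 10)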
func (f *PullManager) RecordPullIntent(image string) error {
f.intentAccessors.Lock(image)
defer f.intentAccessors.Unlock(image)
if err := f.recordsAccessor.WriteImagePullIntent(image); err != nil {
return fmt.Errorf("failed to record image pull intent: %w", err)
}
f.incrementIntentCounterForImage(image)
return nil
}
func (f *PullManager) RecordImagePulled(ctx context.Context, image, imageRef string, credentials *kubeletconfiginternal.ImagePullCredentials) {
logger := klog.FromContext(ctx)
if err := f.writePulledRecordIfChanged(ctx, image, imageRef, credentials); err != nil {
logger.Error(err, "failed to write image pulled record", "imageRef", imageRef)
return
}
// Notice we don't decrement in case of record write error, which leaves dangling
// imagePullIntents and refCount in the intentCounters map.
// This is done so that the successfully pulled image is still considered as pulled by the kubelet.
// The kubelet will attempt to turn the imagePullIntent into a pulled record again when
// it's restarted.
f.decrementImagePullIntent(ctx, image)
}
// writePulledRecordIfChanged writes an ImagePulledRecord into the f.pulledDir directory.
// `image` is an image from a container of a Pod object.
// `imageRef` is a reference to the `image` as used by the CRI.
// `credentials` is a set of credentials that should be written to a new/merged into
// an existing record.
//
// If `credentials` is nil, it marks a situation where an image was pulled under
// unknown circumstances. We should record the image as tracked but no credentials
// should be written in order to force credential verification when the image is
// accessed the next time.
func (f *PullManager) writePulledRecordIfChanged(ctx context.Context, image, imageRef string, credentials *kubeletconfiginternal.ImagePullCredentials) error {
logger := klog.FromContext(ctx)
f.pulledAccessors.Lock(imageRef)
defer f.pulledAccessors.Unlock(imageRef)
sanitizedImage, err := trimImageTagDigest(image)
if err != nil {
return fmt.Errorf("invalid image name %q: %w", image, err)
}
pulledRecord, _, err := f.recordsAccessor.GetImagePulledRecord(imageRef)
if err != nil {
logger.Info("failed to retrieve an ImagePulledRecord", "image", image, "err", err)
pulledRecord = nil
}
var pulledRecordChanged bool
if pulledRecord == nil {
pulledRecordChanged = true
pulledRecord = &kubeletconfiginternal.ImagePulledRecord{
LastUpdatedTime: metav1.Time{Time: time.Now()},
ImageRef: imageRef,
CredentialMapping: make(map[string]kubeletconfiginternal.ImagePullCredentials),
}
// The mere existence of a pulled record for a given imageRef is enough
// for us to consider it kubelet-pulled. The kubelet should fail safe
// if it does not find a credential record for the specific image, and it
// must then require credential validation.
if credentials != nil {
pulledRecord.CredentialMapping[sanitizedImage] = *credentials
}
} else {
pulledRecord, pulledRecordChanged = pulledRecordMergeNewCreds(pulledRecord, sanitizedImage, credentials)
}
if !pulledRecordChanged {
return nil
}
return f.recordsAccessor.WriteImagePulledRecord(pulledRecord)
}
func (f *PullManager) RecordImagePullFailed(ctx context.Context, image string) {
f.decrementImagePullIntent(ctx, image)
}
// decrementImagePullIntent decreases the counter tracking how many times an image
// pull intent for a given `image` was requested, and removes the ImagePullIntent file
// if the reference counter for the image reaches zero.
func (f *PullManager) decrementImagePullIntent(ctx context.Context, image string) {
logger := klog.FromContext(ctx)
f.intentAccessors.Lock(image)
defer f.intentAccessors.Unlock(image)
if f.getIntentCounterForImage(image) <= 1 {
if err := f.recordsAccessor.DeleteImagePullIntent(image); err != nil {
logger.Error(err, "failed to remove image pull intent", "image", image)
return
}
// only delete the intent counter once the file was deleted to be consistent
// with the records
f.intentCounters.Delete(image)
return
}
f.decrementIntentCounterForImage(image)
}
func (f *PullManager) MustAttemptImagePull(ctx context.Context, image, imageRef string, podSecrets []kubeletconfiginternal.ImagePullSecret, podServiceAccount *kubeletconfiginternal.ImagePullServiceAccount) bool {
if len(imageRef) == 0 {
return true
}
logger := klog.FromContext(ctx)
var imagePulledByKubelet bool
var pulledRecord *kubeletconfiginternal.ImagePulledRecord
err := func() error {
// don't allow changes to the files we're using for our decision
f.pulledAccessors.Lock(imageRef)
defer f.pulledAccessors.Unlock(imageRef)
f.intentAccessors.Lock(image)
defer f.intentAccessors.Unlock(image)
var err error
var exists bool
pulledRecord, exists, err = f.recordsAccessor.GetImagePulledRecord(imageRef)
switch {
case err != nil:
return err
case exists:
imagePulledByKubelet = true
case pulledRecord != nil:
imagePulledByKubelet = true
default:
// Optimized check: consult the in-memory intent counter first. However, a zero
// count may only mean the kubelet restarted since writing the intent record, so
// we must fall back to the on-disk records.
imagePulledByKubelet = f.getIntentCounterForImage(image) > 0
if imagePulledByKubelet {
break
}
if exists, err := f.recordsAccessor.ImagePullIntentExists(image); err != nil {
return fmt.Errorf("failed to check existence of an image pull intent: %w", err)
} else if exists {
imagePulledByKubelet = true
}
}
return nil
}()
if err != nil {
logger.Error(err, "Unable to access cache records about image pulls")
return true
}
if !f.imagePolicyEnforcer.RequireCredentialVerificationForImage(image, imagePulledByKubelet) {
return false
}
if pulledRecord == nil {
// we have no proper records of the image being pulled in the past, we can short-circuit here
return true
}
sanitizedImage, err := trimImageTagDigest(image)
if err != nil {
logger.Error(err, "failed to parse image name, forcing image credentials reverification", "image", sanitizedImage)
return true
}
cachedCreds, ok := pulledRecord.CredentialMapping[sanitizedImage]
if !ok {
return true
}
if cachedCreds.NodePodsAccessible {
// anyone on this node can access the image
return false
}
if len(cachedCreds.KubernetesSecrets) == 0 && len(cachedCreds.KubernetesServiceAccounts) == 0 {
return true
}
for _, podSecret := range podSecrets {
for _, cachedSecret := range cachedCreds.KubernetesSecrets {
// we need to check hash len in case hashing failed while storing the record in the keyring
hashesMatch := len(cachedSecret.CredentialHash) > 0 && podSecret.CredentialHash == cachedSecret.CredentialHash
secretCoordinatesMatch := podSecret.UID == cachedSecret.UID &&
podSecret.Namespace == cachedSecret.Namespace &&
podSecret.Name == cachedSecret.Name
if hashesMatch {
if !secretCoordinatesMatch && len(cachedCreds.KubernetesSecrets) < writeRecordWhileMatchingLimit {
// While we're only matching at this point, we want to ensure this secret is considered valid in the future
// and so we make an additional write to the cache.
// writePulledRecordIfChanged() is a no-op in case the secret with the updated hash already appears in the cache.
if err := f.writePulledRecordIfChanged(ctx, image, imageRef, &kubeletconfiginternal.ImagePullCredentials{KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{podSecret}}); err != nil {
logger.Error(err, "failed to write an image pulled record", "image", image, "imageRef", imageRef)
}
}
return false
}
if secretCoordinatesMatch {
if !hashesMatch && len(cachedCreds.KubernetesSecrets) < writeRecordWhileMatchingLimit {
// While we're only matching at this point, we want to ensure the updated credentials are considered valid in the future
// and so we make an additional write to the cache.
// writePulledRecordIfChanged() is a no-op in case the hash got updated in the meantime.
if err := f.writePulledRecordIfChanged(ctx, image, imageRef, &kubeletconfiginternal.ImagePullCredentials{KubernetesSecrets: []kubeletconfiginternal.ImagePullSecret{podSecret}}); err != nil {
logger.Error(err, "failed to write an image pulled record", "image", image, "imageRef", imageRef)
}
return false
}
}
}
}
if podServiceAccount != nil && slices.Contains(cachedCreds.KubernetesServiceAccounts, *podServiceAccount) {
// we found a matching service account, no need to pull the image
return false
}
return true
}
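// Decision sketch summarizing the checks above:
//
//	imageRef == ""                              -> true (nothing resolved locally, must pull)
//	policy does not require verification        -> false
//	no pulled record or no cached credentials   -> true
//	NodePodsAccessible                          -> false
//	matching secret hash or secret coordinates  -> false (possibly refreshing the record)
//	matching service account                    -> false
//	otherwise                                   -> true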
func (f *PullManager) PruneUnknownRecords(ctx context.Context, imageList []string, until time.Time) {
f.pulledAccessors.GlobalLock()
defer f.pulledAccessors.GlobalUnlock()
logger := klog.FromContext(ctx)
pulledRecords, err := f.recordsAccessor.ListImagePulledRecords()
if err != nil {
logger.Error(err, "there were errors listing ImagePulledRecords, garbage collection will proceed with incomplete records list")
}
imagesInUse := sets.New(imageList...)
for _, imageRecord := range pulledRecords {
if !imageRecord.LastUpdatedTime.Time.Before(until) {
// the image record was only updated after the GC started
continue
}
if imagesInUse.Has(imageRecord.ImageRef) {
continue
}
if err := f.recordsAccessor.DeleteImagePulledRecord(imageRecord.ImageRef); err != nil {
logger.Error(err, "failed to remove an ImagePulledRecord", "imageRef", imageRecord.ImageRef)
}
}
}
// initialize gathers all the images from pull intent records left over
// from previous kubelet runs.
// If the CRI reports any of the above images as already pulled, we turn the
// pull intent into a pulled record and the original pull intent is deleted.
//
// This method is not thread-safe and it should only be called upon the creation
// of the PullManager.
func (f *PullManager) initialize(ctx context.Context) {
logger := klog.FromContext(ctx)
pullIntents, err := f.recordsAccessor.ListImagePullIntents()
if err != nil {
logger.Error(err, "there were errors listing ImagePullIntents, continuing with an incomplete records list")
}
if len(pullIntents) == 0 {
return
}
imageObjs, err := f.imageService.ListImages(ctx)
if err != nil {
logger.Error(err, "failed to list images")
}
inFlightPulls := sets.New[string]()
for _, intent := range pullIntents {
inFlightPulls.Insert(intent.Image)
}
// Each of the images known to the CRI might consist of multiple tags and digests,
// which is what we track in the ImagePullIntent - we need to go through all of these
// for each image.
for _, imageObj := range imageObjs {
existingRecordedImages := searchForExistingTagDigest(inFlightPulls, imageObj)
for _, image := range existingRecordedImages.UnsortedList() {
if err := f.writePulledRecordIfChanged(ctx, image, imageObj.ID, nil); err != nil {
logger.Error(err, "failed to write an image pull record", "imageRef", imageObj.ID)
continue
}
if err := f.recordsAccessor.DeleteImagePullIntent(image); err != nil {
logger.V(2).Info("failed to remove image pull intent file", "imageName", image, "error", err)
}
}
}
}
func (f *PullManager) incrementIntentCounterForImage(image string) {
f.intentCounters.Store(image, f.getIntentCounterForImage(image)+1)
}
func (f *PullManager) decrementIntentCounterForImage(image string) {
f.intentCounters.Store(image, f.getIntentCounterForImage(image)-1)
}
func (f *PullManager) getIntentCounterForImage(image string) int32 {
intentNumAny, ok := f.intentCounters.Load(image)
if !ok {
return 0
}
intentNum, ok := intentNumAny.(int32)
if !ok {
panic(fmt.Sprintf("expected the intentCounters sync map to only contain int32 values, got %T", intentNumAny))
}
return intentNum
}
// searchForExistingTagDigest loops through the `image` RepoDigests and RepoTags
// and tries to find all of the image's digests/tags in `inFlightPulls`, which is
// the set of images with in-flight pull intents.
func searchForExistingTagDigest(inFlightPulls sets.Set[string], image kubecontainer.Image) sets.Set[string] {
existingRecordedImages := sets.New[string]()
for _, digest := range image.RepoDigests {
if ok := inFlightPulls.Has(digest); ok {
existingRecordedImages.Insert(digest)
}
}
for _, tag := range image.RepoTags {
if ok := inFlightPulls.Has(tag); ok {
existingRecordedImages.Insert(tag)
}
}
return existingRecordedImages
}
type kubeSecretCoordinates struct {
UID string
Namespace string
Name string
}
// pulledRecordMergeNewCreds merges the credentials from `newCreds` into the `orig`
// record for the `imageNoTagDigest` image.
// `imageNoTagDigest` is the content of the `image` field from a pod's container
// after any tag or digest were removed from it.
//
// NOTE: pulledRecordMergeNewCreds() may be often called in the read path of
// PullManager.MustAttemptImagePull() and so it's desirable to limit allocations
// (e.g. DeepCopy()) until it is necessary.
func pulledRecordMergeNewCreds(orig *kubeletconfiginternal.ImagePulledRecord, imageNoTagDigest string, newCreds *kubeletconfiginternal.ImagePullCredentials) (*kubeletconfiginternal.ImagePulledRecord, bool) {
if newCreds == nil {
// no new credential information to record
return orig, false
}
if !newCreds.NodePodsAccessible && len(newCreds.KubernetesSecrets) == 0 && len(newCreds.KubernetesServiceAccounts) == 0 {
// we don't have any secret, service account credentials or node-wide access to record
return orig, false
}
selectedCreds, found := orig.CredentialMapping[imageNoTagDigest]
if !found {
ret := orig.DeepCopy()
if ret.CredentialMapping == nil {
ret.CredentialMapping = make(map[string]kubeletconfiginternal.ImagePullCredentials)
}
ret.CredentialMapping[imageNoTagDigest] = *newCreds
ret.LastUpdatedTime = metav1.Time{Time: time.Now()}
return ret, true
}
if selectedCreds.NodePodsAccessible {
return orig, false
}
switch {
case newCreds.NodePodsAccessible:
selectedCreds.NodePodsAccessible = true
selectedCreds.KubernetesSecrets = nil
selectedCreds.KubernetesServiceAccounts = nil
ret := orig.DeepCopy()
ret.CredentialMapping[imageNoTagDigest] = selectedCreds
ret.LastUpdatedTime = metav1.Time{Time: time.Now()}
return ret, true
case len(newCreds.KubernetesSecrets) > 0:
var secretsChanged bool
selectedCreds.KubernetesSecrets, secretsChanged = mergePullSecrets(selectedCreds.KubernetesSecrets, newCreds.KubernetesSecrets)
if !secretsChanged {
return orig, false
}
case len(newCreds.KubernetesServiceAccounts) > 0:
var serviceAccountsChanged bool
selectedCreds.KubernetesServiceAccounts, serviceAccountsChanged = mergePullServiceAccounts(selectedCreds.KubernetesServiceAccounts, newCreds.KubernetesServiceAccounts)
if !serviceAccountsChanged {
return orig, false
}
}
ret := orig.DeepCopy()
ret.CredentialMapping[imageNoTagDigest] = selectedCreds
ret.LastUpdatedTime = metav1.Time{Time: time.Now()}
return ret, true
}
// mergePullSecrets merges two slices of ImagePullSecret objects into one while
// keeping the objects unique per `Namespace, Name, UID` key.
//
// In case an object from the `new` slice has the same `Namespace, Name, UID` combination
// as an object from `orig`, the result will use the CredentialHash value of the
// object from `new`.
//
// The returned slice is sorted by Namespace, Name and UID (in this order). Also
// returns an indicator of whether the set of input secrets changed.
func mergePullSecrets(orig, new []kubeletconfiginternal.ImagePullSecret) ([]kubeletconfiginternal.ImagePullSecret, bool) {
credSet := make(map[kubeSecretCoordinates]string)
for _, secret := range orig {
credSet[kubeSecretCoordinates{
UID: secret.UID,
Namespace: secret.Namespace,
Name: secret.Name,
}] = secret.CredentialHash
}
changed := false
for _, s := range new {
key := kubeSecretCoordinates{UID: s.UID, Namespace: s.Namespace, Name: s.Name}
if existingHash, ok := credSet[key]; !ok || existingHash != s.CredentialHash {
changed = true
credSet[key] = s.CredentialHash
}
}
if !changed {
return orig, false
}
ret := make([]kubeletconfiginternal.ImagePullSecret, 0, len(credSet))
for coords, hash := range credSet {
ret = append(ret, kubeletconfiginternal.ImagePullSecret{
UID: coords.UID,
Namespace: coords.Namespace,
Name: coords.Name,
CredentialHash: hash,
})
}
// we don't need to use the stable version because secret coordinates used for ordering are unique in the set
slices.SortFunc(ret, imagePullSecretLess)
return ret, true
}
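// Illustrative sketch of the changed-flag semantics (hypothetical values):
//
//	orig := []kubeletconfiginternal.ImagePullSecret{
//		{Namespace: "a", Name: "s1", UID: "u1", CredentialHash: "h1"},
//	}
//	update := []kubeletconfiginternal.ImagePullSecret{
//		{Namespace: "a", Name: "s1", UID: "u1", CredentialHash: "h2"}, // same key, new hash
//	}
//	merged, changed := mergePullSecrets(orig, update)
//	// changed == true and merged holds a single entry with CredentialHash "h2",
//	// because the hash from `new` wins for an existing (Namespace, Name, UID) key.
//	// Merging `update` into `merged` again would return (merged, false).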
// imagePullSecretLess is a helper function to define ordering in a slice of
// ImagePullSecret objects.
func imagePullSecretLess(a, b kubeletconfiginternal.ImagePullSecret) int {
if cmp := strings.Compare(a.Namespace, b.Namespace); cmp != 0 {
return cmp
}
if cmp := strings.Compare(a.Name, b.Name); cmp != 0 {
return cmp
}
return strings.Compare(a.UID, b.UID)
}
// trimImageTagDigest removes the tag and digest from an image name
func trimImageTagDigest(containerImage string) (string, error) {
imageName, _, _, err := parsers.ParseImageName(containerImage)
return imageName, err
}
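// Illustrative sketch (hypothetical image references), assuming
// parsers.ParseImageName normalizes the reference and splits off any tag or digest:
//
//	name, _ := trimImageTagDigest("docker.io/library/busybox:1.36")
//	// name == "docker.io/library/busybox"
//	name, _ = trimImageTagDigest("registry.example.com/app@sha256:0123abcd...")
//	// the digest suffix is removed, leaving "registry.example.com/app"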
// mergePullServiceAccounts merges two slices of ImagePullServiceAccount objects into one while
// keeping the objects unique per `Namespace, Name, UID` key.
// The returned slice is sorted by Namespace, Name and UID (in this order).
// Also returns an indicator whether the set of input service accounts changed.
func mergePullServiceAccounts(orig, new []kubeletconfiginternal.ImagePullServiceAccount) ([]kubeletconfiginternal.ImagePullServiceAccount, bool) {
credSet := sets.New[kubeletconfiginternal.ImagePullServiceAccount]()
for _, serviceAccount := range orig {
credSet.Insert(serviceAccount)
}
changed := false
for _, s := range new {
if !credSet.Has(s) {
changed = true
credSet.Insert(s)
}
}
if !changed {
return orig, false
}
ret := credSet.UnsortedList()
slices.SortFunc(ret, imagePullServiceAccountLess)
return ret, true
}
// imagePullServiceAccountLess is a helper function to define ordering in a slice of
// ImagePullServiceAccount objects.
func imagePullServiceAccountLess(a, b kubeletconfiginternal.ImagePullServiceAccount) int {
if cmp := strings.Compare(a.Namespace, b.Namespace); cmp != 0 {
return cmp
}
if cmp := strings.Compare(a.Name, b.Name); cmp != 0 {
return cmp
}
if cmp := strings.Compare(a.UID, b.UID); cmp != 0 {
return cmp
}
return 0
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"fmt"
"strings"
dockerref "github.com/distribution/reference"
"k8s.io/apimachinery/pkg/util/sets"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// ImagePullPolicyEnforcer defines a class of functions implementing credential
// verification policies for image pulls. These functions determine whether the
// implemented policy requires credential verification based on the image name
// and the existence of records about previous image pulls.
//
// `image` is an image name from a Pod's container "image" field.
// `imagePulledByKubelet` marks that ImagePulledRecord or ImagePullingIntent records
// for the `image` exist on the node, meaning it was pulled by the kubelet at some
// point in the past.
type ImagePullPolicyEnforcer interface {
RequireCredentialVerificationForImage(image string, imagePulledByKubelet bool) bool
}
// ImagePullPolicyEnforcerFunc is a function type that implements the ImagePullPolicyEnforcer interface
type ImagePullPolicyEnforcerFunc func(image string, imagePulledByKubelet bool) bool
func (e ImagePullPolicyEnforcerFunc) RequireCredentialVerificationForImage(image string, imagePulledByKubelet bool) bool {
return e(image, imagePulledByKubelet)
}
func NewImagePullCredentialVerificationPolicy(policy kubeletconfiginternal.ImagePullCredentialsVerificationPolicy, imageAllowList []string) (ImagePullPolicyEnforcer, error) {
switch policy {
case kubeletconfiginternal.NeverVerify:
return NeverVerifyImagePullPolicy(), nil
case kubeletconfiginternal.NeverVerifyPreloadedImages:
return NeverVerifyPreloadedPullPolicy(), nil
case kubeletconfiginternal.NeverVerifyAllowlistedImages:
return NewNeverVerifyAllowListedPullPolicy(imageAllowList)
case kubeletconfiginternal.AlwaysVerify:
return AlwaysVerifyImagePullPolicy(), nil
default:
return nil, fmt.Errorf("unknown image pull credential verification policy: %v", policy)
}
}
func NeverVerifyImagePullPolicy() ImagePullPolicyEnforcerFunc {
return func(image string, imagePulledByKubelet bool) bool {
return false
}
}
func NeverVerifyPreloadedPullPolicy() ImagePullPolicyEnforcerFunc {
return func(image string, imagePulledByKubelet bool) bool {
return imagePulledByKubelet
}
}
func AlwaysVerifyImagePullPolicy() ImagePullPolicyEnforcerFunc {
return func(image string, imagePulledByKubelet bool) bool {
return true
}
}
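// Illustrative usage sketch of the simple policies above (hypothetical image):
//
//	enforcer, err := NewImagePullCredentialVerificationPolicy(
//		kubeletconfiginternal.NeverVerifyPreloadedImages, nil)
//	if err != nil {
//		// handle the error
//	}
//	// an image that was never pulled by the kubelet (e.g. preloaded) needs no
//	// credential verification under this policy:
//	_ = enforcer.RequireCredentialVerificationForImage("registry.example.com/app", false) // false
//	// an image previously pulled by the kubelet must be verified:
//	_ = enforcer.RequireCredentialVerificationForImage("registry.example.com/app", true) // true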
type NeverVerifyAllowlistedImages struct {
absoluteURLs sets.Set[string]
prefixes []string
}
func NewNeverVerifyAllowListedPullPolicy(allowList []string) (*NeverVerifyAllowlistedImages, error) {
policy := &NeverVerifyAllowlistedImages{
absoluteURLs: sets.New[string](),
}
for _, pattern := range allowList {
normalizedPattern, isWildcard, err := getAllowlistImagePattern(pattern)
if err != nil {
return nil, err
}
if isWildcard {
policy.prefixes = append(policy.prefixes, normalizedPattern)
} else {
policy.absoluteURLs.Insert(normalizedPattern)
}
}
return policy, nil
}
func (p *NeverVerifyAllowlistedImages) RequireCredentialVerificationForImage(image string, imagePulledByKubelet bool) bool {
return !p.imageMatches(image)
}
func (p *NeverVerifyAllowlistedImages) imageMatches(image string) bool {
if p.absoluteURLs.Has(image) {
return true
}
for _, prefix := range p.prefixes {
if strings.HasPrefix(image, prefix) {
return true
}
}
return false
}
func ValidateAllowlistImagesPatterns(patterns []string) error {
for _, p := range patterns {
if _, _, err := getAllowlistImagePattern(p); err != nil {
return err
}
}
return nil
}
func getAllowlistImagePattern(pattern string) (string, bool, error) {
if pattern != strings.TrimSpace(pattern) {
return "", false, fmt.Errorf("leading/trailing spaces are not allowed: %s", pattern)
}
trimmedPattern := pattern
isWildcard := false
if strings.HasSuffix(pattern, "/*") {
isWildcard = true
trimmedPattern = strings.TrimSuffix(trimmedPattern, "*")
}
if len(trimmedPattern) == 0 {
return "", false, fmt.Errorf("the supplied pattern is too short: %s", pattern)
}
if strings.ContainsRune(trimmedPattern, '*') {
return "", false, fmt.Errorf("not a valid wildcard pattern, only patterns ending with '/*' are allowed: %s", pattern)
}
if isWildcard {
if len(trimmedPattern) == 1 {
return "", false, fmt.Errorf("at least registry hostname is required")
}
} else { // not a wildcard
image, err := dockerref.ParseNormalizedNamed(trimmedPattern)
if err != nil {
return "", false, fmt.Errorf("failed to parse as an image name: %w", err)
}
if trimmedPattern != image.Name() { // image.Name() returns the image name without tag/digest
return "", false, fmt.Errorf("neither tag nor digest is accepted in an image reference: %s", pattern)
}
return trimmedPattern, false, nil
}
return trimmedPattern, true, nil
}
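// Illustrative sketch of accepted and rejected allowlist patterns (hypothetical):
//
//	getAllowlistImagePattern("registry.example.com/team/*") // ("registry.example.com/team/", true, nil)
//	getAllowlistImagePattern("registry.example.com/app")    // ("registry.example.com/app", false, nil)
//	getAllowlistImagePattern("registry.example.com/app:v1") // error: tags/digests are not accepted
//	getAllowlistImagePattern("/*")                          // error: a registry hostname is required
//	getAllowlistImagePattern("registry.example.com/*/app")  // error: only trailing "/*" wildcards are allowed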
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"hash/fnv"
"sync"
)
// StripedLockSet allows context locking based on string keys, where each key
// is mapped to an index in a size-limited slice of locks.
type StripedLockSet struct {
locks []sync.Mutex
size int32
}
// NewStripedLockSet creates a StripedLockSet with `size` number of locks to be
// used for locking context based on string keys.
// The size will be normalized to stay in the [1, 31] interval.
func NewStripedLockSet(size int32) *StripedLockSet {
size = max(size, 1)  // make sure we're at least at size 1
size = min(size, 31) // cap the stripe count at 31 so size always matches len(locks)
return &StripedLockSet{
locks: make([]sync.Mutex, size),
size:  size,
}
}
func (s *StripedLockSet) Lock(key string) {
s.locks[keyToID(key, s.size)].Lock()
}
func (s *StripedLockSet) Unlock(key string) {
s.locks[keyToID(key, s.size)].Unlock()
}
func (s *StripedLockSet) GlobalLock() {
for i := range s.locks {
s.locks[i].Lock()
}
}
func (s *StripedLockSet) GlobalUnlock() {
for i := range s.locks {
s.locks[i].Unlock()
}
}
func keyToID(key string, sliceSize int32) uint32 {
h := fnv.New32()
h.Write([]byte(key))
return h.Sum32() % uint32(sliceSize)
}
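// Illustrative usage sketch (hypothetical keys): operations on the same key
// always serialize on the same stripe, while distinct keys usually proceed in
// parallel, subject to hash collisions across the at most 31 stripes.
//
//	locks := NewStripedLockSet(8)
//	locks.Lock("docker.io/library/busybox")
//	// ... critical section for this image ...
//	locks.Unlock("docker.io/library/busybox")
//	// GlobalLock()/GlobalUnlock() take every stripe, e.g. around full listings.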
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"fmt"
"sync"
"sync/atomic"
"k8s.io/klog/v2"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/utils/lru"
)
type lruCache[K comparable, V any] struct {
cache *lru.Cache
maxSize int
// authoritative indicates if we can consider the cached records an
// authoritative source.
// False if the cache evicted any records because it reached capacity, or
// if there were errors during its initialization.
authoritative atomic.Bool
// deletingKeys is used by the eviction function to distinguish between keys
// being explicitly deleted and keys being removed because the cache grew too
// large or was cleared.
//
// This is only modified by lruCache.Delete().
deletingKeys sync.Map
}
func newLRUCache[K comparable, V any](size int) *lruCache[K, V] {
c := lru.New(size)
l := &lruCache[K, V]{
maxSize: size,
cache: c,
deletingKeys: sync.Map{},
}
if err := c.SetEvictionFunc(func(key lru.Key, _ any) {
if _, shouldIgnore := l.deletingKeys.Load(key); shouldIgnore {
return
}
// any eviction makes our cache non-authoritative
l.authoritative.Store(false)
}); err != nil {
panic(fmt.Sprintf("failed to set eviction function to the LRU cache: %v", err))
}
return l
}
func (c *lruCache[K, V]) Get(key K) (*V, bool) {
value, found := c.cache.Get(key)
if !found {
return nil, false
}
if value == nil {
return nil, true
}
return value.(*V), true
}
func (c *lruCache[K, V]) Set(key K, value *V) { c.cache.Add(key, value) }
func (c *lruCache[K, V]) Len() int { return c.cache.Len() }
func (c *lruCache[K, V]) Clear() { c.cache.Clear() }
// Delete removes the key from the cache without affecting the cache's
// authoritative status.
//
// Must be called while holding an external write lock on `key`.
func (c *lruCache[K, V]) Delete(key K) {
c.deletingKeys.Store(key, struct{}{})
defer c.deletingKeys.Delete(key)
c.cache.Remove(key)
}
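// Illustrative sketch of the authoritative-flag semantics (hypothetical values):
//
//	c := newLRUCache[string, string](2)
//	c.authoritative.Store(true)
//	v1, v2, v3 := "a", "b", "c"
//	c.Set("k1", &v1)
//	c.Set("k2", &v2)
//	c.Delete("k1") // explicit delete: the cache stays authoritative
//	c.Set("k3", &v3)
//	c.Set("k1", &v1) // exceeds capacity and evicts the oldest entry, so
//	// c.authoritative.Load() is now false until a successful full re-list.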
// cachedPullRecordsAccessor implements a write-through cache layer on top
// of another PullRecordsAccessor
type cachedPullRecordsAccessor struct {
delegate PullRecordsAccessor
intentsLocks *StripedLockSet
intents *lruCache[string, kubeletconfiginternal.ImagePullIntent]
pulledRecordsLocks *StripedLockSet
pulledRecords *lruCache[string, kubeletconfiginternal.ImagePulledRecord]
}
func NewCachedPullRecordsAccessor(delegate PullRecordsAccessor, intentsCacheSize, pulledRecordsCacheSize, stripedLocksSize int32) *cachedPullRecordsAccessor {
intentsCacheSize = min(intentsCacheSize, 1024)
pulledRecordsCacheSize = min(pulledRecordsCacheSize, 2000)
c := &cachedPullRecordsAccessor{
delegate: delegate,
intentsLocks: NewStripedLockSet(stripedLocksSize),
intents: newLRUCache[string, kubeletconfiginternal.ImagePullIntent](int(intentsCacheSize)),
pulledRecordsLocks: NewStripedLockSet(stripedLocksSize),
pulledRecords: newLRUCache[string, kubeletconfiginternal.ImagePulledRecord](int(pulledRecordsCacheSize)),
}
// warm our caches and set authoritative
_, err := c.ListImagePullIntents()
if err != nil {
klog.InfoS("there was an error initializing the image pull intents cache, the cache will work in a non-authoritative mode until the intents are listed successfully", "error", err)
}
_, err = c.ListImagePulledRecords()
if err != nil {
klog.InfoS("there was an error initializing the image pulled records cache, the cache will work in a non-authoritative mode until the pulled records are listed successfully", "error", err)
}
return c
}
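// Illustrative construction sketch: the layer is write-through, so reads are
// served from memory once the caches are warm, while every mutation reaches
// the delegate first. The sizes below are hypothetical.
//
//	// `delegate` is any PullRecordsAccessor implementation obtained elsewhere.
//	accessor := NewCachedPullRecordsAccessor(delegate, 256, 1000, 10)
//	exists, err := accessor.ImagePullIntentExists("docker.io/library/busybox")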
func (c *cachedPullRecordsAccessor) ListImagePullIntents() ([]*kubeletconfiginternal.ImagePullIntent, error) {
return cacheRefreshingList(
c.intents,
c.intentsLocks,
c.delegate.ListImagePullIntents,
pullIntentToCacheKey,
)
}
func (c *cachedPullRecordsAccessor) ImagePullIntentExists(image string) (bool, error) {
// do the cheap Get() lock-free
if _, exists := c.intents.Get(image); exists {
return true, nil
}
// on a miss, lock on the image
c.intentsLocks.Lock(image)
defer c.intentsLocks.Unlock(image)
// check again if the image exists in the cache under image lock
if _, exists := c.intents.Get(image); exists {
return true, nil
}
// if the cache is authoritative, return false on a miss
if c.intents.authoritative.Load() {
return false, nil
}
// fall through to the expensive lookup
exists, err := c.delegate.ImagePullIntentExists(image)
if err == nil && exists {
c.intents.Set(image, &kubeletconfiginternal.ImagePullIntent{
Image: image,
})
}
return exists, err
}
func (c *cachedPullRecordsAccessor) WriteImagePullIntent(image string) error {
c.intentsLocks.Lock(image)
defer c.intentsLocks.Unlock(image)
if err := c.delegate.WriteImagePullIntent(image); err != nil {
return err
}
c.intents.Set(image, &kubeletconfiginternal.ImagePullIntent{
Image: image,
})
return nil
}
func (c *cachedPullRecordsAccessor) DeleteImagePullIntent(image string) error {
c.intentsLocks.Lock(image)
defer c.intentsLocks.Unlock(image)
if err := c.delegate.DeleteImagePullIntent(image); err != nil {
return err
}
c.intents.Delete(image)
return nil
}
func (c *cachedPullRecordsAccessor) ListImagePulledRecords() ([]*kubeletconfiginternal.ImagePulledRecord, error) {
return cacheRefreshingList(
c.pulledRecords,
c.pulledRecordsLocks,
c.delegate.ListImagePulledRecords,
pulledRecordToCacheKey,
)
}
func (c *cachedPullRecordsAccessor) GetImagePulledRecord(imageRef string) (*kubeletconfiginternal.ImagePulledRecord, bool, error) {
// do the cheap Get() lock-free
pulledRecord, exists := c.pulledRecords.Get(imageRef)
if exists {
return pulledRecord, true, nil
}
// on a miss, lock on the imageRef
c.pulledRecordsLocks.Lock(imageRef)
defer c.pulledRecordsLocks.Unlock(imageRef)
// check again if the imageRef exists in the cache under imageRef lock
pulledRecord, exists = c.pulledRecords.Get(imageRef)
if exists {
return pulledRecord, true, nil
}
// if the cache is authoritative, return false on a miss
if c.pulledRecords.authoritative.Load() {
return nil, false, nil
}
// fall through to the expensive lookup
pulledRecord, exists, err := c.delegate.GetImagePulledRecord(imageRef)
if err == nil && exists {
c.pulledRecords.Set(imageRef, pulledRecord)
}
return pulledRecord, exists, err
}
func (c *cachedPullRecordsAccessor) WriteImagePulledRecord(record *kubeletconfiginternal.ImagePulledRecord) error {
c.pulledRecordsLocks.Lock(record.ImageRef)
defer c.pulledRecordsLocks.Unlock(record.ImageRef)
if err := c.delegate.WriteImagePulledRecord(record); err != nil {
return err
}
c.pulledRecords.Set(record.ImageRef, record)
return nil
}
func (c *cachedPullRecordsAccessor) DeleteImagePulledRecord(imageRef string) error {
c.pulledRecordsLocks.Lock(imageRef)
defer c.pulledRecordsLocks.Unlock(imageRef)
if err := c.delegate.DeleteImagePulledRecord(imageRef); err != nil {
return err
}
c.pulledRecords.Delete(imageRef)
return nil
}
func cacheRefreshingList[K comparable, V any](
cache *lruCache[K, V],
delegateLocks *StripedLockSet,
listRecordsFunc func() ([]*V, error),
recordToKey func(*V) K,
) ([]*V, error) {
wasAuthoritative := cache.authoritative.Load()
if !wasAuthoritative {
// doing a full list gives us an opportunity to become authoritative
// if we get back an error-free result that fits in our cache
delegateLocks.GlobalLock()
defer delegateLocks.GlobalUnlock()
}
results, err := listRecordsFunc()
if wasAuthoritative {
return results, err
}
resultsAreAuthoritative := err == nil && len(results) < cache.maxSize
// populate the cache if that would make our cache authoritative or if the cache is currently empty
if resultsAreAuthoritative || cache.Len() == 0 {
cache.Clear()
// populate up to maxSize results in the cache
for _, record := range results[:min(len(results), cache.maxSize)] {
cache.Set(recordToKey(record), record)
}
cache.authoritative.Store(resultsAreAuthoritative)
}
return results, err
}
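// Illustrative sketch of the upgrade logic above: with a cache of maxSize 100
// that is currently non-authoritative, an error-free delegate list returning
// 80 records repopulates the cache and flips it to authoritative; a list
// returning 150 records can still seed an empty cache with its first 100
// entries, but leaves it non-authoritative because later misses could be
// records that simply did not fit. A typical invocation mirrors the callers above:
//
//	records, err := cacheRefreshingList(c.pulledRecords, c.pulledRecordsLocks,
//		c.delegate.ListImagePulledRecords, pulledRecordToCacheKey)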
func pullIntentToCacheKey(intent *kubeletconfiginternal.ImagePullIntent) string {
return intent.Image
}
func pulledRecordToCacheKey(record *kubeletconfiginternal.ImagePulledRecord) string {
return record.ImageRef
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pullmanager
import (
"context"
"time"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
var _ ImagePullManager = &NoopImagePullManager{}
type NoopImagePullManager struct{}
func (m *NoopImagePullManager) RecordPullIntent(string) error { return nil }
func (m *NoopImagePullManager) RecordImagePulled(context.Context, string, string, *kubeletconfiginternal.ImagePullCredentials) {
}
func (m *NoopImagePullManager) RecordImagePullFailed(context.Context, string) {}
func (m *NoopImagePullManager) MustAttemptImagePull(context.Context, string, string, []kubeletconfiginternal.ImagePullSecret, *kubeletconfiginternal.ImagePullServiceAccount) bool {
return false
}
func (m *NoopImagePullManager) PruneUnknownRecords(context.Context, []string, time.Time) {}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package kubelet
import (
"context"
"fmt"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
func init() {
testing.Init()
}
func FuzzSyncPod(data []byte) int {
syncTypes := []kubetypes.SyncPodType{kubetypes.SyncPodCreate,
kubetypes.SyncPodUpdate,
kubetypes.SyncPodSync,
kubetypes.SyncPodKill}
t := &testing.T{}
f := fuzz.NewConsumer(data)
pod2 := &v1.Pod{}
err := f.GenerateStruct(pod2)
if err != nil {
return 0
}
syncTypeIndex, err := f.GetInt()
if err != nil {
return 0
}
syncType := syncTypes[syncTypeIndex%len(syncTypes)]
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "bar", "foo")
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
pods := []*v1.Pod{pod, pod2}
kl.podManager.SetPods(pods)
_, _ = kl.SyncPod(context.Background(), syncType, pod, nil, &kubecontainer.PodStatus{})
return 1
}
func FuzzStrategicMergePatch(data []byte) int {
if len(data) < 10 {
return 0
}
if (len(data) % 2) != 0 {
return 0
}
original := data[:len(data)/2]
patch := data[(len(data)/2)+1:]
_, _ = strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
return 1
}
func FuzzconvertToAPIContainerStatuses(data []byte) int {
t := &testing.T{}
f := fuzz.NewConsumer(data)
pod := &v1.Pod{}
err := f.GenerateStruct(pod)
if err != nil {
return 0
}
currentStatus := &kubecontainer.PodStatus{} // leave empty at first
err = f.GenerateStruct(currentStatus)
if err != nil {
return 0
}
previousStatus, err := createContainerStatuses(f)
if err != nil {
return 0
}
containers, err := createContainers(f)
if err != nil {
return 0
}
hasInitContainers, err := f.GetBool()
if err != nil {
return 0
}
isInitContainer, err := f.GetBool()
if err != nil {
return 0
}
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
_ = kl.convertToAPIContainerStatuses(pod, currentStatus, previousStatus, containers, hasInitContainers, isInitContainer)
return 1
}
func createContainers(f *fuzz.ConsumeFuzzer) ([]v1.Container, error) {
containers := make([]v1.Container, 0)
noOfContainers, err := f.GetInt()
if err != nil {
return containers, err
}
for i := 0; i < noOfContainers%30; i++ {
c := v1.Container{}
err := f.GenerateStruct(&c)
if err != nil {
return containers, err
}
containers = append(containers, c)
}
return containers, nil
}
func createContainerStatuses(f *fuzz.ConsumeFuzzer) ([]v1.ContainerStatus, error) {
containerStatuses := make([]v1.ContainerStatus, 0)
noOfContainerStatuses, err := f.GetInt()
if err != nil {
return containerStatuses, err
}
for i := 0; i < noOfContainerStatuses%30; i++ {
c := v1.ContainerStatus{}
err := f.GenerateStruct(&c)
if err != nil {
return containerStatuses, err
}
containerStatuses = append(containerStatuses, c)
}
return containerStatuses, nil
}
func FuzzHandlePodCleanups(data []byte) int {
t := &testing.T{}
f := fuzz.NewConsumer(data)
pod := &kubecontainer.Pod{}
err := f.GenerateStruct(pod)
if err != nil {
return 0
}
podID := pod.ID
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakeRuntime := testKubelet.fakeRuntime
fakeContainerManager := testKubelet.fakeContainerManager
fakeContainerManager.PodContainerManager.AddPodFromCgroups(pod) // add pod to mock cgroup
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: pod},
}
kubelet := testKubelet.kubelet
kubelet.cgroupsPerQOS = true
kubelet.HandlePodCleanups(context.Background())
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{podID}; !reflect.DeepEqual(actual, expected) {
panic(fmt.Sprintf("expected %v to be deleted, got %v\n", expected, actual))
}
fakeRuntime.AssertKilledPods([]string(nil))
return 1
}
func FuzzMakeEnvironmentVariables(data []byte) int {
t := &testing.T{}
f := fuzz.NewConsumer(data)
testPod := &v1.Pod{}
err := f.GenerateStruct(testPod)
if err != nil {
return 0
}
container := &v1.Container{}
err = f.GenerateStruct(container)
if err != nil {
return 0
}
podIP, err := f.GetString()
if err != nil {
return 0
}
podIPs := make([]string, 0)
err = f.CreateSlice(&podIPs)
if err != nil {
return 0
}
kl := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer kl.Cleanup()
_, _ = kl.kubelet.makeEnvironmentVariables(testPod, container, podIP, podIPs, kubecontainer.VolumeMap{})
return 1
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"crypto/tls"
"errors"
"fmt"
"math"
"net"
"net/http"
"os"
"path/filepath"
sysruntime "runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
inuserns "github.com/moby/sys/userns"
"github.com/opencontainers/selinux/go-selinux"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
"k8s.io/client-go/informers"
"k8s.io/mount-utils"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformersv1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/flowcontrol"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/component-base/zpages/flagz"
"k8s.io/component-helpers/apimachinery/lease"
resourcehelper "k8s.io/component-helpers/resource"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
remote "k8s.io/cri-client/pkg"
"k8s.io/klog/v2"
pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown"
oomwatcher "k8s.io/kubernetes/pkg/kubelet/oom"
"k8s.io/kubernetes/pkg/kubelet/pleg"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
plugincache "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/podcertificate"
"k8s.io/kubernetes/pkg/kubelet/preemption"
"k8s.io/kubernetes/pkg/kubelet/prober"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/userns"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/kubelet/volumemanager"
"k8s.io/kubernetes/pkg/kubelet/watchdog"
httpprobe "k8s.io/kubernetes/pkg/probe/http"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/utils/clock"
)
const (
// Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 30 * time.Second
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5
// nodeReadyGracePeriod is the period allowed before the fast status update is
// terminated and a not-ready container runtime is logged without a verbosity guard.
nodeReadyGracePeriod = 120 * time.Second
// DefaultContainerLogsDir is the location of container logs.
DefaultContainerLogsDir = "/var/log/containers"
// MaxCrashLoopBackOff is the max backoff period for container restarts, exported for the e2e test
MaxCrashLoopBackOff = v1beta1.MaxContainerBackOff
// reducedMaxCrashLoopBackOff is the default max backoff period for container restarts when the alpha feature
// gate ReduceDefaultCrashLoopBackOffDecay is enabled
reducedMaxCrashLoopBackOff = 60 * time.Second
// Initial period for the exponential backoff for container restarts.
initialCrashLoopBackOff = time.Second * 10
// reducedInitialCrashLoopBackOff is the default initial backoff period for container restarts when the alpha feature
// gate ReduceDefaultCrashLoopBackOffDecay is enabled
reducedInitialCrashLoopBackOff = 1 * time.Second
// MaxImageBackOff is the max backoff period for image pulls, exported for the e2e test
MaxImageBackOff = 300 * time.Second
// Period for performing global cleanup tasks.
housekeepingPeriod = time.Second * 2
// Duration after which a housekeeping pass is reported as violating the
// invariant that housekeeping should be fast, so as not to block pod config
// (while housekeeping is running no new pods are started or deleted).
housekeepingWarningDuration = time.Second * 1
// Period after which the runtime cache expires - set to slightly longer than
// the expected length between housekeeping periods, which explicitly refreshes
// the cache.
runtimeCacheRefreshPeriod = housekeepingPeriod + housekeepingWarningDuration
// Period for performing eviction monitoring.
// ensure this is kept in sync with internal cadvisor housekeeping.
evictionMonitoringPeriod = time.Second * 10
// The path in containers' filesystems where the hosts file is mounted.
linuxEtcHostsPath = "/etc/hosts"
windowsEtcHostsPath = "C:\\Windows\\System32\\drivers\\etc\\hosts"
// Capacity of the channel for receiving pod lifecycle events. This number
// is a bit arbitrary and may be adjusted in the future.
plegChannelCapacity = 1000
// Generic PLEG relies on relisting for discovering container events.
// A longer period means that kubelet will take longer to detect container
// changes and to update pod status. On the other hand, a shorter period
// will cause more frequent relisting (e.g., container runtime operations),
// leading to higher cpu usage.
// Note that even though we set the period to 1s, the relisting itself can
// take more than 1s to finish if the container runtime responds slowly
// and/or when there are many container changes in one cycle.
genericPlegRelistPeriod = time.Second * 1
genericPlegRelistThreshold = time.Minute * 3
// Generic PLEG relist period and threshold when used with Evented PLEG.
eventedPlegRelistPeriod = time.Second * 300
eventedPlegRelistThreshold = time.Minute * 10
eventedPlegMaxStreamRetries = 5
// backOffPeriod is the period to back off when pod syncing results in an
// error.
backOffPeriod = time.Second * 10
// Initial period for the exponential backoff for image pulls.
imageBackOffPeriod = time.Second * 10
// ContainerGCPeriod is the period for performing container garbage collection.
ContainerGCPeriod = time.Minute
// ImageGCPeriod is the period for performing image garbage collection.
ImageGCPeriod = 5 * time.Minute
// Minimum number of dead containers to keep in a pod
minDeadContainerInPod = 1
// nodeLeaseRenewIntervalFraction is the fraction of lease duration to renew the lease
nodeLeaseRenewIntervalFraction = 0.25
// instrumentationScope is the name of OpenTelemetry instrumentation scope
instrumentationScope = "k8s.io/kubernetes/pkg/kubelet"
)
var (
// ContainerLogsDir can be overwritten for testing usage
ContainerLogsDir = DefaultContainerLogsDir
etcHostsPath = getContainerEtcHostsPath()
admissionRejectionReasons = sets.New[string](
lifecycle.AppArmorNotAdmittedReason,
lifecycle.PodOSSelectorNodeLabelDoesNotMatch,
lifecycle.PodOSNotSupported,
lifecycle.InvalidNodeInfo,
lifecycle.InitContainerRestartPolicyForbidden,
lifecycle.SupplementalGroupsPolicyNotSupported,
lifecycle.UnexpectedAdmissionError,
lifecycle.UnknownReason,
lifecycle.UnexpectedPredicateFailureType,
lifecycle.OutOfCPU,
lifecycle.OutOfMemory,
lifecycle.OutOfEphemeralStorage,
lifecycle.OutOfPods,
lifecycle.PodLevelResourcesNotAdmittedReason,
tainttoleration.ErrReasonNotMatch,
eviction.Reason,
sysctl.ForbiddenReason,
topologymanager.ErrorTopologyAffinity,
nodeshutdown.NodeShutdownNotAdmittedReason,
volumemanager.VolumeAttachmentLimitExceededReason,
)
// This is exposed for unit tests.
goos = sysruntime.GOOS
)
func getContainerEtcHostsPath() string {
if goos == "windows" {
return windowsEtcHostsPath
}
return linuxEtcHostsPath
}
// SyncHandler is an interface implemented by Kubelet, for testability
type SyncHandler interface {
HandlePodAdditions(pods []*v1.Pod)
HandlePodUpdates(pods []*v1.Pod)
HandlePodRemoves(pods []*v1.Pod)
HandlePodReconcile(pods []*v1.Pod)
HandlePodSyncs(pods []*v1.Pod)
HandlePodCleanups(ctx context.Context) error
}
// Option is a functional option type for Kubelet
type Option func(*Kubelet)
// Bootstrap is a bootstrapping interface for kubelet, targets the initialization protocol
type Bootstrap interface {
GetConfiguration() kubeletconfiginternal.KubeletConfiguration
BirthCry()
StartGarbageCollection()
ListenAndServe(kubeCfg *kubeletconfiginternal.KubeletConfiguration, tlsOptions *server.TLSOptions, auth server.AuthInterface, tp trace.TracerProvider)
ListenAndServeReadOnly(address net.IP, port uint, tp trace.TracerProvider)
ListenAndServePodResources(ctx context.Context)
Run(<-chan kubetypes.PodUpdate)
}
// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
// at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
// these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
type Dependencies struct {
Options []Option
// Injected Dependencies
Flagz flagz.Reader
Auth server.AuthInterface
CAdvisorInterface cadvisor.Interface
ContainerManager cm.ContainerManager
EventClient v1core.EventsGetter
HeartbeatClient clientset.Interface
OnHeartbeatFailure func()
KubeClient clientset.Interface
Mounter mount.Interface
HostUtil hostutil.HostUtils
OOMAdjuster *oom.OOMAdjuster
OSInterface kubecontainer.OSInterface
PodConfig *config.PodConfig
ProbeManager prober.Manager
Recorder record.EventRecorder
Subpather subpath.Interface
TracerProvider trace.TracerProvider
VolumePlugins []volume.VolumePlugin
DynamicPluginProber volume.DynamicPluginProber
TLSOptions *server.TLSOptions
RemoteRuntimeService internalapi.RuntimeService
RemoteImageService internalapi.ImageManagerService
PodStartupLatencyTracker util.PodStartupLatencyTracker
NodeStartupLatencyTracker util.NodeStartupLatencyTracker
HealthChecker watchdog.HealthChecker
// TODO: remove this after cadvisor.UsingLegacyCadvisorStats is dropped.
useLegacyCadvisorStats bool
}
// newCrashLoopBackOff returns the maximum and initial backoff periods to be
// used by the kubelet for container restarts, depending on the alpha feature
// gates and the kubelet configuration set
func newCrashLoopBackOff(kubeCfg *kubeletconfiginternal.KubeletConfiguration) (time.Duration, time.Duration) {
boMax := MaxCrashLoopBackOff
boInitial := initialCrashLoopBackOff
if utilfeature.DefaultFeatureGate.Enabled(features.ReduceDefaultCrashLoopBackOffDecay) {
boMax = reducedMaxCrashLoopBackOff
boInitial = reducedInitialCrashLoopBackOff
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletCrashLoopBackOffMax) {
// operator-invoked configuration always has precedence if valid
boMax = kubeCfg.CrashLoopBackOff.MaxContainerRestartPeriod.Duration
if boMax < boInitial {
boInitial = boMax
}
}
return boMax, boInitial
}
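// Illustrative sketch (hypothetical configuration): with only
// ReduceDefaultCrashLoopBackOffDecay enabled this returns (60s, 1s); if
// KubeletCrashLoopBackOffMax is also enabled and the operator sets
// MaxContainerRestartPeriod to 30s, it returns (30s, 1s), while a configured
// maximum below 1s would clamp the initial period down to match:
//
//	boMax, boInitial := newCrashLoopBackOff(kubeCfg)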
// makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header)
if len(kubeCfg.StaticPodURLHeader) > 0 {
for k, v := range kubeCfg.StaticPodURLHeader {
for i := range v {
manifestURLHeader.Add(k, v[i])
}
}
}
// source of all configuration
cfg := config.NewPodConfig(config.PodConfigNotificationIncremental, kubeDeps.Recorder, kubeDeps.PodStartupLatencyTracker)
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
// define file config source
if kubeCfg.StaticPodPath != "" {
klog.InfoS("Adding static pod path", "path", kubeCfg.StaticPodPath)
config.NewSourceFile(logger, kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(ctx, kubetypes.FileSource))
}
// define url config source
if kubeCfg.StaticPodURL != "" {
klog.InfoS("Adding pod URL with HTTP header", "URL", kubeCfg.StaticPodURL, "header", manifestURLHeader)
config.NewSourceURL(logger, kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(ctx, kubetypes.HTTPSource))
}
if kubeDeps.KubeClient != nil {
klog.InfoS("Adding apiserver pod source")
config.NewSourceApiserver(logger, kubeDeps.KubeClient, nodeName, nodeHasSynced, cfg.Channel(ctx, kubetypes.ApiserverSource))
}
return cfg, nil
}
// PreInitRuntimeService will init runtime service before RunKubelet.
func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies) error {
remoteImageEndpoint := kubeCfg.ImageServiceEndpoint
if remoteImageEndpoint == "" && kubeCfg.ContainerRuntimeEndpoint != "" {
remoteImageEndpoint = kubeCfg.ContainerRuntimeEndpoint
}
var err error
logger := klog.Background()
if kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(kubeCfg.ContainerRuntimeEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.TracerProvider, &logger); err != nil {
return err
}
if kubeDeps.RemoteImageService, err = remote.NewRemoteImageService(remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.TracerProvider, &logger); err != nil {
return err
}
kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(kubeCfg.ContainerRuntimeEndpoint)
return nil
}
// NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
// No initialization of Kubelet and its modules should happen here.
func NewMainKubelet(ctx context.Context,
kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *Dependencies,
crOptions *config.ContainerRuntimeOptions,
hostname string,
nodeName types.NodeName,
nodeIPs []net.IP,
providerID string,
cloudProvider string,
certDirectory string,
rootDirectory string,
podLogsDirectory string,
imageCredentialProviderConfigPath string,
imageCredentialProviderBinDir string,
registerNode bool,
registerWithTaints []v1.Taint,
allowedUnsafeSysctls []string,
experimentalMounterPath string,
kernelMemcgNotification bool,
experimentalNodeAllocatableIgnoreEvictionThreshold bool,
minimumGCAge metav1.Duration,
maxPerPodContainerCount int32,
maxContainerCount int32,
nodeLabels map[string]string,
nodeStatusMaxImages int32,
seccompDefault bool,
) (*Kubelet, error) {
logger := klog.FromContext(ctx)
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
if podLogsDirectory == "" {
return nil, errors.New("pod logs root directory is empty")
}
if kubeCfg.SyncFrequency.Duration <= 0 {
return nil, fmt.Errorf("invalid sync frequency %d", kubeCfg.SyncFrequency.Duration)
}
if !cloudprovider.IsExternal(cloudProvider) && len(cloudProvider) != 0 {
cloudprovider.DisableWarningForProvider(cloudProvider)
return nil, cloudprovider.ErrorForDisabledProvider(cloudProvider)
}
var nodeHasSynced cache.InformerSynced
var nodeInformer coreinformersv1.NodeInformer
var nodeLister corelisters.NodeLister
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
// If not nil, we are running as part of a cluster and should sync w/API
if kubeDeps.KubeClient != nil {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
}))
nodeInformer = kubeInformers.Core().V1().Nodes()
nodeLister = nodeInformer.Lister()
nodeHasSynced = func() bool {
return kubeInformers.Core().V1().Nodes().Informer().HasSynced()
}
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Attempting to sync node with API server")
} else {
// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.InfoS("Kubelet is running in standalone mode, will skip API server sync")
}
if kubeDeps.PodConfig == nil {
var err error
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, nodeHasSynced)
if err != nil {
return nil, err
}
}
containerGCPolicy := kubecontainer.GCPolicy{
MinAge: minimumGCAge.Duration,
MaxPerPodContainer: int(maxPerPodContainerCount),
MaxContainers: int(maxContainerCount),
}
daemonEndpoints := &v1.NodeDaemonEndpoints{
KubeletEndpoint: v1.DaemonEndpoint{Port: kubeCfg.Port},
}
imageGCPolicy := images.ImageGCPolicy{
MinAge: kubeCfg.ImageMinimumGCAge.Duration,
HighThresholdPercent: int(kubeCfg.ImageGCHighThresholdPercent),
LowThresholdPercent: int(kubeCfg.ImageGCLowThresholdPercent),
}
if utilfeature.DefaultFeatureGate.Enabled(features.ImageMaximumGCAge) {
imageGCPolicy.MaxAge = kubeCfg.ImageMaximumGCAge.Duration
} else if kubeCfg.ImageMaximumGCAge.Duration != 0 {
klog.InfoS("ImageMaximumGCAge flag enabled, but corresponding feature gate is not enabled. Ignoring flag.")
}
enforceNodeAllocatable := kubeCfg.EnforceNodeAllocatable
if experimentalNodeAllocatableIgnoreEvictionThreshold {
// Do not provide kubeCfg.EnforceNodeAllocatable to eviction threshold parsing if we are not enforcing Evictions
enforceNodeAllocatable = []string{}
}
thresholds, err := eviction.ParseThresholdConfig(enforceNodeAllocatable, kubeCfg.EvictionHard, kubeCfg.EvictionSoft, kubeCfg.EvictionSoftGracePeriod, kubeCfg.EvictionMinimumReclaim)
if err != nil {
return nil, err
}
evictionConfig := eviction.Config{
PressureTransitionPeriod: kubeCfg.EvictionPressureTransitionPeriod.Duration,
MaxPodGracePeriodSeconds: int64(kubeCfg.EvictionMaxPodGracePeriod),
Thresholds: thresholds,
KernelMemcgNotification: kernelMemcgNotification,
PodCgroupRoot: kubeDeps.ContainerManager.GetPodCgroupRoot(),
}
var serviceLister corelisters.ServiceLister
var serviceHasSynced cache.InformerSynced
if kubeDeps.KubeClient != nil {
// don't watch headless services, they are not needed since this informer is only used to create the environment variables for pods.
// See https://issues.k8s.io/122394
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermNotEqualSelector("spec.clusterIP", v1.ClusterIPNone).String()
}))
serviceLister = kubeInformers.Core().V1().Services().Lister()
serviceHasSynced = kubeInformers.Core().V1().Services().Informer().HasSynced
kubeInformers.Start(wait.NeverStop)
} else {
serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
serviceLister = corelisters.NewServiceLister(serviceIndexer)
serviceHasSynced = func() bool { return true }
}
// construct a node reference used for events
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string(nodeName),
UID: types.UID(nodeName),
Namespace: "",
}
oomWatcher, err := oomwatcher.NewWatcher(kubeDeps.Recorder)
if err != nil {
if inuserns.RunningInUserNS() {
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletInUserNamespace) {
// oomwatcher.NewWatcher returns "open /dev/kmsg: operation not permitted" error,
// when running in a user namespace with sysctl value `kernel.dmesg_restrict=1`.
klog.V(2).InfoS("Failed to create an oomWatcher (running in UserNS, ignoring)", "err", err)
oomWatcher = nil
} else {
klog.ErrorS(err, "Failed to create an oomWatcher (running in UserNS, Hint: enable KubeletInUserNamespace feature flag to ignore the error)")
return nil, err
}
} else {
return nil, err
}
}
clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS))
for _, ipEntry := range kubeCfg.ClusterDNS {
ip := netutils.ParseIPSloppy(ipEntry)
if ip == nil {
klog.InfoS("Invalid clusterDNS IP", "IP", ipEntry)
} else {
clusterDNS = append(clusterDNS, ip)
}
}
// A TLS transport is needed to make HTTPS-based container lifecycle requests,
// but we do not have the information necessary to do TLS verification.
//
// This client must not be modified to include credentials, because it is
// critical that credentials not leak from the client to arbitrary hosts.
insecureContainerLifecycleHTTPClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
CheckRedirect: httpprobe.RedirectChecker(false),
}
tracer := kubeDeps.TracerProvider.Tracer(instrumentationScope)
klet := &Kubelet{
hostname: hostname,
nodeName: nodeName,
kubeClient: kubeDeps.KubeClient,
heartbeatClient: kubeDeps.HeartbeatClient,
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
rootDirectory: filepath.Clean(rootDirectory),
podLogsDirectory: podLogsDirectory,
resyncInterval: kubeCfg.SyncFrequency.Duration,
sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
registerNode: registerNode,
registerWithTaints: registerWithTaints,
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, nodeIPs, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
serviceLister: serviceLister,
serviceHasSynced: serviceHasSynced,
nodeLister: nodeLister,
nodeHasSynced: nodeHasSynced,
recorder: kubeDeps.Recorder,
cadvisor: kubeDeps.CAdvisorInterface,
externalCloudProvider: cloudprovider.IsExternal(cloudProvider),
providerID: providerID,
nodeRef: nodeRef,
nodeLabels: nodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
hostutil: kubeDeps.HostUtil,
subpather: kubeDeps.Subpather,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
nodeIPs: nodeIPs,
nodeIPValidator: validateNodeIP,
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
nodeStatusMaxImages: nodeStatusMaxImages,
tracer: tracer,
nodeStartupLatencyTracker: kubeDeps.NodeStartupLatencyTracker,
healthChecker: kubeDeps.HealthChecker,
flagz: kubeDeps.Flagz,
}
var secretManager secret.Manager
var configMapManager configmap.Manager
if klet.kubeClient != nil {
switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
case kubeletconfiginternal.WatchChangeDetectionStrategy:
secretManager = secret.NewWatchingSecretManager(klet.kubeClient, klet.resyncInterval)
configMapManager = configmap.NewWatchingConfigMapManager(klet.kubeClient, klet.resyncInterval)
case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
secretManager = secret.NewCachingSecretManager(
klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
configMapManager = configmap.NewCachingConfigMapManager(
klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
case kubeletconfiginternal.GetChangeDetectionStrategy:
secretManager = secret.NewSimpleSecretManager(klet.kubeClient)
configMapManager = configmap.NewSimpleConfigMapManager(klet.kubeClient)
default:
return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
}
klet.secretManager = secretManager
klet.configMapManager = configMapManager
}
machineInfo, err := klet.cadvisor.MachineInfo()
if err != nil {
return nil, err
}
// Prevent the collector from collecting it as a timestamped metric
// See PR #95210 and #97006 for more details.
machineInfo.Timestamp = time.Time{}
klet.setCachedMachineInfo(machineInfo)
imageBackOff := flowcontrol.NewBackOff(imageBackOffPeriod, MaxImageBackOff)
klet.livenessManager = proberesults.NewManager()
klet.readinessManager = proberesults.NewManager()
klet.startupManager = proberesults.NewManager()
klet.podCache = kubecontainer.NewCache()
klet.mirrorPodClient = kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister)
klet.podManager = kubepod.NewBasicPodManager()
klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet, kubeDeps.PodStartupLatencyTracker)
klet.allocationManager = allocation.NewManager(
klet.getRootDir(),
klet.containerManager.GetNodeConfig(),
klet.containerManager.GetNodeAllocatableAbsolute(),
klet.statusManager,
func(pod *v1.Pod) { klet.HandlePodSyncs([]*v1.Pod{pod}) },
klet.GetActivePods,
klet.podManager.GetPodByUID,
klet.sourcesReady,
kubeDeps.Recorder,
)
klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration, kubeDeps.Recorder)
klet.runtimeService = kubeDeps.RemoteRuntimeService
if kubeDeps.KubeClient != nil {
klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
}
// setup containerLogManager for CRI container runtime
containerLogManager, err := logs.NewContainerLogManager(
klet.runtimeService,
kubeDeps.OSInterface,
kubeCfg.ContainerLogMaxSize,
int(kubeCfg.ContainerLogMaxFiles),
int(kubeCfg.ContainerLogMaxWorkers),
kubeCfg.ContainerLogMonitorInterval,
)
if err != nil {
return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
}
klet.containerLogManager = containerLogManager
klet.reasonCache = NewReasonCache()
klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
klet.podWorkers = newPodWorkers(
klet,
kubeDeps.Recorder,
klet.workQueue,
klet.resyncInterval,
backOffPeriod,
klet.podCache,
klet.allocationManager,
)
var singleProcessOOMKill *bool
if sysruntime.GOOS == "linux" {
if !util.IsCgroup2UnifiedMode() {
// This is a default behavior for cgroups v1.
singleProcessOOMKill = ptr.To(true)
} else {
if kubeCfg.SingleProcessOOMKill == nil {
singleProcessOOMKill = ptr.To(false)
} else {
singleProcessOOMKill = kubeCfg.SingleProcessOOMKill
}
}
}
tokenManager := token.NewManager(kubeDeps.KubeClient)
getServiceAccount := func(namespace, name string) (*v1.ServiceAccount, error) {
return nil, fmt.Errorf("get service account is not implemented")
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) {
getServiceAccount = func(namespace, name string) (*v1.ServiceAccount, error) {
if klet.kubeClient == nil {
return nil, errors.New("cannot get ServiceAccounts when kubelet is in standalone mode")
}
return klet.kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
}
}
runtime, postImageGCHooks, err := kuberuntime.NewKubeGenericRuntimeManager(
ctx,
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
klet.readinessManager,
klet.startupManager,
rootDirectory,
podLogsDirectory,
machineInfo,
klet.podWorkers,
kubeCfg.MaxPods,
kubeDeps.OSInterface,
klet,
insecureContainerLifecycleHTTPClient,
imageBackOff,
kubeCfg.SerializeImagePulls,
kubeCfg.MaxParallelImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.ImagePullCredentialsVerificationPolicy,
kubeCfg.PreloadedImagesVerificationAllowlist,
imageCredentialProviderConfigPath,
imageCredentialProviderBinDir,
singleProcessOOMKill,
kubeCfg.CPUCFSQuota,
kubeCfg.CPUCFSQuotaPeriod,
kubeDeps.RemoteRuntimeService,
kubeDeps.RemoteImageService,
kubeDeps.ContainerManager,
klet.containerLogManager,
klet.runtimeClassManager,
klet.allocationManager,
seccompDefault,
kubeCfg.MemorySwap.SwapBehavior,
kubeDeps.ContainerManager.GetNodeAllocatableAbsolute,
*kubeCfg.MemoryThrottlingFactor,
kubeDeps.PodStartupLatencyTracker,
kubeDeps.TracerProvider,
tokenManager,
getServiceAccount,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.streamingRuntime = runtime
klet.runner = runtime
klet.allocationManager.SetContainerRuntime(runtime)
runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime, runtimeCacheRefreshPeriod)
if err != nil {
return nil, err
}
klet.runtimeCache = runtimeCache
// common provider to get host file system usage associated with a pod managed by kubelet
hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) string {
return getEtcHostsPath(klet.getPodDir(podUID))
}, podLogsDirectory)
cadvisorStatsProvider := stats.NewCadvisorStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.containerRuntime,
klet.statusManager,
hostStatsProvider,
kubeDeps.ContainerManager,
)
if kubeDeps.useLegacyCadvisorStats {
klet.StatsProvider = cadvisorStatsProvider
} else {
klet.StatsProvider = stats.NewCRIStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
kubeDeps.RemoteRuntimeService,
kubeDeps.RemoteImageService,
hostStatsProvider,
utilfeature.DefaultFeatureGate.Enabled(features.PodAndContainerStatsFromCRI),
cadvisorStatsProvider,
)
}
eventChannel := make(chan *pleg.PodLifecycleEvent, plegChannelCapacity)
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
// adjust Generic PLEG relisting period and threshold to higher value when Evented PLEG is turned on
genericRelistDuration := &pleg.RelistDuration{
RelistPeriod: eventedPlegRelistPeriod,
RelistThreshold: eventedPlegRelistThreshold,
}
klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
// In case Evented PLEG has to fall back on Generic PLEG due to an error,
// Evented PLEG should be able to reset the Generic PLEG relisting duration
// to the default value.
eventedRelistDuration := &pleg.RelistDuration{
RelistPeriod: genericPlegRelistPeriod,
RelistThreshold: genericPlegRelistThreshold,
}
klet.eventedPleg, err = pleg.NewEventedPLEG(logger, klet.containerRuntime, klet.runtimeService, eventChannel,
klet.podCache, klet.pleg, eventedPlegMaxStreamRetries, eventedRelistDuration, clock.RealClock{})
if err != nil {
return nil, err
}
} else {
genericRelistDuration := &pleg.RelistDuration{
RelistPeriod: genericPlegRelistPeriod,
RelistThreshold: genericPlegRelistThreshold,
}
klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
}
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
klet.runtimeState.addHealthCheck("EventedPLEG", klet.eventedPleg.Healthy)
}
if _, err := klet.updatePodCIDR(ctx, kubeCfg.PodCIDR); err != nil {
klog.ErrorS(err, "Pod CIDR update failed")
}
// setup containerGC
containerGC, err := kubecontainer.NewContainerGC(klet.containerRuntime, containerGCPolicy, klet.sourcesReady)
if err != nil {
return nil, err
}
klet.containerGC = containerGC
klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, max(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
// setup imageManager
imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, postImageGCHooks, kubeDeps.Recorder, nodeRef, imageGCPolicy, kubeDeps.TracerProvider)
if err != nil {
return nil, fmt.Errorf("failed to initialize image manager: %v", err)
}
klet.imageManager = imageManager
if kubeDeps.TLSOptions != nil {
if kubeCfg.ServerTLSBootstrap && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, klet.getLastObservedNodeAddresses, certDirectory)
if err != nil {
return nil, fmt.Errorf("failed to initialize certificate manager: %w", err)
}
} else if kubeDeps.TLSOptions.CertFile != "" && kubeDeps.TLSOptions.KeyFile != "" && utilfeature.DefaultFeatureGate.Enabled(features.ReloadKubeletServerCertificateFile) {
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateDynamicFileManager(kubeDeps.TLSOptions.CertFile, kubeDeps.TLSOptions.KeyFile)
if err != nil {
return nil, fmt.Errorf("failed to initialize file based certificate manager: %w", err)
}
}
if klet.serverCertificateManager != nil {
kubeDeps.TLSOptions.Config.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
cert := klet.serverCertificateManager.Current()
if cert == nil {
return nil, fmt.Errorf("no serving certificate available for the kubelet")
}
return cert, nil
}
}
}
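// Prefer an externally supplied probe manager when one is provided via the
// dependencies (e.g. by tests); otherwise construct the default prober.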
if kubeDeps.ProbeManager != nil {
klet.probeManager = kubeDeps.ProbeManager
} else {
klet.probeManager = prober.NewManager(
klet.statusManager,
klet.livenessManager,
klet.readinessManager,
klet.startupManager,
klet.runner,
kubeDeps.Recorder)
}
var clusterTrustBundleManager clustertrustbundle.Manager = &clustertrustbundle.NoopManager{}
if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
clusterTrustBundleManager = clustertrustbundle.NewLazyInformerManager(ctx, kubeDeps.KubeClient, 2*int(kubeCfg.MaxPods))
klog.InfoS("ClusterTrustBundle informer will be started eventually once a trust bundle is requested")
} else {
klog.InfoS("Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled")
}
if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(
kubeDeps.KubeClient,
0,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", string(nodeName)).String()
}),
)
podCertificateManager := podcertificate.NewIssuingManager(
kubeDeps.KubeClient,
klet.podManager,
kubeInformers.Certificates().V1alpha1().PodCertificateRequests(),
nodeInformer,
nodeName,
clock.RealClock{},
)
klet.podCertificateManager = podCertificateManager
kubeInformers.Start(ctx.Done())
go podCertificateManager.Run(ctx)
} else {
klet.podCertificateManager = &podcertificate.NoOpManager{}
klog.InfoS("Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled")
}
// NewInitializedVolumePluginMgr initializes some storageErrors on the Kubelet runtimeState (in csi_plugin.go init),
// which affect node ready status. This function must be called before the Kubelet is initialized so that the Node
// ReadyState accurately reflects the storage state.
klet.volumePluginMgr, err = NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, clusterTrustBundleManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
if err != nil {
return nil, err
}
klet.pluginManager = pluginmanager.NewPluginManager(
klet.getPluginsRegistrationDir(), /* sockDir */
kubeDeps.Recorder,
)
// If the experimentalMounterPathFlag is set, we do not want to
// check node capabilities since the mount path is not the default
if len(experimentalMounterPath) != 0 {
// Replace the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS
// so that service names can be resolved
klet.dnsConfigurer.SetupDNSinContainerizedMounter(logger, experimentalMounterPath)
}
// setup volumeManager
klet.volumeManager = volumemanager.NewVolumeManager(
kubeCfg.EnableControllerAttachDetach,
nodeName,
klet.podManager,
klet.podWorkers,
klet.kubeClient,
klet.volumePluginMgr,
kubeDeps.Mounter,
kubeDeps.HostUtil,
klet.getPodsDir(),
kubeDeps.Recorder,
volumepathhandler.NewBlockVolumePathHandler())
boMax, base := newCrashLoopBackOff(kubeCfg)
klet.crashLoopBackOff = flowcontrol.NewBackOff(base, boMax)
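// Treat a backoff entry as expired once 600 seconds have elapsed since its last
// update; note that this fixed window is used instead of the maxDuration argument.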
klet.crashLoopBackOff.HasExpiredFunc = func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
return eventTime.Sub(lastUpdate) > 600*time.Second
}
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig,
killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, kubeCfg.LocalStorageCapacityIsolation)
klet.evictionManager = evictionManager
handlers := []lifecycle.PodAdmitHandler{}
handlers = append(handlers, evictionAdmitHandler)
// Safe, allowed sysctls can always be used as unsafe sysctls in the spec.
// Hence, we concatenate those two lists.
safeAndUnsafeSysctls := append(sysctl.SafeSysctlAllowlist(ctx), allowedUnsafeSysctls...)
sysctlsAllowlist, err := sysctl.NewAllowlist(safeAndUnsafeSysctls)
if err != nil {
return nil, err
}
handlers = append(handlers, sysctlsAllowlist)
// enable active deadline handler
activeDeadlineHandler, err := newActiveDeadlineHandler(klet.statusManager, kubeDeps.Recorder, klet.clock)
if err != nil {
return nil, err
}
klet.AddPodSyncLoopHandler(activeDeadlineHandler)
klet.AddPodSyncHandler(activeDeadlineHandler)
handlers = append(handlers, klet.containerManager.GetAllocateResourcesPodAdmitHandler())
criticalPodAdmissionHandler := preemption.NewCriticalPodAdmissionHandler(klet.getAllocatedPods, killPodNow(klet.podWorkers, kubeDeps.Recorder), kubeDeps.Recorder)
handlers = append(handlers, lifecycle.NewPredicateAdmitHandler(klet.getNodeAnyWay, criticalPodAdmissionHandler, klet.containerManager.UpdatePluginResources))
// apply functional Options
for _, opt := range kubeDeps.Options {
opt(klet)
}
if goos == "linux" {
// AppArmor is a Linux kernel security module; it is not supported on other operating systems.
klet.appArmorValidator = apparmor.NewValidator()
handlers = append(handlers, lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
}
handlers = append(handlers, lifecycle.NewPodFeaturesAdmitHandler())
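// The node lease is renewed at a fraction of its duration so it is refreshed well
// before it expires; for example, a 40s lease with a 0.25 fraction is renewed every 10s.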
leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
renewInterval := time.Duration(float64(leaseDuration) * nodeLeaseRenewIntervalFraction)
klet.nodeLeaseController = lease.NewController(
klet.clock,
klet.heartbeatClient,
string(klet.nodeName),
kubeCfg.NodeLeaseDurationSeconds,
klet.onRepeatedHeartbeatFailure,
renewInterval,
string(klet.nodeName),
v1.NamespaceNodeLease,
util.SetNodeOwnerFunc(ctx, klet.heartbeatClient, string(klet.nodeName)))
// setup node shutdown manager
shutdownManager := nodeshutdown.NewManager(&nodeshutdown.Config{
Logger: logger,
VolumeManager: klet.volumeManager,
Recorder: kubeDeps.Recorder,
NodeRef: nodeRef,
GetPodsFunc: klet.GetActivePods,
KillPodFunc: killPodNow(klet.podWorkers, kubeDeps.Recorder),
SyncNodeStatusFunc: klet.syncNodeStatus,
ShutdownGracePeriodRequested: kubeCfg.ShutdownGracePeriod.Duration,
ShutdownGracePeriodCriticalPods: kubeCfg.ShutdownGracePeriodCriticalPods.Duration,
ShutdownGracePeriodByPodPriority: kubeCfg.ShutdownGracePeriodByPodPriority,
StateDirectory: rootDirectory,
})
klet.shutdownManager = shutdownManager
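// Set up the user namespace manager, which allocates and tracks the user
// namespace ID mappings assigned to pods on this node.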
klet.usernsManager, err = userns.MakeUserNsManager(logger, klet)
if err != nil {
return nil, fmt.Errorf("create user namespace manager: %w", err)
}
handlers = append(handlers, shutdownManager)
klet.allocationManager.AddPodAdmitHandlers(handlers)
// Finally, put the most recent version of the config on the Kubelet, so
// people can see how it was configured.
klet.kubeletConfiguration = *kubeCfg
// Generating the status funcs should be the last thing we do,
// since this relies on the rest of the Kubelet having been constructed.
klet.setNodeStatusFuncs = klet.defaultNodeStatusFuncs()
return klet, nil
}
type serviceLister interface {
List(labels.Selector) ([]*v1.Service, error)
}
// Kubelet is the main kubelet implementation.
type Kubelet struct {
kubeletConfiguration kubeletconfiginternal.KubeletConfiguration
// hostname is the hostname the kubelet detected or was given via flag/config
hostname string
nodeName types.NodeName
runtimeCache kubecontainer.RuntimeCache
kubeClient clientset.Interface
heartbeatClient clientset.Interface
// mirrorPodClient is used to create and delete mirror pods in the API for static
// pods.
mirrorPodClient kubepod.MirrorClient
rootDirectory string
podLogsDirectory string
// onRepeatedHeartbeatFailure is called when a heartbeat operation fails more than once. optional.
onRepeatedHeartbeatFailure func()
// podManager stores the desired set of admitted pods and mirror pods that the kubelet should be
// running. The actual set of running pods is stored on the podWorkers. The manager is populated
// by the kubelet config loops, which abstract receiving configuration from many different sources
// (api for regular pods, local filesystem or http for static pods). The manager may be consulted
// by other components that need to see the set of desired pods. Note that not all desired pods are
// running, and not all running pods are in the podManager - for instance, force deleting a pod
// from the apiserver will remove it from the podManager, but the pod may still be terminating and
// tracked by the podWorkers. Components that need to know the actual consumed resources of the
// node or are driven by podWorkers and the sync*Pod methods (status, volume, stats) should also
// consult the podWorkers when reconciling.
//
// TODO: review all kubelet components that need the actual set of pods (vs the desired set)
// and update them to use podWorkers instead of podManager. This may introduce latency in some
// methods, but avoids race conditions and correctly accounts for terminating pods that have
// been force deleted or static pods that have been updated.
// https://github.com/kubernetes/kubernetes/issues/116970
podManager kubepod.Manager
// podWorkers is responsible for driving the lifecycle state machine of each pod. The worker is
// notified of config changes, updates, periodic reconciliation, container runtime updates, and
// evictions of all desired pods and will invoke reconciliation methods per pod in separate
// goroutines. The podWorkers are authoritative in the kubelet for what pods are actually being
// run and their current state:
//
// * syncing: pod should be running (syncPod)
// * terminating: pod should be stopped (syncTerminatingPod)
// * terminated: pod should have all resources cleaned up (syncTerminatedPod)
//
// and invoke the handler methods that correspond to each state. Components within the
// kubelet that need to know the phase of the pod in order to correctly set up or tear down
// resources must consult the podWorkers.
//
// Once a pod has been accepted by the pod workers, no other pod with that same UID (and
// name+namespace, for static pods) will be started until the first pod has fully terminated
// and been cleaned up by SyncKnownPods. This means a pod may be desired (in API), admitted
// (in pod manager), and requested (by invoking UpdatePod) but not start for an arbitrarily
// long interval because a prior pod is still terminating.
//
// As an event-driven (by UpdatePod) controller, the podWorkers must periodically be resynced
// by the kubelet invoking SyncKnownPods with the desired state (admitted pods in podManager).
// Since the podManager may be unaware of some running pods due to force deletion, the
// podWorkers are responsible for triggering a sync of pods that are no longer desired but
// must still run to completion.
podWorkers PodWorkers
// evictionManager observes the state of the node for situations that could impact node stability
// and evicts pods (sets to phase Failed with reason Evicted) to reduce resource pressure. The
// eviction manager acts on the actual state of the node and considers the podWorker to be
// authoritative.
evictionManager eviction.Manager
// probeManager tracks the set of running pods and ensures any user-defined periodic checks are
// run to introspect the state of each pod. The probe manager acts on the actual state of the node
// and is notified of pods by the podWorker. The probe manager is the authoritative source of the
// most recent probe status and is responsible for notifying the status manager, which
// synthesizes them into the overall pod status.
probeManager prober.Manager
// secretManager caches the set of secrets used by running pods on this node. The podWorkers
// notify the secretManager when pods are started and terminated, and the secretManager must
// then keep the needed secrets up-to-date as they change.
secretManager secret.Manager
// configMapManager caches the set of config maps used by running pods on this node. The
// podWorkers notify the configMapManager when pods are started and terminated, and the
// configMapManager must then keep the needed config maps up-to-date as they change.
configMapManager configmap.Manager
// volumeManager observes the set of running pods and is responsible for attaching, mounting,
// unmounting, and detaching as those pods move through their lifecycle. It periodically
// synchronizes the set of known volumes to the set of actually desired volumes and cleans up
// any orphaned volumes. The volume manager considers the podWorker to be authoritative for
// which pods are running.
volumeManager volumemanager.VolumeManager
// statusManager receives updated pod status updates from the podWorker and updates the API
// status of those pods to match. The statusManager is authoritative for the synthesized
// status of the pod from the kubelet's perspective (other components own the individual
// elements of status) and should be consulted by components in preference to assembling
// that status themselves. Note that the status manager is downstream of the pod worker
// and components that need to check whether a pod is still running should instead directly
// consult the pod worker.
statusManager status.Manager
// allocationManager manages allocated resources for pods.
allocationManager allocation.Manager
// podCertificateManager is fed updates as pods are added and removed from
// the node, and requests certificates for them based on their configured
// pod certificate volumes.
podCertificateManager podcertificate.Manager
// resyncInterval is the interval between periodic full reconciliations of
// pods on this node.
resyncInterval time.Duration
// sourcesReady records the sources seen by the kubelet; it is thread-safe.
sourcesReady config.SourcesReady
// Optional, defaults to /logs/ from /var/log
logServer http.Handler
// Optional, defaults to simple Docker implementation
runner kubecontainer.CommandRunner
// cAdvisor used for container information.
cadvisor cadvisor.Interface
// Set to true to have the node register itself with the apiserver.
registerNode bool
// List of taints to add to a node object when the kubelet registers itself.
registerWithTaints []v1.Taint
// for internal bookkeeping; access only from within registerWithApiserver
registrationCompleted bool
// dnsConfigurer is used for setting up DNS resolver configuration when launching pods.
dnsConfigurer *dns.Configurer
// serviceLister knows how to list services
serviceLister serviceLister
// serviceHasSynced indicates whether services have been sync'd at least once.
// Check this before trusting a response from the lister.
serviceHasSynced cache.InformerSynced
// nodeLister knows how to list nodes
nodeLister corelisters.NodeLister
// nodeHasSynced indicates whether nodes have been sync'd at least once.
// Check this before trusting a response from the node lister.
nodeHasSynced cache.InformerSynced
// a list of node labels to register
nodeLabels map[string]string
// Last timestamp when runtime responded on ping.
// Mutex is used to protect this value.
runtimeState *runtimeState
// Volume plugins.
volumePluginMgr *volume.VolumePluginMgr
// Manages container health check results.
livenessManager proberesults.Manager
readinessManager proberesults.Manager
startupManager proberesults.Manager
// The EventRecorder to use
recorder record.EventRecorder
// Policy for handling garbage collection of dead containers.
containerGC kubecontainer.GC
// Manager for image garbage collection.
imageManager images.ImageGCManager
// Manager for container logs.
containerLogManager logs.ContainerLogManager
// Cached MachineInfo returned by cadvisor.
machineInfoLock sync.RWMutex
machineInfo *cadvisorapi.MachineInfo
// Handles certificate rotations.
serverCertificateManager certificate.Manager
// Indicates that the node initialization happens in an external cloud controller
externalCloudProvider bool
// Reference to this node.
nodeRef *v1.ObjectReference
// Container runtime.
containerRuntime kubecontainer.Runtime
// Streaming runtime handles container streaming.
streamingRuntime kubecontainer.StreamingRuntime
// Container runtime service (needed by container runtime Start()).
runtimeService internalapi.RuntimeService
// reasonCache caches the failure reason of the last creation of all containers, which is
// used for generating ContainerStatus.
reasonCache *ReasonCache
// containerRuntimeReadyExpected indicates whether the container runtime is expected to be ready; when true,
// errors are logged without a verbosity guard, which avoids excessive error logs during node startup.
// It's false during the node initialization period of nodeReadyGracePeriod, and after that
// it's set to true by fastStatusUpdateOnce when it exits.
containerRuntimeReadyExpected bool
// nodeStatusUpdateFrequency specifies how often the kubelet computes node status. If the node lease
// feature is not enabled, it is also the frequency at which the kubelet posts node status to the master.
// In that case, be cautious when changing the constant; it must work with nodeMonitorGracePeriod
// in the node controller. There are several constraints:
// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
// N is the number of retries allowed for the kubelet to post node status. It is pointless
// to make nodeMonitorGracePeriod less than nodeStatusUpdateFrequency, since there
// will only be fresh values from the kubelet at intervals of nodeStatusUpdateFrequency.
// The constant must be less than podEvictionTimeout.
// 2. nodeStatusUpdateFrequency needs to be large enough for kubelet to generate node
// status. Kubelet may fail to update node status reliably if the value is too small,
// as it takes time to gather all necessary node information.
nodeStatusUpdateFrequency time.Duration
// nodeStatusReportFrequency is the frequency that kubelet posts node
// status to master. It is only used when node lease feature is enabled.
nodeStatusReportFrequency time.Duration
// delayAfterNodeStatusChange is the one-time random duration added to the next node status report interval
// whenever there is an actual node status change or a kubelet restart. All future node status updates that
// are not caused by a real status change stick with nodeStatusReportFrequency. The random duration is drawn
// from a uniform distribution over [-0.5*nodeStatusReportFrequency, 0.5*nodeStatusReportFrequency]
delayAfterNodeStatusChange time.Duration
// lastStatusReportTime is the time when node status was last reported.
lastStatusReportTime time.Time
// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
// This lock is used by Kubelet.syncNodeStatus and Kubelet.fastNodeStatusUpdate functions and shouldn't be used anywhere else.
syncNodeStatusMux sync.Mutex
// updatePodCIDRMux is a lock on updating pod CIDR, because this path is not thread-safe.
// This lock is used by Kubelet.updatePodCIDR function and shouldn't be used anywhere else.
updatePodCIDRMux sync.Mutex
// updateRuntimeMux is a lock on updating runtime, because this path is not thread-safe.
// This lock is used by Kubelet.updateRuntimeUp, Kubelet.fastNodeStatusUpdate and
// Kubelet.HandlerSupportsUserNamespaces functions and shouldn't be used anywhere else.
updateRuntimeMux sync.Mutex
// nodeLeaseController claims and renews the node lease for this Kubelet
nodeLeaseController lease.Controller
// pleg observes the state of the container runtime and notifies the kubelet of changes to containers, which
// notifies the podWorkers to reconcile the state of the pod (for instance, if a container dies and needs to
// be restarted).
pleg pleg.PodLifecycleEventGenerator
// eventedPleg supplements the pleg to deliver edge-driven container changes with low latency.
eventedPleg pleg.PodLifecycleEventGenerator
// Store kubecontainer.PodStatus for all pods.
podCache kubecontainer.Cache
// os is a facade for various syscalls that need to be mocked during testing.
os kubecontainer.OSInterface
// Watcher of out of memory events.
oomWatcher oomwatcher.Watcher
// Monitor resource usage
resourceAnalyzer serverstats.ResourceAnalyzer
// Whether or not we should have the QOS cgroup hierarchy for resource management
cgroupsPerQOS bool
// If non-empty, pass this to the container runtime as the root cgroup.
cgroupRoot string
// Mounter to use for volumes.
mounter mount.Interface
// hostutil to interact with filesystems
hostutil hostutil.HostUtils
// subpather to execute subpath actions
subpather subpath.Interface
// Manager of non-Runtime containers.
containerManager cm.ContainerManager
// Maximum number of pods that can be run by this Kubelet
maxPods int
// Monitor Kubelet's sync loop
syncLoopMonitor atomic.Value
// Backoff for container restarts
crashLoopBackOff *flowcontrol.Backoff
// Information about the ports which are opened by daemons on Node running this Kubelet server.
daemonEndpoints *v1.NodeDaemonEndpoints
// A queue used to trigger pod workers.
workQueue queue.WorkQueue
// oneTimeInitializer is used to initialize modules that depend on the runtime being up.
oneTimeInitializer sync.Once
// If set, use this IP address or addresses for the node
nodeIPs []net.IP
// use this function to validate the kubelet nodeIP
nodeIPValidator func(net.IP) error
// If non-nil, this is a unique identifier for the node in an external database, e.g. a cloud provider
providerID string
// clock is an interface that provides time related functionality in a way that makes it
// easy to test the code.
clock clock.WithTicker
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(context.Context, *v1.Node) error
lastNodeUnschedulableLock sync.Mutex
// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
lastNodeUnschedulable bool
// the list of handlers to call during pod sync loop.
lifecycle.PodSyncLoopHandlers
// the list of handlers to call during pod sync.
lifecycle.PodSyncHandlers
// the number of allowed pods per core
podsPerCore int
// enableControllerAttachDetach indicates that the Attach/Detach controller
// should manage attachment/detachment of volumes scheduled to this node,
// and prevents the kubelet from executing any attach/detach operations
enableControllerAttachDetach bool
// trigger deleting containers in a pod
containerDeletor *podContainerDeletor
// whether to configure iptables util chains
makeIPTablesUtilChains bool
// The AppArmor validator for checking whether AppArmor is supported.
appArmorValidator apparmor.Validator
// StatsProvider provides the node and the container stats.
StatsProvider *stats.Provider
// pluginmanager runs a set of asynchronous loops that figure out which
// plugins need to be registered/unregistered based on this node and makes it so.
pluginManager pluginmanager.PluginManager
// This flag sets a maximum number of images to report in the node status.
nodeStatusMaxImages int32
// Handles RuntimeClass objects for the Kubelet.
runtimeClassManager *runtimeclass.Manager
// Handles node shutdown events for the Node.
shutdownManager nodeshutdown.Manager
// Manage user namespaces
usernsManager *userns.UsernsManager
// OpenTelemetry Tracer
tracer trace.Tracer
// Track node startup latencies
nodeStartupLatencyTracker util.NodeStartupLatencyTracker
// Health check kubelet
healthChecker watchdog.HealthChecker
// flagz is the Reader interface to get flags for flagz page.
flagz flagz.Reader
}
// ListPodStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStats(ctx)
}
// ListPodCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodCPUAndMemoryStats(ctx)
}
// PodCPUAndMemoryStats is delegated to StatsProvider
func (kl *Kubelet) PodCPUAndMemoryStats(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) (*statsapi.PodStats, error) {
return kl.StatsProvider.PodCPUAndMemoryStats(ctx, pod, podStatus)
}
// ListPodStatsAndUpdateCPUNanoCoreUsage is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
}
// ImageFsStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) ImageFsStats(ctx context.Context) (*statsapi.FsStats, *statsapi.FsStats, error) {
return kl.StatsProvider.ImageFsStats(ctx)
}
// GetCgroupStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
return kl.StatsProvider.GetCgroupStats(cgroupName, updateStats)
}
// GetCgroupCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) {
return kl.StatsProvider.GetCgroupCPUAndMemoryStats(cgroupName, updateStats)
}
// RootFsStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) RootFsStats() (*statsapi.FsStats, error) {
return kl.StatsProvider.RootFsStats()
}
// RlimitStats is delegated to StatsProvider, which implements stats.Provider interface
func (kl *Kubelet) RlimitStats() (*statsapi.RlimitStats, error) {
return kl.StatsProvider.RlimitStats()
}
// setupDataDirs creates:
// 1. the root directory
// 2. the pods directory
// 3. the plugins directory
// 4. the pod-resources directory
// 5. the checkpoint directory
// 6. the pod logs root directory
func (kl *Kubelet) setupDataDirs() error {
if cleanedRoot := filepath.Clean(kl.rootDirectory); cleanedRoot != kl.rootDirectory {
return fmt.Errorf("rootDirectory not in canonical form: expected %s, was %s", cleanedRoot, kl.rootDirectory)
}
pluginRegistrationDir := kl.getPluginsRegistrationDir()
pluginsDir := kl.getPluginsDir()
if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
return fmt.Errorf("error creating root directory: %v", err)
}
if err := utilfs.MkdirAll(kl.getPodLogsDir(), 0750); err != nil {
return fmt.Errorf("error creating pod logs root directory %q: %w", kl.getPodLogsDir(), err)
}
if err := kl.hostutil.MakeRShared(kl.getRootDir()); err != nil {
return fmt.Errorf("error configuring root directory: %v", err)
}
if err := os.MkdirAll(kl.getPodsDir(), 0750); err != nil {
return fmt.Errorf("error creating pods directory: %v", err)
}
if err := utilfs.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
return fmt.Errorf("error creating plugins directory: %v", err)
}
if err := utilfs.MkdirAll(kl.getPluginsRegistrationDir(), 0750); err != nil {
return fmt.Errorf("error creating plugins registry directory: %v", err)
}
if err := os.MkdirAll(kl.getPodResourcesDir(), 0750); err != nil {
return fmt.Errorf("error creating podresources directory: %v", err)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint) {
if err := utilfs.MkdirAll(kl.getCheckpointsDir(), 0700); err != nil {
return fmt.Errorf("error creating checkpoint directory: %v", err)
}
}
if selinux.GetEnabled() {
err := selinux.SetFileLabel(pluginRegistrationDir, config.KubeletPluginsDirSELinuxLabel)
if err != nil {
klog.InfoS("Unprivileged containerized plugins might not work, could not set selinux context on plugin registration dir", "path", pluginRegistrationDir, "err", err)
}
err = selinux.SetFileLabel(pluginsDir, config.KubeletPluginsDirSELinuxLabel)
if err != nil {
klog.InfoS("Unprivileged containerized plugins might not work, could not set selinux context on plugins dir", "path", pluginsDir, "err", err)
}
}
return nil
}
// StartGarbageCollection starts garbage collection threads.
func (kl *Kubelet) StartGarbageCollection() {
loggedContainerGCFailure := false
go wait.Until(func() {
ctx := context.Background()
if err := kl.containerGC.GarbageCollect(ctx); err != nil {
klog.ErrorS(err, "Container garbage collection failed")
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
loggedContainerGCFailure = true
} else {
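// Log the first success after a failure at V(1) so that recovery is visible without raising verbosity.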
var vLevel klog.Level = 4
if loggedContainerGCFailure {
vLevel = 1
loggedContainerGCFailure = false
}
klog.V(vLevel).InfoS("Container garbage collection succeeded")
}
}, ContainerGCPeriod, wait.NeverStop)
// When the high threshold is set to 100 and the max age is 0 (or the max age feature is disabled),
// stub out the image GC manager.
if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 &&
(!utilfeature.DefaultFeatureGate.Enabled(features.ImageMaximumGCAge) || kl.kubeletConfiguration.ImageMaximumGCAge.Duration == 0) {
klog.V(2).InfoS("ImageGCHighThresholdPercent is set 100 and ImageMaximumGCAge is 0, Disable image GC")
return
}
prevImageGCFailed := false
beganGC := time.Now()
go wait.Until(func() {
ctx := context.Background()
if err := kl.imageManager.GarbageCollect(ctx, beganGC); err != nil {
if prevImageGCFailed {
klog.ErrorS(err, "Image garbage collection failed multiple times in a row")
// Only create an event for repeated failures
kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error())
} else {
klog.ErrorS(err, "Image garbage collection failed once. Stats initialization may not have completed yet")
}
prevImageGCFailed = true
} else {
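// As above, log the first success after a failure at V(1) so recovery is visible.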
var vLevel klog.Level = 4
if prevImageGCFailed {
vLevel = 1
prevImageGCFailed = false
}
klog.V(vLevel).InfoS("Image garbage collection succeeded")
}
}, ImageGCPeriod, wait.NeverStop)
}
// initializeModules will initialize internal modules that do not require the container runtime to be up.
// Note that the modules here must not depend on modules that are not initialized here.
func (kl *Kubelet) initializeModules(ctx context.Context) error {
// Prometheus metrics.
metrics.Register()
metrics.RegisterCollectors(
collectors.NewVolumeStatsCollector(kl),
collectors.NewLogMetricsCollector(kl.StatsProvider.ListPodStats),
)
metrics.SetNodeName(kl.nodeName)
servermetrics.Register()
// Setup filesystem directories.
if err := kl.setupDataDirs(); err != nil {
return err
}
// If the container logs directory does not exist, create it.
if _, err := os.Stat(ContainerLogsDir); err != nil {
if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
return fmt.Errorf("failed to create directory %q: %v", ContainerLogsDir, err)
}
}
if goos == "windows" {
// On Windows we should not allow other users to read the logs directory,
// to prevent non-root containers from reading the logs of other containers.
if err := utilfs.Chmod(ContainerLogsDir, 0750); err != nil {
return fmt.Errorf("failed to set permissions on directory %q: %w", ContainerLogsDir, err)
}
}
// Start the image manager.
kl.imageManager.Start(ctx)
// Start the certificate manager if it was enabled.
if kl.serverCertificateManager != nil {
kl.serverCertificateManager.Start()
}
// Start out of memory watcher.
if kl.oomWatcher != nil {
if err := kl.oomWatcher.Start(ctx, kl.nodeRef); err != nil {
return fmt.Errorf("failed to start OOM watcher: %w", err)
}
}
// Start resource analyzer
kl.resourceAnalyzer.Start()
return nil
}
// initializeRuntimeDependentModules will initialize internal modules that require the container runtime to be up.
func (kl *Kubelet) initializeRuntimeDependentModules() {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
if err := kl.cadvisor.Start(); err != nil {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
klog.ErrorS(err, "Failed to start cAdvisor")
os.Exit(1)
}
// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.
kl.StatsProvider.GetCgroupStats("/", true)
// Start container manager.
node, err := kl.getNodeAnyWay()
if err != nil {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
klog.ErrorS(err, "Kubelet failed to get node info")
os.Exit(1)
}
// containerManager must start after cAdvisor because it needs filesystem capacity information
if err := kl.containerManager.Start(ctx, node, kl.GetActivePods, kl.getNodeAnyWay, kl.sourcesReady, kl.statusManager, kl.runtimeService, kl.supportLocalStorageCapacityIsolation()); err != nil {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
klog.ErrorS(err, "Failed to start ContainerManager")
os.Exit(1)
}
// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
// Eviction decisions are based on the allocated (rather than desired) pod resources.
kl.evictionManager.Start(kl.StatsProvider, kl.getAllocatedPods, kl.PodIsFinished, evictionMonitoringPeriod)
// The container log manager must start after the container runtime is up so it can retrieve information
// from the runtime and ask it to reopen container log files after rotation.
kl.containerLogManager.Start(ctx)
// Register the callback for CSI driver plugins
kl.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csi.PluginHandler))
// Register the callbacks for DRA plugins and device plugins
for name, handler := range kl.containerManager.GetPluginRegistrationHandlers() {
kl.pluginManager.AddHandler(name, handler)
}
// Start the plugin manager
klog.V(4).InfoS("Starting plugin manager")
go kl.pluginManager.Run(ctx, kl.sourcesReady, wait.NeverStop)
err = kl.shutdownManager.Start()
if err != nil {
// The shutdown manager is not critical for kubelet, so log the failure but don't block kubelet startup.
klog.ErrorS(err, "Failed to start node shutdown manager")
}
}
// Run starts the kubelet reacting to config updates
func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
ctx := context.Background()
if kl.logServer == nil {
file := http.FileServer(http.Dir(nodeLogDir))
if utilfeature.DefaultFeatureGate.Enabled(features.NodeLogQuery) && kl.kubeletConfiguration.EnableSystemLogQuery {
kl.logServer = http.StripPrefix("/logs/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if nlq, errs := newNodeLogQuery(req.URL.Query()); len(errs) > 0 {
http.Error(w, errs.ToAggregate().Error(), http.StatusBadRequest)
return
} else if nlq != nil {
if req.URL.Path != "/" && req.URL.Path != "" {
http.Error(w, "path not allowed in query mode", http.StatusNotAcceptable)
return
}
if errs := nlq.validate(); len(errs) > 0 {
http.Error(w, errs.ToAggregate().Error(), http.StatusNotAcceptable)
return
}
// Validation ensures that the request does not query services and files at the same time
if len(nlq.Services) > 0 {
journal.ServeHTTP(w, req)
return
}
// Validation ensures that the request does not explicitly query multiple files at the same time
if len(nlq.Files) == 1 {
// Account for the \ being used on Windows clients
req.URL.Path = filepath.ToSlash(nlq.Files[0])
}
}
// Fall back in case the caller is directly trying to query a file
// Example: kubectl get --raw /api/v1/nodes/$name/proxy/logs/foo.log
file.ServeHTTP(w, req)
}))
} else {
kl.logServer = http.StripPrefix("/logs/", file)
}
}
if kl.kubeClient == nil {
klog.InfoS("No API server defined - no node status update will be sent")
}
if err := kl.initializeModules(ctx); err != nil {
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
klog.ErrorS(err, "Failed to initialize internal modules")
os.Exit(1)
}
if err := kl.cgroupVersionCheck(); err != nil {
klog.V(2).InfoS("Warning: cgroup check", "error", err)
}
// Start the allocation manager
if kl.allocationManager != nil {
kl.allocationManager.Run(ctx)
}
// Start volume manager
go kl.volumeManager.Run(ctx, kl.sourcesReady)
if kl.kubeClient != nil {
// Start two go-routines to update the status.
//
// The first reports to the apiserver every nodeStatusUpdateFrequency and aims to provide status at regular intervals,
// while the second provides a more timely status update during initialization and runs a one-shot update to the apiserver
// once the node becomes ready, then exits.
//
// Introduce a small jitter to ensure that over time the requests from the set of
// nodes won't start accumulating at approximately the same time due to the
// priority and fairness effect.
go func() {
// Call updateRuntimeUp once before syncNodeStatus to make sure the kubelet has already checked the runtime state;
// otherwise, when the kubelet restarts, syncNodeStatus would report the node as notReady during the first report period.
kl.updateRuntimeUp()
wait.JitterUntil(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, 0.04, true, wait.NeverStop)
}()
go kl.fastStatusUpdateOnce()
// start syncing lease
go kl.nodeLeaseController.Run(context.Background())
// Mirror pods for static pods may not be created immediately during node startup
// due to node registration or informer sync delays. They will be created eventually
// when static pods are resynced (every 1-1.5 minutes).
// To ensure kube-scheduler is aware of static pod resource usage faster,
// mirror pods are created as soon as the node registers.
go kl.fastStaticPodsRegistration(ctx)
}
go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop)
// Set up iptables util rules
if kl.makeIPTablesUtilChains {
kl.initNetworkUtil()
}
// Start component sync loops.
kl.statusManager.Start(ctx)
// Start syncing RuntimeClasses if enabled.
if kl.runtimeClassManager != nil {
kl.runtimeClassManager.Start(wait.NeverStop)
}
// Start the pod lifecycle event generator.
kl.pleg.Start()
// Start eventedPLEG only if EventedPLEG feature gate is enabled.
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
kl.eventedPleg.Start()
}
if utilfeature.DefaultFeatureGate.Enabled(features.SystemdWatchdog) && kl.healthChecker != nil {
kl.healthChecker.SetHealthCheckers(kl, kl.containerManager.GetHealthCheckers())
}
kl.syncLoop(ctx, updates, kl)
}
// SyncPod is the transaction script for the sync of a single pod (setting up a
// pod). This method is reentrant and expected to converge a pod towards the
// desired state of the spec. The reverse (teardown) is handled in
// SyncTerminatingPod and SyncTerminatedPod. If SyncPod exits without error,
// then the pod runtime state is in sync with the desired configuration state
// (pod is running). If SyncPod exits with a transient error, the next
// invocation of SyncPod is expected to make progress towards reaching the
// desired state. SyncPod exits with isTerminal when the pod was detected to
// have reached a terminal lifecycle phase due to container exits (for
// RestartNever or RestartOnFailure) and the next method invoked will be
// SyncTerminatingPod. If the pod terminates for any other reason, SyncPod
// will receive a context cancellation and should exit as soon as possible.
//
// Arguments:
//
// updateType - whether this is a create (first time) or an update, should
// only be used for metrics since this method must be reentrant
//
// pod - the pod that is being set up
//
// mirrorPod - the mirror pod known to the kubelet for this pod, if any
//
// podStatus - the most recent pod status observed for this pod which can
// be used to determine the set of actions that should be taken during
// this loop of SyncPod
//
// The workflow is:
// - If the pod is being created, record pod worker start latency
// - Call generateAPIPodStatus to prepare a v1.PodStatus for the pod
// - If the pod is being seen as running for the first time, record pod
// start latency
// - Update the status of the pod in the status manager
// - Stop the pod's containers if it should not be running due to soft
// admission
// - Ensure any background tracking for a runnable pod is started
// - Create a mirror pod if the pod is a static pod, and does not
// already have a mirror pod
// - Create the data directories for the pod if they do not exist
// - Wait for volumes to attach/mount
// - Fetch the pull secrets for the pod
// - Call the container runtime's SyncPod callback
// - Update the traffic shaping for the pod's ingress and egress limits
//
// If any step of this workflow errors, the error is returned, and is repeated
// on the next SyncPod call.
//
// This operation writes all events that are dispatched in order to provide
// the most accurate information possible about an error situation to aid debugging.
// Callers should not write an event if this operation returns an error.
func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) {
ctx, otelSpan := kl.tracer.Start(ctx, "syncPod", trace.WithAttributes(
semconv.K8SPodUIDKey.String(string(pod.UID)),
attribute.String("k8s.pod", klog.KObj(pod).String()),
semconv.K8SPodNameKey.String(pod.Name),
attribute.String("k8s.pod.update_type", updateType.String()),
semconv.K8SNamespaceNameKey.String(pod.Namespace),
))
logger := klog.FromContext(ctx)
klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer func() {
if err != nil {
otelSpan.RecordError(err)
otelSpan.SetStatus(codes.Error, err.Error())
}
klog.V(4).InfoS("SyncPod exit", "pod", klog.KObj(pod), "podUID", pod.UID, "isTerminal", isTerminal)
otelSpan.End()
}()
// Latency measurements for the main workflow are relative to the
// first time the pod was seen by kubelet.
var firstSeenTime time.Time
if firstSeenTimeStr, ok := pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]; ok {
firstSeenTime = kubetypes.ConvertToTimestamp(firstSeenTimeStr).Get()
}
// Record pod worker start latency if being created
// TODO: make pod workers record their own latencies
if updateType == kubetypes.SyncPodCreate {
if !firstSeenTime.IsZero() {
// This is the first time we are syncing the pod. Record the latency
// since kubelet first saw the pod if firstSeenTime is set.
metrics.PodWorkerStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
} else {
klog.V(3).InfoS("First seen time not recorded for pod",
"podUID", pod.UID,
"pod", klog.KObj(pod))
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// Check whether a resize is in progress so we can set the PodResizeInProgressCondition accordingly.
kl.allocationManager.CheckPodResizeInProgress(pod, podStatus)
// TODO(#132851): There is a race condition here, where the goroutine in the
// allocation manager may allocate a new resize and unconditionally set the
// PodResizeInProgressCondition before we set the status below.
}
// Generate final API pod status with pod and status manager status
apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, false)
// The pod IP may be changed in generateAPIPodStatus if the pod is using host network. (See #24576)
// TODO(random-liu): After writing pod spec into container labels, check whether pod is using host network, and
// set pod IP to hostIP directly in runtime.GetPodStatus
podStatus.IPs = make([]string, 0, len(apiPodStatus.PodIPs))
for _, ipInfo := range apiPodStatus.PodIPs {
podStatus.IPs = append(podStatus.IPs, ipInfo.IP)
}
if len(podStatus.IPs) == 0 && len(apiPodStatus.PodIP) > 0 {
podStatus.IPs = []string{apiPodStatus.PodIP}
}
// If the pod is terminal, we don't need to continue to setup the pod
if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
isTerminal = true
return isTerminal, nil
}
// Record the time it takes for the pod to become running
// since kubelet first saw the pod if firstSeenTime is set.
existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
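// Note: && binds tighter than ||, so the start duration is also observed when no
// previous status exists (!ok), even if firstSeenTime was never set.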
if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning &&
!firstSeenTime.IsZero() {
metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
}
kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
// If the network plugin is not ready, only start the pod if it uses the host network
if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, err)
return false, fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, err)
}
// ensure the kubelet knows about referenced secrets or configmaps used by the pod
if !kl.podWorkers.IsPodTerminationRequested(pod.UID) {
if kl.secretManager != nil {
kl.secretManager.RegisterPod(pod)
}
if kl.configMapManager != nil {
kl.configMapManager.RegisterPod(pod)
}
}
// Create Cgroups for the pod and apply resource parameters
// to them if cgroups-per-qos flag is enabled.
pcm := kl.containerManager.NewPodContainerManager()
// If the pod has already been terminated, we need not create
// or update the pod's cgroup
// TODO: once context cancellation is added this check can be removed
if !kl.podWorkers.IsPodTerminationRequested(pod.UID) {
// When the kubelet is restarted with the cgroups-per-qos
// flag enabled, all of the pod's running containers
// should be killed and brought back up
// under the qos cgroup hierarchy.
// Check if this is the pod's first sync
firstSync := true
for _, containerStatus := range apiPodStatus.ContainerStatuses {
if containerStatus.State.Running != nil {
firstSync = false
break
}
}
// Don't kill containers in the pod if the pod's cgroups already
// exist or the pod is running for the first time
podKilled := false
if !pcm.Exists(pod) && !firstSync {
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(ctx, pod, p, nil); err == nil {
podKilled = true
} else {
if wait.Interrupted(err) {
return false, nil
}
klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus)
}
}
// Create and Update pod's Cgroups
// Don't create cgroups for a run-once pod if it was killed above.
// The current policy is to not restart run-once pods when the
// kubelet is restarted with the new flag, as run-once pods are
// expected to run only once; if the kubelet is restarted, they
// are not expected to run again.
// We don't create or apply cgroup updates if it's a run-once pod that was killed above
runOnce := pod.Spec.RestartPolicy == v1.RestartPolicyNever
// With ContainerRestartRules, if any container is restartable, the pod should be restarted.
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
for _, c := range pod.Spec.Containers {
if podutil.IsContainerRestartable(pod.Spec, c) {
runOnce = false
}
}
}
if !podKilled || !runOnce {
if !pcm.Exists(pod) {
if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
klog.V(2).InfoS("Failed to update QoS cgroups while syncing pod", "pod", klog.KObj(pod), "err", err)
}
if err := pcm.EnsureExists(pod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err)
return false, fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
}
}
}
}
// Create Mirror Pod for Static Pod if it doesn't already exist
kl.tryReconcileMirrorPods(ctx, pod, mirrorPod)
// Make data directories for the pod
if err := kl.makePodDataDirs(pod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err)
klog.ErrorS(err, "Unable to make pod data directories for pod", "pod", klog.KObj(pod))
return false, err
}
// Wait for volumes to attach/mount
if err := kl.volumeManager.WaitForAttachAndMount(ctx, pod); err != nil {
var volumeAttachLimitErr *volumemanager.VolumeAttachLimitExceededError
if errors.As(err, &volumeAttachLimitErr) {
kl.rejectPod(pod, volumemanager.VolumeAttachmentLimitExceededReason, volumeAttachLimitErr.Error())
recordAdmissionRejection(volumemanager.VolumeAttachmentLimitExceededReason)
return true, nil
}
if !wait.Interrupted(err) {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err)
klog.ErrorS(err, "Unable to attach or mount volumes for pod; skipping pod", "pod", klog.KObj(pod))
}
return false, err
}
// Fetch the pull secrets for the pod
pullSecrets := kl.getPullSecretsForPod(pod)
// Ensure the pod is being probed
kl.probeManager.AddPod(ctx, pod)
// TODO(#113606): use cancellation from the incoming context parameter, which comes from the pod worker.
// Currently, using cancellation from that context causes test failures. To remove this WithoutCancel,
// any wait.Interrupted errors need to be filtered from result and bypass the reasonCache - cancelling
// the context for SyncPod is a known and deliberate error, not a generic error.
// Use WithoutCancel instead of a new context.TODO() to propagate trace context
// Call the container runtime's SyncPod callback
sctx := context.WithoutCancel(ctx)
result := kl.containerRuntime.SyncPod(sctx, pod, podStatus, pullSecrets, kl.crashLoopBackOff)
kl.reasonCache.Update(pod.UID, result)
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
for _, r := range result.SyncResults {
if r.Action == kubecontainer.ResizePodInPlace && r.Error != nil {
// If the condition already exists, the observedGeneration does not get updated.
kl.statusManager.SetPodResizeInProgressCondition(pod.UID, v1.PodReasonError, r.Message, pod.Generation)
}
}
}
return false, result.Error()
}
// SyncTerminatingPod is expected to terminate all running containers in a pod. Once this method
// returns without error, the pod is considered to be terminated and it will be safe to clean up any
// pod state that is tied to the lifetime of running containers. The next method invoked will be
// SyncTerminatedPod. This method is expected to return with the grace period provided and the
// provided context may be cancelled if the duration is exceeded. The method may also be interrupted
// with a context cancellation if the grace period is shortened by the user or the kubelet (such as
// during eviction). This method is not guaranteed to be called if a pod is force deleted from the
// configuration and the kubelet is restarted - SyncTerminatingRuntimePod handles those orphaned
// pods.
func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
// Currently, using that context causes test failures.
ctx, otelSpan := kl.tracer.Start(context.Background(), "syncTerminatingPod", trace.WithAttributes(
semconv.K8SPodUIDKey.String(string(pod.UID)),
attribute.String("k8s.pod", klog.KObj(pod).String()),
semconv.K8SPodNameKey.String(pod.Name),
semconv.K8SNamespaceNameKey.String(pod.Namespace),
))
logger := klog.FromContext(ctx)
defer otelSpan.End()
klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer klog.V(4).InfoS("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, false)
if podStatusFn != nil {
podStatusFn(&apiPodStatus)
}
kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
if gracePeriod != nil {
klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
} else {
klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil)
}
kl.probeManager.StopLivenessAndStartup(pod)
p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
if err := kl.killPod(ctx, pod, p, gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err)
return err
}
// Once the containers are stopped, we can stop probing for liveness and readiness.
// TODO: once a pod is terminal, certain probes (liveness exec) could be stopped immediately after
// the detection of a container shutdown or (for readiness) after the first failure. Tracked as
// https://github.com/kubernetes/kubernetes/issues/107894 although may not be worth optimizing.
kl.probeManager.RemovePod(pod)
// Guard against consistency issues in KillPod implementations by checking that there are no
// running containers. This method is invoked infrequently so this is effectively free and can
// catch race conditions introduced by callers updating pod status out of order.
// TODO: have KillPod return the terminal status of stopped containers and write that into the
// cache immediately
stoppedPodStatus, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil {
klog.ErrorS(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID)
return err
}
preserveDataFromBeforeStopping(stoppedPodStatus, podStatus)
var runningContainers []string
type container struct {
Name string
State string
ExitCode int
FinishedAt string
}
var containers []container
klogV := klog.V(4)
klogVEnabled := klogV.Enabled()
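// Collect the IDs of any containers still running and, when V(4) is enabled, a
// sorted summary of every container's end state for the log entry below.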
for _, s := range stoppedPodStatus.ContainerStatuses {
if s.State == kubecontainer.ContainerStateRunning {
runningContainers = append(runningContainers, s.ID.String())
}
if klogVEnabled {
containers = append(containers, container{Name: s.Name, State: string(s.State), ExitCode: s.ExitCode, FinishedAt: s.FinishedAt.UTC().Format(time.RFC3339Nano)})
}
}
if klogVEnabled {
sort.Slice(containers, func(i, j int) bool { return containers[i].Name < containers[j].Name })
klog.V(4).InfoS("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", containers)
}
if len(runningContainers) > 0 {
return fmt.Errorf("detected running containers after a successful KillPod, CRI violation: %v", runningContainers)
}
// NOTE: resources must be unprepared AFTER all containers have stopped
// and BEFORE the pod status is changed on the API server
// to avoid race conditions with the resource deallocation code in kubernetes core.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if err := kl.UnprepareDynamicResources(ctx, pod); err != nil {
return err
}
}
// Compute and update the status in cache once the pods are no longer running.
// The computation is done here to ensure the pod status used for it contains
// information about the container end states (including exit codes) - when
// SyncTerminatedPod is called the containers may already be removed.
apiPodStatus = kl.generateAPIPodStatus(pod, stoppedPodStatus, true)
kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
// we have successfully stopped all containers, the pod is terminating, our status is "done"
klog.V(4).InfoS("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
// preserveDataFromBeforeStopping preserves data, like IPs, which are expected
// to be sent to the API server after termination, but are no longer returned by
// containerRuntime.GetPodStatus for a stopped pod.
// Note that a kubelet restart after the pod has stopped may still cause this
// data to be lost.
func preserveDataFromBeforeStopping(stoppedPodStatus, podStatus *kubecontainer.PodStatus) {
stoppedPodStatus.IPs = podStatus.IPs
}
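// A minimal sketch of the behavior, with hypothetical values:
//
//	podStatus := &kubecontainer.PodStatus{IPs: []string{"10.0.0.5"}}
//	stopped := &kubecontainer.PodStatus{} // IPs no longer reported for a stopped pod
//	preserveDataFromBeforeStopping(stopped, podStatus)
//	// stopped.IPs is now []string{"10.0.0.5"}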
// SyncTerminatingRuntimePod is expected to terminate running containers in a pod that we have no
// configuration for. Once this method returns without error, any remaining local state can be safely
// cleaned up by background processes in each subsystem. Unlike syncTerminatingPod, we lack
// knowledge of the full pod spec and so cannot perform lifecycle related operations, only ensure
// that the remnant of the running pod is terminated and allow garbage collection to proceed. We do
// not update the status of the pod because with the source of configuration removed, we have no
// place to send that status.
func (kl *Kubelet) SyncTerminatingRuntimePod(_ context.Context, runningPod *kubecontainer.Pod) error {
// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
// Currently, using that context causes test failures.
ctx := context.Background()
pod := runningPod.ToAPIPod()
klog.V(4).InfoS("SyncTerminatingRuntimePod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer klog.V(4).InfoS("SyncTerminatingRuntimePod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
// we kill the pod directly since we have lost all other information about the pod.
klog.V(4).InfoS("Orphaned running pod terminating without grace period", "pod", klog.KObj(pod), "podUID", pod.UID)
// TODO: this should probably be zero, to bypass any waiting (needs fixes in container runtime)
gracePeriod := int64(1)
if err := kl.killPod(ctx, pod, *runningPod, &gracePeriod); err != nil {
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
// there was an error killing the pod, so we return that error directly
utilruntime.HandleError(err)
return err
}
klog.V(4).InfoS("Pod termination stopped all running orphaned containers", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
// SyncTerminatedPod cleans up a pod that has terminated (has no running containers).
// The invocations in this call are expected to tear down all pod resources.
// When this method exits the pod is expected to be ready for cleanup. This method
// reduces the latency of pod cleanup but is not guaranteed to get called in all scenarios.
//
// Because the kubelet has no local store of information, all actions in this method that modify
// on-disk state must be reentrant and be garbage collected by HandlePodCleanups or a separate loop.
// This typically occurs when a pod is force deleted from configuration (local disk or API) and the
// kubelet restarts in the middle of the action.
func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
ctx, otelSpan := kl.tracer.Start(ctx, "syncTerminatedPod", trace.WithAttributes(
semconv.K8SPodUIDKey.String(string(pod.UID)),
attribute.String("k8s.pod", klog.KObj(pod).String()),
semconv.K8SPodNameKey.String(pod.Name),
semconv.K8SNamespaceNameKey.String(pod.Namespace),
))
logger := klog.FromContext(ctx)
defer otelSpan.End()
klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
defer klog.V(4).InfoS("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
// generate the final status of the pod
// TODO: should we simply fold this into TerminatePod? that would give a single pod update
apiPodStatus := kl.generateAPIPodStatus(pod, podStatus, true)
kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
// before syncTerminatedPod is invoked)
if err := kl.volumeManager.WaitForUnmount(ctx, pod); err != nil {
return err
}
klog.V(4).InfoS("Pod termination unmounted volumes", "pod", klog.KObj(pod), "podUID", pod.UID)
// This waiting loop relies on the background cleanup which starts after pod workers respond
// true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
volumesExist := kl.podVolumesExist(pod.UID)
if volumesExist {
klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
}
return !volumesExist, nil
}); err != nil {
return err
}
klog.V(3).InfoS("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)
// After volume unmount is complete, let the secret and configmap managers know we're done with this pod
if kl.secretManager != nil {
kl.secretManager.UnregisterPod(pod)
}
if kl.configMapManager != nil {
kl.configMapManager.UnregisterPod(pod)
}
// Note: we leave pod containers to be reclaimed in the background since dockershim requires the
// container for retrieving logs and we want to make sure logs are available until the pod is
// physically deleted.
// remove any cgroups in the hierarchy for pods that are no longer running.
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
name, _ := pcm.GetPodContainerName(pod)
if err := pcm.Destroy(name); err != nil {
return err
}
klog.V(4).InfoS("Pod termination removed cgroups", "pod", klog.KObj(pod), "podUID", pod.UID)
}
kl.usernsManager.Release(logger, pod.UID)
// mark the final pod status
kl.statusManager.TerminatePod(logger, pod)
klog.V(4).InfoS("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)
return nil
}
// Get pods which should be resynchronized. Currently, the following pods
// should be resynchronized:
// - pods whose work is ready.
// - pods for which internal modules have requested a sync.
//
// This method does not return orphaned pods (those known only to the pod worker that may have
// been deleted from configuration). Those pods are synced by HandlePodCleanups as a consequence
// of driving the state machine to completion.
//
// TODO: Consider synchronizing all pods which have not recently been acted on to be resilient
// to bugs that might prevent updates from being delivered (such as the previous bug with
// orphaned pods). Instead of asking the work queue for pending work, consider asking the
// PodWorker which pods should be synced.
func (kl *Kubelet) getPodsToSync() []*v1.Pod {
allPods := kl.podManager.GetPods()
podUIDs := kl.workQueue.GetWork()
podUIDSet := sets.New[string]()
for _, podUID := range podUIDs {
podUIDSet.Insert(string(podUID))
}
var podsToSync []*v1.Pod
for _, pod := range allPods {
if podUIDSet.Has(string(pod.UID)) {
// The work of the pod is ready
podsToSync = append(podsToSync, pod)
continue
}
for _, podSyncLoopHandler := range kl.PodSyncLoopHandlers {
if podSyncLoopHandler.ShouldSync(pod) {
podsToSync = append(podsToSync, pod)
break
}
}
}
return podsToSync
}
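// A sketch with hypothetical UIDs: if the work queue returns {a, c} and a
// PodSyncLoopHandler elects pod b, the result is the union {a, b, c}, drawn
// from the pod manager's pod list.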
// deletePod deletes the pod from the internal state of the kubelet by:
// 1. stopping the associated pod worker asynchronously
// 2. signaling the pod worker to kill the pod (SyncPodKill)
//
// deletePod returns an error if the pod is nil or not all sources are ready.
func (kl *Kubelet) deletePod(pod *v1.Pod) error {
if pod == nil {
return fmt.Errorf("deletePod does not allow nil pod")
}
if !kl.sourcesReady.AllReady() {
// If the sources aren't ready, skip deletion, as we may accidentally delete pods
// for sources that haven't reported yet.
return fmt.Errorf("skipping delete because sources aren't ready yet")
}
klog.V(3).InfoS("Pod has been deleted and must be killed", "pod", klog.KObj(pod), "podUID", pod.UID)
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodKill,
})
// We leave the volume/directory cleanup to the periodic cleanup routine.
return nil
}
// rejectPod records an event about the pod with the given reason and message,
// and updates the pod to the failed phase in the status manager.
func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
kl.statusManager.SetPodStatus(klog.TODO(), pod, v1.PodStatus{
QOSClass: v1qos.GetPodQOS(pod), // keep it as is
Phase: v1.PodFailed,
Reason: reason,
Message: "Pod was rejected: " + message})
}
func recordAdmissionRejection(reason string) {
// It is possible that the "reason" label can have high cardinality.
// To keep this metric from exploding, we maintain an allowlist of known
// reasons and only record reasons from this list, using the "Other" reason
// for the rest.
if admissionRejectionReasons.Has(reason) {
metrics.AdmissionRejectionsTotal.WithLabelValues(reason).Inc()
} else if strings.HasPrefix(reason, lifecycle.InsufficientResourcePrefix) {
// non-extended resources (like cpu, memory, ephemeral-storage, pods)
// are already included in admissionRejectionReasons.
metrics.AdmissionRejectionsTotal.WithLabelValues("OutOfExtendedResources").Inc()
} else {
metrics.AdmissionRejectionsTotal.WithLabelValues("Other").Inc()
}
}
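// For example (hypothetical reasons; assumes "OutOfcpu" is in the allowlist):
//
//	recordAdmissionRejection("OutOfcpu")       // counted under "OutOfcpu"
//	recordAdmissionRejection(lifecycle.InsufficientResourcePrefix + "example.com/gpu") // counted under "OutOfExtendedResources"
//	recordAdmissionRejection("BrandNewReason") // counted under "Other"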
// syncLoop is the main loop for processing changes. It watches for changes from
// three channels (file, apiserver, and http) and creates a union of them. For
// any new change seen, it runs a sync against the desired state and the running
// state. If no changes are seen to the configuration, it synchronizes the last
// known desired state every sync-frequency seconds. It never returns.
func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
klog.InfoS("Starting kubelet main sync loop")
// The syncTicker wakes up the kubelet to check if there are any pod workers
// that need to be synced. A one-second period is sufficient because the
// sync interval is defaulted to 10s.
syncTicker := time.NewTicker(time.Second)
defer syncTicker.Stop()
housekeepingTicker := time.NewTicker(housekeepingPeriod)
defer housekeepingTicker.Stop()
plegCh := kl.pleg.Watch()
const (
base = 100 * time.Millisecond
max = 5 * time.Second
factor = 2
)
duration := base
// Responsible for checking limits in resolv.conf
// The limits do not have anything to do with individual pods
// Since this is called in syncLoop, we don't need to call it anywhere else
if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
kl.dnsConfigurer.CheckLimitsForResolvConf(klog.FromContext(ctx))
}
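// With base=100ms, factor=2, and max=5s, the sleep sequence under persistent
// runtime errors is 100ms, 200ms, 400ms, ..., 3.2s, then capped at 5s per
// iteration.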
for {
if err := kl.runtimeState.runtimeErrors(); err != nil {
klog.ErrorS(err, "Skipping pod synchronization")
// exponential backoff
time.Sleep(duration)
duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
continue
}
// reset backoff if we have a success
duration = base
kl.syncLoopMonitor.Store(kl.clock.Now())
if !kl.syncLoopIteration(ctx, updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
break
}
kl.syncLoopMonitor.Store(kl.clock.Now())
}
}
// syncLoopIteration reads from various channels and dispatches pods to the
// given handler.
//
// Arguments:
// 1. configCh: a channel to read config events from
// 2. handler: the SyncHandler to dispatch pods to
// 3. syncCh: a channel to read periodic sync events from
// 4. housekeepingCh: a channel to read housekeeping events from
// 5. plegCh: a channel to read PLEG updates from
//
// Events are also read from the kubelet liveness manager's update channel.
//
// The workflow is to read from one of the channels, handle that event, and
// update the timestamp in the sync loop monitor.
//
// Here is an appropriate place to note that despite the syntactical
// similarity to the switch statement, the case statements in a select are
// evaluated in a pseudorandom order if there are multiple channels ready to
// read from when the select is evaluated. In other words, you cannot assume
// that the case statements are evaluated in order if multiple channels have
// events.
//
// With that in mind, in truly no particular order, the different channels
// are handled as follows:
//
// - configCh: dispatch the pods for the config change to the appropriate
// handler callback for the event type
// - plegCh: update the runtime cache; sync pod
// - syncCh: sync all pods waiting for sync
// - housekeepingCh: trigger cleanup of pods
// - health manager: sync pods that have failed or in which one or more
// containers have failed health checks
func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
logger := klog.FromContext(ctx)
select {
case u, open := <-configCh:
// Update from a config source; dispatch it to the right handler
// callback.
if !open {
klog.ErrorS(nil, "Update channel is closed, exiting the sync loop")
return false
}
switch u.Op {
case kubetypes.ADD:
klog.V(2).InfoS("SyncLoop ADD", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
// After restarting, kubelet will get all existing pods through
// ADD as if they are new pods. These pods will then go through the
// admission process and *may* be rejected. This can be resolved
// once we have checkpointing.
handler.HandlePodAdditions(u.Pods)
case kubetypes.UPDATE:
klog.V(2).InfoS("SyncLoop UPDATE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
handler.HandlePodUpdates(u.Pods)
case kubetypes.REMOVE:
klog.V(2).InfoS("SyncLoop REMOVE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
handler.HandlePodRemoves(u.Pods)
case kubetypes.RECONCILE:
klog.V(4).InfoS("SyncLoop RECONCILE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
handler.HandlePodReconcile(u.Pods)
case kubetypes.DELETE:
klog.V(2).InfoS("SyncLoop DELETE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
// DELETE is treated as an UPDATE because of graceful deletion.
handler.HandlePodUpdates(u.Pods)
case kubetypes.SET:
// TODO: Do we want to support this?
klog.ErrorS(nil, "Kubelet does not support snapshot update")
default:
klog.ErrorS(nil, "Invalid operation type received", "operation", u.Op)
}
kl.sourcesReady.AddSource(u.Source)
case e := <-plegCh:
if isSyncPodWorthy(e) {
// PLEG event for a pod; sync it.
if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
klog.V(2).InfoS("SyncLoop (PLEG): event for pod", "pod", klog.KObj(pod), "event", e)
handler.HandlePodSyncs([]*v1.Pod{pod})
} else {
// If the pod no longer exists, ignore the event.
klog.V(4).InfoS("SyncLoop (PLEG): pod does not exist, ignore irrelevant event", "event", e)
}
}
if e.Type == pleg.ContainerDied {
if containerID, ok := e.Data.(string); ok {
kl.cleanUpContainersInPod(e.ID, containerID)
}
}
case <-syncCh:
// Sync pods waiting for sync
podsToSync := kl.getPodsToSync()
if len(podsToSync) == 0 {
break
}
klog.V(4).InfoS("SyncLoop (SYNC) pods", "total", len(podsToSync), "pods", klog.KObjSlice(podsToSync))
handler.HandlePodSyncs(podsToSync)
case update := <-kl.livenessManager.Updates():
if update.Result == proberesults.Failure {
handleProbeSync(kl, update, handler, "liveness", "unhealthy")
}
case update := <-kl.readinessManager.Updates():
ready := update.Result == proberesults.Success
kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
status := "not ready"
if ready {
status = "ready"
}
handleProbeSync(kl, update, handler, "readiness", status)
case update := <-kl.startupManager.Updates():
started := update.Result == proberesults.Success
kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)
status := "unhealthy"
if started {
status = "started"
}
handleProbeSync(kl, update, handler, "startup", status)
case update := <-kl.containerManager.Updates():
pods := []*v1.Pod{}
for _, p := range update.PodUIDs {
if pod, ok := kl.podManager.GetPodByUID(types.UID(p)); ok {
klog.V(3).InfoS("SyncLoop (containermanager): event for pod", "pod", klog.KObj(pod), "event", update)
pods = append(pods, pod)
} else {
// If the pod no longer exists, ignore the event.
klog.V(4).InfoS("SyncLoop (containermanager): pod does not exist, ignore devices updates", "event", update)
}
}
if len(pods) > 0 {
// Update the pods by syncing them again.
// We do not apply the optimization of updating the status directly, but could
// do so later.
handler.HandlePodSyncs(pods)
}
case <-housekeepingCh:
if !kl.sourcesReady.AllReady() {
// If the sources aren't ready, skip housekeeping, as we may accidentally
// delete pods from unready sources.
klog.V(4).InfoS("SyncLoop (housekeeping, skipped): sources aren't ready yet")
} else {
start := time.Now()
klog.V(4).InfoS("SyncLoop (housekeeping)")
if err := handler.HandlePodCleanups(ctx); err != nil {
klog.ErrorS(err, "Failed cleaning pods")
}
duration := time.Since(start)
if duration > housekeepingWarningDuration {
klog.ErrorS(fmt.Errorf("housekeeping took too long"), "Housekeeping took longer than expected", "expected", housekeepingWarningDuration, "actual", duration.Round(time.Millisecond))
}
klog.V(4).InfoS("SyncLoop (housekeeping) end", "duration", duration.Round(time.Millisecond))
}
}
return true
}
func handleProbeSync(kl *Kubelet, update proberesults.Update, handler SyncHandler, probe, status string) {
// We should not use the pod from the probe update, because it is never updated after initialization.
pod, ok := kl.podManager.GetPodByUID(update.PodUID)
if !ok {
// If the pod no longer exists, ignore the update.
klog.V(4).InfoS("SyncLoop (probe): ignore irrelevant update", "probe", probe, "status", status, "update", update)
return
}
klog.V(1).InfoS("SyncLoop (probe)", "probe", probe, "status", status, "pod", klog.KObj(pod))
handler.HandlePodSyncs([]*v1.Pod{pod})
}
// HandlePodAdditions is the callback in SyncHandler for pods being added from
// a config source.
func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
start := kl.clock.Now()
sort.Sort(sliceutils.PodsByCreationTime(pods))
var pendingResizes []types.UID
for _, pod := range pods {
// Always add the pod to the pod manager. Kubelet relies on the pod
// manager as the source of truth for the desired state. If a pod does
// not exist in the pod manager, it means that it has been deleted in
// the apiserver and no action (other than cleanup) is required.
kl.podManager.AddPod(pod)
kl.podCertificateManager.TrackPod(context.TODO(), pod)
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
continue
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodUpdate,
StartTime: start,
})
continue
}
// Only go through the admission process if the pod is not requested
// for termination by another part of the kubelet. If the pod is already
// using resources (previously admitted), the pod worker is going to be
// shutting it down. If the pod hasn't started yet, we know that when
// the pod worker is invoked it will also avoid setting up the pod, so
// we simply avoid doing any work.
// We also do not try to admit a pod that is already in a terminal phase.
if !kl.podWorkers.IsPodTerminationRequested(pod.UID) && !podutil.IsPodPhaseTerminal(pod.Status.Phase) {
// Check if we can admit the pod; if not, reject it.
// Rejected pods are marked as failed, so the set of active pods includes all
// admitted pods that are still alive.
if ok, reason, message := kl.allocationManager.AddPod(kl.GetActivePods(), pod); !ok {
kl.rejectPod(pod, reason, message)
// We avoid recording the metric in canAdmitPod because it's called
// repeatedly during a resize, which would inflate the metric.
// Instead, we record the metric here in HandlePodAdditions for new pods
// and capture resize events separately.
recordAdmissionRejection(reason)
continue
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// Backfill the queue of pending resizes, but only after all the pods have
// been added. This ensures that no resizes get resolved until all the
// existing pods are added.
_, updatedFromAllocation := kl.allocationManager.UpdatePodFromAllocation(pod)
if updatedFromAllocation {
pendingResizes = append(pendingResizes, pod.UID)
}
}
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodCreate,
StartTime: start,
})
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
kl.statusManager.BackfillPodResizeConditions(pods)
for _, uid := range pendingResizes {
kl.allocationManager.PushPendingResize(uid)
}
if len(pendingResizes) > 0 {
kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodsAdded)
}
}
}
// HandlePodUpdates is the callback in the SyncHandler interface for pods
// being updated from a config source.
func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
oldPod, _ := kl.podManager.GetPodByUID(pod.UID)
kl.podManager.UpdatePod(pod)
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
continue
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
if recordContainerResizeOperations(oldPod, pod) {
_, updatedFromAllocation := kl.allocationManager.UpdatePodFromAllocation(pod)
if updatedFromAllocation {
kl.allocationManager.PushPendingResize(pod.UID)
// TODO(natasha41575): If the resize is immediately actuated, it will trigger a pod sync
// and we will end up calling UpdatePod twice. Figure out if there is a way to avoid this.
kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodUpdated)
} else {
// We can hit this case if a pending resize has been reverted,
// so we need to clear the pending resize condition.
kl.statusManager.ClearPodResizePendingCondition(pod.UID)
}
}
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodUpdate,
StartTime: start,
})
}
}
// recordContainerResizeOperations records whether any of the pod's containers
// need to be resized, and returns true if so.
func recordContainerResizeOperations(oldPod, newPod *v1.Pod) bool {
hasResize := false
if oldPod == nil {
// This should never happen.
return true
}
for oldContainer, containerType := range podutil.ContainerIter(&oldPod.Spec, podutil.InitContainers|podutil.Containers) {
if !allocation.IsResizableContainer(oldContainer, containerType) {
continue
}
var newContainer *v1.Container
for new, newType := range podutil.ContainerIter(&newPod.Spec, podutil.InitContainers|podutil.Containers) {
if !allocation.IsResizableContainer(new, newType) {
continue
}
if new.Name == oldContainer.Name && containerType == newType {
newContainer = new
}
}
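// Note: containers cannot be added to or removed from a pod spec (only
// resources may be mutated in place), so a matching container is always
// expected to be found above.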
newResources := newContainer.Resources
oldResources := oldContainer.Resources
if op := resizeOperationForResources(newResources.Requests.Memory(), oldResources.Requests.Memory()); op != "" {
hasResize = true
metrics.ContainerRequestedResizes.WithLabelValues("memory", "requests", op).Inc()
}
if op := resizeOperationForResources(newResources.Limits.Memory(), oldResources.Limits.Memory()); op != "" {
hasResize = true
metrics.ContainerRequestedResizes.WithLabelValues("memory", "limits", op).Inc()
}
if op := resizeOperationForResources(newResources.Requests.Cpu(), oldResources.Requests.Cpu()); op != "" {
hasResize = true
metrics.ContainerRequestedResizes.WithLabelValues("cpu", "requests", op).Inc()
}
if op := resizeOperationForResources(newResources.Limits.Cpu(), oldResources.Limits.Cpu()); op != "" {
hasResize = true
metrics.ContainerRequestedResizes.WithLabelValues("cpu", "limits", op).Inc()
}
}
return hasResize
}
func resizeOperationForResources(new, old *resource.Quantity) string {
if new.IsZero() && !old.IsZero() {
return "remove"
}
if old.IsZero() && !new.IsZero() {
return "add"
}
if new.Cmp(*old) < 0 {
return "decrease"
}
if new.Cmp(*old) > 0 {
return "increase"
}
return ""
}
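// A small sketch of the classification, with hypothetical quantities:
//
//	oldQ := resource.MustParse("500m")
//	newQ := resource.MustParse("1")
//	zero := resource.Quantity{}
//	resizeOperationForResources(&newQ, &oldQ) // "increase"
//	resizeOperationForResources(&oldQ, &newQ) // "decrease"
//	resizeOperationForResources(&zero, &oldQ) // "remove"
//	resizeOperationForResources(&oldQ, &zero) // "add"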
// HandlePodRemoves is the callback in the SyncHandler interface for pods
// being removed from a config source.
func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
kl.podCertificateManager.ForgetPod(context.TODO(), pod)
kl.podManager.RemovePod(pod)
kl.allocationManager.RemovePod(pod.UID)
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
continue
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodUpdate,
StartTime: start,
})
continue
}
// Deletion is allowed to fail because the periodic cleanup routine
// will trigger deletion again.
if err := kl.deletePod(pod); err != nil {
klog.V(2).InfoS("Failed to delete pod", "pod", klog.KObj(pod), "err", err)
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodsRemoved)
}
}
// HandlePodReconcile is the callback in the SyncHandler interface for pods
// that should be reconciled. Pods are reconciled when only the status of the
// pod is updated in the API.
func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
start := kl.clock.Now()
retryPendingResizes := false
hasPendingResizes := kl.allocationManager.HasPendingResizes()
for _, pod := range pods {
// Update the pod in the pod manager; the status manager will periodically
// reconcile according to the pod manager.
oldPod, _ := kl.podManager.GetPodByUID(pod.UID)
kl.podManager.UpdatePod(pod)
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
continue
}
// Static pods should be reconciled the same way as regular pods
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// If there are pending resizes, check whether the requests shrank as a result of the status
// resources changing.
if hasPendingResizes && !retryPendingResizes && oldPod != nil {
opts := resourcehelper.PodResourcesOptions{
UseStatusResources: true,
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
// Ignore desired resources when aggregating the resources.
allocatedOldPod, _ := kl.allocationManager.UpdatePodFromAllocation(oldPod)
allocatedPod, _ := kl.allocationManager.UpdatePodFromAllocation(pod)
oldRequest := resourcehelper.PodRequests(allocatedOldPod, opts)
newRequest := resourcehelper.PodRequests(allocatedPod, opts)
// If cpu or memory requests shrank, then retry the pending resizes.
retryPendingResizes = newRequest.Memory().Cmp(*oldRequest.Memory()) < 0 ||
newRequest.Cpu().Cmp(*oldRequest.Cpu()) < 0
}
}
// TODO: reconcile being calculated in the config manager is questionable, and avoiding
// extra syncs may no longer be necessary. Reevaluate whether Reconcile and Sync can be
// merged (after resolving the next two TODOs).
// Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation.
// TODO: this should be unnecessary today - determine what is the cause for this to
// be different than Sync, or if there is a better place for it. For instance, we have
// needsReconcile in kubelet/config, here, and in status_manager.
if status.NeedToReconcilePodReadiness(pod) {
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodSync,
StartTime: start,
})
}
// After an evicted pod is synced, all dead containers in the pod can be removed.
// TODO: this is questionable - status read is async and during eviction we already
// expect to not have some container info. The pod worker knows whether a pod has
// been evicted, so if this is about minimizing the time to react to an eviction we
// can do better. If it's about preserving pod status info we can also do better.
if eviction.PodIsEvicted(pod.Status) {
if podStatus, err := kl.podCache.Get(pod.UID); err == nil {
kl.containerDeletor.deleteContainersInPod("", podStatus, true)
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
if retryPendingResizes {
kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodResized)
}
}
}
// HandlePodSyncs is the callback in the SyncHandler interface for pods
// that should be dispatched to pod workers for sync.
func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
klog.V(2).InfoS("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
continue
}
// Syncing a mirror pod is a programmer error since the intent of sync is to
// batch notify all pending work. We should make it impossible to double sync,
// but for now log a programmer error to prevent accidental introduction.
klog.V(3).InfoS("Programmer error, HandlePodSyncs does not expect to receive mirror pods", "podUID", pod.UID, "mirrorPodUID", mirrorPod.UID)
continue
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodSync,
StartTime: start,
})
}
}
// LatestLoopEntryTime returns the last time in the sync loop monitor.
func (kl *Kubelet) LatestLoopEntryTime() time.Time {
val := kl.syncLoopMonitor.Load()
if val == nil {
return time.Time{}
}
return val.(time.Time)
}
// SyncLoopHealthCheck checks if kubelet's sync loop that updates containers is working.
func (kl *Kubelet) SyncLoopHealthCheck(req *http.Request) error {
duration := kl.resyncInterval * 2
minDuration := time.Minute * 5
if duration < minDuration {
duration = minDuration
}
enterLoopTime := kl.LatestLoopEntryTime()
if !enterLoopTime.IsZero() && time.Now().After(enterLoopTime.Add(duration)) {
return fmt.Errorf("sync Loop took longer than expected")
}
return nil
}
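// Worked example: a resync interval of 10s yields a duration of 20s, which is
// then raised to the 5-minute minimum, so the check fails only when the sync
// loop has not been entered for more than 5 minutes.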
// updateRuntimeUp calls the container runtime status callback, initializing
// the runtime dependent modules when the container runtime first comes up,
// and records an error in the runtime state if the status check fails. If the
// status check is OK, it updates the container runtime uptime in the kubelet
// runtimeState.
func (kl *Kubelet) updateRuntimeUp() {
kl.updateRuntimeMux.Lock()
defer kl.updateRuntimeMux.Unlock()
ctx := context.Background()
s, err := kl.containerRuntime.Status(ctx)
if err != nil {
klog.ErrorS(err, "Container runtime sanity check failed")
return
}
if s == nil {
klog.ErrorS(nil, "Container runtime status is nil")
return
}
// Periodically log the whole runtime status for debugging.
klog.V(4).InfoS("Container runtime status", "status", s)
klogErrorS := klog.ErrorS
if !kl.containerRuntimeReadyExpected {
klogErrorS = klog.V(4).ErrorS
}
networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
if networkReady == nil || !networkReady.Status {
klogErrorS(nil, "Container runtime network not ready", "networkReady", networkReady)
kl.runtimeState.setNetworkState(fmt.Errorf("container runtime network not ready: %v", networkReady))
} else {
// Set nil if the container runtime network is ready.
kl.runtimeState.setNetworkState(nil)
}
// Information in the RuntimeReady condition will be propagated to the NodeReady condition.
runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
// If RuntimeReady is not set or is false, report an error.
if runtimeReady == nil || !runtimeReady.Status {
klogErrorS(nil, "Container runtime not ready", "runtimeReady", runtimeReady)
kl.runtimeState.setRuntimeState(fmt.Errorf("container runtime not ready: %v", runtimeReady))
return
}
kl.runtimeState.setRuntimeState(nil)
kl.runtimeState.setRuntimeHandlers(s.Handlers)
kl.runtimeState.setRuntimeFeatures(s.Features)
kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules)
kl.runtimeState.setRuntimeSync(kl.clock.Now())
}
// GetConfiguration returns the KubeletConfiguration used to configure the kubelet.
func (kl *Kubelet) GetConfiguration() kubeletconfiginternal.KubeletConfiguration {
return kl.kubeletConfiguration
}
// BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() {
// Make an event that kubelet restarted.
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
}
// ListenAndServe runs the kubelet HTTP server.
func (kl *Kubelet) ListenAndServe(kubeCfg *kubeletconfiginternal.KubeletConfiguration, tlsOptions *server.TLSOptions,
auth server.AuthInterface, tp trace.TracerProvider) {
server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, kl.containerManager.GetHealthCheckers(), kl.flagz, kubeCfg, tlsOptions, auth, tp)
}
// ListenAndServeReadOnly runs the kubelet HTTP server in read-only mode.
func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint, tp trace.TracerProvider) {
server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, kl.containerManager.GetHealthCheckers(), kl.flagz, address, port, tp)
}
type kubeletPodsProvider struct {
kl *Kubelet
}
func (pp *kubeletPodsProvider) GetActivePods() []*v1.Pod {
return pp.kl.GetActivePods()
}
func (pp *kubeletPodsProvider) GetPods() []*v1.Pod {
return pp.kl.podManager.GetPods()
}
func (pp *kubeletPodsProvider) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return pp.kl.podManager.GetPodByName(namespace, name)
}
// ListenAndServePodResources runs the kubelet podresources grpc service
func (kl *Kubelet) ListenAndServePodResources(ctx context.Context) {
endpoint, err := util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket)
if err != nil {
klog.V(2).InfoS("Failed to get local endpoint for PodResources endpoint", "err", err)
return
}
providers := podresources.PodResourcesProviders{
Pods: &kubeletPodsProvider{kl: kl},
Devices: kl.containerManager,
Cpus: kl.containerManager,
Memory: kl.containerManager,
DynamicResources: kl.containerManager,
}
server.ListenAndServePodResources(ctx, endpoint, providers)
}
// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
if podStatus, err := kl.podCache.Get(podID); err == nil {
// When an evicted or deleted pod has already synced, all containers can be removed.
removeAll := kl.podWorkers.ShouldPodContentBeRemoved(podID)
kl.containerDeletor.deleteContainersInPod(exitedContainerID, podStatus, removeAll)
}
}
// fastStatusUpdateOnce starts a loop that checks if the current state of kubelet
// + container runtime would be able to turn the node ready, and syncs the ready
// state to the apiserver as soon as possible. The function returns after the
// node status has been updated following such an event, or when the node is
// already ready. It is executed only during kubelet start, which improves the
// latency to a ready node by updating the kubelet state, runtime status, and
// node statuses ASAP.
func (kl *Kubelet) fastStatusUpdateOnce() {
ctx := context.Background()
start := kl.clock.Now()
stopCh := make(chan struct{})
// Keep trying to make a fast node status update until either the timeout is reached or an update succeeds.
wait.Until(func() {
// fastNodeStatusUpdate returns true when it succeeds, or when the grace period
// has expired (the status was not updated within nodeReadyGracePeriod and the
// second argument below becomes true); in either case we close the channel and
// abort the loop.
if kl.fastNodeStatusUpdate(ctx, kl.clock.Since(start) >= nodeReadyGracePeriod) {
close(stopCh)
}
}, 100*time.Millisecond, stopCh)
}
// CheckpointContainer tries to checkpoint a container. The parameters are used to
// look up the specified container. If the container specified by the given
// parameters cannot be found, an error is returned. If the container is found,
// the container engine will be asked to checkpoint the given container into the
// kubelet's default checkpoint directory.
func (kl *Kubelet) CheckpointContainer(
ctx context.Context,
podUID types.UID,
podFullName,
containerName string,
options *runtimeapi.CheckpointContainerRequest,
) error {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container %v not found", containerName)
}
options.Location = filepath.Join(
kl.getCheckpointsDir(),
fmt.Sprintf(
"checkpoint-%s-%s-%s.tar",
podFullName,
containerName,
time.Now().Format(time.RFC3339),
),
)
options.ContainerId = string(container.ID.ID)
if err := kl.containerRuntime.CheckpointContainer(ctx, options); err != nil {
return err
}
return nil
}
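// For illustration (hypothetical values): checkpointing container "app" in pod
// "web_default" at 2024-01-02T15:04:05Z would produce
//
//	<checkpoints-dir>/checkpoint-web_default-app-2024-01-02T15:04:05Z.tar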
// ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics.
func (kl *Kubelet) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
return kl.containerRuntime.ListMetricDescriptors(ctx)
}
// ListPodSandboxMetrics retrieves the metrics for all pod sandboxes.
func (kl *Kubelet) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
return kl.containerRuntime.ListPodSandboxMetrics(ctx)
}
func (kl *Kubelet) supportLocalStorageCapacityIsolation() bool {
return kl.GetConfiguration().LocalStorageCapacityIsolation
}
// isSyncPodWorthy filters out events that are not worthy of pod syncing
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
// ContainerRemoved doesn't affect pod state
return event.Type != pleg.ContainerRemoved
}
// PrepareDynamicResources calls the container Manager PrepareDynamicResources API
// This method implements the RuntimeHelper interface
func (kl *Kubelet) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return kl.containerManager.PrepareDynamicResources(ctx, pod)
}
// UnprepareDynamicResources calls the container Manager UnprepareDynamicResources API
// This method implements the RuntimeHelper interface
func (kl *Kubelet) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
return kl.containerManager.UnprepareDynamicResources(ctx, pod)
}
// Ensure Mirror Pod for Static Pod exists and matches the current pod definition.
// The function logs and ignores any errors.
func (kl *Kubelet) tryReconcileMirrorPods(ctx context.Context, staticPod, mirrorPod *v1.Pod) {
if !kubetypes.IsStaticPod(staticPod) {
return
}
deleted := false
if mirrorPod != nil {
if mirrorPod.DeletionTimestamp != nil || !kubepod.IsMirrorPodOf(mirrorPod, staticPod) {
// The mirror pod is semantically different from the static pod. Remove
// it. The mirror pod will get recreated later.
klog.InfoS("Trying to delete pod", "pod", klog.KObj(mirrorPod), "podUID", mirrorPod.UID)
podFullName := kubecontainer.GetPodFullName(staticPod)
if ok, err := kl.mirrorPodClient.DeleteMirrorPod(ctx, podFullName, &mirrorPod.UID); err != nil {
klog.ErrorS(err, "Failed deleting mirror pod", "pod", klog.KObj(mirrorPod))
} else if ok {
deleted = ok
klog.InfoS("Deleted mirror pod as it didn't match the static Pod", "pod", klog.KObj(mirrorPod))
}
}
}
if mirrorPod == nil || deleted {
node, err := kl.GetNode()
if err != nil {
klog.ErrorS(err, "No need to create a mirror pod, since failed to get node info from the cluster", "node", klog.KRef("", string(kl.nodeName)))
} else if node.DeletionTimestamp != nil {
klog.InfoS("No need to create a mirror pod, since node has been removed from the cluster", "node", klog.KRef("", string(kl.nodeName)))
} else {
klog.InfoS("Creating a mirror pod for static pod", "pod", klog.KObj(staticPod))
if err := kl.mirrorPodClient.CreateMirrorPod(ctx, staticPod); err != nil {
klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(staticPod))
}
}
}
}
// Ensure Mirror Pod for Static Pod exists as soon as node is registered.
func (kl *Kubelet) fastStaticPodsRegistration(ctx context.Context) {
if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
_, err := kl.GetNode()
if err == nil {
return true, nil
}
klog.V(4).ErrorS(err, "Unable to register mirror pod because node is not registered yet", "node", klog.KRef("", string(kl.nodeName)))
return false, nil
}); err != nil {
klog.V(4).ErrorS(err, "Failed to wait until node is registered", "node", klog.KRef("", string(kl.nodeName)))
}
staticPodToMirrorPodMap := kl.podManager.GetStaticPodToMirrorPodMap()
for staticPod, mirrorPod := range staticPodToMirrorPodMap {
kl.tryReconcileMirrorPods(ctx, staticPod, mirrorPod)
}
}
func (kl *Kubelet) SetPodWatchCondition(podUID types.UID, conditionKey string, condition pleg.WatchCondition) {
kl.pleg.SetPodWatchCondition(podUID, conditionKey, condition)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
cadvisorv2 "github.com/google/cadvisor/info/v2"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilpath "k8s.io/utils/path"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume/csi"
)
// getRootDir returns the full path to the directory under which kubelet can
// store data. These functions are useful to pass interfaces to other modules
// that may need to know where to write data without getting a whole kubelet
// instance.
func (kl *Kubelet) getRootDir() string {
return kl.rootDirectory
}
// getPodLogsDir returns the full path to the directory that kubelet can use
// to store pod's log files. This defaults to /var/log/pods if not specified
// otherwise in the config file.
func (kl *Kubelet) getPodLogsDir() string {
return kl.podLogsDirectory
}
// getPodsDir returns the full path to the directory under which pod
// directories are created.
func (kl *Kubelet) getPodsDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodsDirName)
}
// getPluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this
// directory, named after their own names.
func (kl *Kubelet) getPluginsDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName)
}
// getPluginsRegistrationDir returns the full path to the directory under which
// plugin sockets should be placed to be registered.
// More information about plugin registration is available in the pluginwatcher
// module.
func (kl *Kubelet) getPluginsRegistrationDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsRegistrationDirName)
}
// getPluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodPluginDir.
func (kl *Kubelet) getPluginDir(pluginName string) string {
return filepath.Join(kl.getPluginsDir(), pluginName)
}
// getCheckpointsDir returns a data directory name for checkpoints.
// Checkpoints can be stored in this directory for further use.
func (kl *Kubelet) getCheckpointsDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletCheckpointsDirName)
}
// getVolumeDevicePluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this
// directory, named after their own names.
func (kl *Kubelet) getVolumeDevicePluginsDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName)
}
// getVolumeDevicePluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodVolumeDeviceDir.
func (kl *Kubelet) getVolumeDevicePluginDir(pluginName string) string {
return filepath.Join(kl.getVolumeDevicePluginsDir(), pluginName, config.DefaultKubeletVolumeDevicesDirName)
}
// GetPodDir returns the full path to the per-pod data directory for the
// specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) GetPodDir(podUID types.UID) string {
return kl.getPodDir(podUID)
}
// ListPodsFromDisk gets a list of pods that have data directories.
func (kl *Kubelet) ListPodsFromDisk() ([]types.UID, error) {
return kl.listPodsFromDisk()
}
// HandlerSupportsUserNamespaces checks whether the specified handler supports
// user namespaces.
func (kl *Kubelet) HandlerSupportsUserNamespaces(rtHandler string) (bool, error) {
rtHandlers := kl.runtimeState.runtimeHandlers()
if len(rtHandlers) == 0 {
// The slice is empty if the runtime is old and doesn't support this message.
return false, nil
}
for _, h := range rtHandlers {
if h.Name == rtHandler {
return h.SupportsUserNamespaces, nil
}
}
return false, fmt.Errorf("the handler %q is not known", rtHandler)
}
// GetKubeletMappings gets the additional IDs allocated for the Kubelet.
func (kl *Kubelet) GetKubeletMappings() (uint32, uint32, error) {
return kl.getKubeletMappings()
}
func (kl *Kubelet) GetMaxPods() int {
return kl.maxPods
}
func (kl *Kubelet) GetUserNamespacesIDsPerPod() uint32 {
userNs := kl.kubeletConfiguration.UserNamespaces
if userNs == nil {
return config.DefaultKubeletUserNamespacesIDsPerPod
}
idsPerPod := userNs.IDsPerPod
if idsPerPod == nil || *idsPerPod == 0 {
return config.DefaultKubeletUserNamespacesIDsPerPod
}
// The value is already validated to be <= MaxUint32,
// so we can safely drop the upper bits.
return uint32(*idsPerPod)
}
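// Resolution sketch: a nil UserNamespaces config, or a nil or zero IDsPerPod,
// yields config.DefaultKubeletUserNamespacesIDsPerPod; an explicit IDsPerPod
// of, say, 65536 is returned as uint32(65536).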
// getPodDir returns the full path to the per-pod directory for the pod with
// the given UID.
func (kl *Kubelet) getPodDir(podUID types.UID) string {
return filepath.Join(kl.getPodsDir(), string(podUID))
}
// getPodVolumeSubpathsDir returns the full path to the per-pod subpaths directory under
// which subpath volumes are created for the specified pod. This directory may not
// exist if the pod does not exist or subpaths are not specified.
func (kl *Kubelet) getPodVolumeSubpathsDir(podUID types.UID) string {
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeSubpathsDirName)
}
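// Hypothetical on-disk layout walked by getPodVolumeSubpathListFromDisk,
// assuming the default /var/lib/kubelet root:
//
//	/var/lib/kubelet/pods/<pod-uid>/volume-subpaths/<volume>/<container>/<subPathIndex>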
// getPodVolumesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string {
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumesDirName)
}
// getPodVolumeDir returns the full path to the directory which represents the
// named volume under the named plugin for specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return filepath.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName)
}
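// Hypothetical resulting path, assuming the default /var/lib/kubelet root and
// a configmap volume (plugin names are escaped, so "/" becomes "~"):
//
//	/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~configmap/my-volume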
// getPodVolumeDevicesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDevicesDir(podUID types.UID) string {
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeDevicesDirName)
}
// getPodVolumeDeviceDir returns the full path to the directory which represents the
// named plugin for specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return filepath.Join(kl.getPodVolumeDevicesDir(podUID), pluginName)
}
// getPodPluginsDir returns the full path to the per-pod data directory under
// which plugins may store data for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string {
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletPluginsDirName)
}
// getPodPluginDir returns a data directory name for a given plugin name for a
// given pod UID. Plugins can use these directories to store data that they
// need to persist. For non-per-pod plugin data, see getPluginDir.
func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string {
return filepath.Join(kl.getPodPluginsDir(podUID), pluginName)
}
// getPodContainerDir returns the full path to the per-pod data directory under
// which container data is held for the specified pod. This directory may not
// exist if the pod or container does not exist.
func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletContainersDirName, ctrName)
}
// getPodResourcesDir returns the full path to the directory containing the pod resources socket
func (kl *Kubelet) getPodResourcesDir() string {
return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodResourcesDirName)
}
// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {
pods := kl.podManager.GetPods()
for i, p := range pods {
// Pod cache does not get updated status for static pods.
// TODO(tallclair): Most callers of GetPods() do not need pod status. We should either parameterize this,
// or move the status injection to only the callers that do need it (maybe just the /pods http handler?).
if kubelettypes.IsStaticPod(p) {
if status, ok := kl.statusManager.GetPodStatus(p.UID); ok {
// do not mutate the cache
p = p.DeepCopy()
p.Status = status
pods[i] = p
}
}
}
return pods
}
// GetRunningPods returns all pods running on kubelet from looking at the
// container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods(ctx)
if err != nil {
return nil, err
}
apiPods := make([]*v1.Pod, 0, len(pods))
for _, pod := range pods {
apiPods = append(apiPods, pod.ToAPIPod())
}
return apiPods, nil
}
// GetPodByFullName gets the pod with the given 'full' name (which
// incorporates the namespace), and whether the pod was found.
func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
return kl.podManager.GetPodByFullName(podFullName)
}
// GetPodByName provides the first pod that matches namespace and name, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return kl.podManager.GetPodByName(namespace, name)
}
// GetPodByCgroupfs provides the pod that maps to the specified cgroup, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByCgroupfs(cgroupfs string) (*v1.Pod, bool) {
pcm := kl.containerManager.NewPodContainerManager()
if result, podUID := pcm.IsPodCgroup(cgroupfs); result {
return kl.podManager.GetPodByUID(podUID)
}
return nil, false
}
// getRuntime returns the current Runtime implementation in use by the kubelet.
func (kl *Kubelet) getRuntime() kubecontainer.Runtime {
return kl.containerRuntime
}
// GetNode returns the node info for the configured node name of this Kubelet.
func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.kubeClient == nil {
return kl.initialNode(context.TODO())
}
return kl.nodeLister.Get(string(kl.nodeName))
}
// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates().
// The *v1.Node is obtained as follows:
// Return kubelet's nodeInfo for this node, except on error or if in standalone mode,
// in which case return a manufactured nodeInfo representing a node with no pods,
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if kl.kubeClient != nil {
if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil {
return n, nil
}
}
return kl.initialNode(context.TODO())
}
// GetNodeConfig returns the container manager node config.
func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
return kl.containerManager.GetNodeConfig()
}
// GetPodCgroupRoot returns the literal cgroupfs value for the cgroup containing all pods
func (kl *Kubelet) GetPodCgroupRoot() string {
return kl.containerManager.GetPodCgroupRoot()
}
// getHostIPsAnyWay attempts to return the host IPs from kubelet's nodeInfo, or
// the initialNode.
func (kl *Kubelet) getHostIPsAnyWay() ([]net.IP, error) {
node, err := kl.getNodeAnyWay()
if err != nil {
return nil, err
}
return utilnode.GetNodeHostIPs(node)
}
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod)
}
// getPodVolumePathListFromDisk returns a list of the volume paths by reading the
// volume directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {
volumes := []string{}
podVolDir := kl.getPodVolumesDir(podUID)
if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
return volumes, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
} else if !pathExists {
klog.V(6).InfoS("Path does not exist", "path", podVolDir)
return volumes, nil
}
volumePluginDirs, err := os.ReadDir(podVolDir)
if err != nil {
klog.ErrorS(err, "Could not read directory", "path", podVolDir)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
volumePluginName := volumePluginDir.Name()
volumePluginPath := filepath.Join(podVolDir, volumePluginName)
volumeDirs, err := utilpath.ReadDirNoStat(volumePluginPath)
if err != nil {
return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err)
}
unescapePluginName := utilstrings.UnescapeQualifiedName(volumePluginName)
if unescapePluginName != csi.CSIPluginName {
for _, volumeDir := range volumeDirs {
volumes = append(volumes, filepath.Join(volumePluginPath, volumeDir))
}
} else {
// For CSI volumes, the mounted volume path has an extra sub path "/mount", so also add it
// to the list if the mounted path exists.
for _, volumeDir := range volumeDirs {
path := filepath.Join(volumePluginPath, volumeDir)
csimountpath := csi.GetCSIMounterPath(path)
if pathExists, _ := mount.PathExists(csimountpath); pathExists {
volumes = append(volumes, csimountpath)
}
}
}
}
return volumes, nil
}
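// For a CSI volume the returned path includes the extra "/mount" suffix, e.g.
// (hypothetical):
//
//	/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~csi/pvc-1234/mount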
func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string, error) {
mountedVolumes := []string{}
volumePaths, err := kl.getPodVolumePathListFromDisk(podUID)
if err != nil {
return mountedVolumes, err
}
// Using only IsLikelyNotMountPoint to check might not cover all cases. For CSI volumes that
// either: 1) don't mount or 2) bind mount in the rootfs, the mount check will not work as expected.
// We plan to remove this mountpoint check as a condition before deleting pods since it is
// not reliable and the condition might be different for different types of volumes. But it requires
// a reliable way to clean up unused volume dirs to avoid problems during pod deletion. See the discussion in issue #74650.
for _, volumePath := range volumePaths {
isNotMount, err := kl.mounter.IsLikelyNotMountPoint(volumePath)
if err != nil {
return mountedVolumes, fmt.Errorf("fail to check mount point %q: %v", volumePath, err)
}
if !isNotMount {
mountedVolumes = append(mountedVolumes, volumePath)
}
}
return mountedVolumes, nil
}
// getPodVolumeSubpathListFromDisk returns a list of the volume-subpath paths by reading the
// subpath directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumeSubpathListFromDisk(podUID types.UID) ([]string, error) {
volumes := []string{}
podSubpathsDir := kl.getPodVolumeSubpathsDir(podUID)
if pathExists, pathErr := mount.PathExists(podSubpathsDir); pathErr != nil {
return nil, fmt.Errorf("error checking if path %q exists: %v", podSubpathsDir, pathErr)
} else if !pathExists {
return volumes, nil
}
// Explicitly walks /<volume>/<container name>/<subPathIndex>
volumePluginDirs, err := os.ReadDir(podSubpathsDir)
if err != nil {
klog.ErrorS(err, "Could not read directory", "path", podSubpathsDir)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
volumePluginName := volumePluginDir.Name()
volumePluginPath := filepath.Join(podSubpathsDir, volumePluginName)
containerDirs, err := os.ReadDir(volumePluginPath)
if err != nil {
return volumes, fmt.Errorf("could not read directory %s: %v", volumePluginPath, err)
}
for _, containerDir := range containerDirs {
containerName := containerDir.Name()
containerPath := filepath.Join(volumePluginPath, containerName)
// Switch to ReadDirNoStat at the subPathIndex level to prevent issues with stat'ing
// mount points that may not be responsive
subPaths, err := utilpath.ReadDirNoStat(containerPath)
if err != nil {
return volumes, fmt.Errorf("could not read directory %s: %v", containerPath, err)
}
for _, subPathDir := range subPaths {
volumes = append(volumes, filepath.Join(containerPath, subPathDir))
}
}
}
return volumes, nil
}
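// For reference, the subpath layout walked above is (a sketch; the root shown
// is the default kubelet root directory, which is configurable):
//
//	/var/lib/kubelet/pods/<podUID>/volume-subpaths/<volume>/<container name>/<subPathIndex>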
// GetRequestedContainersInfo returns container info.
func (kl *Kubelet) GetRequestedContainersInfo(containerName string, options cadvisorv2.RequestOptions) (map[string]*cadvisorapiv1.ContainerInfo, error) {
return kl.cadvisor.GetRequestedContainersInfo(containerName, options)
}
// GetVersionInfo returns information about the version of cAdvisor in use.
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
return kl.cadvisor.VersionInfo()
}
// GetCachedMachineInfo assumes that the machine info can't change without a reboot
func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapiv1.MachineInfo, error) {
kl.machineInfoLock.RLock()
defer kl.machineInfoLock.RUnlock()
return kl.machineInfo, nil
}
func (kl *Kubelet) setCachedMachineInfo(info *cadvisorapiv1.MachineInfo) {
kl.machineInfoLock.Lock()
defer kl.machineInfoLock.Unlock()
kl.machineInfo = info
}
// getLastObservedNodeAddresses returns the last observed node addresses.
func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {
node, err := kl.GetNode()
if err != nil || node == nil {
klog.V(4).InfoS("fail to obtain node from local cache", "node", kl.nodeName, "error", err)
return nil
}
return node.Status.Addresses
}
//go:build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"errors"
"fmt"
"os"
"path/filepath"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
func (kl *Kubelet) cgroupVersionCheck() error {
cgroupVersion := kl.containerManager.GetNodeConfig().CgroupVersion
metrics.CgroupVersion.Set(float64(cgroupVersion))
switch cgroupVersion {
case 1:
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.CgroupV1, cm.CgroupV1MaintenanceModeWarning)
return errors.New(cm.CgroupV1MaintenanceModeWarning)
case 2:
cpustat := filepath.Join(util.CgroupRoot, "cpu.stat")
if _, err := os.Stat(cpustat); os.IsNotExist(err) {
// if `/sys/fs/cgroup/cpu.stat` does not exist, the kernel lacks the cgroup v2
// features the kubelet needs, so surface a warning error
return errors.New(cm.CgroupV2KernelWarning)
}
default:
return fmt.Errorf("unsupported cgroup version: %d", cgroupVersion)
}
return nil
}
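// exampleDetectCgroupVersion is an illustrative sketch (not part of kubelet) of
// the same kind of probe done standalone: on a cgroup v2 (unified) hierarchy
// the root contains a cgroup.controllers file. It assumes the standard
// /sys/fs/cgroup mount point.
func exampleDetectCgroupVersion() int {
// cgroup.controllers only exists at the root of a cgroup v2 hierarchy.
if _, err := os.Stat("/sys/fs/cgroup/cgroup.controllers"); err == nil {
return 2
}
return 1
}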
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
)
// updatePodCIDR updates the pod CIDR in the runtime state if it is different
// from the current CIDR. It returns true if the pod CIDR actually changed.
func (kl *Kubelet) updatePodCIDR(ctx context.Context, cidr string) (bool, error) {
kl.updatePodCIDRMux.Lock()
defer kl.updatePodCIDRMux.Unlock()
podCIDR := kl.runtimeState.podCIDR()
if podCIDR == cidr {
return false, nil
}
// kubelet -> generic runtime -> runtime shim -> network plugin
// docker/non-cri implementations have a passthrough UpdatePodCIDR
if err := kl.getRuntime().UpdatePodCIDR(ctx, cidr); err != nil {
// If UpdatePodCIDR fails, the pod CIDR should theoretically be unchanged.
// But it is better to be on the safe side and still return true here.
return true, fmt.Errorf("failed to update pod CIDR: %v", err)
}
klog.InfoS("Updating Pod CIDR", "originalPodCIDR", podCIDR, "newPodCIDR", cidr)
kl.runtimeState.setPodCIDR(cidr)
return true, nil
}
// GetPodDNS returns DNS settings for the pod.
// This function is defined in kubecontainer.RuntimeHelper interface so we
// have to implement it.
func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
return kl.dnsConfigurer.GetPodDNS(context.TODO(), pod)
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
const (
// KubeIPTablesHintChain is the chain whose existence in either iptables-legacy
// or iptables-nft indicates which version of iptables the system is using
KubeIPTablesHintChain utiliptables.Chain = "KUBE-IPTABLES-HINT"
// KubeFirewallChain is kubernetes firewall rules
KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
)
func (kl *Kubelet) initNetworkUtil() {
iptClients := utiliptables.NewBestEffort()
if len(iptClients) == 0 {
// We don't log this as an error because kubelet itself doesn't need any
// of this (it sets up these rules for the benefit of *other* components),
// and because we *expect* this to fail on hosts where only nftables is
// supported (in which case there can't be any other components using
// iptables that would need these rules anyway).
klog.InfoS("No iptables support on this system; not creating the KUBE-IPTABLES-HINT chain")
return
}
for family := range iptClients {
iptClient := iptClients[family]
if kl.syncIPTablesRules(iptClient) {
klog.InfoS("Initialized iptables rules.", "protocol", iptClient.Protocol())
go iptClient.Monitor(
utiliptables.Chain("KUBE-KUBELET-CANARY"),
[]utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter},
func() { kl.syncIPTablesRules(iptClient) },
1*time.Minute, wait.NeverStop,
)
} else {
klog.InfoS("Failed to initialize iptables rules; some functionality may be missing.", "protocol", iptClient.Protocol())
}
}
}
// syncIPTablesRules ensures the KUBE-IPTABLES-HINT chain exists, and the martian packet
// protection rule is installed.
func (kl *Kubelet) syncIPTablesRules(iptClient utiliptables.Interface) bool {
// Create hint chain so other components can see whether we are using iptables-legacy
// or iptables-nft.
if _, err := iptClient.EnsureChain(utiliptables.TableMangle, KubeIPTablesHintChain); err != nil {
klog.ErrorS(err, "Failed to ensure that iptables hint chain exists")
return false
}
if !iptClient.IsIPv6() { // ipv6 doesn't have this issue
// Set up the KUBE-FIREWALL chain and martian packet protection rule.
// (See below.)
// NOTE: kube-proxy (in iptables mode) creates an identical copy of this
// rule. If you want to change this rule in the future, you MUST do so in
// a way that will interoperate correctly with skewed versions of the rule
// created by kube-proxy.
if _, err := iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil {
klog.ErrorS(err, "Failed to ensure that filter table KUBE-FIREWALL chain exists")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil {
klog.ErrorS(err, "Failed to ensure that OUTPUT chain jumps to KUBE-FIREWALL")
return false
}
if _, err := iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil {
klog.ErrorS(err, "Failed to ensure that INPUT chain jumps to KUBE-FIREWALL")
return false
}
// Kube-proxy's use of `route_localnet` to enable NodePorts on localhost
// creates a security hole (https://issue.k8s.io/90259) which this
// iptables rule mitigates. This rule should have been added to
// kube-proxy, but it mistakenly ended up in kubelet instead, and we are
// keeping it in kubelet for now in case other third-party components
// depend on it.
if _, err := iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
"-m", "comment", "--comment", "block incoming localnet connections",
"--dst", "127.0.0.0/8",
"!", "--src", "127.0.0.0/8",
"-m", "conntrack",
"!", "--ctstate", "RELATED,ESTABLISHED,DNAT",
"-j", "DROP"); err != nil {
klog.ErrorS(err, "Failed to ensure rule to drop invalid localhost packets in filter table KUBE-FIREWALL chain")
return false
}
}
return true
}
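// For reference, the localnet-protection rule installed above corresponds
// roughly to the following invocation (a sketch, spelled with the same
// arguments passed to EnsureRule; the filter table is iptables' default):
//
//	iptables -A KUBE-FIREWALL \
//	    -m comment --comment "block incoming localnet connections" \
//	    --dst 127.0.0.0/8 ! --src 127.0.0.0/8 \
//	    -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP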
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"fmt"
"math/rand"
"net"
goruntime "runtime"
"sort"
"strings"
"time"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cloudproviderapi "k8s.io/cloud-provider/api"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
kubeletapis "k8s.io/kubelet/pkg/apis"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
taintutil "k8s.io/kubernetes/pkg/util/taints"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// registerWithAPIServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithAPIServer() {
if kl.registrationCompleted {
return
}
kl.nodeStartupLatencyTracker.RecordAttemptRegisterNode()
step := 100 * time.Millisecond
for {
time.Sleep(step)
step = step * 2
if step >= 7*time.Second {
step = 7 * time.Second
}
node, err := kl.initialNode(context.TODO())
if err != nil {
klog.ErrorS(err, "Unable to construct v1.Node object for kubelet")
continue
}
klog.InfoS("Attempting to register node", "node", klog.KObj(node))
registered := kl.tryRegisterWithAPIServer(node)
if registered {
klog.InfoS("Successfully registered node", "node", klog.KObj(node))
kl.registrationCompleted = true
return
}
}
}
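// exampleRegistrationBackoff is an illustrative sketch (not part of kubelet) of
// the capped exponential backoff used above: the kubelet sleeps 100ms before
// the first attempt and doubles the sleep after each failure, up to a 7s cap.
func exampleRegistrationBackoff(attempts int) []time.Duration {
schedule := make([]time.Duration, 0, attempts)
step := 100 * time.Millisecond
for i := 0; i < attempts; i++ {
schedule = append(schedule, step) // e.g. 100ms, 200ms, 400ms, ..., 7s, 7s
step *= 2
if step >= 7*time.Second {
step = 7 * time.Second
}
}
return schedule
}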
// tryRegisterWithAPIServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful. If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
if err == nil {
kl.nodeStartupLatencyTracker.RecordRegisteredNewNode()
return true
}
switch {
case apierrors.IsAlreadyExists(err):
// Node already exists, proceed to reconcile node.
case apierrors.IsForbidden(err):
// Creating nodes is forbidden, but node may still exist, attempt to get the node.
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletRegistrationGetOnExistsOnly) {
klog.ErrorS(err, "Unable to register node with API server, reason is forbidden", "node", klog.KObj(node))
return false
}
default:
klog.ErrorS(err, "Unable to register node with API server", "node", klog.KObj(node))
return false
}
existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{})
if err != nil {
klog.ErrorS(err, "Unable to register node with API server, error getting existing node", "node", klog.KObj(node))
return false
}
if existingNode == nil {
klog.InfoS("Unable to register node with API server, no node instance returned", "node", klog.KObj(node))
return false
}
originalNode := existingNode.DeepCopy()
klog.InfoS("Node was previously registered", "node", klog.KObj(node))
// Edge case: the node was previously registered; reconcile
// the value of the controller-managed attach-detach
// annotation.
requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate
requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate
requiresUpdate = kl.reconcileHugePageResource(node, existingNode) || requiresUpdate
if requiresUpdate {
if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil {
klog.ErrorS(err, "Unable to reconcile node with API server,error updating node", "node", klog.KObj(node))
return false
}
}
return true
}
// reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported
func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool {
requiresUpdate := updateDefaultResources(initialNode, existingNode)
supportedHugePageResources := sets.Set[string]{}
for resourceName := range initialNode.Status.Capacity {
if !v1helper.IsHugePageResourceName(resourceName) {
continue
}
supportedHugePageResources.Insert(string(resourceName))
initialCapacity := initialNode.Status.Capacity[resourceName]
initialAllocatable := initialNode.Status.Allocatable[resourceName]
capacity, resourceIsSupported := existingNode.Status.Capacity[resourceName]
allocatable := existingNode.Status.Allocatable[resourceName]
// Add or update capacity if the size was previously unsupported or has changed
if !resourceIsSupported || capacity.Cmp(initialCapacity) != 0 {
existingNode.Status.Capacity[resourceName] = initialCapacity.DeepCopy()
requiresUpdate = true
}
// Add or update allocatable if the size was previously unsupported or has changed
if !resourceIsSupported || allocatable.Cmp(initialAllocatable) != 0 {
existingNode.Status.Allocatable[resourceName] = initialAllocatable.DeepCopy()
requiresUpdate = true
}
}
for resourceName := range existingNode.Status.Capacity {
if !v1helper.IsHugePageResourceName(resourceName) {
continue
}
// If the huge page size is no longer supported, remove it from the node
if !supportedHugePageResources.Has(string(resourceName)) {
delete(existingNode.Status.Capacity, resourceName)
delete(existingNode.Status.Allocatable, resourceName)
klog.InfoS("Removing huge page resource which is no longer supported", "resourceName", resourceName)
requiresUpdate = true
}
}
return requiresUpdate
}
// reconcileExtendedResource zeros out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
requiresUpdate := updateDefaultResources(initialNode, node)
// Check with the device manager to see if node has been recreated, in which case extended resources should be zeroed until they are available
if kl.containerManager.ShouldResetExtendedResourceCapacity() {
for k := range node.Status.Capacity {
if v1helper.IsExtendedResourceName(k) {
klog.InfoS("Zero out resource capacity in existing node", "resourceName", k, "node", klog.KObj(node))
node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
requiresUpdate = true
}
}
}
return requiresUpdate
}
// updateDefaultResources will set the default resources on the existing node according to the initial node
func updateDefaultResources(initialNode, existingNode *v1.Node) bool {
requiresUpdate := false
if existingNode.Status.Capacity == nil {
if initialNode.Status.Capacity != nil {
existingNode.Status.Capacity = initialNode.Status.Capacity.DeepCopy()
requiresUpdate = true
} else {
existingNode.Status.Capacity = make(map[v1.ResourceName]resource.Quantity)
}
}
if existingNode.Status.Allocatable == nil {
if initialNode.Status.Allocatable != nil {
existingNode.Status.Allocatable = initialNode.Status.Allocatable.DeepCopy()
requiresUpdate = true
} else {
existingNode.Status.Allocatable = make(map[v1.ResourceName]resource.Quantity)
}
}
return requiresUpdate
}
// updateDefaultLabels will set the default labels on the node
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
defaultLabels := []string{
v1.LabelHostname,
v1.LabelTopologyZone,
v1.LabelTopologyRegion,
v1.LabelFailureDomainBetaZone,
v1.LabelFailureDomainBetaRegion,
v1.LabelInstanceTypeStable,
v1.LabelInstanceType,
v1.LabelOSStable,
v1.LabelArchStable,
v1.LabelWindowsBuild,
kubeletapis.LabelOS,
kubeletapis.LabelArch,
}
needsUpdate := false
if existingNode.Labels == nil {
existingNode.Labels = make(map[string]string)
}
// Set default labels but make sure not to set labels with empty values
for _, label := range defaultLabels {
if _, hasInitialValue := initialNode.Labels[label]; !hasInitialValue {
continue
}
if existingNode.Labels[label] != initialNode.Labels[label] {
existingNode.Labels[label] = initialNode.Labels[label]
needsUpdate = true
}
if existingNode.Labels[label] == "" {
delete(existingNode.Labels, label)
}
}
return needsUpdate
}
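// exampleUpdateDefaultLabels is an illustrative sketch (not part of kubelet);
// the label values below are hypothetical. A stale hostname label on the
// existing node is overwritten from the initial node, and the function reports
// that an update is needed.
func exampleUpdateDefaultLabels(kl *Kubelet) bool {
initial := &v1.Node{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1.LabelHostname: "node-1"},
}}
existing := &v1.Node{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1.LabelHostname: "stale-name"},
}}
// Returns true; existing now carries v1.LabelHostname == "node-1".
return kl.updateDefaultLabels(initial, existing)
}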
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
var (
existingCMAAnnotation = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
)
if newCMAAnnotation == existingCMAAnnotation {
return false
}
// If the just-constructed node and the existing node do
// not have the same value, update the existing node with
// the correct value of the annotation.
if !newSet {
klog.InfoS("Controller attach-detach setting changed to false; updating existing Node")
delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
} else {
klog.InfoS("Controller attach-detach setting changed to true; updating existing Node")
if existingNode.Annotations == nil {
existingNode.Annotations = make(map[string]string)
}
existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
}
return true
}
// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: string(kl.nodeName),
Labels: map[string]string{
v1.LabelHostname: kl.hostname,
v1.LabelOSStable: goruntime.GOOS,
v1.LabelArchStable: goruntime.GOARCH,
kubeletapis.LabelOS: goruntime.GOOS,
kubeletapis.LabelArch: goruntime.GOARCH,
},
},
}
osLabels, err := getOSSpecificLabels()
if err != nil {
return nil, err
}
for label, value := range osLabels {
node.Labels[label] = value
}
nodeTaints := make([]v1.Taint, len(kl.registerWithTaints))
copy(nodeTaints, kl.registerWithTaints)
unschedulableTaint := v1.Taint{
Key: v1.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
}
// Taint node with TaintNodeUnschedulable when initializing
// node to avoid race condition; refer to #63897 for more detail.
if node.Spec.Unschedulable &&
!taintutil.TaintExists(nodeTaints, &unschedulableTaint) {
nodeTaints = append(nodeTaints, unschedulableTaint)
}
if kl.externalCloudProvider {
taint := v1.Taint{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
}
nodeTaints = append(nodeTaints, taint)
}
if len(nodeTaints) > 0 {
node.Spec.Taints = nodeTaints
}
if kl.enableControllerAttachDetach {
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
klog.V(2).InfoS("Setting node annotation to enable volume controller attach/detach")
node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
} else {
klog.V(2).InfoS("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
}
// @question: should this be placed after the call to the cloud provider, which also applies labels?
for k, v := range kl.nodeLabels {
if cv, found := node.ObjectMeta.Labels[k]; found {
klog.InfoS("the node label will overwrite default setting", "labelKey", k, "labelValue", v, "default", cv)
}
node.ObjectMeta.Labels[k] = v
}
if kl.providerID != "" {
node.Spec.ProviderID = kl.providerID
}
kl.setNodeStatus(ctx, node)
return node, nil
}
// fastNodeStatusUpdate is a "lightweight" version of syncNodeStatus which doesn't hit the
// apiserver except for the final run, to be called by fastStatusUpdateOnce in each loop.
// It holds the same lock as syncNodeStatus and is thread-safe when called concurrently with
// syncNodeStatus. Its return value indicates whether the loop running it should exit
// (final run), and it also sets kl.containerRuntimeReadyExpected.
func (kl *Kubelet) fastNodeStatusUpdate(ctx context.Context, timeout bool) (completed bool) {
kl.syncNodeStatusMux.Lock()
defer func() {
kl.syncNodeStatusMux.Unlock()
if completed {
// containerRuntimeReadyExpected is read by updateRuntimeUp().
// Not going for a more granular mutex as this path runs only once.
kl.updateRuntimeMux.Lock()
defer kl.updateRuntimeMux.Unlock()
kl.containerRuntimeReadyExpected = true
}
}()
if timeout {
klog.ErrorS(nil, "Node not becoming ready in time after startup")
return true
}
originalNode, err := kl.GetNode()
if err != nil {
klog.ErrorS(err, "Error getting the current node from lister")
return false
}
readyIdx, originalNodeReady := nodeutil.GetNodeCondition(&originalNode.Status, v1.NodeReady)
if readyIdx == -1 {
klog.ErrorS(nil, "Node does not have NodeReady condition", "originalNode", originalNode)
return false
}
if originalNodeReady.Status == v1.ConditionTrue {
return true
}
// This is in addition to the regular syncNodeStatus logic so we can get the container runtime status earlier.
// This function itself has a mutex and it doesn't recursively call fastNodeStatusUpdate or syncNodeStatus.
kl.updateRuntimeUp()
node, changed := kl.updateNode(ctx, originalNode)
if !changed {
// We don't do markVolumesFromNode(node) here and leave it to the regular syncNodeStatus().
return false
}
readyIdx, nodeReady := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
if readyIdx == -1 {
klog.ErrorS(nil, "Node does not have NodeReady condition", "node", node)
return false
}
if nodeReady.Status == v1.ConditionFalse {
return false
}
klog.InfoS("Fast updating node status as it just became ready")
if _, err := kl.patchNodeStatus(originalNode, node); err != nil {
// The originalNode is probably stale, but we know that the current state of kubelet would turn
// the node to be ready. Retry using syncNodeStatus() which fetches from the apiserver.
klog.ErrorS(err, "Error updating node status, will retry with syncNodeStatus")
// The reversed kl.syncNodeStatusMux.Unlock()/Lock() pair below allows kl.syncNodeStatus() to execute.
kl.syncNodeStatusMux.Unlock()
kl.syncNodeStatus()
// This lock action is unnecessary if we add a flag to check in the defer before unlocking it,
// but having it here makes the logic a bit easier to read.
kl.syncNodeStatusMux.Lock()
}
// We don't do markVolumesFromNode(node) here and leave it to the regular syncNodeStatus().
return true
}
// syncNodeStatus should be called periodically from a goroutine.
// It synchronizes node status to the master if there is any change or enough
// time has passed since the last sync, registering the kubelet first if necessary.
func (kl *Kubelet) syncNodeStatus() {
kl.syncNodeStatusMux.Lock()
defer kl.syncNodeStatusMux.Unlock()
ctx := context.Background()
if kl.kubeClient == nil || kl.heartbeatClient == nil {
return
}
if kl.registerNode {
// This will exit immediately if it doesn't need to do anything.
kl.registerWithAPIServer()
}
if err := kl.updateNodeStatus(ctx); err != nil {
klog.ErrorS(err, "Unable to update node status")
}
}
// updateNodeStatus updates node status to the master with retries if there is
// any change or enough time has passed since the last sync.
func (kl *Kubelet) updateNodeStatus(ctx context.Context) error {
klog.V(5).InfoS("Updating node status")
for i := 0; i < nodeStatusUpdateRetry; i++ {
if err := kl.tryUpdateNodeStatus(ctx, i); err != nil {
if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
kl.onRepeatedHeartbeatFailure()
}
klog.ErrorS(err, "Error updating node status, will retry")
} else {
return nil
}
}
return fmt.Errorf("update node status exceeds retry count")
}
// tryUpdateNodeStatus tries to update node status to the master if there is
// any change or enough time has passed since the last sync.
func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error {
// In large clusters, GET and PUT operations on Node objects coming
// from here are the majority of load on apiserver and etcd.
// To reduce the load on the control plane, we serve GET operations from the
// local lister (the data might be slightly delayed, but that doesn't
// seem to cause more conflicts - the delays are pretty small).
// If it results in a conflict, all retries are served directly from etcd.
var originalNode *v1.Node
var err error
if tryNumber == 0 {
originalNode, err = kl.nodeLister.Get(string(kl.nodeName))
} else {
opts := metav1.GetOptions{}
originalNode, err = kl.heartbeatClient.CoreV1().Nodes().Get(ctx, string(kl.nodeName), opts)
}
if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
}
if originalNode == nil {
return fmt.Errorf("nil %q node object", kl.nodeName)
}
node, changed := kl.updateNode(ctx, originalNode)
shouldPatchNodeStatus := changed || kl.isUpdateStatusPeriodExpired()
if !shouldPatchNodeStatus {
kl.markVolumesFromNode(node)
return nil
}
// There are 3 possible conditions that make shouldPatchNodeStatus true:
// 1. the node has changed
// 2. isUpdateStatusPeriodExpired returns true because lastStatusReportTime has a zero value, which happens when the kubelet restarts.
// 3. isUpdateStatusPeriodExpired returns true because a non-zero lastStatusReportTime has expired.
// We want to calculate a new random delay for conditions 1 and 2, so that the periodic node status
// updates don't all reach the apiserver at the same time.
// When condition 3 happens, the random interval has already been applied, and we want to reset the delay so that
// the node updates its status at a fixed interval going forward.
if changed || kl.lastStatusReportTime.IsZero() {
kl.delayAfterNodeStatusChange = kl.calculateDelay()
} else {
kl.delayAfterNodeStatusChange = 0
}
updatedNode, err := kl.patchNodeStatus(originalNode, node)
if err == nil {
kl.markVolumesFromNode(updatedNode)
}
return err
}
func (kl *Kubelet) isUpdateStatusPeriodExpired() bool {
return kl.clock.Since(kl.lastStatusReportTime) >= kl.nodeStatusReportFrequency+kl.delayAfterNodeStatusChange
}
func (kl *Kubelet) calculateDelay() time.Duration {
return time.Duration(float64(kl.nodeStatusReportFrequency) * (-0.5 + rand.Float64()))
}
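// For reference: rand.Float64() is uniform in [0, 1), so calculateDelay draws a
// jitter in [-0.5*f, +0.5*f) where f is nodeStatusReportFrequency, and
// isUpdateStatusPeriodExpired therefore fires somewhere in [0.5*f, 1.5*f) after
// the last report. For example (value hypothetical), with f = 5m the next
// periodic report lands between 2.5m and 7.5m after the previous one.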
// updateNode creates a copy of originalNode and runs update logic on it.
// It returns the updated node object and a bool indicating if anything has been changed.
func (kl *Kubelet) updateNode(ctx context.Context, originalNode *v1.Node) (*v1.Node, bool) {
node := originalNode.DeepCopy()
podCIDRChanged := false
if len(node.Spec.PodCIDRs) != 0 {
// Pod CIDR could have been updated before, so we cannot rely on
// node.Spec.PodCIDR being non-empty. We also need to know whether the pod
// CIDR actually changed.
var err error
podCIDRs := strings.Join(node.Spec.PodCIDRs, ",")
if podCIDRChanged, err = kl.updatePodCIDR(ctx, podCIDRs); err != nil {
klog.ErrorS(err, "Error updating pod CIDR")
}
}
areRequiredLabelsNotPresent := false
osName, osLabelExists := node.Labels[v1.LabelOSStable]
if !osLabelExists || osName != goruntime.GOOS {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelOSStable] = goruntime.GOOS
areRequiredLabelsNotPresent = true
}
// Set the arch if there is a mismatch
arch, archLabelExists := node.Labels[v1.LabelArchStable]
if !archLabelExists || arch != goruntime.GOARCH {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelArchStable] = goruntime.GOARCH
areRequiredLabelsNotPresent = true
}
kl.setNodeStatus(ctx, node)
changed := podCIDRChanged || nodeStatusHasChanged(&originalNode.Status, &node.Status) || areRequiredLabelsNotPresent
return node, changed
}
// patchNodeStatus patches node on the API server based on originalNode.
// On success it returns the updated node and refreshes the state of the kubelet; otherwise it returns an error.
func (kl *Kubelet) patchNodeStatus(originalNode, node *v1.Node) (*v1.Node, error) {
// Patch the current status on the API server
updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node)
if err != nil {
return nil, err
}
kl.lastStatusReportTime = kl.clock.Now()
readyIdx, readyCondition := nodeutil.GetNodeCondition(&updatedNode.Status, v1.NodeReady)
if readyIdx >= 0 && readyCondition.Status == v1.ConditionTrue {
kl.nodeStartupLatencyTracker.RecordNodeReady()
}
return updatedNode, nil
}
// markVolumesFromNode updates volumeManager with VolumesInUse status from node.
//
// When a node status update is unnecessary, call this with the fetched node.
// We must mark the volumes as ReportedInUse in volume manager's dsw even
// if no changes were made to the node status (no volumes were added or removed
// from the VolumesInUse list).
//
// The reason is that on a kubelet restart, the volume manager's dsw is
// repopulated and the volume ReportedInUse is initialized to false, while the
// VolumesInUse list from the Node object still contains the state from the
// previous kubelet instantiation.
//
// Once the volumes are added to the dsw, the ReportedInUse field needs to be
// synced from the VolumesInUse list in the Node.Status.
//
// The MarkVolumesAsReportedInUse() call cannot be performed in dsw directly
// because it does not have access to the Node object.
// This also cannot be populated on node status manager init because the volume
// may not have been added to dsw at that time.
//
// Or, after a successful node status update, call this with the updatedNode
// returned from the patch call, to mark the volumes in use as reported and
// indicate that those volumes are already reflected in the node's status.
func (kl *Kubelet) markVolumesFromNode(node *v1.Node) {
kl.volumeManager.MarkVolumesAsReportedInUse(node.Status.VolumesInUse)
}
// recordNodeStatusEvent records an event of the given type with the given
// message for the node.
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
klog.V(2).InfoS("Recording event message for node", "node", klog.KRef("", string(kl.nodeName)), "event", event)
kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}
// recordEvent records an event for this node; the Kubelet's nodeRef is passed to the recorder
func (kl *Kubelet) recordEvent(eventType, event, message string) {
kl.recorder.Eventf(kl.nodeRef, eventType, event, message)
}
// recordNodeSchedulableEvent records an event whenever the node's schedulable state changes.
func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node) error {
kl.lastNodeUnschedulableLock.Lock()
defer kl.lastNodeUnschedulableLock.Unlock()
if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
} else {
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
}
kl.lastNodeUnschedulable = node.Spec.Unschedulable
}
return nil
}
// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(ctx context.Context, node *v1.Node) {
for i, f := range kl.setNodeStatusFuncs {
klog.V(5).InfoS("Setting node status condition code", "position", i, "node", klog.KObj(node))
if err := f(ctx, node); err != nil {
klog.ErrorS(err, "Failed to set some node status fields", "node", klog.KObj(node))
}
}
}
// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) error {
var setters []func(ctx context.Context, n *v1.Node) error
setters = append(setters,
nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.externalCloudProvider, utilnet.ResolveBindAddress),
nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent, kl.supportLocalStorageCapacityIsolation()),
nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version),
nodestatus.DaemonEndpoints(kl.daemonEndpoints),
nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
nodestatus.GoRuntime(),
nodestatus.RuntimeHandlers(kl.runtimeState.runtimeHandlers),
nodestatus.NodeFeatures(kl.runtimeState.runtimeFeatures),
)
setters = append(setters,
nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors,
kl.containerManager.Status, kl.shutdownManager.ShutdownStatus, kl.recordNodeStatusEvent, kl.supportLocalStorageCapacityIsolation()),
nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
// these side-effects by decoupling the decisions to send events and partial status recording
// from the Node setters.
kl.recordNodeSchedulableEvent,
)
return setters
}
// validateNodeIP validates that the given node IP belongs to the current host
func validateNodeIP(nodeIP net.IP) error {
// Honor IP limitations set in setNodeStatus()
if nodeIP.To4() == nil && nodeIP.To16() == nil {
return fmt.Errorf("nodeIP must be a valid IP address")
}
if nodeIP.IsLoopback() {
return fmt.Errorf("nodeIP can't be loopback address")
}
if nodeIP.IsMulticast() {
return fmt.Errorf("nodeIP can't be a multicast address")
}
if nodeIP.IsLinkLocalUnicast() {
return fmt.Errorf("nodeIP can't be a link-local unicast address")
}
if nodeIP.IsUnspecified() {
return fmt.Errorf("nodeIP can't be an all zeros address")
}
addrs, err := net.InterfaceAddrs()
if err != nil {
return err
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip != nil && ip.Equal(nodeIP) {
return nil
}
}
return fmt.Errorf("node IP: %q not found in the host's network interfaces", nodeIP.String())
}
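// exampleValidateNodeIP is an illustrative sketch (not part of kubelet) of
// calling validateNodeIP with a parsed address; the literal below is a
// TEST-NET-1 documentation address and is hypothetical.
func exampleValidateNodeIP() error {
ip := net.ParseIP("192.0.2.10")
if ip == nil {
return fmt.Errorf("invalid IP literal")
}
// Succeeds only if the address is usable and assigned to a local interface.
return validateNodeIP(ip)
}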
// nodeStatusHasChanged compares the original node and current node's status and
// returns true if any change happens. The heartbeat timestamp is ignored.
func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool {
if originalStatus == nil && status == nil {
return false
}
if originalStatus == nil || status == nil {
return true
}
// Compare node conditions here because we need to ignore the heartbeat timestamp.
if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) {
return true
}
// Compare other fields of NodeStatus.
originalStatusCopy := originalStatus.DeepCopy()
statusCopy := status.DeepCopy()
originalStatusCopy.Conditions = nil
statusCopy.Conditions = nil
return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy)
}
// nodeConditionsHaveChanged compares the original node and current node's
// conditions and returns true if any change happens. The heartbeat timestamp is
// ignored.
func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool {
if len(originalConditions) != len(conditions) {
return true
}
originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions))
originalConditionsCopy = append(originalConditionsCopy, originalConditions...)
conditionsCopy := make([]v1.NodeCondition, 0, len(conditions))
conditionsCopy = append(conditionsCopy, conditions...)
sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type })
sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type })
replacedheartbeatTime := metav1.Time{}
for i := range conditionsCopy {
originalConditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
conditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) {
return true
}
}
return false
}
//go:build !windows
// +build !windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
func getOSSpecificLabels() (map[string]string, error) {
return nil, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"encoding/json"
"fmt"
"net"
goruntime "runtime"
"sort"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/version"
kubeletapis "k8s.io/kubelet/pkg/apis"
"k8s.io/kubernetes/pkg/features"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
taintutil "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/pkg/volume/util"
netutils "k8s.io/utils/net"
)
const (
maxImageTagsForTest = 20
)
// generateTestingImageLists generates a random image list and the corresponding expectedImageList.
func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
// imageList is randomly generated image list
var imageList []kubecontainer.Image
for ; count > 0; count-- {
imageItem := kubecontainer.Image{
ID: string(uuid.NewUUID()),
RepoTags: generateImageTags(),
Size: rand.Int63nRange(minImgSize, maxImgSize+1),
}
imageList = append(imageList, imageItem)
}
expectedImageList := makeExpectedImageList(imageList, maxImages)
return imageList, expectedImageList
}
func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
// expectedImageList is generated by imageList according to size and maxImages
// 1. sort the imageList by size
sort.Sort(sliceutils.ByImageSize(imageList))
// 2. convert sorted imageList to v1.ContainerImage list
var expectedImageList []v1.ContainerImage
for _, kubeImage := range imageList {
apiImage := v1.ContainerImage{
Names: kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
SizeBytes: kubeImage.Size,
}
expectedImageList = append(expectedImageList, apiImage)
}
// 3. only returns the top maxImages images in expectedImageList
if maxImages == -1 { // -1 means no limit
return expectedImageList
}
return expectedImageList[0:maxImages]
}
func generateImageTags() []string {
var tagList []string
// Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
// that the kubelet reports up to MaxNamesPerImageInNodeStatus tags.
count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
for ; count > 0; count-- {
tagList = append(tagList, "registry.k8s.io:v"+strconv.Itoa(count))
}
return tagList
}
func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
original, err := json.Marshal(originalNode)
if err != nil {
return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
}
updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
if err != nil {
return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
patch, originalNode, err)
}
updatedNode := &v1.Node{}
if err := json.Unmarshal(updated, updatedNode); err != nil {
return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
}
return updatedNode, nil
}
func notImplemented(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
}
func addNotImplatedReaction(kubeClient *fake.Clientset) {
if kubeClient == nil {
return
}
kubeClient.AddReactor("*", "*", notImplemented)
}
type localCM struct {
cm.ContainerManager
allocatableReservation v1.ResourceList
capacity v1.ResourceList
}
func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
return lcm.allocatableReservation
}
func (lcm *localCM) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
if !localStorageCapacityIsolation {
delete(lcm.capacity, v1.ResourceEphemeralStorage)
}
return lcm.capacity
}
type delegatingNodeLister struct {
client clientset.Interface
}
func (l delegatingNodeLister) Get(name string) (*v1.Node, error) {
return l.client.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
}
func (l delegatingNodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) {
opts := metav1.ListOptions{}
if selector != nil {
opts.LabelSelector = selector.String()
}
nodeList, err := l.client.CoreV1().Nodes().List(context.Background(), opts)
if err != nil {
return nil, err
}
nodes := make([]*v1.Node, len(nodeList.Items))
for i := range nodeList.Items {
nodes[i] = &nodeList.Items[i]
}
return nodes, nil
}
func TestUpdateNewNodeStatus(t *testing.T) {
cases := []struct {
desc string
nodeStatusMaxImages int32
}{
{
desc: "5 image limit",
nodeStatusMaxImages: 5,
},
{
desc: "no image limit",
nodeStatusMaxImages: -1,
},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
// generate one more in inputImageList than we configure the Kubelet to report,
// or 5 images if unlimited
numTestImages := int(tc.nodeStatusMaxImages) + 1
if tc.nodeStatusMaxImages == -1 {
numTestImages = 5
}
inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/, true /* localStorageCapacityIsolation */, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10e9, // 10G
}
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: "kubelet has sufficient memory available",
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: "kubelet has no disk pressure",
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: "kubelet has sufficient PID available",
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: cadvisortest.FakeKernelVersion,
OSImage: cadvisortest.FakeContainerOSVersion,
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: "",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: expectedImageList,
},
}
kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, "status", actions[1].GetSubresource())
updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
assert.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NotReady should be last")
assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
})
}
}
func TestUpdateExistingNodeStatus(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 20e9,
}
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: metav1.Time{}, // placeholder
LastTransitionTime: metav1.Time{}, // placeholder
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: cadvisortest.FakeKernelVersion,
OSImage: cadvisortest.FakeContainerOSVersion,
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: "",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
// Images will be sorted by size, largest first, in the node status.
Images: []v1.ContainerImage{
{
Names: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
SizeBytes: 123,
},
{
Names: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
SizeBytes: 456,
},
},
},
}
kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
assert.Len(t, actions, 2)
assert.IsType(t, core.PatchActionImpl{}, actions[1])
patchAction := actions[1].(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
require.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
// Expect LastHeartbeatTime to be updated to now, while LastTransitionTime stays the same.
assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NodeReady should be the last condition")
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
}
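// TestUpdateExistingNodeStatusTimeout verifies that updateNodeStatus returns an
// error rather than hanging when the API server never responds, that it retries
// at least nodeStatusUpdateRetry times, and that the repeated-heartbeat-failure
// callback fires for the retries.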
func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
attempts := int64(0)
failureCallbacks := int64(0)
// set up a listener that hangs connections
ln, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
defer ln.Close()
go func() {
// accept connections and just let them hang
for {
_, err := ln.Accept()
if err != nil {
t.Log(err)
return
}
t.Log("accepted connection")
atomic.AddInt64(&attempts, 1)
}
}()
config := &rest.Config{
Host: "http://" + ln.Addr().String(),
QPS: -1,
Timeout: time.Second,
}
assert.NoError(t, err)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.heartbeatClient, err = clientset.NewForConfig(config)
require.NoError(t, err)
kubelet.onRepeatedHeartbeatFailure = func() {
atomic.AddInt64(&failureCallbacks, 1)
}
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
},
}
// should return an error, but not hang
assert.Error(t, kubelet.updateNodeStatus(ctx))
// should have attempted multiple times
if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
}
// should have gotten multiple failure callbacks
if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
}
}
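// TestUpdateNodeStatusWithRuntimeStateError verifies that the NodeReady condition
// tracks the container runtime state: the node is reported NotReady when the
// runtime check is out of date, returns an error, is missing or empty, or reports
// RuntimeReady/NetworkReady as false.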
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10e9,
}
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{}, // placeholder
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: cadvisortest.FakeKernelVersion,
OSImage: cadvisortest.FakeContainerOSVersion,
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: "",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: []v1.ContainerImage{
{
Names: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
SizeBytes: 123,
},
{
Names: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
SizeBytes: 456,
},
},
},
}
checkNodeStatus := func(status v1.ConditionStatus, reason string) {
kubeClient.ClearActions()
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, "status", actions[1].GetSubresource())
updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
require.NoError(t, err, "can't apply node status patch")
for i, cond := range updatedNode.Status.Conditions {
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
lastIndex := len(updatedNode.Status.Conditions) - 1
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
updatedNode.Status.Conditions[lastIndex].Message = ""
expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
Type: v1.NodeReady,
Status: status,
Reason: reason,
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
}
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
}
// TODO(random-liu): Refactor this unit test into a table-driven test.
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report kubelet ready if the runtime check is updated
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionTrue, "KubeletReady")
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report kubelet not ready if the runtime check failed
fakeRuntime := testKubelet.fakeRuntime
// Inject an error into the fake runtime status check; the node should become NotReady.
fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
fakeRuntime.StatusErr = nil
// Should report node not ready if runtime status is nil.
fakeRuntime.RuntimeStatus = nil
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node not ready if runtime status is empty.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node not ready if RuntimeReady is false.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
Conditions: []kubecontainer.RuntimeCondition{
{Type: kubecontainer.RuntimeReady, Status: false},
{Type: kubecontainer.NetworkReady, Status: true},
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node ready if RuntimeReady is true.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
Conditions: []kubecontainer.RuntimeCondition{
{Type: kubecontainer.RuntimeReady, Status: true},
{Type: kubecontainer.NetworkReady, Status: true},
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionTrue, "KubeletReady")
// Should report node not ready if NetworkReady is false.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
Conditions: []kubecontainer.RuntimeCondition{
{Type: kubecontainer.RuntimeReady, Status: true},
{Type: kubecontainer.NetworkReady, Status: false},
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
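// TestUpdateNodeStatusError verifies that updateNodeStatus fails after
// nodeStatusUpdateRetry attempts when no node object matches the kubelet.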
func TestUpdateNodeStatusError(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
assert.Error(t, kubelet.updateNodeStatus(ctx))
assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
}
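// TestUpdateNodeStatusWithLease verifies that, with node leases in use, the full
// node status is only patched when something has changed or when
// nodeStatusReportFrequency has elapsed; otherwise the kubelet merely reads the
// node object.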
func TestUpdateNodeStatusWithLease(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
clock := testKubelet.fakeClock
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
// The kubelet adds up to 50% of nodeStatusReportFrequency of random latency when
// determining whether a node status update is needed due to time passage. We need
// to take that into account to ensure this test passes every time.
kubelet.nodeStatusReportFrequency = 30 * time.Second
kubeClient := testKubelet.fakeKubeClient
existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 20e9,
}
kubelet.setCachedMachineInfo(machineInfo)
now := metav1.NewTime(clock.Now()).Rfc3339Copy()
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: now,
LastTransitionTime: now,
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: now,
LastTransitionTime: now,
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: now,
LastTransitionTime: now,
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: now,
LastTransitionTime: now,
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: cadvisortest.FakeKernelVersion,
OSImage: cadvisortest.FakeContainerOSVersion,
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: "",
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
// Images will be sorted by size, largest first, in the node status.
Images: []v1.ContainerImage{
{
Names: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
SizeBytes: 123,
},
{
Names: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
SizeBytes: 456,
},
},
},
}
// Update node status for the first time; this creates the node status.
// The full status should be reported.
kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
assert.Len(t, actions, 2)
assert.IsType(t, core.GetActionImpl{}, actions[0])
assert.IsType(t, core.PatchActionImpl{}, actions[1])
patchAction := actions[1].(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
require.NoError(t, err)
for _, cond := range updatedNode.Status.Conditions {
cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
}
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NodeReady should be the last condition")
// Update node status again when nothing has changed (except the heartbeat time).
// The status should be reported, since nodeStatusReportFrequency has elapsed.
clock.Step(time.Minute)
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// 2 more actions (there were 2 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 4)
assert.IsType(t, core.GetActionImpl{}, actions[2])
assert.IsType(t, core.PatchActionImpl{}, actions[3])
patchAction = actions[3].(core.PatchActionImpl)
updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
require.NoError(t, err)
for _, cond := range updatedNode.Status.Conditions {
cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
}
// Expect LastHeartbeat updated, other things unchanged.
for i, cond := range expectedNode.Status.Conditions {
expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
}
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
// Update node status again when nothing has changed (except the heartbeat time).
// The status should not be reported, since nodeStatusReportFrequency has not elapsed.
clock.Step(10 * time.Second)
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// Only 1 more action (There were 4 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 5)
assert.IsType(t, core.GetActionImpl{}, actions[4])
// Update node status again when something has changed.
// The status should be reported even though nodeStatusReportFrequency has not elapsed.
clock.Step(10 * time.Second)
var newMemoryCapacity int64 = 40e9
oldMachineInfo, err := kubelet.GetCachedMachineInfo()
if err != nil {
t.Fatal(err)
}
newMachineInfo := oldMachineInfo.Clone()
newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity)
kubelet.setCachedMachineInfo(newMachineInfo)
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// 2 more actions (there were 5 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 7)
assert.IsType(t, core.GetActionImpl{}, actions[5])
assert.IsType(t, core.PatchActionImpl{}, actions[6])
patchAction = actions[6].(core.PatchActionImpl)
updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
require.NoError(t, err)
memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
now = metav1.NewTime(clock.Now()).Rfc3339Copy()
for _, cond := range updatedNode.Status.Conditions {
// Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
"LastHeartbeatTime for condition %v", cond.Type)
assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
"LastTransitionTime for condition %v", cond.Type)
}
// Update node status when the pod CIDR changes.
// The status should be reported even though nodeStatusReportFrequency has not elapsed.
clock.Step(10 * time.Second)
assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
podCIDRs := []string{"10.0.0.0/24", "2000::/10"}
updatedNode.Spec.PodCIDR = podCIDRs[0]
updatedNode.Spec.PodCIDRs = podCIDRs
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
assert.NoError(t, kubelet.updateNodeStatus(ctx))
assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
// 2 more actions (there were 7 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 9)
assert.IsType(t, core.GetActionImpl{}, actions[7])
assert.IsType(t, core.PatchActionImpl{}, actions[8])
// Update node status while keeping the same pod CIDR.
// The status should not be reported, since nodeStatusReportFrequency has not elapsed.
clock.Step(10 * time.Second)
assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// Only 1 more action (There were 9 actions before).
actions = kubeClient.Actions()
assert.Len(t, actions, 10)
assert.IsType(t, core.GetActionImpl{}, actions[9])
}
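// TestUpdateNodeStatusAndVolumesInUseWithNodeLease verifies that the VolumesInUse
// setter only patches the node when the volumes in use reported by the volume
// manager differ from those already recorded on the node object.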
func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
cases := []struct {
desc string
existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
existingNode *v1.Node // existing node object
expectedNode *v1.Node // new node object after patch
expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
}{
{
desc: "no volumes and no update",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse on node and volumeManager",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
},
expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
},
{
desc: "volumes inuse on node but not in volumeManager",
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
},
expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse in volumeManager but not on node",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
},
expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
ctx := context.Background()
// Setup
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
kubelet.lastStatusReportTime = kubelet.clock.Now()
kubelet.nodeStatusReportFrequency = time.Hour
kubelet.setCachedMachineInfo(&cadvisorapi.MachineInfo{})
// override test volumeManager
fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes, 0, nil, false)
kubelet.volumeManager = fakeVolumeManager
// Only test VolumesInUse setter
kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
kubelet.volumeManager.GetVolumesInUse),
}
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
// Execute
assert.NoError(t, kubelet.updateNodeStatus(ctx))
// Validate
actions := kubeClient.Actions()
if tc.expectedNode != nil {
assert.Len(t, actions, 2)
assert.IsType(t, core.GetActionImpl{}, actions[0])
assert.IsType(t, core.PatchActionImpl{}, actions[1])
patchAction := actions[1].(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
require.NoError(t, err)
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", cmp.Diff(tc.expectedNode, updatedNode))
} else {
assert.Len(t, actions, 1)
assert.IsType(t, core.GetActionImpl{}, actions[0])
}
reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", cmp.Diff(tc.expectedReportedInUse, reportedInUse))
})
}
}
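// TestFastStatusUpdateOnce verifies the fast status update path at startup: the
// kubelet keeps polling until the node becomes ready or the ready grace period
// expires, and a failed status patch is retried up to nodeStatusUpdateRetry
// additional times.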
func TestFastStatusUpdateOnce(t *testing.T) {
tests := []struct {
name string
beforeMarkReady int
beforeNextReady int
beforeTimeout int
wantCalls int
patchFailures int
wantPatches int
}{
{
name: "timeout after third loop",
beforeMarkReady: 9,
beforeNextReady: 9,
beforeTimeout: 2,
wantCalls: 3,
},
{
name: "already ready on third loop",
beforeMarkReady: 9,
beforeNextReady: 1,
beforeTimeout: 9,
wantCalls: 2,
},
{
name: "turns ready on third loop",
beforeMarkReady: 2,
beforeNextReady: 9,
beforeTimeout: 9,
wantCalls: 3,
wantPatches: 1,
},
{
name: "turns ready on second loop then first patch fails",
beforeMarkReady: 1,
beforeNextReady: 9,
beforeTimeout: 9,
wantCalls: 3,
patchFailures: 1,
wantPatches: 2,
},
{
name: "turns ready on second loop then all patches fail",
beforeMarkReady: 1,
beforeNextReady: 9,
beforeTimeout: 9,
wantCalls: nodeStatusUpdateRetry + 2,
patchFailures: nodeStatusUpdateRetry + 2,
wantPatches: nodeStatusUpdateRetry + 1,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
// Ensure we capture actions on the heartbeat client only.
// We don't set it to nil, because then GetNode() would not read from the nodeLister.
kubelet.kubeClient = &fake.Clientset{}
kubeClient := testKubelet.fakeKubeClient
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: string(kubelet.nodeName),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "NotReady",
Message: "Node not ready",
},
},
},
}
nodeLister := testNodeLister{[]*v1.Node{node.DeepCopy()}}
kubelet.nodeLister = nodeLister
callCount := 0
// The original node status functions turn the node ready.
nodeStatusFuncs := kubelet.setNodeStatusFuncs
kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{func(ctx context.Context, node *v1.Node) error {
assert.False(t, kubelet.containerRuntimeReadyExpected)
callCount++
var lastErr error
if callCount > tc.beforeMarkReady {
for _, f := range nodeStatusFuncs {
if err := f(ctx, node); err != nil {
lastErr = err
}
}
}
if callCount > tc.beforeNextReady {
nodeLister.nodes[0].Status.Conditions[0].Status = v1.ConditionTrue
}
if callCount > tc.beforeTimeout {
testKubelet.fakeClock.Step(nodeReadyGracePeriod)
}
return lastErr
}}
patchCount := 0
kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
assert.False(t, kubelet.containerRuntimeReadyExpected)
patchCount++
if patchCount > tc.patchFailures {
return false, nil, nil
}
return true, nil, fmt.Errorf("try again")
})
kubelet.fastStatusUpdateOnce()
assert.True(t, kubelet.containerRuntimeReadyExpected)
assert.Equal(t, tc.wantCalls, callCount)
assert.Equal(t, tc.wantPatches, patchCount)
actions := kubeClient.Actions()
if tc.wantPatches == 0 {
require.Empty(t, actions)
return
}
// patch, then patch, get, patch, get, patch, ... up to initial patch + nodeStatusUpdateRetry patches
expectedActions := 2*tc.wantPatches - 2
if tc.wantPatches == 1 {
expectedActions = 1
}
require.Len(t, actions, expectedActions)
for i, action := range actions {
if i%2 == 0 && i > 0 {
require.IsType(t, core.GetActionImpl{}, action)
continue
}
require.IsType(t, core.PatchActionImpl{}, action)
patchAction := action.(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(node, patchAction.GetPatch())
require.NoError(t, err)
seenNodeReady := false
for _, c := range updatedNode.Status.Conditions {
if c.Type == v1.NodeReady {
assert.Equal(t, v1.ConditionTrue, c.Status)
seenNodeReady = true
}
}
assert.True(t, seenNodeReady)
}
})
}
}
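// TestRegisterWithApiServer verifies that registerWithAPIServer returns (rather
// than retrying forever) once node creation fails with AlreadyExists and the
// existing node can be fetched and reconciled.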
func TestRegisterWithApiServer(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an error on create.
return true, &v1.Node{}, &apierrors.StatusError{
ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
}
})
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: testKubeletHostname,
Labels: map[string]string{
v1.LabelHostname: testKubeletHostname,
v1.LabelOSStable: goruntime.GOOS,
v1.LabelArchStable: goruntime.GOARCH,
kubeletapis.LabelOS: goruntime.GOOS,
kubeletapis.LabelArch: goruntime.GOARCH,
},
},
}, nil
})
kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "status" {
return true, nil, nil
}
return notImplemented(action)
})
addNotImplatedReaction(kubeClient)
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
kubelet.setCachedMachineInfo(machineInfo)
done := make(chan struct{})
go func() {
kubelet.registerWithAPIServer()
done <- struct{}{}
}()
select {
case <-time.After(wait.ForeverTestTimeout):
assert.Fail(t, "timed out waiting for registration")
case <-done:
return
}
}
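// TestTryRegisterWithApiServer exercises tryRegisterWithAPIServer against
// combinations of create/get/patch errors, checking the returned result, the
// number of API actions, and that the ControllerManagedAttachAnnotation is
// reconciled onto the existing node.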
func TestTryRegisterWithApiServer(t *testing.T) {
alreadyExists := &apierrors.StatusError{
ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
}
conflict := &apierrors.StatusError{
ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
}
forbidden := &apierrors.StatusError{
ErrStatus: metav1.Status{Reason: metav1.StatusReasonForbidden},
}
newNode := func(cmad bool) *v1.Node {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: testKubeletHostname,
v1.LabelOSStable: goruntime.GOOS,
v1.LabelArchStable: goruntime.GOARCH,
kubeletapis.LabelOS: goruntime.GOOS,
kubeletapis.LabelArch: goruntime.GOARCH,
},
},
}
if cmad {
node.Annotations = make(map[string]string)
node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
}
return node
}
cases := []struct {
name string
newNode *v1.Node
existingNode *v1.Node
createError error
getError error
patchError error
deleteError error
expectedResult bool
expectedActions int
testSavedNode bool
getOnForbiddenDisabled bool
savedNodeIndex int
savedNodeCMAD bool
}{
{
name: "success case - new node",
newNode: &v1.Node{},
expectedResult: true,
expectedActions: 1,
},
{
name: "success case - existing node - no change in CMAD",
newNode: newNode(true),
createError: alreadyExists,
existingNode: newNode(true),
expectedResult: true,
expectedActions: 2,
},
{
name: "success case - existing node - create forbidden - no change in CMAD",
newNode: newNode(true),
createError: forbidden,
existingNode: newNode(true),
expectedResult: true,
expectedActions: 2,
},
{
name: "success case - existing node - create forbidden - CMAD disabled",
newNode: newNode(false),
createError: forbidden,
existingNode: newNode(true),
expectedResult: true,
expectedActions: 3,
testSavedNode: true,
savedNodeIndex: 2,
savedNodeCMAD: false,
},
{
name: "success case - existing node - CMAD disabled",
newNode: newNode(false),
createError: alreadyExists,
existingNode: newNode(true),
expectedResult: true,
expectedActions: 3,
testSavedNode: true,
savedNodeIndex: 2,
savedNodeCMAD: false,
},
{
name: "success case - existing node - CMAD enabled",
newNode: newNode(true),
createError: alreadyExists,
existingNode: newNode(false),
expectedResult: true,
expectedActions: 3,
testSavedNode: true,
savedNodeIndex: 2,
savedNodeCMAD: true,
},
{
name: "create failed",
newNode: newNode(false),
createError: conflict,
expectedResult: false,
expectedActions: 1,
},
{
name: "create failed with forbidden - get-on-forbidden feature is disabled",
newNode: newNode(false),
getOnForbiddenDisabled: true,
createError: forbidden,
expectedResult: false,
expectedActions: 1,
},
{
name: "get existing node failed",
newNode: newNode(false),
createError: alreadyExists,
getError: conflict,
expectedResult: false,
expectedActions: 2,
},
{
name: "update existing node failed",
newNode: newNode(false),
createError: alreadyExists,
existingNode: newNode(true),
patchError: conflict,
expectedResult: false,
expectedActions: 3,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.getOnForbiddenDisabled {
featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, utilversion.MustParse("1.32"))
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletRegistrationGetOnExistsOnly, true)
}
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.createError
})
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, tc.existingNode, tc.getError
})
kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "status" {
return true, nil, tc.patchError
}
return notImplemented(action)
})
kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, tc.deleteError
})
addNotImplatedReaction(kubeClient)
result := kubelet.tryRegisterWithAPIServer(tc.newNode)
require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
actions := kubeClient.Actions()
assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
if tc.testSavedNode {
var savedNode *v1.Node
t.Logf("actions: %v: %+v", len(actions), actions)
action := actions[tc.savedNodeIndex]
if action.GetVerb() == "create" {
createAction := action.(core.CreateAction)
obj := createAction.GetObject()
require.IsType(t, &v1.Node{}, obj)
savedNode = obj.(*v1.Node)
} else if action.GetVerb() == "patch" {
patchAction := action.(core.PatchActionImpl)
var err error
savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
require.NoError(t, err)
}
actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
}
})
}
}
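// TestUpdateNewNodeStatusTooLargeReservation verifies that when the allocatable
// reservation exceeds capacity, the reported allocatable value is clamped at
// zero instead of going negative.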
func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
ctx := context.Background()
const nodeStatusMaxImages = 5
// Generate one more image in inputImageList than we configure the kubelet to report.
inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = nodeStatusMaxImages
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
}
// Since this test retroactively overrides the stub container manager,
// we have to regenerate default status setters.
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10e9, // 10G
}
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
},
}
kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
require.Len(t, actions, 1)
require.True(t, actions[0].Matches("patch", "nodes"))
require.Equal(t, "status", actions[0].GetSubresource())
updatedNode, err := applyNodeStatusPatch(&existingNode, actions[0].(core.PatchActionImpl).GetPatch())
assert.NoError(t, err)
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", cmp.Diff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
}
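// TestUpdateDefaultLabels verifies that updateDefaultLabels reconciles the
// kubelet-owned default labels onto the existing node without deleting labels
// owned by other parties.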
func TestUpdateDefaultLabels(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
cases := []struct {
name string
initialNode *v1.Node
existingNode *v1.Node
needsUpdate bool
finalLabels map[string]string
}{
{
name: "make sure default labels exist",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{},
},
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
{
name: "make sure default labels are up to date",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "old-hostname",
v1.LabelTopologyZone: "old-zone-failure-domain",
v1.LabelTopologyRegion: "old-zone-region",
v1.LabelFailureDomainBetaZone: "old-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "old-zone-region",
v1.LabelInstanceTypeStable: "old-instance-type",
v1.LabelInstanceType: "old-instance-type",
v1.LabelOSStable: "old-os",
v1.LabelArchStable: "old-arch",
},
},
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
{
name: "make sure existing labels do not get deleted",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
"please-persist": "foo",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
"please-persist": "foo",
},
},
{
name: "make sure existing labels do not get deleted when initial node has no opinion",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
"please-persist": "foo",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
"please-persist": "foo",
},
},
{
name: "no update needed",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
{
name: "not panic when existing node has nil labels",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{},
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
{
name: "backfill required for new stable labels for os/arch/zones/regions/instance-type",
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
},
},
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelTopologyZone: "new-zone-failure-domain",
v1.LabelTopologyRegion: "new-zone-region",
v1.LabelFailureDomainBetaZone: "new-zone-failure-domain",
v1.LabelFailureDomainBetaRegion: "new-zone-region",
v1.LabelInstanceTypeStable: "new-instance-type",
v1.LabelInstanceType: "new-instance-type",
v1.LabelOSStable: "new-os",
v1.LabelArchStable: "new-arch",
},
},
}
for _, tc := range cases {
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
}
}
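// TestUpdateDefaultResources verifies that updateDefaultResources backfills
// capacity and allocatable on the existing node from the initial node only when
// they are missing.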
func TestUpdateDefaultResources(t *testing.T) {
cases := []struct {
name string
initialNode *v1.Node
existingNode *v1.Node
expectedNode *v1.Node
needsUpdate bool
}{
{
name: "no update needed when capacity and allocatable of the existing node are not nil",
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: false,
}, {
name: "no update needed when capacity and allocatable of the initial node are nil",
initialNode: &v1.Node{},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: false,
}, {
name: "update needed when capacity and allocatable of the existing node are nil and capacity and allocatable of the initial node are not nil",
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
existingNode: &v1.Node{},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: true,
}, {
name: "update needed when capacity of the existing node is nil and capacity of the initial node is not nil",
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: true,
}, {
name: "update needed when allocatable of the existing node is nil and allocatable of the initial node is not nil",
initialNode: &v1.Node{
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: true,
}, {
name: "no update needed but capacity and allocatable of existing node should be initialized",
initialNode: &v1.Node{},
existingNode: &v1.Node{},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{},
Allocatable: v1.ResourceList{},
},
},
needsUpdate: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(T *testing.T) {
needsUpdate := updateDefaultResources(tc.initialNode, tc.existingNode)
assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
})
}
}
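// TestReconcileHugePageResource verifies that huge page capacity and allocatable
// values from the initial node are reconciled onto the existing node, and that no
// update is reported when they already match.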
func TestReconcileHugePageResource(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
hugePageResourceName64Ki := v1.ResourceName("hugepages-64Ki")
hugePageResourceName2Mi := v1.ResourceName("hugepages-2Mi")
hugePageResourceName1Gi := v1.ResourceName("hugepages-1Gi")
cases := []struct {
name string
testKubelet *TestKubelet
initialNode *v1.Node
existingNode *v1.Node
expectedNode *v1.Node
needsUpdate bool
}{
{
name: "no update needed when all huge page resources are similar",
testKubelet: testKubelet,
needsUpdate: false,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
},
},
}, {
name: "update needed when a new huge page resource is supported",
testKubelet: testKubelet,
needsUpdate: true,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
}, {
name: "update needed when huge page resource quantity has changed",
testKubelet: testKubelet,
needsUpdate: true,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("4Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("4Gi"),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("4Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("4Gi"),
},
},
},
}, {
name: "update needed when a huge page resource is no longer supported",
testKubelet: testKubelet,
needsUpdate: true,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: *resource.NewQuantity(0, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName1Gi: resource.MustParse("2Gi"),
},
},
},
}, {
name: "does not panic when capacity or allocatable of existing node is nil",
testKubelet: testKubelet,
needsUpdate: true,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
hugePageResourceName2Mi: resource.MustParse("100Mi"),
hugePageResourceName64Ki: *resource.NewQuantity(0, resource.BinarySI),
},
},
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
kubelet := testKubelet.kubelet
needsUpdate := kubelet.reconcileHugePageResource(tc.initialNode, tc.existingNode)
assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
})
}
}
func TestReconcileExtendedResource(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity*/)
testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubeletNoReset.Cleanup()
extendedResourceName1 := v1.ResourceName("test.com/resource1")
extendedResourceName2 := v1.ResourceName("test.com/resource2")
cases := []struct {
name string
testKubelet *TestKubelet
initialNode *v1.Node
existingNode *v1.Node
expectedNode *v1.Node
needsUpdate bool
}{
{
name: "no update needed without extended resource",
testKubelet: testKubelet,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
},
},
needsUpdate: false,
},
{
name: "extended resource capacity is zeroed",
testKubelet: testKubeletNoReset,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
},
},
},
needsUpdate: true,
},
{
name: "does not panic when allocatable of existing node is nil",
testKubelet: testKubelet,
initialNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
},
},
existingNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
},
},
},
expectedNode: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
},
},
},
needsUpdate: true,
},
}
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
needsUpdate := kubelet.reconcileExtendedResource(tc.initialNode, tc.existingNode)
assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
})
}
}
func TestValidateNodeIPParam(t *testing.T) {
type test struct {
nodeIP string
success bool
testName string
}
tests := []test{
{
nodeIP: "",
success: false,
testName: "IP not set",
},
{
nodeIP: "127.0.0.1",
success: false,
testName: "IPv4 loopback address",
},
{
nodeIP: "::1",
success: false,
testName: "IPv6 loopback address",
},
{
nodeIP: "224.0.0.1",
success: false,
testName: "multicast IPv4 address",
},
{
nodeIP: "ff00::1",
success: false,
testName: "multicast IPv6 address",
},
{
nodeIP: "169.254.0.1",
success: false,
testName: "IPv4 link-local unicast address",
},
{
nodeIP: "fe80::0202:b3ff:fe1e:8329",
success: false,
testName: "IPv6 link-local unicast address",
},
{
nodeIP: "0.0.0.0",
success: false,
testName: "Unspecified IPv4 address",
},
{
nodeIP: "::",
success: false,
testName: "Unspecified IPv6 address",
},
{
nodeIP: "1.2.3.4",
success: false,
testName: "IPv4 address that doesn't belong to host",
},
}
addrs, err := net.InterfaceAddrs()
require.NoError(t, err, "Unable to obtain a list of the node's unicast interface addresses.")
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
continue
}
successTest := test{
nodeIP: ip.String(),
success: true,
testName: fmt.Sprintf("Success test case for address %s", ip.String()),
}
tests = append(tests, successTest)
}
for _, test := range tests {
err := validateNodeIP(netutils.ParseIPSloppy(test.nodeIP))
if test.success {
assert.NoErrorf(t, err, "test %s", test.testName)
} else {
assert.Errorf(t, err, "test %s", test.testName)
}
}
}
func TestRegisterWithApiServerWithTaint(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
kubelet.setCachedMachineInfo(machineInfo)
var gotNode runtime.Object
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
createAction := action.(core.CreateAction)
gotNode = createAction.GetObject()
return true, gotNode, nil
})
addNotImplatedReaction(kubeClient)
unschedulableTaint := v1.Taint{
Key: v1.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
}
// Mark the node with the unschedulable taint.
kubelet.registerWithTaints = []v1.Taint{unschedulableTaint}
// Reset kubelet status for each test.
kubelet.registrationCompleted = false
// Register node to apiserver.
kubelet.registerWithAPIServer()
// Check the unschedulable taint.
got := gotNode.(*v1.Node)
require.True(t,
taintutil.TaintExists(got.Spec.Taints, &unschedulableTaint),
"test unschedulable taint for TaintNodesByCondition")
}
func TestNodeStatusHasChanged(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
readyCondition := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: fakeNow,
LastTransitionTime: fakeNow,
}
readyConditionAtDiffHeartbeatTime := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: fakeFuture,
LastTransitionTime: fakeNow,
}
readyConditionAtDiffTransitionTime := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: fakeFuture,
LastTransitionTime: fakeFuture,
}
notReadyCondition := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: fakeNow,
LastTransitionTime: fakeNow,
}
memoryPressureCondition := v1.NodeCondition{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
LastHeartbeatTime: fakeNow,
LastTransitionTime: fakeNow,
}
testcases := []struct {
name string
originalStatus *v1.NodeStatus
status *v1.NodeStatus
expectChange bool
}{
{
name: "Node status does not change with nil status.",
originalStatus: nil,
status: nil,
expectChange: false,
},
{
name: "Node status does not change with default status.",
originalStatus: &v1.NodeStatus{},
status: &v1.NodeStatus{},
expectChange: false,
},
{
name: "Node status changes with nil and default status.",
originalStatus: nil,
status: &v1.NodeStatus{},
expectChange: true,
},
{
name: "Node status changes with nil and status.",
originalStatus: nil,
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
expectChange: true,
},
{
name: "Node status does not change with empty conditions.",
originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
status: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
expectChange: false,
},
{
name: "Node status does not change",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
expectChange: false,
},
{
name: "Node status does not change even if heartbeat time changes.",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyConditionAtDiffHeartbeatTime, memoryPressureCondition},
},
expectChange: false,
},
{
name: "Node status does not change even if the orders of conditions are different.",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHeartbeatTime},
},
expectChange: false,
},
{
name: "Node status changes if condition status differs.",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
},
expectChange: true,
},
{
name: "Node status changes if transition time changes.",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
},
expectChange: true,
},
{
name: "Node status changes with different number of conditions.",
originalStatus: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition},
},
status: &v1.NodeStatus{
Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
},
expectChange: true,
},
{
name: "Node status changes with different phase.",
originalStatus: &v1.NodeStatus{
Phase: v1.NodePending,
Conditions: []v1.NodeCondition{readyCondition},
},
status: &v1.NodeStatus{
Phase: v1.NodeRunning,
Conditions: []v1.NodeCondition{readyCondition},
},
expectChange: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
originalStatusCopy := tc.originalStatus.DeepCopy()
statusCopy := tc.status.DeepCopy()
changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
assert.Equal(t, tc.expectChange, changed, "Expect node status change to be %t, but got %t.", tc.expectChange, changed)
assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", cmp.Diff(originalStatusCopy, tc.originalStatus))
assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", cmp.Diff(statusCopy, tc.status))
})
}
}
func TestUpdateNodeAddresses(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
tests := []struct {
Name string
Before []v1.NodeAddress
After []v1.NodeAddress
}{
{
Name: "nil to populated",
Before: nil,
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "empty to populated",
Before: []v1.NodeAddress{},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "populated to nil",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: nil,
},
{
Name: "populated to empty",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{},
},
{
Name: "multiple addresses of same type, no change",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "1 InternalIP to 2 InternalIP",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "2 InternalIP to 1 InternalIP",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "2 InternalIP to 2 different InternalIP",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
{Type: v1.NodeInternalIP, Address: "127.0.0.4"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
{
Name: "2 InternalIP to reversed order",
Before: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
After: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
},
}
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
ctx := context.Background()
oldNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Addresses: test.Before,
},
}
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Addresses: test.After,
},
}
_, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{})
assert.NoError(t, err)
kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
func(_ context.Context, node *v1.Node) error {
node.Status.Addresses = expectedNode.Status.Addresses
return nil
},
}
assert.NoError(t, kubelet.updateNodeStatus(ctx))
actions := kubeClient.Actions()
lastAction := actions[len(actions)-1]
assert.IsType(t, core.PatchActionImpl{}, lastAction)
patchAction := lastAction.(core.PatchActionImpl)
updatedNode, err := applyNodeStatusPatch(oldNode, patchAction.GetPatch())
require.NoError(t, err)
assert.True(t, apiequality.Semantic.DeepEqual(updatedNode, expectedNode), "%s", cmp.Diff(expectedNode, updatedNode))
})
}
}
func TestIsUpdateStatusPeriodExpired(t *testing.T) {
testcases := []struct {
name string
lastStatusReportTime time.Time
delayAfterNodeStatusChange time.Duration
expectExpired bool
}{
{
name: "no status update before and no delay",
lastStatusReportTime: time.Time{},
delayAfterNodeStatusChange: 0,
expectExpired: true,
},
{
name: "no status update before and existing delay",
lastStatusReportTime: time.Time{},
delayAfterNodeStatusChange: 30 * time.Second,
expectExpired: true,
},
{
name: "not expired and no delay",
lastStatusReportTime: time.Now().Add(-4 * time.Minute),
delayAfterNodeStatusChange: 0,
expectExpired: false,
},
{
name: "not expired",
lastStatusReportTime: time.Now().Add(-5 * time.Minute),
delayAfterNodeStatusChange: time.Minute,
expectExpired: false,
},
{
name: "expired",
lastStatusReportTime: time.Now().Add(-4 * time.Minute),
delayAfterNodeStatusChange: -2 * time.Minute,
expectExpired: true,
},
{
name: "Delay exactly at threshold",
lastStatusReportTime: time.Now().Add(-5 * time.Minute),
delayAfterNodeStatusChange: 0,
expectExpired: true,
},
}
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusReportFrequency = 5 * time.Minute
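// As exercised by the cases above, the update period is considered expired
// once the time since the last status report reaches
// nodeStatusReportFrequency plus the per-test delay.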
for _, tc := range testcases {
kubelet.lastStatusReportTime = tc.lastStatusReportTime
kubelet.delayAfterNodeStatusChange = tc.delayAfterNodeStatusChange
expired := kubelet.isUpdateStatusPeriodExpired()
assert.Equal(t, tc.expectExpired, expired, tc.name)
}
}
func TestCalculateDelay(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusReportFrequency = 5 * time.Minute
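// The randomized delay may be negative, but its magnitude must always stay
// within half of nodeStatusReportFrequency.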
for i := 0; i < 100; i++ {
randomDelay := kubelet.calculateDelay()
assert.LessOrEqual(t, randomDelay.Abs(), kubelet.nodeStatusReportFrequency/2)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"context"
goerrors "errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubelet/pkg/cri/streaming/portforward"
remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
envutil "k8s.io/kubernetes/pkg/kubelet/util/env"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
utilpod "k8s.io/kubernetes/pkg/util/pod"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
utilnet "k8s.io/utils/net"
)
const (
managedHostsHeader = "# Kubernetes-managed hosts file.\n"
managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"
)
// Container state reason list
const (
PodInitializing = "PodInitializing"
ContainerCreating = "ContainerCreating"
kubeletUser = "kubelet"
)
// parseGetSubIdsOutput parses the output from the `getsubids` tool, which is used to query subordinate user or group ID ranges for
// a given user or group. getsubids produces a line for each mapping configured.
// Here we expect that there is a single mapping, and the same values are used for the subordinate user and group ID ranges.
// The output is something like:
// $ getsubids kubelet
// 0: kubelet 65536 2147483648
// $ getsubids -g kubelet
// 0: kubelet 65536 2147483648
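// In that example, the parsed result is (65536, 2147483648): the first
// subordinate ID and the length of the range.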
func parseGetSubIdsOutput(input string) (uint32, uint32, error) {
lines := strings.Split(strings.Trim(input, "\n"), "\n")
if len(lines) != 1 {
return 0, 0, fmt.Errorf("error parsing line %q: it must contain only one line", input)
}
parts := strings.Fields(lines[0])
if len(parts) != 4 {
return 0, 0, fmt.Errorf("invalid line %q", input)
}
// Parse the range start and length.
num1, err := strconv.ParseUint(parts[2], 10, 32)
if err != nil {
return 0, 0, fmt.Errorf("error parsing line %q: %w", input, err)
}
num2, err := strconv.ParseUint(parts[3], 10, 32)
if err != nil {
return 0, 0, fmt.Errorf("error parsing line %q: %w", input, err)
}
return uint32(num1), uint32(num2), nil
}
// getKubeletMappings returns the range of IDs that can be used to configure user namespaces.
// If subordinate user or group ID ranges are specified for the kubelet user and the getsubids tool
// is installed, then the single mapping specified both for user and group IDs will be used.
// If the tool is not installed, or there are no IDs configured, the default mapping is returned.
// The default mapping includes the entire IDs range except IDs below 65536.
func (kl *Kubelet) getKubeletMappings() (uint32, uint32, error) {
// default mappings to return if there is no specific configuration
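// With these defaults the mapping starts at 65536 (1<<16) and covers the
// rest of the 32-bit ID space: defaultLen = 2^32 - 65536 = 4294901760.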
const defaultFirstID = 1 << 16
const defaultLen = 1<<32 - defaultFirstID
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return defaultFirstID, defaultLen, nil
}
// We NEED to check for the user because getsubids can be configured to gather the response
// with a remote call, and we can't distinguish between the remote endpoint being unreachable
// and the remote endpoint being reachable but having no entry for the user.
// So we check for the kubelet user first: if it exists and getsubids is present, we expect
// to get _some_ configuration. If the user exists and getsubids doesn't give us any
// configuration, then we consider the remote down and fail to start the kubelet.
_, err := user.Lookup(kubeletUser)
if err != nil {
var unknownUserErr user.UnknownUserError
if goerrors.As(err, &unknownUserErr) {
// if the user is not found, we assume that the user is not configured
klog.V(5).InfoS("user namespaces: user not found, using default mappings", "user", kubeletUser)
return defaultFirstID, defaultLen, nil
}
return 0, 0, err
}
execName := "getsubids"
cmd, err := exec.LookPath(execName)
if err != nil {
if os.IsNotExist(err) {
klog.V(2).InfoS("user namespaces: executable not found, using default mappings", "executable", execName, "err", err)
return defaultFirstID, defaultLen, nil
}
return 0, 0, err
}
outUids, err := exec.Command(cmd, kubeletUser).Output()
if err != nil {
return 0, 0, fmt.Errorf("error retrieving additional uids for user %q: %w", kubeletUser, err)
}
outGids, err := exec.Command(cmd, "-g", kubeletUser).Output()
if err != nil {
return 0, 0, fmt.Errorf("error retrieving additional gids for user %q", kubeletUser)
}
if string(outUids) != string(outGids) {
return 0, 0, fmt.Errorf("mismatched subuids and subgids for user %q", kubeletUser)
}
klog.V(5).InfoS("user namespaces: user found, using mappings from getsubids", "user", kubeletUser)
return parseGetSubIdsOutput(string(outUids))
}
// Get a list of pods that have data directories.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
podInfos, err := os.ReadDir(kl.getPodsDir())
if err != nil {
return nil, err
}
pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
}
// GetActivePods returns pods that have been admitted to the kubelet that
// are not fully terminated. This is mapped to the "desired state" of the
// kubelet - what pods should be running.
//
// WARNING: Currently this list does not include pods that have been force
// deleted but may still be terminating, which means resources assigned to
// those pods during admission may still be in use. See
// https://github.com/kubernetes/kubernetes/issues/104824
func (kl *Kubelet) GetActivePods() []*v1.Pod {
allPods := kl.podManager.GetPods()
activePods := kl.filterOutInactivePods(allPods)
return activePods
}
// getAllocatedPods returns the active pods (see GetActivePods), but updates the pods to their
// allocated state.
func (kl *Kubelet) getAllocatedPods() []*v1.Pod {
activePods := kl.GetActivePods()
if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
return activePods
}
allocatedPods := make([]*v1.Pod, len(activePods))
for i, pod := range activePods {
allocatedPods[i], _ = kl.allocationManager.UpdatePodFromAllocation(pod)
}
return allocatedPods
}
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
var devices []kubecontainer.DeviceInfo
for _, device := range container.VolumeDevices {
// check path is absolute
if !utilfs.IsAbs(device.DevicePath) {
return nil, fmt.Errorf("error DevicePath `%s` must be an absolute path", device.DevicePath)
}
vol, ok := podVolumes[device.Name]
if !ok || vol.BlockVolumeMapper == nil {
klog.ErrorS(nil, "Block volume cannot be satisfied for container, because the volume is missing or the volume mapper is nil", "containerName", container.Name, "device", device)
return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name)
}
// Get a symbolic link associated to a block device under pod device path
dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath()
symlinkPath := filepath.Join(dirPath, volName)
if islinkExist, checkErr := blkutil.IsSymlinkExist(symlinkPath); checkErr != nil {
return nil, checkErr
} else if islinkExist {
// Check readOnly in PVCVolumeSource and set read only permission if it's true.
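// The permission string uses the device cgroup syntax: r=read, w=write, m=mknod.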
permission := "mrw"
if vol.ReadOnly {
permission = "r"
}
klog.V(4).InfoS("Device will be attached to container in the corresponding path on host", "containerName", container.Name, "path", symlinkPath)
devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission})
}
}
return devices, nil
}
// shouldMountHostsFile checks if the node's /etc/hosts should be mounted.
// Kubernetes only mounts /etc/hosts if:
// - the container is not an infrastructure (pause) container, and
// - the container is not already mounting on /etc/hosts.
// Kubernetes will not mount /etc/hosts if:
// - the Pod is on the pod network and PodIP has not yet been set (e.g., the Pod sandbox is being created), or
// - the Pod is on Windows and contains a hostProcess container.
func shouldMountHostsFile(pod *v1.Pod, podIPs []string) bool {
shouldMount := len(podIPs) > 0 || pod.Spec.HostNetwork
if runtime.GOOS == "windows" {
return shouldMount && !kubecontainer.HasWindowsHostProcessContainer(pod)
}
return shouldMount
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar, supportsRRO bool, imageVolumes kubecontainer.ImageVolumes) ([]kubecontainer.Mount, func(), error) {
mountEtcHostsFile := shouldMountHostsFile(pod, podIPs)
klog.V(3).InfoS("Creating hosts mount for container", "pod", klog.KObj(pod), "containerName", container.Name, "podIPs", podIPs, "path", mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
var cleanupAction func()
for i, mount := range container.VolumeMounts {
// do not mount /etc/hosts if container is already mounting on the path
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok || vol.Mounter == nil {
klog.ErrorS(nil, "Mount cannot be satisfied for the container, because the volume is missing or the volume mounter (vol.Mounter) is nil",
"containerName", container.Name, "ok", ok, "volumeMounter", mount)
return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
}
relabelVolume := false
// If the volume supports SELinux and it has not been
// relabeled already and it is not a read-only volume,
// relabel it and mark it as labeled
if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SELinuxRelabel && !vol.SELinuxLabeled {
vol.SELinuxLabeled = true
relabelVolume = true
}
var (
hostPath string
image *runtimeapi.ImageSpec
imageSubPath string
err error
)
subPath := mount.SubPath
if mount.SubPathExpr != "" {
subPath, err = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs)
if err != nil {
return nil, cleanupAction, err
}
}
if subPath != "" {
if utilfs.IsAbs(subPath) {
return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", subPath)
}
if err := volumevalidation.ValidatePathNoBacksteps(subPath); err != nil {
return nil, cleanupAction, fmt.Errorf("unable to provision SubPath `%s`: %w", subPath, err)
}
}
if imageVolumes != nil && utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
image = imageVolumes[mount.Name]
imageSubPath = subPath
}
if image == nil {
hostPath, err = volumeutil.GetPath(vol.Mounter)
if err != nil {
return nil, cleanupAction, err
}
if subPath != "" {
volumePath := hostPath
hostPath = filepath.Join(volumePath, subPath)
if subPathExists, err := hu.PathExists(hostPath); err != nil {
klog.ErrorS(nil, "Could not determine if subPath exists, will not attempt to change its permissions", "path", hostPath)
} else if !subPathExists {
// Create the sub path now because if it's auto-created later when referenced, it may have an
// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
// later auto-create it with the incorrect mode 0750
// Make extra care not to escape the volume!
perm, err := hu.GetMode(volumePath)
if err != nil {
return nil, cleanupAction, err
}
if err := subpather.SafeMakeDir(subPath, volumePath, perm); err != nil {
// Don't pass detailed error back to the user because it could give information about host filesystem
klog.ErrorS(err, "Failed to create subPath directory for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name)
}
}
hostPath, cleanupAction, err = subpather.PrepareSafeSubpath(subpath.Subpath{
VolumeMountIndex: i,
Path: hostPath,
VolumeName: vol.InnerVolumeSpecName,
VolumePath: volumePath,
PodDir: podDir,
ContainerName: container.Name,
})
if err != nil {
// Don't pass detailed error back to the user because it could give information about host filesystem
klog.ErrorS(err, "Failed to prepare subPath for volumeMount of the container", "containerName", container.Name, "volumeMountName", mount.Name)
return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
}
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
if hostPath != "" && volumeutil.IsWindowsLocalPath(runtime.GOOS, hostPath) {
hostPath = volumeutil.MakeAbsolutePath(runtime.GOOS, hostPath)
}
}
containerPath := mount.MountPath
// IsAbs returns false for UNC path/SMB shares/named pipes in Windows. So check for those specifically and skip MakeAbsolutePath
if !volumeutil.IsWindowsUNCPath(runtime.GOOS, containerPath) && !utilfs.IsAbs(containerPath) {
containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath)
}
propagation, err := translateMountPropagation(mount.MountPropagation)
if err != nil {
return nil, cleanupAction, err
}
klog.V(5).InfoS("Mount has propagation", "pod", klog.KObj(pod), "containerName", container.Name, "volumeMountName", mount.Name, "propagation", propagation)
mustMountRO := vol.Mounter.GetAttributes().ReadOnly
rro, err := resolveRecursiveReadOnly(mount, supportsRRO)
if err != nil {
return nil, cleanupAction, fmt.Errorf("failed to resolve recursive read-only mode: %w", err)
}
if rro && !utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) {
return nil, cleanupAction, fmt.Errorf("recursive read-only mount needs feature gate %q to be enabled", features.RecursiveReadOnlyMounts)
}
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
Image: image,
ImageSubPath: imageSubPath,
ReadOnly: mount.ReadOnly || mustMountRO,
RecursiveReadOnly: rro,
SELinuxRelabel: relabelVolume,
Propagation: propagation,
})
}
if mountEtcHostsFile {
hostAliases := pod.Spec.HostAliases
hostsMount, err := makeHostsMount(podDir, podIPs, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
if err != nil {
return nil, cleanupAction, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, cleanupAction, nil
}
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
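// On Linux, nil and MountPropagationNone map to PRIVATE, HostToContainer to
// HOST_TO_CONTAINER, and Bidirectional to BIDIRECTIONAL; Windows always gets PRIVATE.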
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
if runtime.GOOS == "windows" {
// Windows containers don't support mount propagation; use private.
// See https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation.
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
}
switch {
case mountMode == nil:
// PRIVATE is the default
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
case *mountMode == v1.MountPropagationHostToContainer:
return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
case *mountMode == v1.MountPropagationBidirectional:
return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
case *mountMode == v1.MountPropagationNone:
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
default:
return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
}
}
// getEtcHostsPath returns the full host-side path to a pod's generated /etc/hosts file
func getEtcHostsPath(podDir string) string {
hostsFilePath := filepath.Join(podDir, "etc-hosts")
// Volume Mounts fail on Windows if it is not of the form C:/
return volumeutil.MakeAbsolutePath(runtime.GOOS, hostsFilePath)
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with. podIPs is provided instead of podIP as podIPs
// are present even if the dual-stack feature flag is not enabled.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
hostsFilePath := getEtcHostsPath(podDir)
if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
return nil, err
}
return &kubecontainer.Mount{
Name: "k8s-managed-etc-hosts",
ContainerPath: etcHostsPath,
HostPath: hostsFilePath,
ReadOnly: false,
SELinuxRelabel: true,
}, nil
}
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
var hostsFileContent []byte
var err error
if useHostNetwork {
// if Pod is using host network, read hosts file from the node's filesystem.
// `etcHostsPath` references the location of the hosts file on the node.
// `/etc/hosts` for *nix systems.
hostsFileContent, err = nodeHostsFileContent(etcHostsPath, hostAliases)
if err != nil {
return err
}
} else {
// if Pod is not using host network, create a managed hosts file with Pod IP and other information.
hostsFileContent = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases)
}
hostsFilePerm := os.FileMode(0644)
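// os.WriteFile only applies hostsFilePerm when it creates the file; the
// explicit Chmod below ensures a pre-existing file also ends up with 0644.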
if err := os.WriteFile(fileName, hostsFileContent, hostsFilePerm); err != nil {
return err
}
return os.Chmod(fileName, hostsFilePerm)
}
// nodeHostsFileContent reads the content of node's hosts file.
func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]byte, error) {
hostsFileContent, err := os.ReadFile(hostsFilePath)
if err != nil {
return nil, err
}
var buffer bytes.Buffer
buffer.WriteString(managedHostsHeaderWithHostNetwork)
buffer.Write(hostsFileContent)
buffer.Write(hostsEntriesFromHostAliases(hostAliases))
return buffer.Bytes(), nil
}
// managedHostsFileContent generates the content of the managed etc hosts based on Pod IPs and other
// information.
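// For example, with hostIPs=["10.1.2.3"], hostName="web-0" and
// hostDomainName="default.svc.cluster.local", the generated entry is:
// 10.1.2.3	web-0.default.svc.cluster.local	web-0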
func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
var buffer bytes.Buffer
buffer.WriteString(managedHostsHeader)
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
buffer.WriteString("fe00::0\tip6-localnet\n")
buffer.WriteString("fe00::0\tip6-mcastprefix\n")
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
// a host entry is generated for every IP in podIPs; the podIPs
// field is populated even when the dual-stack feature flag is
// not enabled.
for _, hostIP := range hostIPs {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
}
} else {
for _, hostIP := range hostIPs {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
}
buffer.Write(hostsEntriesFromHostAliases(hostAliases))
return buffer.Bytes()
}
func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
if len(hostAliases) == 0 {
return []byte{}
}
var buffer bytes.Buffer
buffer.WriteString("\n")
buffer.WriteString("# Entries added by HostAliases.\n")
// for each IP, write all aliases onto single line in hosts file
for _, hostAlias := range hostAliases {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, strings.Join(hostAlias.Hostnames, "\t")))
}
return buffer.Bytes()
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
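// For example, a 70-character hostname is cut to its first 63 characters,
// and any trailing '-' or '.' left at the cut point is trimmed off.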
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
// Cap hostname at 63 chars (the specification is 64 bytes, which is 63 chars plus the null terminating char).
const hostnameMaxLen = 63
if len(hostname) <= hostnameMaxLen {
return hostname, nil
}
truncated := hostname[:hostnameMaxLen]
klog.ErrorS(nil, "Hostname for pod was too long, truncated it", "podName", podName, "hostnameMaxLen", hostnameMaxLen, "truncatedHostname", truncated)
// hostname should not end with '-' or '.'
truncated = strings.TrimRight(truncated, "-.")
if len(truncated) == 0 {
// This should never happen.
return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
}
return truncated, nil
}
// GetOrCreateUserNamespaceMappings returns the configuration for the sandbox user namespace
func (kl *Kubelet) GetOrCreateUserNamespaceMappings(pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error) {
// Use context.TODO() because we currently do not have a proper logger to pass in.
// This should be replaced with an appropriate context when refactoring this function to accept a context parameter.
return kl.usernsManager.GetOrCreateUserNamespaceMappings(context.TODO(), pod, runtimeHandler)
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
clusterDomain := kl.dnsConfigurer.ClusterDomain
if utilfeature.DefaultFeatureGate.Enabled(features.HostnameOverride) && pod.Spec.HostnameOverride != nil {
hostname := *pod.Spec.HostnameOverride
if msgs := utilvalidation.IsDNS1123Subdomain(hostname); len(msgs) != 0 {
return "", "", fmt.Errorf("pod HostnameOverride %q is not a valid DNS subdomain: %s", hostname, strings.Join(msgs, ";"))
}
truncatedHostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
return truncatedHostname, "", nil
}
hostname := pod.Name
if len(pod.Spec.Hostname) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
return "", "", fmt.Errorf("pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
}
hostname = pod.Spec.Hostname
}
hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
hostDomain := ""
if len(pod.Spec.Subdomain) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
return "", "", fmt.Errorf("pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
}
hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
}
return hostname, hostDomain, nil
}
// GetPodCgroupParent gets pod cgroup parent from container manager.
func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
pcm := kl.containerManager.NewPodContainerManager()
_, cgroupParent := pcm.GetPodContainerName(pod)
return cgroupParent
}
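// Illustrative note (hypothetical UID; the exact form depends on the cgroup
// driver and the pod's QoS class): for a burstable pod the parent returned
// above typically looks like
//
//	/kubepods/burstable/pod<uid>        (cgroupfs driver)
//	kubepods-burstable-pod<uid>.slice   (systemd driver)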
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (*kubecontainer.RunContainerOptions, func(), error) {
supportsRRO := kl.runtimeClassSupportsRecursiveReadOnlyMounts(pod)
opts, err := kl.containerManager.GetResources(ctx, pod, container)
if err != nil {
return nil, nil, err
}
// The value of hostname is the short host name; it is passed to makeMounts to create the /etc/hosts file.
hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, nil, err
}
podName := volumeutil.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
if err != nil {
return nil, nil, err
}
opts.Devices = append(opts.Devices, blkVolumes...)
envs, err := kl.makeEnvironmentVariables(pod, container, podIP, podIPs, volumes)
if err != nil {
return nil, nil, err
}
opts.Envs = append(opts.Envs, envs...)
// only podIPs is sent to makeMounts, as podIPs is populated even if the dual-stack feature flag is not enabled.
mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs, supportsRRO, imageVolumes)
if err != nil {
return nil, cleanupAction, err
}
opts.Mounts = append(opts.Mounts, mounts...)
// adding TerminationMessagePath on Windows is only allowed if ContainerD is used. Individual files cannot
// be mounted as volumes using Docker for Windows.
if len(container.TerminationMessagePath) != 0 {
p := kl.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
klog.ErrorS(err, "Error on creating dir", "path", p)
} else {
opts.PodContainerDir = p
}
}
return opts, cleanupAction, nil
}
var masterServices = sets.New[string]("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) {
var (
serviceMap = make(map[string]*v1.Service)
m = make(map[string]string)
)
// Get all service resources from the master (via a cache),
// and populate them into service environment variables.
if kl.serviceLister == nil {
// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
return m, nil
}
services, err := kl.serviceLister.List(labels.Everything())
if err != nil {
return m, fmt.Errorf("failed to list services when setting up env vars")
}
// project the services in namespace ns onto the master services
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
if !v1helper.IsServiceIPSet(service) {
continue
}
serviceName := service.Name
// We always want to add environment variables for master services
// from the default namespace, even if enableServiceLinks is false.
// We also add environment variables for other services in the same
// namespace, if enableServiceLinks is true.
if service.Namespace == metav1.NamespaceDefault && masterServices.Has(serviceName) {
if _, exists := serviceMap[serviceName]; !exists {
serviceMap[serviceName] = service
}
} else if service.Namespace == ns && enableServiceLinks {
serviceMap[serviceName] = service
}
}
mappedServices := []*v1.Service{}
for key := range serviceMap {
mappedServices = append(mappedServices, serviceMap[key])
}
for _, e := range envvars.FromServices(mappedServices) {
m[e.Name] = e.Value
}
return m, nil
}
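// Illustrative sketch (hypothetical service): a Service "redis-primary" with
// ClusterIP 10.0.0.11 and TCP port 6379 contributes docker-links-style
// variables via envvars.FromServices, such as:
//
//	REDIS_PRIMARY_SERVICE_HOST=10.0.0.11
//	REDIS_PRIMARY_SERVICE_PORT=6379
//	REDIS_PRIMARY_PORT=tcp://10.0.0.11:6379
//	REDIS_PRIMARY_PORT_6379_TCP_ADDR=10.0.0.11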
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.EnvVar, error) {
if pod.Spec.EnableServiceLinks == nil {
return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars")
}
// If the pod originates from the kube-api, then we know that the kube-apiserver is responding and the kubelet's credentials are valid.
// Knowing this, it is reasonable to wait until the service lister has synchronized at least once before attempting to build
// a service env var map. This doesn't prevent the race below from happening entirely, but it does prevent the "obvious"
// failure case of services simply not having completed a list operation that can reasonably be expected to succeed.
// One common case this prevents is a kubelet restart reading pods before services and some pod not having the
// KUBERNETES_SERVICE_HOST injected because we didn't wait a short time for services to sync before proceeding.
// The KUBERNETES_SERVICE_HOST link is special because it is unconditionally injected into pods and is read by the
// in-cluster-config for pod clients
if !kubetypes.IsStaticPod(pod) && !kl.serviceHasSynced() {
return nil, fmt.Errorf("services have not yet been read at least once, cannot construct envvars")
}
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
// by kubecontainer.HashContainer(...). That way, we can still determine whether a
// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
// To avoid this users can: (1) wait between starting a service and starting pods that use it; or (2) detect
// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
// and keep trying to resolve the DNS name of the service (recommended).
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
if err != nil {
return result, err
}
var (
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
tmpEnv = make(map[string]string)
)
// Env will override EnvFrom variables.
// Process EnvFrom first then allow Env to replace existing values.
for _, envFrom := range container.EnvFrom {
switch {
case envFrom.ConfigMapRef != nil:
cm := envFrom.ConfigMapRef
name := cm.Name
configMap, ok := configMaps[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
optional := cm.Optional != nil && *cm.Optional
configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
configMaps[name] = configMap
}
for k, v := range configMap.Data {
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
tmpEnv[k] = v
}
case envFrom.SecretRef != nil:
s := envFrom.SecretRef
name := s.Name
secret, ok := secrets[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
optional := s.Optional != nil && *s.Optional
secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
secrets[name] = secret
}
for k, v := range secret.Data {
if len(envFrom.Prefix) > 0 {
k = envFrom.Prefix + k
}
tmpEnv[k] = string(v)
}
}
}
// Determine the final values of variables:
//
// 1. Determine the final value of each variable:
// a. If the variable's Value is set, expand the `$(var)` references to other
// variables in the .Value field; the sources of variables are the declared
// variables of the container and the service environment variables
// b. If a source is defined for an environment variable, resolve the source
// 2. Create the container's environment in the order variables are declared
// 3. Add remaining service environment vars
var (
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
for _, envVar := range container.Env {
runtimeVal := envVar.Value
if runtimeVal != "" {
// Step 1a: expand variable references
runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
} else if envVar.ValueFrom != nil {
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, podIPs)
if err != nil {
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardAPI(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
case envVar.ValueFrom.ConfigMapKeyRef != nil:
cm := envVar.ValueFrom.ConfigMapKeyRef
name := cm.Name
key := cm.Key
optional := cm.Optional != nil && *cm.Optional
configMap, ok := configMaps[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
configMap, err = kl.configMapManager.GetConfigMap(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
configMaps[name] = configMap
}
runtimeVal, ok = configMap.Data[key]
if !ok {
if optional {
continue
}
return result, fmt.Errorf("couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
}
case envVar.ValueFrom.SecretKeyRef != nil:
s := envVar.ValueFrom.SecretKeyRef
name := s.Name
key := s.Key
optional := s.Optional != nil && *s.Optional
secret, ok := secrets[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
secret, err = kl.secretManager.GetSecret(pod.Namespace, name)
if err != nil {
if errors.IsNotFound(err) && optional {
// ignore error when marked optional
continue
}
return result, err
}
secrets[name] = secret
}
runtimeValBytes, ok := secret.Data[key]
if !ok {
if optional {
continue
}
return result, fmt.Errorf("couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
}
runtimeVal = string(runtimeValBytes)
case utilfeature.DefaultFeatureGate.Enabled(features.EnvFiles) && envVar.ValueFrom.FileKeyRef != nil:
f := envVar.ValueFrom.FileKeyRef
key := f.Key
volume := f.VolumeName
optional := f.Optional != nil && *f.Optional
vol, ok := podVolumes[volume]
if !ok || vol.Mounter == nil {
return result, fmt.Errorf("cannot find the volume %q referenced by FileKeyRef", volume)
}
hostPath, err := volumeutil.GetPath(vol.Mounter)
if err != nil {
return result, fmt.Errorf("failed to get host path for volume %q: %w", volume, err)
}
// Validate key length, must not exceed 128 characters.
// TODO: @HirazawaUi This limit will be relaxed after the EnvFiles feature gate beta stage.
if len(key) > 128 {
return result, fmt.Errorf("environment variable key %q exceeds maximum length of 128 characters", key)
}
// Construct the full path to the environment variable file
// by combining hostPath with the specified path in FileKeyRef
envFilePath, err := securejoin.SecureJoin(hostPath, f.Path)
if err != nil {
return result, err
}
runtimeVal, err = envutil.ParseEnv(envFilePath, key)
if err != nil {
klog.ErrorS(err, "Failed to parse env file", "pod", klog.KObj(pod))
return result, fmt.Errorf("couldn't parse env file")
}
// Validate value size, must not exceed 32KB.
// TODO: @HirazawaUi This limit will be relaxed after the EnvFiles feature gate beta stage.
if len(runtimeVal) > 32*1024 {
return result, fmt.Errorf("environment variable value for key %q exceeds maximum size of 32KB", key)
}
// If the key was not found, and it's not optional, return an error
if runtimeVal == "" {
if optional {
// If the key doesn't exist, and it's optional, skip this environment variable
continue
}
return result, fmt.Errorf("environment variable key %q not found in file %q", key, envFilePath)
}
}
}
tmpEnv[envVar.Name] = runtimeVal
}
// Append the env vars
for k, v := range tmpEnv {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
// Append remaining service env vars.
for k, v := range serviceEnv {
// Service env vars may be set by the master (apiserver+Pods) or generated by
// the kubelet. If a key is present in both, we skip the kubelet-generated
// value so we don't emit duplicate env vars.
// TODO: remove this check once all platforms use apiserver+Pods.
if _, present := tmpEnv[k]; !present {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
}
return result, nil
}
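// Illustrative sketch of the precedence implemented above (hypothetical
// container spec): EnvFrom is merged first, Env then overwrites matching keys,
// and $(var) references expand against previously declared vars plus the
// service env:
//
//	container.Env = []v1.EnvVar{
//		{Name: "POD_NS", ValueFrom: &v1.EnvVarSource{
//			FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
//		}},
//		// $(POD_NS) expands because POD_NS is declared earlier in the list.
//		{Name: "GREETING", Value: "hello from $(POD_NS)"},
//	}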
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) {
internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
if err != nil {
return "", err
}
// make podIPs order match node IP family preference #97979
podIPs = kl.sortPodIPs(podIPs)
if len(podIPs) > 0 {
podIP = podIPs[0]
}
switch internalFieldPath {
case "spec.nodeName":
return pod.Spec.NodeName, nil
case "spec.serviceAccountName":
return pod.Spec.ServiceAccountName, nil
case "status.hostIP":
hostIPs, err := kl.getHostIPsAnyWay()
if err != nil {
return "", err
}
return hostIPs[0].String(), nil
case "status.hostIPs":
hostIPs, err := kl.getHostIPsAnyWay()
if err != nil {
return "", err
}
ips := make([]string, 0, len(hostIPs))
for _, ip := range hostIPs {
ips = append(ips, ip.String())
}
return strings.Join(ips, ","), nil
case "status.podIP":
return podIP, nil
case "status.podIPs":
return strings.Join(podIPs, ","), nil
}
return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
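// Illustrative sketch (hypothetical selector): a downward-API fieldRef for
// "status.podIPs" resolves through the function above to a comma-joined list
// with the node's primary IP family first:
//
//	fs := &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "status.podIPs"}
//	val, _ := kl.podFieldSelectorRuntimeValue(fs, pod, podIP, podIPs)
//	// val == "10.1.2.3,fd00::3" on an IPv4-primary dual-stack node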
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
containerName := fs.ContainerName
if len(containerName) == 0 {
return resource.ExtractContainerResourceValue(fs, container)
}
return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
}
// killPod instructs the container runtime to kill the pod. This method requires that
// the pod status contains the result of the last syncPod, otherwise it may fail to
// terminate newly created containers and sandboxes.
func (kl *Kubelet) killPod(ctx context.Context, pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error {
// Call the container runtime KillPod method which stops all known running containers of the pod
if err := kl.containerRuntime.KillPod(ctx, pod, p, gracePeriodOverride); err != nil {
return err
}
if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
klog.V(2).InfoS("Failed to update QoS cgroups while killing pod", "err", err)
}
return nil
}
// makePodDataDirs creates the data directories for the pod.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
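// Illustrative layout created above (hypothetical UID, assuming the default
// root directory /var/lib/kubelet):
//
//	/var/lib/kubelet/pods/<uid>/          kl.getPodDir(uid)
//	/var/lib/kubelet/pods/<uid>/volumes   kl.getPodVolumesDir(uid)
//	/var/lib/kubelet/pods/<uid>/plugins   kl.getPodPluginsDir(uid)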
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret {
pullSecrets := []v1.Secret{}
failedPullSecrets := []string{}
for _, secretRef := range pod.Spec.ImagePullSecrets {
if len(secretRef.Name) == 0 {
// API validation permitted entries with empty names (https://issue.k8s.io/99454#issuecomment-787838112).
// Ignore to avoid unnecessary warnings.
continue
}
secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name)
if err != nil {
klog.InfoS("Unable to retrieve pull secret, the image pull may not succeed.", "pod", klog.KObj(pod), "secret", klog.KObj(secret), "err", err)
failedPullSecrets = append(failedPullSecrets, secretRef.Name)
continue
}
pullSecrets = append(pullSecrets, *secret)
}
if len(failedPullSecrets) > 0 {
kl.recorder.Eventf(pod, v1.EventTypeWarning, "FailedToRetrieveImagePullSecret", "Unable to retrieve some image pull secrets (%s); attempting to pull the image may not succeed.", strings.Join(failedPullSecrets, ", "))
}
return pullSecrets
}
// PodCouldHaveRunningContainers returns true if the pod with the given UID could still have running
// containers. This returns false if the pod has not yet been started or the pod is unknown.
func (kl *Kubelet) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
if kl.podWorkers.CouldHaveRunningContainers(pod.UID) {
return true
}
// Check if pod might need to unprepare resources before termination
// NOTE: This is a temporary solution. This call is here to avoid changing
// status manager and its tests.
// TODO: extend PodDeletionSafetyProvider interface and implement it
// in a separate Kubelet method.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if kl.containerManager.PodMightNeedToUnprepareResources(pod.UID) {
return true
}
}
return false
}
// PodIsFinished returns true if SyncTerminatedPod is finished, ie.
// all required node-level resources that a pod was consuming have
// been reclaimed by the kubelet.
func (kl *Kubelet) PodIsFinished(pod *v1.Pod) bool {
return kl.podWorkers.ShouldPodBeFinished(pod.UID)
}
// filterOutInactivePods returns pods that are not in a terminal phase
// or are known to be fully terminated. This method should only be used
// when the set of pods being filtered is upstream of the pod worker, i.e.
// the pods the pod manager is aware of.
func (kl *Kubelet) filterOutInactivePods(pods []*v1.Pod) []*v1.Pod {
filteredPods := make([]*v1.Pod, 0, len(pods))
for _, p := range pods {
// if a pod is fully terminated by UID, it should be excluded from the
// list of pods
if kl.podWorkers.IsPodKnownTerminated(p.UID) {
continue
}
// terminal pods are considered inactive UNLESS they are actively terminating
if kl.isAdmittedPodTerminal(p) && !kl.podWorkers.IsPodTerminationRequested(p.UID) {
continue
}
filteredPods = append(filteredPods, p)
}
return filteredPods
}
// isAdmittedPodTerminal returns true if the provided config source pod is in
// a terminal phase, or if the Kubelet has already indicated the pod has reached
// a terminal phase but the config source has not accepted it yet. This method
// should only be used within the pod configuration loops that notify the pod
// worker, other components should treat the pod worker as authoritative.
func (kl *Kubelet) isAdmittedPodTerminal(pod *v1.Pod) bool {
// pods are considered inactive if the config source has observed a
// terminal phase (if the Kubelet recorded that the pod reached a terminal
// phase the pod should never be restarted)
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true
}
// a pod that has been marked terminal within the Kubelet is considered
// inactive (may have been rejected by Kubelet admission)
if status, ok := kl.statusManager.GetPodStatus(pod.UID); ok {
if status.Phase == v1.PodSucceeded || status.Phase == v1.PodFailed {
return true
}
}
return false
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
}
for _, pod := range mirrorPods {
podUIDs[pod.UID] = true
}
kl.statusManager.RemoveOrphanedStatuses(klog.TODO(), podUIDs)
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories. No config changes are sent to pod workers while this method
// is executing which means no new pods can appear. After this method completes
// the desired state of the kubelet should be reconciled with the actual state
// in the pod worker and other pod-related components.
//
// This function is executed by the main sync loop, so it must execute quickly
// and all nested calls should be asynchronous. Any slow reconciliation actions
// should be performed by other components (like the volume manager). The duration
// of this call is the minimum latency for static pods to be restarted if they
// are updated with a fixed UID (most should use a dynamic UID), and no config
// updates are delivered to the pod workers while this method is running.
func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error {
// The kubelet lacks checkpointing, so we need to introspect the set of pods
// in the cgroup tree prior to inspecting the set of pods in our pod manager.
// This ensures our view of the cgroup tree does not mistakenly observe pods
// that are added after the fact...
var (
cgroupPods map[types.UID]cm.CgroupName
err error
)
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
cgroupPods, err = pcm.GetAllPodsFromCgroups()
if err != nil {
return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
}
}
allPods, mirrorPods, orphanedMirrorPodFullnames := kl.podManager.GetPodsAndMirrorPods()
// Pod phase progresses monotonically. Once a pod has reached a final state,
// it should never leave regardless of the restart policy. The statuses
// of such pods should not be changed, and there is no need to sync them.
// TODO: the logic here does not handle two cases:
// 1. If the containers were removed immediately after they died, kubelet
// may fail to generate correct statuses, let alone filtering correctly.
// 2. If kubelet restarted before writing the terminated status for a pod
// to the apiserver, it could still restart the terminated pod (even
// though the pod was not considered terminated by the apiserver).
// These two conditions could be alleviated by checkpointing kubelet.
// Stop the workers for terminated pods not in the config source
klog.V(3).InfoS("Clean up pod workers for terminated pods")
workingPods := kl.podWorkers.SyncKnownPods(allPods)
// Reconcile: At this point the pod workers have been pruned to the set of
// desired pods. Pods that must be restarted due to UID reuse, or leftover
// pods from previous runs, are not known to the pod worker.
allPodsByUID := make(sets.Set[types.UID])
for _, pod := range allPods {
allPodsByUID.Insert(pod.UID)
}
// Identify the set of pods that have workers, which should be all pods
// from config that are not terminated, as well as any terminating pods
// that have already been removed from config. Pods that are terminating
// will be added to possiblyRunningPods, to prevent overly aggressive
// cleanup of pod cgroups.
stringIfTrue := func(t bool) string {
if t {
return "true"
}
return ""
}
runningPods := make(map[types.UID]sets.Empty)
possiblyRunningPods := make(map[types.UID]sets.Empty)
for uid, sync := range workingPods {
switch sync.State {
case SyncPod:
runningPods[uid] = struct{}{}
possiblyRunningPods[uid] = struct{}{}
case TerminatingPod:
possiblyRunningPods[uid] = struct{}{}
default:
}
}
// Retrieve the list of running containers from the runtime to perform cleanup.
// We need the latest state to avoid delaying restarts of static pods that reuse
// a UID.
if err := kl.runtimeCache.ForceUpdateIfOlder(ctx, kl.clock.Now()); err != nil {
klog.ErrorS(err, "Error listing containers")
return err
}
runningRuntimePods, err := kl.runtimeCache.GetPods(ctx)
if err != nil {
klog.ErrorS(err, "Error listing containers")
return err
}
// Stop probing pods that are not running
klog.V(3).InfoS("Clean up probes for terminated pods")
kl.probeManager.CleanupPods(possiblyRunningPods)
// Remove orphaned pod statuses not in the total list of known config pods
klog.V(3).InfoS("Clean up orphaned pod statuses")
kl.removeOrphanedPodStatuses(allPods, mirrorPods)
kl.allocationManager.RemoveOrphanedPods(allPodsByUID)
// Remove orphaned pod user namespace allocations (if any).
klog.V(3).InfoS("Clean up orphaned pod user namespace allocations")
if err = kl.usernsManager.CleanupOrphanedPodUsernsAllocations(ctx, allPods, runningRuntimePods); err != nil {
klog.ErrorS(err, "Failed cleaning up orphaned pod user namespaces allocations")
}
// Remove orphaned volumes from pods that are known not to have any
// containers. Note that we pass all pods (including terminated pods) to
// the function, so that we don't remove volumes associated with terminated
// but not yet deleted pods.
// TODO: this method could more aggressively cleanup terminated pods
// in the future (volumes, mount dirs, logs, and containers could all be
// better separated)
klog.V(3).InfoS("Clean up orphaned pod directories")
err = kl.cleanupOrphanedPodDirs(allPods, runningRuntimePods)
if err != nil {
// We want all cleanup tasks to be run even if one of them failed. So
// we just log an error here and continue other cleanup tasks.
// This also applies to the other clean up tasks.
klog.ErrorS(err, "Failed cleaning up orphaned pod directories")
}
// Remove any orphaned mirror pods (mirror pods are tracked by name via the
// pod worker)
klog.V(3).InfoS("Clean up orphaned mirror pods")
for _, podFullname := range orphanedMirrorPodFullnames {
if !kl.podWorkers.IsPodForMirrorPodTerminatingByFullName(podFullname) {
_, err := kl.mirrorPodClient.DeleteMirrorPod(ctx, podFullname, nil)
if err != nil {
klog.ErrorS(err, "Encountered error when deleting mirror pod", "podName", podFullname)
} else {
klog.V(3).InfoS("Deleted mirror pod", "podName", podFullname)
}
}
}
// After pruning the pod workers for terminated pods, get the list of active
// pods for metrics and to determine restarts.
activePods := kl.filterOutInactivePods(allPods)
allRegularPods, allStaticPods := splitPodsByStatic(allPods)
activeRegularPods, activeStaticPods := splitPodsByStatic(activePods)
metrics.DesiredPodCount.WithLabelValues("").Set(float64(len(allRegularPods)))
metrics.DesiredPodCount.WithLabelValues("true").Set(float64(len(allStaticPods)))
metrics.ActivePodCount.WithLabelValues("").Set(float64(len(activeRegularPods)))
metrics.ActivePodCount.WithLabelValues("true").Set(float64(len(activeStaticPods)))
metrics.MirrorPodCount.Set(float64(len(mirrorPods)))
// At this point, the pod worker is aware of which pods are not desired (SyncKnownPods).
// We now look through the set of active pods for those that the pod worker is not aware of
// and deliver an update. The most common reason a pod is not known is because the pod was
// deleted and recreated with the same UID while the pod worker was driving its lifecycle (very
// very rare for API pods, common for static pods with fixed UIDs). Containers that may still
// be running from a previous execution must be reconciled by the pod worker's sync method.
// We must use active pods because that is the set of admitted pods (podManager includes pods
// that will never be run, and statusManager tracks already rejected pods).
var restartCount, restartCountStatic int
for _, desiredPod := range activePods {
if _, knownPod := workingPods[desiredPod.UID]; knownPod {
continue
}
klog.V(3).InfoS("Pod will be restarted because it is in the desired set and not known to the pod workers (likely due to UID reuse)", "podUID", desiredPod.UID)
isStatic := kubetypes.IsStaticPod(desiredPod)
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(desiredPod)
if pod == nil || wasMirror {
klog.V(2).InfoS("Programmer error, restartable pod was a mirror pod but activePods should never contain a mirror pod", "podUID", desiredPod.UID)
continue
}
kl.podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: pod,
MirrorPod: mirrorPod,
})
// the desired pod is now known as well
workingPods[desiredPod.UID] = PodWorkerSync{State: SyncPod, HasConfig: true, Static: isStatic}
if isStatic {
// restartable static pods are the normal case
restartCountStatic++
} else {
// almost certainly means shenanigans, as API pods should never have the same UID after being deleted and recreated
// unless there is a major API violation
restartCount++
}
}
metrics.RestartedPodTotal.WithLabelValues("true").Add(float64(restartCountStatic))
metrics.RestartedPodTotal.WithLabelValues("").Add(float64(restartCount))
// Complete termination of deleted pods that are not runtime pods (don't have
// running containers), are terminal, and are not known to pod workers.
// An example is pods rejected during kubelet admission that have never
// started before (i.e. they do not have an orphaned pod).
// Adding the pods with SyncPodKill to the pod workers allows us to proceed with
// force-deletion of such pods, yet preventing re-entry of the routine in the
// next invocation of HandlePodCleanups.
for _, pod := range kl.filterTerminalPodsToDelete(allPods, runningRuntimePods, workingPods) {
klog.V(3).InfoS("Handling termination and deletion of the pod to pod workers", "pod", klog.KObj(pod), "podUID", pod.UID)
kl.podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
Pod: pod,
})
}
// Finally, terminate any pods that are observed in the runtime but not present in the list of
// known running pods from config. If we do terminate running runtime pods, that will happen
// asynchronously in the background and those will be processed in the next invocation of
// HandlePodCleanups.
var orphanCount int
for _, runningPod := range runningRuntimePods {
// If there are orphaned pod resources in CRI that are unknown to the pod worker, terminate them
// now. Since housekeeping is exclusive to other pod worker updates, we know that no pods have
// been added to the pod worker in the meantime. Note that pods that are not visible in the runtime
// but which were previously known are terminated by SyncKnownPods().
_, knownPod := workingPods[runningPod.ID]
if !knownPod {
one := int64(1)
killPodOptions := &KillPodOptions{
PodTerminationGracePeriodSecondsOverride: &one,
}
klog.V(2).InfoS("Clean up containers for orphaned pod we had not seen before", "podUID", runningPod.ID, "killPodOptions", killPodOptions)
kl.podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
RunningPod: runningPod,
KillPodOptions: killPodOptions,
})
// the running pod is now known as well
workingPods[runningPod.ID] = PodWorkerSync{State: TerminatingPod, Orphan: true}
orphanCount++
}
}
metrics.OrphanedRuntimePodTotal.Add(float64(orphanCount))
// Now that we have recorded any terminating pods, and added new pods that should be running,
// record a summary here. Not all possible combinations of PodWorkerSync values are valid.
counts := make(map[PodWorkerSync]int)
for _, sync := range workingPods {
counts[sync]++
}
for validSync, configState := range map[PodWorkerSync]string{
{HasConfig: true, Static: true}: "desired",
{HasConfig: true, Static: false}: "desired",
{Orphan: true, HasConfig: true, Static: true}: "orphan",
{Orphan: true, HasConfig: true, Static: false}: "orphan",
{Orphan: true, HasConfig: false}: "runtime_only",
} {
for _, state := range []PodWorkerState{SyncPod, TerminatingPod, TerminatedPod} {
validSync.State = state
count := counts[validSync]
delete(counts, validSync)
staticString := stringIfTrue(validSync.Static)
if !validSync.HasConfig {
staticString = "unknown"
}
metrics.WorkingPodCount.WithLabelValues(state.String(), configState, staticString).Set(float64(count))
}
}
if len(counts) > 0 {
// in case a combination is lost
klog.V(3).InfoS("Programmer error, did not report a kubelet_working_pods metric for a value returned by SyncKnownPods", "counts", counts)
}
// Remove any cgroups in the hierarchy for pods that are definitely no longer
// running (not in the container runtime).
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
klog.V(3).InfoS("Clean up orphaned pod cgroups")
kl.cleanupOrphanedPodCgroups(pcm, cgroupPods, possiblyRunningPods)
}
// Cleanup any backoff entries.
kl.crashLoopBackOff.GC()
return nil
}
// filterTerminalPodsToDelete returns terminal pods which are ready to be
// deleted by the status manager, but are not in pod workers.
// First, the check for deletionTimestamp is a performance optimization as we
// don't need to do anything with terminal pods without deletionTimestamp.
// Second, the check for terminal pods is to avoid race conditions of triggering
// deletion on Pending pods which are not yet added to pod workers.
// Third, the check to skip pods known to pod workers is that the lifecycle of
// such pods is already handled by pod workers.
// Finally, we skip runtime pods as their termination is handled separately in
// the HandlePodCleanups routine.
func (kl *Kubelet) filterTerminalPodsToDelete(allPods []*v1.Pod, runningRuntimePods []*kubecontainer.Pod, workingPods map[types.UID]PodWorkerSync) map[types.UID]*v1.Pod {
terminalPodsToDelete := make(map[types.UID]*v1.Pod)
for _, pod := range allPods {
if pod.DeletionTimestamp == nil {
// skip pods which don't have a deletion timestamp
continue
}
if !podutil.IsPodPhaseTerminal(pod.Status.Phase) {
// skip the non-terminal pods
continue
}
if _, knownPod := workingPods[pod.UID]; knownPod {
// skip pods known to pod workers
continue
}
terminalPodsToDelete[pod.UID] = pod
}
for _, runningRuntimePod := range runningRuntimePods {
// skip running runtime pods - they are handled by a dedicated routine
// which terminates the containers
delete(terminalPodsToDelete, runningRuntimePod.ID)
}
return terminalPodsToDelete
}
// splitPodsByStatic separates a list of desired pods from the pod manager into
// regular or static pods. Mirror pods are not valid config sources (a mirror pod
// being created cannot cause the Kubelet to start running a static pod) and are
// excluded.
func splitPodsByStatic(pods []*v1.Pod) (regular, static []*v1.Pod) {
regular, static = make([]*v1.Pod, 0, len(pods)), make([]*v1.Pod, 0, len(pods))
for _, pod := range pods {
if kubetypes.IsMirrorPod(pod) {
continue
}
if kubetypes.IsStaticPod(pod) {
static = append(static, pod)
} else {
regular = append(regular, pod)
}
}
return regular, static
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. If the previous flag is set, only the logs of the last terminated container are returned; otherwise, the
// current running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := podutil.GetContainerStatus(podStatus.ContainerStatuses, containerName)
if !found {
cStatus, found = podutil.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
cStatus, found = podutil.GetContainerStatus(podStatus.EphemeralContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
lastState := cStatus.LastTerminationState
waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
switch {
case previous:
if lastState.Terminated == nil || lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID
case terminated != nil:
// in cases where the next container didn't start, terminated.ContainerID will be empty, so get logs from the lastState.Terminated.
if terminated.ContainerID == "" {
if lastState.Terminated != nil && lastState.Terminated.ContainerID != "" {
cID = lastState.Terminated.ContainerID
} else {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
} else {
cID = terminated.ContainerID
}
case lastState.Terminated != nil:
if lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case images.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case images.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
}
return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Pod workers periodically write status to statusManager. If status is not
// cached there, something is wrong (or kubelet just restarted and hasn't
// caught up yet). Just assume the pod is not ready yet.
name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
if err != nil {
return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
}
pod, ok := kl.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("pod %q cannot be found - no logs available", name)
}
// TODO: this should be using the podWorker's pod store as authoritative, since
// the mirrorPod might still exist, the pod may have been force deleted but
// is still terminating (users should be able to view logs of force deleted static pods
// based on full name).
var podUID types.UID
pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
if wasMirror {
if pod == nil {
return fmt.Errorf("mirror pod %q does not have a corresponding pod", name)
}
podUID = mirrorPod.UID
} else {
podUID = pod.UID
}
podStatus, found := kl.statusManager.GetPodStatus(podUID)
if !found {
// If there is no cached status, use the status from the
// config source (apiserver). This is useful if kubelet
// has recently been restarted.
podStatus = pod.Status
}
// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
// but inside kuberuntime we convert container id back to container name and restart count.
// TODO: After separate container log lifecycle management, we should get log based on the existing log files
// instead of container status.
containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
if err != nil {
return err
}
// Since v1.32, stdout may be nil if the stream is not requested.
if stdout != nil {
// Do a zero-byte write to stdout before handing off to the container runtime.
// This ensures at least one Write call is made to the writer when copying starts,
// even if we then block waiting for log output from the container.
if _, err := stdout.Write([]byte{}); err != nil {
return err
}
}
return kl.containerRuntime.GetContainerLogs(ctx, pod, containerID, logOptions, stdout, stderr)
}
// getPhase returns the phase of a pod given its container info.
func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal, podHasInitialized bool) v1.PodPhase {
spec := pod.Spec
pendingRestartableInitContainers := 0
pendingRegularInitContainers := 0
failedInitialization := 0
failedInitializationNotRestartable := 0
// regular init containers
for _, container := range spec.InitContainers {
if podutil.IsRestartableInitContainer(&container) {
// Skip the restartable init containers here to handle them separately as
// they are slightly different from the init containers in terms of the
// pod phase.
continue
}
containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
if !ok {
pendingRegularInitContainers++
continue
}
switch {
case containerStatus.State.Running != nil:
pendingRegularInitContainers++
case containerStatus.State.Terminated != nil:
exitCode := containerStatus.State.Terminated.ExitCode
if exitCode != 0 {
failedInitialization++
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
if !podutil.ContainerShouldRestart(container, pod.Spec, exitCode) {
failedInitializationNotRestartable++
}
}
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
exitCode := containerStatus.LastTerminationState.Terminated.ExitCode
if exitCode != 0 {
failedInitialization++
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
if !podutil.ContainerShouldRestart(container, pod.Spec, exitCode) {
failedInitializationNotRestartable++
}
}
}
} else {
pendingRegularInitContainers++
}
default:
pendingRegularInitContainers++
}
}
// counters for restartable init and regular containers
unknown := 0
running := 0
waiting := 0
stopped := 0
stoppedNotRestartable := 0
succeeded := 0
// sidecar init containers
for _, container := range spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
// Skip the regular init containers, as they have been handled above.
continue
}
containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
}
switch {
case containerStatus.State.Running != nil:
if containerStatus.Started == nil || !*containerStatus.Started {
pendingRestartableInitContainers++
}
running++
case containerStatus.State.Terminated != nil:
// Do nothing here, as terminated restartable init containers are not
// taken into account for the pod phase.
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
// Do nothing here, as terminated restartable init containers are not
// taken into account for the pod phase.
} else {
pendingRestartableInitContainers++
waiting++
}
default:
pendingRestartableInitContainers++
unknown++
}
}
for _, container := range spec.Containers {
containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
}
switch {
case containerStatus.State.Running != nil:
running++
case containerStatus.State.Terminated != nil:
stopped++
exitCode := containerStatus.State.Terminated.ExitCode
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
if !podutil.ContainerShouldRestart(container, pod.Spec, exitCode) {
stoppedNotRestartable++
}
}
if exitCode == 0 {
succeeded++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
stopped++
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
exitCode := containerStatus.LastTerminationState.Terminated.ExitCode
if !podutil.ContainerShouldRestart(container, pod.Spec, exitCode) {
stoppedNotRestartable++
}
}
} else {
waiting++
}
default:
unknown++
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
if failedInitializationNotRestartable > 0 {
return v1.PodFailed
}
} else {
if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
return v1.PodFailed
}
}
switch {
case pendingRegularInitContainers > 0 ||
(pendingRestartableInitContainers > 0 &&
// This is needed to handle the case where the pod has been initialized but
// the restartable init containers are restarting and the pod should not be
// placed back into v1.PodPending since the regular containers have run.
!podHasInitialized):
fallthrough
case waiting > 0:
klog.V(5).InfoS("Pod waiting > 0, pending")
// One or more containers has not been started
return v1.PodPending
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return v1.PodRunning
case running == 0 && stopped > 0 && unknown == 0:
// The pod is terminal so its containers won't be restarted regardless
// of the restart policy.
if podIsTerminal {
// TODO(#116484): Also assign terminal phase to static pods.
if !kubetypes.IsStaticPod(pod) {
// All regular containers are terminated in success and all restartable
// init containers are stopped.
if stopped == succeeded {
return v1.PodSucceeded
}
// There is at least one failure
return v1.PodFailed
}
}
// All containers are terminated
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
if stopped != stoppedNotRestartable {
// At least one container is in the process of restarting
return v1.PodRunning
}
if stopped == succeeded {
return v1.PodSucceeded
}
return v1.PodFailed
}
if spec.RestartPolicy == v1.RestartPolicyAlways {
// All containers are in the process of restarting
return v1.PodRunning
}
if stopped == succeeded {
// RestartPolicy is not Always, all containers are terminated in success
// and all restartable init containers are stopped.
return v1.PodSucceeded
}
if spec.RestartPolicy == v1.RestartPolicyNever {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return v1.PodFailed
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return v1.PodRunning
default:
klog.V(5).InfoS("Pod default case, pending")
return v1.PodPending
}
}
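// Illustrative sketch of the terminal arithmetic above (hypothetical statuses;
// ContainerRestartRules disabled; running == 0, unknown == 0, pod not terminal):
//
//	RestartPolicy=Never, containers exited 0 and 1 -> stopped=2, succeeded=1,
//	  stopped != succeeded                         -> v1.PodFailed
//	RestartPolicy=Always, all containers exited    -> v1.PodRunning (restarting)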
func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podIsTerminal bool) []*v1.PodCondition {
// If pod is terminal, clear the resize status.
if podIsTerminal {
kl.statusManager.ClearPodResizeInProgressCondition(allocatedPod.UID)
kl.statusManager.ClearPodResizePendingCondition(allocatedPod.UID)
return nil
}
resizeStatus := kl.statusManager.GetPodResizeConditions(allocatedPod.UID)
return resizeStatus
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status. This method should only be called from within sync*Pod methods.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodStatus {
klog.V(3).InfoS("Generating pod status", "podIsTerminal", podIsTerminal, "pod", klog.KObj(pod))
// use the previous pod status, or the api status, as the basis for this pod
oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
if !found {
oldPodStatus = pod.Status
}
s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
// calculate the next phase and preserve reason
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = getPhase(pod, allStatus, podIsTerminal, kubecontainer.HasAnyActiveRegularContainerStarted(&pod.Spec, podStatus))
klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)
// Perform a three-way merge between the statuses from the status manager,
// runtime, and generated status to ensure terminal status is correctly set.
if s.Phase != v1.PodFailed && s.Phase != v1.PodSucceeded {
switch {
case oldPodStatus.Phase == v1.PodFailed || oldPodStatus.Phase == v1.PodSucceeded:
klog.V(4).InfoS("Status manager phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", oldPodStatus.Phase)
s.Phase = oldPodStatus.Phase
case pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded:
klog.V(4).InfoS("API phase was terminal, updating phase to match", "pod", klog.KObj(pod), "phase", pod.Status.Phase)
s.Phase = pod.Status.Phase
}
}
if s.Phase == oldPodStatus.Phase {
// preserve the reason and message which is associated with the phase
s.Reason = oldPodStatus.Reason
s.Message = oldPodStatus.Message
if len(s.Reason) == 0 {
s.Reason = pod.Status.Reason
}
if len(s.Message) == 0 {
s.Message = pod.Status.Message
}
}
// check if an internal module has requested the pod is evicted and override the reason and message
for _, podSyncHandler := range kl.PodSyncHandlers {
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
s.Phase = v1.PodFailed
s.Reason = result.Reason
s.Message = result.Message
break
}
}
// pods are not allowed to transition out of terminal phases
if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
// API server shows terminal phase; transitions are not allowed
if s.Phase != pod.Status.Phase {
klog.ErrorS(nil, "Pod attempted illegal phase transition", "pod", klog.KObj(pod), "originalStatusPhase", pod.Status.Phase, "apiStatusPhase", s.Phase, "apiStatus", s)
// Force back to phase from the API server
s.Phase = pod.Status.Phase
}
}
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
// ensure the probe managers have up to date status for containers
kl.probeManager.UpdatePodStatus(context.TODO(), pod, s)
// update the allocated resources status
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceHealthStatus) {
kl.containerManager.UpdateAllocatedResourcesStatus(pod, s)
}
// preserve all conditions not owned by the kubelet
s.Conditions = make([]v1.PodCondition, 0, len(pod.Status.Conditions)+1)
for _, c := range pod.Status.Conditions {
if !kubetypes.PodConditionByKubelet(c.Type) {
s.Conditions = append(s.Conditions, c)
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
resizeStatus := kl.determinePodResizeStatus(pod, podIsTerminal)
for _, c := range resizeStatus {
// Clear the condition's observed generation if BOTH the feature gate is disabled AND the condition's
// observed generation is not already set. We avoid overwriting the condition's observedGeneration
// in other cases, because the condition may be reflecting an older pod spec.
gen := podutil.CalculatePodConditionObservedGeneration(&oldPodStatus, pod.Generation, c.Type)
if gen == 0 {
c.ObservedGeneration = 0
}
s.Conditions = append(s.Conditions, *c)
}
}
// copy over the pod disruption conditions from state which is already
// updated during the eviction (due to either node resource pressure or
// node graceful shutdown). We do not re-generate the conditions based
// on the container statuses as they are added based on one-time events.
cType := v1.DisruptionTarget
if _, condition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); condition != nil {
s.Conditions = utilpod.ReplaceOrAppendPodCondition(s.Conditions, condition)
}
// set all Kubelet-owned conditions
if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
s.Conditions = append(s.Conditions, status.GeneratePodReadyToStartContainersCondition(pod, &oldPodStatus, podStatus))
}
allContainerStatuses := append(s.InitContainerStatuses, s.ContainerStatuses...)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(pod, &oldPodStatus, allContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(pod, &oldPodStatus, s.Conditions, allContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(pod, &oldPodStatus, allContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, v1.PodCondition{
Type: v1.PodScheduled,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(&oldPodStatus, pod.Generation, v1.PodScheduled),
Status: v1.ConditionTrue,
})
// set HostIP/HostIPs and initialize PodIP/PodIPs for host network pods
if kl.kubeClient != nil {
hostIPs, err := kl.getHostIPsAnyWay()
if err != nil {
klog.V(4).InfoS("Cannot get host IPs", "err", err)
} else {
if s.HostIP != "" {
if utilnet.IPFamilyOfString(s.HostIP) != utilnet.IPFamilyOf(hostIPs[0]) {
kl.recorder.Eventf(pod, v1.EventTypeWarning, "HostIPsIPFamilyMismatch",
"Kubelet detected an IPv%s node IP (%s), but the cloud provider selected an IPv%s node IP (%s); pass an explicit `--node-ip` to kubelet to fix this.",
utilnet.IPFamilyOfString(s.HostIP), s.HostIP, utilnet.IPFamilyOf(hostIPs[0]), hostIPs[0].String())
}
}
s.HostIP = hostIPs[0].String()
s.HostIPs = []v1.HostIP{{IP: s.HostIP}}
if len(hostIPs) == 2 {
s.HostIPs = append(s.HostIPs, v1.HostIP{IP: hostIPs[1].String()})
}
// HostNetwork Pods inherit the node IPs as PodIPs. They are immutable once set,
// except that if the node becomes dual-stack, we add the secondary IP.
if kubecontainer.IsHostNetworkPod(pod) {
// Primary IP is not set
if s.PodIP == "" {
s.PodIP = hostIPs[0].String()
s.PodIPs = []v1.PodIP{{IP: s.PodIP}}
}
// Secondary IP is not set #105320
if len(hostIPs) == 2 && len(s.PodIPs) == 1 {
if utilnet.IPFamilyOfString(s.PodIPs[0].IP) != utilnet.IPFamilyOf(hostIPs[1]) {
s.PodIPs = append(s.PodIPs, v1.PodIP{IP: hostIPs[1].String()})
}
}
}
}
}
return *s
}
// sortPodIPs returns the PodIPs sorted and truncated by the cluster IP family preference.
// The runtime pod status may have an arbitrary number of IPs, in an arbitrary order.
// PodIPs are obtained by: func (m *kubeGenericRuntimeManager) determinePodSandboxIPs()
// Pick out the first returned IP of the same IP family as the node IP
// first, followed by the first IP of the opposite IP family (if any)
// and use them for the Pod.Status.PodIPs and the Downward API environment variables
func (kl *Kubelet) sortPodIPs(podIPs []string) []string {
ips := make([]string, 0, 2)
var validPrimaryIP, validSecondaryIP func(ip net.IP) bool
if len(kl.nodeIPs) == 0 || utilnet.IsIPv4(kl.nodeIPs[0]) {
validPrimaryIP = utilnet.IsIPv4
validSecondaryIP = utilnet.IsIPv6
} else {
validPrimaryIP = utilnet.IsIPv6
validSecondaryIP = utilnet.IsIPv4
}
// We parse and re-stringify the IPs in case the values from CRI use an irregular format.
for _, ipStr := range podIPs {
ip := utilnet.ParseIPSloppy(ipStr)
if validPrimaryIP(ip) {
ips = append(ips, ip.String())
break
}
}
for _, ipStr := range podIPs {
ip := utilnet.ParseIPSloppy(ipStr)
if validSecondaryIP(ip) {
ips = append(ips, ip.String())
break
}
}
return ips
}
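// Illustrative sketch (hypothetical IPs, node IP is IPv4): the CRI may report
// pod IPs in any order and count; the function above keeps at most one IP per
// family, primary family first:
//
//	kl.sortPodIPs([]string{"fd00::3", "10.1.2.3", "10.9.9.9"})
//	// -> []string{"10.1.2.3", "fd00::3"} ("10.9.9.9" is dropped)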
// convertStatusToAPIStatus initializes an API PodStatus for the given pod from
// the given internal pod status and the previous state of the pod from the API.
// It is purely transformative and does not alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
// copy pod status IPs to avoid race conditions with PodStatus #102806
podIPs := make([]string, len(podStatus.IPs))
copy(podIPs, podStatus.IPs)
// make podIPs order match node IP family preference #97979
podIPs = kl.sortPodIPs(podIPs)
for _, ip := range podIPs {
apiPodStatus.PodIPs = append(apiPodStatus.PodIPs, v1.PodIP{IP: ip})
}
if len(apiPodStatus.PodIPs) > 0 {
apiPodStatus.PodIP = apiPodStatus.PodIPs[0].IP
}
// set status for Pods created on versions of kube older than 1.6
apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
oldPodStatus.ContainerStatuses,
pod.Spec.Containers,
len(pod.Spec.InitContainers) > 0,
false,
)
apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
oldPodStatus.InitContainerStatuses,
pod.Spec.InitContainers,
len(pod.Spec.InitContainers) > 0,
true,
)
var ecSpecs []v1.Container
for i := range pod.Spec.EphemeralContainers {
ecSpecs = append(ecSpecs, v1.Container(pod.Spec.EphemeralContainers[i].EphemeralContainerCommon))
}
// #80875: By now we've iterated podStatus 3 times. We could refactor this to make a single
// pass through podStatus.ContainerStatuses
apiPodStatus.EphemeralContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
oldPodStatus.EphemeralContainerStatuses,
ecSpecs,
len(pod.Spec.InitContainers) > 0,
false,
)
return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
// Use klog.TODO() because we currently do not have a proper logger to pass in.
// This should be replaced with an appropriate logger when refactoring this function to accept a logger parameter.
logger := klog.TODO()
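// Conversion proceeds in phases (informal sketch of the flow below):
//  1. seed every container with a default Waiting status, or carry over a
//     terminated status from the previous API status;
//  2. synthesize a terminated LastTerminationState for containers that were
//     previously running but have since vanished from the runtime;
//  3. overlay the statuses actually observed in podStatus, newest first;
//  4. mark containers that failed to start as Waiting, with the cached reason;
//  5. sort the result deterministically.
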
convertContainerStatus := func(cs *kubecontainer.Status, oldStatus *v1.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
// Converting the digested image ref to the Kubernetes public
// ContainerStatus.ImageID is historically intentional and should
// not change.
ImageID: cs.ImageRef,
ContainerID: cid,
}
if oldStatus != nil {
status.VolumeMounts = oldStatus.VolumeMounts // immutable
}
switch {
case cs.State == kubecontainer.ContainerStateRunning:
status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
case cs.State == kubecontainer.ContainerStateCreated:
// containers that are created but not running are "waiting to be running"
status.State.Waiting = &v1.ContainerStateWaiting{}
case cs.State == kubecontainer.ContainerStateExited:
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
StartedAt: metav1.NewTime(cs.StartedAt),
FinishedAt: metav1.NewTime(cs.FinishedAt),
ContainerID: cid,
}
case cs.State == kubecontainer.ContainerStateUnknown &&
oldStatus != nil && // we have an old status
oldStatus.State.Running != nil: // our previous status was running
// if this happens, then we know that this container was previously running and isn't anymore (assuming the CRI isn't failing to return running containers).
// you can imagine this happening in cases where a container failed and the kubelet didn't ask about it in time to see the result.
// in this case, the container should not go into the waiting state immediately, because that can make cases like run-once pods actually run
// twice. "container never ran" is different from "container ran and failed". This is handled differently in the kubelet
// and it is handled differently in higher-order logic like crashloop detection and handling.
status.State.Terminated = &v1.ContainerStateTerminated{
Reason: kubecontainer.ContainerReasonStatusUnknown,
Message: "The container could not be located when the pod was terminated",
ExitCode: 137, // this code indicates an error
}
// the restart count normally comes from the CRI (see near the top of this method), but since this is being added explicitly
// for the case where the CRI did not return a status, we need to manually increment the restart count to be accurate.
status.RestartCount = oldStatus.RestartCount + 1
default:
// this collapses any unknown state to container waiting. If any container is waiting, then the pod status moves to pending even if it is running.
// if I'm reading this correctly, then any failure to read status on any container results in the entire pod going pending even if the containers
// are actually running.
// see https://github.com/kubernetes/kubernetes/blob/5d1b3e26af73dde33ecb6a3e69fb5876ceab192f/pkg/kubelet/kuberuntime/kuberuntime_container.go#L497 to
// https://github.com/kubernetes/kubernetes/blob/8976e3620f8963e72084971d9d4decbd026bf49f/pkg/kubelet/kuberuntime/helpers.go#L58-L71
// and interpreted here https://github.com/kubernetes/kubernetes/blob/b27e78f590a0d43e4a23ca3b2bf1739ca4c6e109/pkg/kubelet/kubelet_pods.go#L1434-L1439
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
convertContainerStatusResources := func(allocatedContainer *v1.Container, status *v1.ContainerStatus, cStatus *kubecontainer.Status, oldStatuses map[string]v1.ContainerStatus) *v1.ResourceRequirements {
cName := allocatedContainer.Name
// oldStatus should always exist if container is running
oldStatus, oldStatusFound := oldStatuses[cName]
// If the new status is missing resources, then if the container is running and previous
// status was also running, preserve the resources previously reported.
preserveOldResourcesValue := func(rName v1.ResourceName, oldStatusResource, resource v1.ResourceList) {
if cStatus.State == kubecontainer.ContainerStateRunning &&
oldStatusFound && oldStatus.State.Running != nil &&
status.ContainerID == oldStatus.ContainerID &&
oldStatusResource != nil {
if r, exists := oldStatusResource[rName]; exists {
resource[rName] = r.DeepCopy()
}
}
}
if cStatus.State != kubecontainer.ContainerStateRunning {
// If the container isn't running, just use the allocated resources.
return allocatedContainer.Resources.DeepCopy()
}
if oldStatus.Resources == nil {
oldStatus.Resources = &v1.ResourceRequirements{}
}
// Status resources default to the allocated resources.
// For non-running containers this will be the reported values.
// For non-resizable resources, these values will also be used.
resources := allocatedContainer.Resources.DeepCopy()
if resources.Limits != nil {
if cStatus.Resources != nil && cStatus.Resources.CPULimit != nil {
// If both the allocated & actual resources are at or below the minimum effective limit, preserve the
// allocated value in the API to avoid confusion and simplify comparisons.
if cStatus.Resources.CPULimit.MilliValue() > cm.MinMilliCPULimit ||
resources.Limits.Cpu().MilliValue() > cm.MinMilliCPULimit {
resources.Limits[v1.ResourceCPU] = cStatus.Resources.CPULimit.DeepCopy()
}
} else {
preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Limits, resources.Limits)
}
if cStatus.Resources != nil && cStatus.Resources.MemoryLimit != nil {
resources.Limits[v1.ResourceMemory] = cStatus.Resources.MemoryLimit.DeepCopy()
} else {
preserveOldResourcesValue(v1.ResourceMemory, oldStatus.Resources.Limits, resources.Limits)
}
}
if resources.Requests != nil {
if cStatus.Resources != nil && cStatus.Resources.CPURequest != nil {
// If both the allocated & actual resources are at or below MinShares, preserve the
// allocated value in the API to avoid confusion and simplify comparisons.
if cStatus.Resources.CPURequest.MilliValue() > cm.MinShares ||
resources.Requests.Cpu().MilliValue() > cm.MinShares {
resources.Requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
}
} else {
preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Requests, resources.Requests)
}
// TODO(tallclair,vinaykul,InPlacePodVerticalScaling): Investigate defaulting to actuated resources instead of allocated resources above
if _, exists := resources.Requests[v1.ResourceMemory]; exists {
// Get memory requests from actuated resources
if actuatedResources, found := kl.allocationManager.GetActuatedResources(pod.UID, allocatedContainer.Name); found {
resources.Requests[v1.ResourceMemory] = *actuatedResources.Requests.Memory()
}
}
}
return resources
}
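// Worked example (a sketch; assumes cm.MinMilliCPULimit is the small cgroup
// floor, 10m at the time of writing): with an allocated CPU limit of 5m and a
// runtime-reported limit of 10m, both values sit at or below the floor, so the
// allocated 5m is kept in the API. If either value exceeded the floor, the
// runtime-reported limit would be published instead.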
convertContainerStatusUser := func(cStatus *kubecontainer.Status) *v1.ContainerUser {
if cStatus.User == nil {
return nil
}
user := &v1.ContainerUser{}
if cStatus.User.Linux != nil {
user.Linux = &v1.LinuxContainerUser{
UID: cStatus.User.Linux.UID,
GID: cStatus.User.Linux.GID,
SupplementalGroups: cStatus.User.Linux.SupplementalGroups,
}
}
return user
}
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: ContainerCreating}}
if hasInitContainers {
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: PodInitializing}}
}
supportsRRO := kl.runtimeClassSupportsRecursiveReadOnlyMounts(pod)
for _, container := range containers {
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
// status.VolumeMounts cannot be propagated from kubecontainer.Status
// because the CRI API is unaware of the volume names.
if utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) {
for _, vol := range container.VolumeMounts {
volStatus := v1.VolumeMountStatus{
Name: vol.Name,
MountPath: vol.MountPath,
ReadOnly: vol.ReadOnly,
}
if vol.ReadOnly {
rroMode := v1.RecursiveReadOnlyDisabled
if b, err := resolveRecursiveReadOnly(vol, supportsRRO); err != nil {
klog.ErrorS(err, "failed to resolve recursive read-only mode", "mode", *vol.RecursiveReadOnly)
} else if b {
if utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) {
rroMode = v1.RecursiveReadOnlyEnabled
} else {
klog.ErrorS(nil, "recursive read-only mount needs feature gate to be enabled",
"featureGate", features.RecursiveReadOnlyMounts)
}
}
volStatus.RecursiveReadOnly = &rroMode // Disabled or Enabled
}
status.VolumeMounts = append(status.VolumeMounts, volStatus)
}
}
oldStatus, found := oldStatuses[container.Name]
if found {
if oldStatus.State.Terminated != nil {
status = &oldStatus
} else {
// Apply some values from the old statuses as the default values.
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
}
statuses[container.Name] = status
}
for _, container := range containers {
found := false
for _, cStatus := range podStatus.ContainerStatuses {
if container.Name == cStatus.Name {
found = true
break
}
}
if found {
continue
}
// if no container is found, then assuming it should be waiting seems plausible, but the status code requires
// that a previous termination be present. If we're offline long enough or something removed the container, then
// the previous termination may not be present. This next code block ensures that if the container was previously running
// then, when that container status disappears, we can infer that it terminated even if we don't know the status code.
// By setting the last termination state we are able to leave the container status as waiting and present more accurate
// data via the API.
oldStatus, ok := oldStatuses[container.Name]
if !ok {
continue
}
if oldStatus.State.Terminated != nil {
// if the old container status was terminated, the lasttermination status is correct
continue
}
if oldStatus.State.Running == nil {
// if the old container status isn't running, then waiting is an appropriate status and we have nothing to do
continue
}
// If we're here, we know the pod was previously running, but doesn't have a terminated status. We will check now to
// see if it's in a pending state.
status := statuses[container.Name]
// If the status we're about to write is the default, the Waiting status would force this pod back into Pending.
// That isn't accurate; we know the pod was previously running.
isDefaultWaitingStatus := status.State.Waiting != nil && status.State.Waiting.Reason == ContainerCreating
if hasInitContainers {
isDefaultWaitingStatus = status.State.Waiting != nil && status.State.Waiting.Reason == PodInitializing
}
if !isDefaultWaitingStatus {
// the status was written, don't override
continue
}
if status.LastTerminationState.Terminated != nil {
// if we already have a termination state, nothing to do
continue
}
// setting this value ensures that we show as stopped here, not as waiting:
// https://github.com/kubernetes/kubernetes/blob/90c9f7b3e198e82a756a68ffeac978a00d606e55/pkg/kubelet/kubelet_pods.go#L1440-L1445
// This prevents the pod from becoming pending
status.LastTerminationState.Terminated = &v1.ContainerStateTerminated{
Reason: kubecontainer.ContainerReasonStatusUnknown,
Message: "The container could not be located when the pod was deleted. The container used to be Running",
ExitCode: 137,
}
// If the pod was not deleted, then it's been restarted. Increment restart count.
if pod.DeletionTimestamp == nil {
status.RestartCount += 1
}
statuses[container.Name] = status
}
// Copy the slice before sorting it
containerStatusesCopy := make([]*kubecontainer.Status, len(podStatus.ContainerStatuses))
copy(containerStatusesCopy, podStatus.ContainerStatuses)
// Make the latest container status come first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(containerStatusesCopy)))
// Set container statuses according to the statuses seen in pod status
containerSeen := map[string]int{}
for _, cStatus := range containerStatusesCopy {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerSeen[cName] >= 2 {
continue
}
var oldStatusPtr *v1.ContainerStatus
if oldStatus, ok := oldStatuses[cName]; ok {
oldStatusPtr = &oldStatus
}
status := convertContainerStatus(cStatus, oldStatusPtr)
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
allocatedContainer := kubecontainer.GetContainerSpec(pod, cName)
if allocatedContainer != nil {
status.Resources = convertContainerStatusResources(allocatedContainer, status, cStatus, oldStatuses)
status.AllocatedResources = allocatedContainer.Resources.Requests
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
status.User = convertContainerStatusUser(cStatus)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerStopSignals) {
status.StopSignal = cStatus.StopSignal
}
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
// Handle the containers that failed to start, which should be in the Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
// If a container should be restarted in the next syncpod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(logger, &container, pod, podStatus) {
continue
}
status := statuses[container.Name]
reason, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Err.Error(),
Message: reason.Message,
},
}
statuses[container.Name] = status
}
// Sort the container statuses, since clients of this interface expect the list
// of containers in a pod to have a deterministic order.
if isInitContainer {
return kubetypes.SortStatusesOfInitContainers(pod, statuses)
}
containerStatuses := make([]v1.ContainerStatus, 0, len(statuses))
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
return containerStatuses
}
// ServeLogs returns logs of the current machine.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
// TODO: allowlist logs we are willing to serve
kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(ctx, false)
if err != nil {
return nil, err
}
// Resolve and type convert back again.
// We need the static pod UID but the kubecontainer API works with types.UID.
podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
return pod.FindContainerByName(containerName), nil
}
// RunInContainer runs a command in a container and returns the combined stdout and stderr as a byte array.
func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
// TODO(tallclair): Pass a proper timeout value.
return kl.runner.RunInContainer(ctx, container.ID, cmd, 0)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return kl.streamingRuntime.GetExec(ctx, container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
// The TTY setting for attach must match the TTY setting in the initial container configuration,
// since whether the process is running in a TTY cannot be changed after it has started. We
// need the api.Pod to get the TTY status.
pod, found := kl.GetPodByFullName(podFullName)
if !found || (string(podUID) != "" && pod.UID != podUID) {
return nil, fmt.Errorf("pod %s not found", podFullName)
}
containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
if containerSpec == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
tty := containerSpec.TTY
return kl.streamingRuntime.GetAttach(ctx, container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
pods, err := kl.containerRuntime.GetPods(ctx, false)
if err != nil {
return nil, err
}
// Resolve and type convert back again.
// We need the static pod UID but the kubecontainer API works with types.UID.
podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return nil, fmt.Errorf("pod not found (%q)", podFullName)
}
return kl.streamingRuntime.GetPortForward(ctx, podName, podNamespace, podUID, portForwardOpts.Ports)
}
// cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
// It reconciles the cached state of cgroupPods with the specified list of runningPods.
func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupPods map[types.UID]cm.CgroupName, possiblyRunningPods map[types.UID]sets.Empty) {
// Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods {
// if the pod is in the running set, it's not a candidate for cleanup
if _, ok := possiblyRunningPods[uid]; ok {
continue
}
// If volumes have not been unmounted/detached, do not delete the cgroup
// so any memory-backed volumes don't have their charges propagated to the
// parent cgroup. If the volumes still exist, reduce the cpu shares for any
// process in the cgroup to the minimum value while we wait.
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
klog.V(3).InfoS("Orphaned pod found, but volumes not yet removed. Reducing cpu to minimum", "podUID", uid)
if err := pcm.ReduceCPULimits(val); err != nil {
klog.InfoS("Failed to reduce cpu time for pod pending volume cleanup", "podUID", uid, "err", err)
}
continue
}
klog.V(3).InfoS("Orphaned pod found, removing pod cgroups", "podUID", uid)
// Destroy all cgroups of pods that should not be running,
// by first killing all processes attached to these cgroups.
// We ignore errors from the method, as the housekeeping loop will
// try again to delete these unwanted pod cgroups.
go pcm.Destroy(val)
}
}
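// The per-pod decision above, in sketch form:
//
//	pod possibly running      -> leave the cgroup untouched
//	pod volumes still present -> keep the cgroup, throttle its CPU, retry later
//	otherwise                 -> destroy the cgroup asynchronously
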
func (kl *Kubelet) runtimeClassSupportsRecursiveReadOnlyMounts(pod *v1.Pod) bool {
if kl.runtimeClassManager == nil {
return false
}
runtimeHandlerName, err := kl.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
klog.ErrorS(err, "failed to look up the runtime handler", "runtimeClassName", pod.Spec.RuntimeClassName)
return false
}
runtimeHandlers := kl.runtimeState.runtimeHandlers()
return runtimeHandlerSupportsRecursiveReadOnlyMounts(runtimeHandlerName, runtimeHandlers)
}
// runtimeHandlerSupportsRecursiveReadOnlyMounts checks whether the runtime handler supports recursive read-only mounts.
// The kubelet feature gate is not checked here.
func runtimeHandlerSupportsRecursiveReadOnlyMounts(runtimeHandlerName string, runtimeHandlers []kubecontainer.RuntimeHandler) bool {
if len(runtimeHandlers) == 0 {
// The runtime does not support returning the handler list.
// No need to print a warning here.
return false
}
for _, h := range runtimeHandlers {
if h.Name == runtimeHandlerName {
return h.SupportsRecursiveReadOnlyMounts
}
}
klog.ErrorS(nil, "Unknown runtime handler", "runtimeHandlerName", runtimeHandlerName)
return false
}
// resolveRecursiveReadOnly resolves the recursive read-only mount mode.
func resolveRecursiveReadOnly(m v1.VolumeMount, runtimeSupportsRRO bool) (bool, error) {
if m.RecursiveReadOnly == nil || *m.RecursiveReadOnly == v1.RecursiveReadOnlyDisabled {
return false, nil
}
if !m.ReadOnly {
return false, fmt.Errorf("volume %q requested recursive read-only mode, but it is not read-only", m.Name)
}
if m.MountPropagation != nil && *m.MountPropagation != v1.MountPropagationNone {
return false, fmt.Errorf("volume %q requested recursive read-only mode, but it is not compatible with propagation %q",
m.Name, *m.MountPropagation)
}
switch rroMode := *m.RecursiveReadOnly; rroMode {
case v1.RecursiveReadOnlyIfPossible:
return runtimeSupportsRRO, nil
case v1.RecursiveReadOnlyEnabled:
if !runtimeSupportsRRO {
return false, fmt.Errorf("volume %q requested recursive read-only mode, but it is not supported by the runtime", m.Name)
}
return true, nil
default:
return false, fmt.Errorf("unknown recursive read-only mode %q", rroMode)
}
}
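// Resolution summary (informational): for a mount that is ReadOnly with
// propagation None (or unset), the requested RecursiveReadOnly mode resolves as:
//
//	nil or Disabled -> false
//	IfPossible      -> true only if the runtime supports it
//	Enabled         -> true, or an error if the runtime lacks support
//
// A writable mount, or a non-None propagation, combined with a requested RRO
// mode is an error.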
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
goruntime "runtime"
"sort"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/metrics/testutil"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubelet/pkg/cri/streaming/portforward"
"k8s.io/kubelet/pkg/cri/streaming/remotecommand"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/secret"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/volume"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
)
var containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
func TestNodeHostsFileContent(t *testing.T) {
testCases := []struct {
hostsFileName string
hostAliases []v1.HostAlias
rawHostsFileContent string
expectedHostsFileContent string
}{
{
hostsFileName: "hosts_test_file1",
hostAliases: []v1.HostAlias{},
rawHostsFileContent: `# hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
123.45.67.89 some.domain
`,
expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
# hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
123.45.67.89 some.domain
`,
},
{
hostsFileName: "hosts_test_file2",
hostAliases: []v1.HostAlias{},
rawHostsFileContent: `# another hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
12.34.56.78 another.domain
`,
expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
# another hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
12.34.56.78 another.domain
`,
},
{
hostsFileName: "hosts_test_file1_with_host_aliases",
hostAliases: []v1.HostAlias{
{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
},
rawHostsFileContent: `# hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
123.45.67.89 some.domain
`,
expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
# hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
123.45.67.89 some.domain
# Entries added by HostAliases.
123.45.67.89 foo bar baz
`,
},
{
hostsFileName: "hosts_test_file2_with_host_aliases",
hostAliases: []v1.HostAlias{
{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
{IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
},
rawHostsFileContent: `# another hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
12.34.56.78 another.domain
`,
expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
# another hosts file for testing.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
12.34.56.78 another.domain
# Entries added by HostAliases.
123.45.67.89 foo bar baz
456.78.90.123 park doo boo
`,
},
}
for _, testCase := range testCases {
t.Run(testCase.hostsFileName, func(t *testing.T) {
tmpdir, err := writeHostsFile(testCase.hostsFileName, testCase.rawHostsFileContent)
require.NoError(t, err, "could not create a temp hosts file")
defer os.RemoveAll(tmpdir)
actualContent, fileReadErr := nodeHostsFileContent(filepath.Join(tmpdir, testCase.hostsFileName), testCase.hostAliases)
require.NoError(t, fileReadErr, "could not read hosts file")
assert.Equal(t, testCase.expectedHostsFileContent, string(actualContent), "hosts file content not expected")
})
}
}
// writeHostsFile will write a hosts file into a temporary dir, and return that dir.
// Caller is responsible for deleting the dir and its contents.
func writeHostsFile(filename string, cfg string) (string, error) {
tmpdir, err := os.MkdirTemp("", "kubelet=kubelet_pods_test.go=")
if err != nil {
return "", err
}
return tmpdir, os.WriteFile(filepath.Join(tmpdir, filename), []byte(cfg), 0644)
}
func TestManagedHostsFileContent(t *testing.T) {
testCases := []struct {
hostIPs []string
hostName string
hostDomainName string
hostAliases []v1.HostAlias
expectedContent string
}{
{
hostIPs: []string{"123.45.67.89"},
hostName: "podFoo",
hostAliases: []v1.HostAlias{},
expectedContent: `# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
123.45.67.89 podFoo
`,
},
{
hostIPs: []string{"203.0.113.1"},
hostName: "podFoo",
hostDomainName: "domainFoo",
hostAliases: []v1.HostAlias{},
expectedContent: `# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
203.0.113.1 podFoo.domainFoo podFoo
`,
},
{
hostIPs: []string{"203.0.113.1"},
hostName: "podFoo",
hostDomainName: "domainFoo",
hostAliases: []v1.HostAlias{
{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
},
expectedContent: `# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
203.0.113.1 podFoo.domainFoo podFoo
# Entries added by HostAliases.
123.45.67.89 foo bar baz
`,
},
{
hostIPs: []string{"203.0.113.1"},
hostName: "podFoo",
hostDomainName: "domainFoo",
hostAliases: []v1.HostAlias{
{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
{IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
},
expectedContent: `# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
203.0.113.1 podFoo.domainFoo podFoo
# Entries added by HostAliases.
123.45.67.89 foo bar baz
456.78.90.123 park doo boo
`,
},
{
hostIPs: []string{"203.0.113.1", "fd00::6"},
hostName: "podFoo",
hostDomainName: "domainFoo",
hostAliases: []v1.HostAlias{},
expectedContent: `# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
203.0.113.1 podFoo.domainFoo podFoo
fd00::6 podFoo.domainFoo podFoo
`,
},
}
for _, testCase := range testCases {
actualContent := managedHostsFileContent(testCase.hostIPs, testCase.hostName, testCase.hostDomainName, testCase.hostAliases)
assert.Equal(t, testCase.expectedContent, string(actualContent), "hosts file content not expected")
}
}
func TestRunInContainerNoSuchPod(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*containertest.FakePod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerName := "containerFoo"
output, err := kubelet.RunInContainer(
ctx,
kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerName,
[]string{"ls"})
assert.Error(t, err)
assert.Nil(t, output, "output should be nil")
}
func TestRunInContainer(t *testing.T) {
ctx := context.Background()
for _, testError := range []error{nil, errors.New("bar")} {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := containertest.FakeContainerCommandRunner{
Err: testError,
Stdout: "foo",
}
kubelet.runner = &fakeCommandRunner
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "podFoo",
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{Name: "containerFoo",
ID: containerID,
},
},
}},
}
cmd := []string{"ls"}
actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd)
assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
// this isn't 100% foolproof: a bug in a real CommandRunner that fails to copy to stdout/stderr wouldn't be caught by this test
assert.Equal(t, "foo", string(actualOutput), "(testError=%v) output", testError)
assert.Equal(t, err, testError, "(testError=%v) err", testError)
}
}
type testServiceLister struct {
services []*v1.Service
}
func (ls testServiceLister) List(labels.Selector) ([]*v1.Service, error) {
return ls.services, nil
}
type envs []kubecontainer.EnvVar
func (e envs) Len() int {
return len(e)
}
func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
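// envs implements sort.Interface so tests can compare environment variables
// order-insensitively; for example (illustrative), sort.Sort(envs(actual))
// before comparing against a sorted expectation.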
func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Protocol: v1.Protocol(protocol),
Port: int32(port),
}},
ClusterIP: clusterIP,
},
}
}
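// For instance (illustrative only), buildService("kubernetes", "default",
// "1.2.3.1", "TCP", 8081) builds the master Service behind the
// KUBERNETES_SERVICE_HOST=1.2.3.1 / KUBERNETES_SERVICE_PORT=8081 variables
// asserted throughout TestMakeEnvironmentVariables below.
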
func TestMakeEnvironmentVariables(t *testing.T) {
trueVal := true
services := []*v1.Service{
buildService("kubernetes", metav1.NamespaceDefault, "1.2.3.1", "TCP", 8081),
buildService("test", "test1", "1.2.3.3", "TCP", 8083),
buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
buildService("test", "test2", "1.2.3.5", "TCP", 8085),
buildService("test", "test2", "None", "TCP", 8085),
buildService("test", "test2", "", "TCP", 8085),
buildService("not-special", metav1.NamespaceDefault, "1.2.3.8", "TCP", 8088),
buildService("not-special", metav1.NamespaceDefault, "None", "TCP", 8088),
buildService("not-special", metav1.NamespaceDefault, "", "TCP", 8088),
}
trueValue := true
falseValue := false
testCases := []struct {
name string // the name of the test case
ns string // the namespace to generate environment for
enableServiceLinks *bool // enabling service links
container *v1.Container // the container to use
nilLister bool // whether the lister should be nil
staticPod bool // whether the pod should be a static pod (versus an API pod)
unsyncedServices bool // whether the services should NOT be synced
configMap *v1.ConfigMap // an optional ConfigMap to pull from
secret *v1.Secret // an optional Secret to pull from
podIPs []string // the pod IPs
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
expectedError bool // does the test fail
expectedEvent string // does the test emit an event
}{
{
name: "if services aren't synced, non-static pods should fail",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{Env: []v1.EnvVar{}},
nilLister: false,
staticPod: false,
unsyncedServices: true,
expectedEnvs: []kubecontainer.EnvVar{},
expectedError: true,
},
{
name: "if services aren't synced, static pods should succeed", // if there is no service
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{Env: []v1.EnvVar{}},
nilLister: false,
staticPod: true,
unsyncedServices: true,
},
{
name: "api server = Y, kubelet = Y",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "api server = Y, kubelet = N",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
{
name: "api server = N; kubelet = Y",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "BAZ"},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAZ"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "api server = N; kubelet = Y; service env vars",
ns: "test1",
enableServiceLinks: &trueValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "BAZ"},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAZ"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "master service in pod ns",
ns: "test2",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "ZAP"},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "ZAP"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "master service in pod ns, service env vars",
ns: "test2",
enableServiceLinks: &trueValue,
container: &v1.Container{
Env: []v1.EnvVar{
{Name: "FOO", Value: "ZAP"},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "ZAP"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
{Name: "TEST_SERVICE_PORT", Value: "8085"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
},
},
{
name: "pod in master service ns",
ns: metav1.NamespaceDefault,
enableServiceLinks: &falseValue,
container: &v1.Container{},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "pod in master service ns, service env vars",
ns: metav1.NamespaceDefault,
enableServiceLinks: &trueValue,
container: &v1.Container{},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "downward api pod",
ns: "downward-api",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POD_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
{
Name: "POD_SERVICE_ACCOUNT_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.serviceAccountName",
},
},
},
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIPs",
},
},
},
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIP",
},
},
},
{
Name: "HOST_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIPs",
},
},
},
},
},
podIPs: []string{"1.2.3.4", "fd00::6"},
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_NAME", Value: "dapi-test-pod-name"},
{Name: "POD_NAMESPACE", Value: "downward-api"},
{Name: "POD_NODE_NAME", Value: "node-name"},
{Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
{Name: "POD_IP", Value: "1.2.3.4"},
{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
{Name: "HOST_IP", Value: testKubeletHostIP},
{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
},
},
{
name: "downward api pod ips reverse order",
ns: "downward-api",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIPs",
},
},
},
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIP",
},
},
},
{
Name: "HOST_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIPs",
},
},
},
},
},
podIPs: []string{"fd00::6", "1.2.3.4"},
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_IP", Value: "1.2.3.4"},
{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
{Name: "HOST_IP", Value: testKubeletHostIP},
{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
},
},
{
name: "downward api pod ips multiple ips",
ns: "downward-api",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
{
Name: "POD_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIPs",
},
},
},
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIP",
},
},
},
{
Name: "HOST_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIPs",
},
},
},
},
},
podIPs: []string{"1.2.3.4", "192.168.1.1.", "fd00::6"},
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_IP", Value: "1.2.3.4"},
{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
{Name: "HOST_IP", Value: testKubeletHostIP},
{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
},
},
{
name: "env expansion",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", //legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
FieldPath: "metadata.name",
},
},
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-$(EMPTY_VAR)",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-$(POD_NAME)",
},
{
Name: "POD_NAME_TEST3",
Value: "$(POD_NAME_TEST2)-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-$(TEST_LITERAL)",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
Value: "dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST3",
Value: "test2-dapi-test-pod-name-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-test-test-test",
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "env expansion, service env vars",
ns: "test1",
enableServiceLinks: &trueValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-$(EMPTY_VAR)",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-$(POD_NAME)",
},
{
Name: "POD_NAME_TEST3",
Value: "$(POD_NAME_TEST2)-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-$(TEST_LITERAL)",
},
{
Name: "SERVICE_VAR_TEST",
Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
},
},
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
Value: "dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST3",
Value: "test2-dapi-test-pod-name-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "SERVICE_VAR_TEST",
Value: "1.2.3.3:8083",
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "configmapkeyref_missing_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"},
Key: "key",
Optional: &trueVal,
},
},
},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "configmapkeyref_missing_key_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"},
Key: "key",
Optional: &trueVal,
},
},
},
},
},
nilLister: true,
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-configmap",
},
Data: map[string]string{
"a": "b",
},
},
expectedEnvs: nil,
},
{
name: "secretkeyref_missing_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
Key: "key",
Optional: &trueVal,
},
},
},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "secretkeyref_missing_key_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"},
Key: "key",
Optional: &trueVal,
},
},
},
},
},
nilLister: true,
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-secret",
},
Data: map[string][]byte{
"a": []byte("b"),
},
},
expectedEnvs: nil,
},
{
name: "configmap",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
},
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "EXPANSION_TEST",
Value: "$(REPLACE_ME)",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
},
},
nilLister: false,
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-configmap",
},
Data: map[string]string{
"REPLACE_ME": "FROM_CONFIG_MAP",
"DUPE_TEST": "CONFIG_MAP",
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "EXPANSION_TEST",
Value: "FROM_CONFIG_MAP",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
{
Name: "p_REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "p_DUPE_TEST",
Value: "CONFIG_MAP",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "configmap allow prefix to start with a digital",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
{
Prefix: "1_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
},
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "EXPANSION_TEST",
Value: "$(REPLACE_ME)",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
},
},
nilLister: false,
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-configmap",
},
Data: map[string]string{
"REPLACE_ME": "FROM_CONFIG_MAP",
"DUPE_TEST": "CONFIG_MAP",
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "EXPANSION_TEST",
Value: "FROM_CONFIG_MAP",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
{
Name: "1_REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "1_DUPE_TEST",
Value: "CONFIG_MAP",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "configmap, service env vars",
ns: "test1",
enableServiceLinks: &trueValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
},
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "EXPANSION_TEST",
Value: "$(REPLACE_ME)",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
},
},
nilLister: false,
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-configmap",
},
Data: map[string]string{
"REPLACE_ME": "FROM_CONFIG_MAP",
"DUPE_TEST": "CONFIG_MAP",
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "EXPANSION_TEST",
Value: "FROM_CONFIG_MAP",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
{
Name: "p_REPLACE_ME",
Value: "FROM_CONFIG_MAP",
},
{
Name: "p_DUPE_TEST",
Value: "CONFIG_MAP",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "configmap_missing",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}},
},
},
expectedError: true,
},
{
name: "configmap_missing_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{ConfigMapRef: &v1.ConfigMapEnvSource{
Optional: &trueVal,
LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"}}},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "configmap_invalid_keys_valid",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
},
},
},
configMap: &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-configmap",
},
Data: map[string]string{
"1234": "abc",
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "p_1234",
Value: "abc",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "secret",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
{
Prefix: "p_",
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
},
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "EXPANSION_TEST",
Value: "$(REPLACE_ME)",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
},
},
nilLister: false,
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-secret",
},
Data: map[string][]byte{
"REPLACE_ME": []byte("FROM_SECRET"),
"DUPE_TEST": []byte("SECRET"),
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "REPLACE_ME",
Value: "FROM_SECRET",
},
{
Name: "EXPANSION_TEST",
Value: "FROM_SECRET",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
{
Name: "p_REPLACE_ME",
Value: "FROM_SECRET",
},
{
Name: "p_DUPE_TEST",
Value: "SECRET",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "secret, service env vars",
ns: "test1",
enableServiceLinks: &trueValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
{
Prefix: "p_",
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
},
Env: []v1.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "EXPANSION_TEST",
Value: "$(REPLACE_ME)",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
},
},
nilLister: false,
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-secret",
},
Data: map[string][]byte{
"REPLACE_ME": []byte("FROM_SECRET"),
"DUPE_TEST": []byte("SECRET"),
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "REPLACE_ME",
Value: "FROM_SECRET",
},
{
Name: "EXPANSION_TEST",
Value: "FROM_SECRET",
},
{
Name: "DUPE_TEST",
Value: "ENV_VAR",
},
{
Name: "p_REPLACE_ME",
Value: "FROM_SECRET",
},
{
Name: "p_DUPE_TEST",
Value: "SECRET",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "secret_missing",
ns: "test1",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
},
},
expectedError: true,
},
{
name: "secret_missing_optional",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
Optional: &trueVal}},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "secret_invalid_keys_valid",
ns: "test",
enableServiceLinks: &falseValue,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
Prefix: "p_",
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
},
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-secret",
},
Data: map[string][]byte{
"1234.name": []byte("abc"),
},
},
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "p_1234.name",
Value: "abc",
},
{
Name: "KUBERNETES_SERVICE_HOST",
Value: "1.2.3.1",
},
{
Name: "KUBERNETES_SERVICE_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP",
Value: "tcp://1.2.3.1:8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PROTO",
Value: "tcp",
},
{
Name: "KUBERNETES_PORT_8081_TCP_PORT",
Value: "8081",
},
{
Name: "KUBERNETES_PORT_8081_TCP_ADDR",
Value: "1.2.3.1",
},
},
},
{
name: "nil_enableServiceLinks",
ns: "test",
enableServiceLinks: nil,
container: &v1.Container{
EnvFrom: []v1.EnvFromSource{
{
Prefix: "p_",
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
},
},
},
secret: &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test1",
Name: "test-secret",
},
Data: map[string][]byte{
"1234.name": []byte("abc"),
},
},
expectedError: true,
},
}
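// Each case drives makeEnvironmentVariables through a test kubelet wired with
// a fake event recorder, a fake service lister, and fake client reactors for
// configmaps and secrets, then compares the resulting env vars (sorted) or the
// returned error against the expectations above.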
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeRecorder := record.NewFakeRecorder(1)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.kubelet.recorder = fakeRecorder
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
if tc.nilLister {
kl.serviceLister = nil
} else if tc.unsyncedServices {
kl.serviceLister = testServiceLister{}
kl.serviceHasSynced = func() bool { return false }
} else {
kl.serviceLister = testServiceLister{services}
kl.serviceHasSynced = func() bool { return true }
}
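// The reactors below return the per-case fixture unconditionally, so the name
// in the container's EnvFrom reference (e.g. "test-config-map") is never
// matched against the fixture's metadata name (e.g. "test-configmap"); a nil
// fixture simulates a missing object via a NotFound error.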
testKubelet.fakeKubeClient.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
var err error
if tc.configMap == nil {
err = apierrors.NewNotFound(action.GetResource().GroupResource(), "configmap-name")
}
return true, tc.configMap, err
})
testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
var err error
if tc.secret == nil {
err = apierrors.NewNotFound(action.GetResource().GroupResource(), "secret-name")
}
return true, tc.secret, err
})
testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
var err error
if tc.secret == nil {
err = errors.New("no secret defined")
}
return true, tc.secret, err
})
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: tc.ns,
Name: "dapi-test-pod-name",
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
ServiceAccountName: "special",
NodeName: "node-name",
EnableServiceLinks: tc.enableServiceLinks,
},
}
podIP := ""
if len(tc.podIPs) > 0 {
podIP = tc.podIPs[0]
}
if tc.staticPod {
testPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
}
result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, tc.podIPs, kubecontainer.VolumeMap{})
select {
case e := <-fakeRecorder.Events:
assert.Equal(t, tc.expectedEvent, e)
default:
assert.Equal(t, "", tc.expectedEvent)
}
if tc.expectedError {
assert.Error(t, err, tc.name)
} else {
assert.NoError(t, err, "[%s]", tc.name)
sort.Sort(envs(result))
sort.Sort(envs(tc.expectedEnvs))
assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name)
}
})
}
}
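// The helpers below build v1.ContainerStatus fixtures for the pod-phase and
// status-generation tests: waiting, running, started, and terminated states,
// plus variants carrying a last termination state or a restart count.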
func waitingState(cName string) v1.ContainerStatus {
return waitingStateWithReason(cName, "")
}
func waitingStateWithReason(cName, reason string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: reason},
},
}
}
func waitingStateWithLastTermination(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func waitingStateWithNonZeroTermination(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: -1,
},
},
}
}
func runningState(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
}
}
func startedState(cName string) v1.ContainerStatus {
started := true
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
Started: &started,
}
}
func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
},
}
}
func stoppedState(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
}
}
func succeededState(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func failedState(cName string) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: -1,
},
},
}
}
func waitingWithLastTerminationUnknown(cName string, restartCount int32) v1.ContainerStatus {
return v1.ContainerStatus{
Name: cName,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: kubecontainer.ContainerReasonStatusUnknown,
Message: "The container could not be located when the pod was deleted. The container used to be Running",
ExitCode: 137,
},
},
RestartCount: restartCount,
}
}
func ready(status v1.ContainerStatus) v1.ContainerStatus {
status.Ready = true
return status
}
func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
status.ContainerID = id
return status
}
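// TestPodPhaseWithRestartAlways verifies getPhase for pods with
// RestartPolicyAlways: stopped containers still yield PodRunning because they
// will be restarted, and only a terminal pod reports PodSucceeded or PodFailed.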
func TestPodPhaseWithRestartAlways(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
tests := []struct {
pod *v1.Pod
podIsTerminal bool
status v1.PodPhase
test string
}{
{
&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}},
false,
v1.PodPending,
"waiting",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
v1.PodRunning,
"all running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
stoppedState("containerA"),
stoppedState("containerB"),
},
},
},
false,
v1.PodRunning,
"all stopped with restart always",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodSucceeded,
"all succeeded with restart always, but the pod is terminal",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
failedState("containerB"),
},
},
},
true,
v1.PodFailed,
"all stopped with restart always, but the pod is terminal",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
stoppedState("containerB"),
},
},
},
false,
v1.PodRunning,
"mixed state #1 with restart always",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
false,
v1.PodPending,
"mixed state #2 with restart always",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
false,
v1.PodPending,
"mixed state #3 with restart always",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
false,
v1.PodRunning,
"backoff crashloop container with restart always",
},
}
for _, test := range tests {
status := getPhase(test.pod, test.pod.Status.ContainerStatuses, test.podIsTerminal, false)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
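// TestPodPhaseWithRestartAlwaysInitContainers verifies that a pod with
// RestartPolicyAlways stays PodPending until its init container succeeds, even
// if the init container fails (it will be retried).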
func TestPodPhaseWithRestartAlwaysInitContainers(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{Name: "containerX"},
},
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
tests := []struct {
pod *v1.Pod
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
},
},
v1.PodPending,
"init container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
failedState("containerX"),
},
},
},
v1.PodPending,
"init container terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
},
},
v1.PodPending,
"init container waiting, terminated zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithNonZeroTermination("containerX"),
},
},
},
v1.PodPending,
"init container waiting, terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingState("containerX"),
},
},
},
v1.PodPending,
"init container waiting, not terminated",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
v1.PodRunning,
"init container succeeded",
},
}
for _, test := range tests {
statusInfo := test.pod.Status.InitContainerStatuses
statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
status := getPhase(test.pod, statusInfo, false, false)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
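// TestPodPhaseWithRestartAlwaysRestartableInitContainers covers restartable
// (sidecar-style) init containers: the pod leaves PodPending only once it has
// initialized and all regular containers are running, and a terminal pod's
// phase is decided by the regular containers rather than the sidecar.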
func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
},
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
tests := []struct {
pod *v1.Pod
podIsTerminal bool
podHasInitialized bool
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, false, v1.PodPending, "empty, waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
},
},
false,
false,
v1.PodPending,
"restartable init container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
stoppedState("containerX"),
},
},
},
false,
false,
v1.PodPending,
"restartable init container stopped",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
},
},
false,
false,
v1.PodPending,
"restartable init container waiting, terminated zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithNonZeroTermination("containerX"),
},
},
},
false,
false,
v1.PodPending,
"restartable init container waiting, terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingState("containerX"),
},
},
},
false,
false,
v1.PodPending,
"restartable init container waiting, not terminated",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
startedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
false,
true,
v1.PodPending,
"restartable init container started, 1/2 regular container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
startedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
true,
v1.PodRunning,
"restartable init container started, all regular containers running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
true,
v1.PodRunning,
"restartable init container running, all regular containers running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
stoppedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
true,
v1.PodRunning,
"restartable init container stopped, all regular containers running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
true,
v1.PodRunning,
"backoff crashloop restartable init container, all regular containers running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
failedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
true,
v1.PodSucceeded,
"all regular containers succeeded and restartable init container failed with restart always, but the pod is terminal",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
true,
v1.PodSucceeded,
"all regular containers succeeded and restartable init container succeeded with restart always, but the pod is terminal",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
false,
false,
v1.PodPending,
"re-initializing the pod after the sandbox is recreated",
},
}
for _, test := range tests {
statusInfo := test.pod.Status.InitContainerStatuses
statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
status := getPhase(test.pod, statusInfo, test.podIsTerminal, test.podHasInitialized)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
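// TestPodPhaseWithRestartAlwaysAndPodHasRun mixes a regular init container
// with a restartable one and checks that the podHasInitialized flag, not the
// per-container states alone, determines when the pod is reported PodRunning.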
func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{Name: "containerX"},
{Name: "containerY", RestartPolicy: &containerRestartPolicyAlways},
},
Containers: []v1.Container{
{Name: "containerA"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
tests := []struct {
pod *v1.Pod
podHasInitialized bool
status v1.PodPhase
test string
}{
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
runningState("containerY"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
false,
v1.PodPending,
"regular init containers, restartable init container and regular container are all running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
runningState("containerY"),
},
ContainerStatuses: []v1.ContainerStatus{
stoppedState("containerA"),
},
},
},
false,
v1.PodPending,
"regular containers is stopped, restartable init container and regular int container are both running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
runningState("containerY"),
},
ContainerStatuses: []v1.ContainerStatus{
stoppedState("containerA"),
},
},
},
false,
v1.PodPending,
"re-created sandbox: regular init container is succeeded, restartable init container is running, old regular containers is stopped",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
runningState("containerY"),
},
ContainerStatuses: []v1.ContainerStatus{
stoppedState("containerA"),
},
},
},
true,
v1.PodRunning,
"regular init container is succeeded, restartable init container is running, regular containers is stopped",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
runningState("containerY"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
true,
v1.PodRunning,
"regular init container is succeeded, restartable init container and regular containers are both running",
},
}
for _, test := range tests {
statusInfo := test.pod.Status.InitContainerStatuses
statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
status := getPhase(test.pod, statusInfo, false, test.podHasInitialized)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
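// TestPodPhaseWithRestartNever verifies getPhase for pods with
// RestartPolicyNever: once every container has terminated, the pod is
// PodSucceeded or PodFailed depending on the exit codes.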
func TestPodPhaseWithRestartNever(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyNever,
}
tests := []struct {
pod *v1.Pod
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
v1.PodRunning,
"all running with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
v1.PodSucceeded,
"all succeeded with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
v1.PodFailed,
"all failed with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
v1.PodRunning,
"mixed state #1 with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
v1.PodPending,
"mixed state #2 with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
v1.PodPending,
"mixed state #3 with restart never",
},
}
for _, test := range tests {
status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false, false)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
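// TestPodPhaseWithRestartNeverInitContainers verifies that with
// RestartPolicyNever a failed init container makes the whole pod PodFailed,
// since it will not be retried.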
func TestPodPhaseWithRestartNeverInitContainers(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{Name: "containerX"},
},
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyNever,
}
tests := []struct {
pod *v1.Pod
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
},
},
v1.PodPending,
"init container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
failedState("containerX"),
},
},
},
v1.PodFailed,
"init container terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
},
},
v1.PodPending,
"init container waiting, terminated zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithNonZeroTermination("containerX"),
},
},
},
v1.PodFailed,
"init container waiting, terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingState("containerX"),
},
},
},
v1.PodPending,
"init container waiting, not terminated",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
v1.PodRunning,
"init container succeeded",
},
}
for _, test := range tests {
statusInfo := test.pod.Status.InitContainerStatuses
statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
status := getPhase(test.pod, statusInfo, false, false)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
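// TestPodPhaseWithRestartNeverRestartableInitContainers verifies that under
// RestartPolicyNever a pod whose main containers succeeded stays PodRunning
// while the restartable init container is still running, and becomes
// PodSucceeded once the sidecar has stopped, whatever its exit status.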
func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
InitContainers: []v1.Container{
{Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
},
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyNever,
}
tests := []struct {
pod *v1.Pod
podHasInitialized bool
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, v1.PodPending, "empty, waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
},
},
false,
v1.PodPending,
"restartable init container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
stoppedState("containerX"),
},
},
},
false,
v1.PodPending,
"restartable init container stopped",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
},
},
false,
v1.PodPending,
"restartable init container waiting, terminated zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithNonZeroTermination("containerX"),
},
},
},
false,
v1.PodPending,
"restartable init container waiting, terminated non-zero",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingState("containerX"),
},
},
},
false,
v1.PodPending,
"restartable init container waiting, not terminated",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
startedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
true,
v1.PodPending,
"restartable init container started, one main container running",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
startedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodRunning,
"restartable init container started, main containers succeeded",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
runningState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodRunning,
"restartable init container re-running, main containers succeeded",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
succeededState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodSucceeded,
"all containers succeeded",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
failedState("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodSucceeded,
"restartable init container terminated non-zero, main containers succeeded",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithLastTermination("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodSucceeded,
"backoff crashloop restartable init container, main containers succeeded",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
waitingStateWithNonZeroTermination("containerX"),
},
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
true,
v1.PodSucceeded,
"backoff crashloop with non-zero restartable init container, main containers succeeded",
},
}
for _, test := range tests {
statusInfo := test.pod.Status.InitContainerStatuses
statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
status := getPhase(test.pod, statusInfo, false, test.podHasInitialized)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
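// TestPodPhaseWithRestartOnFailure verifies getPhase for pods with
// RestartPolicyOnFailure: failed containers keep the pod PodRunning because
// they will be restarted, while a pod whose containers all succeeded is
// PodSucceeded.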
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyOnFailure,
}
tests := []struct {
pod *v1.Pod
status v1.PodPhase
test string
}{
{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
v1.PodRunning,
"all running with restart onfailure",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
v1.PodSucceeded,
"all succeeded with restart onfailure",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
v1.PodRunning,
"all failed with restart never",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
v1.PodRunning,
"mixed state #1 with restart onfailure",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
},
},
},
v1.PodPending,
"mixed state #2 with restart onfailure",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
v1.PodPending,
"mixed state #3 with restart onfailure",
},
{
&v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
v1.PodRunning,
"backoff crashloop container with restart onfailure",
},
}
for _, test := range tests {
status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false, false)
assert.Equal(t, test.status, status, "[test %s]", test.test)
}
}
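// TestPodPhaseWithContainerRestartPolicy verifies that, with the
// ContainerRestartRules feature gate enabled, a container-level restart policy
// overrides the pod-level policy when computing the pod phase.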
func TestPodPhaseWithContainerRestartPolicy(t *testing.T) {
var (
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
containerRestartPolicyOnFailure = v1.ContainerRestartPolicyOnFailure
containerRestartPolicyNever = v1.ContainerRestartPolicyNever
)
tests := []struct {
name string
spec *v1.PodSpec
statuses []v1.ContainerStatus
podIsTerminal bool
expectedPhase v1.PodPhase
}{
{
name: "container with restart policy Never failed",
spec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyNever,
}},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{failedState("failed-container")},
expectedPhase: v1.PodFailed,
},
{
name: "container with restart policy OnFailure failed",
spec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyOnFailure,
}},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{
failedState("failed-container"),
},
expectedPhase: v1.PodRunning,
},
{
name: "container with restart policy Always failed",
spec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyAlways,
}},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{
failedState("failed-container"),
},
expectedPhase: v1.PodRunning,
},
{
name: "At least one container with restartable container-level restart policy failed",
// Spec to simulate containerB having RestartPolicy: Always
spec: &v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerA",
RestartPolicy: &containerRestartPolicyAlways,
},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyNever,
},
statuses: []v1.ContainerStatus{
succeededState("containerA"),
failedState("containerB"),
},
expectedPhase: v1.PodRunning,
},
{
name: "All containers without restartable container-level restart policy failed",
spec: &v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerA",
RestartPolicy: &containerRestartPolicyNever,
},
{
Name: "containerB",
RestartPolicy: &containerRestartPolicyOnFailure,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{
failedState("containerA"),
succeededState("containerB"),
},
expectedPhase: v1.PodFailed,
},
{
name: "All containers succeeded",
spec: &v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerA",
RestartPolicy: &containerRestartPolicyNever,
},
{
Name: "containerB",
RestartPolicy: &containerRestartPolicyOnFailure,
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
expectedPhase: v1.PodSucceeded,
},
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerRestartRules, true)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
pod := &v1.Pod{
Spec: *tc.spec,
Status: v1.PodStatus{
ContainerStatuses: tc.statuses,
},
}
phase := getPhase(pod, tc.statuses, tc.podIsTerminal, true)
assert.Equal(t, tc.expectedPhase, phase)
})
}
}
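// TestPodPhaseWithContainerRestartPolicyInitContainers does the same for init
// containers: only a failed init container whose effective restart policy is
// Never moves the pod to PodFailed; otherwise the pod stays PodPending while
// the init container is retried.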
func TestPodPhaseWithContainerRestartPolicyInitContainers(t *testing.T) {
var (
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
containerRestartPolicyOnFailure = v1.ContainerRestartPolicyOnFailure
containerRestartPolicyNever = v1.ContainerRestartPolicyNever
)
tests := []struct {
name string
spec *v1.PodSpec
statuses []v1.ContainerStatus
podIsTerminal bool
expectedPhase v1.PodPhase
}{
{
name: "init container with restart policy Never failed",
spec: &v1.PodSpec{
InitContainers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyNever,
}},
Containers: []v1.Container{{Name: "container"}},
RestartPolicy: v1.RestartPolicyAlways,
},
statuses: []v1.ContainerStatus{failedState("failed-container")},
expectedPhase: v1.PodFailed,
},
{
name: "init container with restart policy OnFailure failed",
spec: &v1.PodSpec{
InitContainers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyOnFailure,
}},
Containers: []v1.Container{{Name: "container"}},
RestartPolicy: v1.RestartPolicyNever,
},
statuses: []v1.ContainerStatus{
failedState("failed-container"),
},
expectedPhase: v1.PodPending,
},
{
name: "container with restart policy Always failed",
spec: &v1.PodSpec{
InitContainers: []v1.Container{{
Name: "failed-container",
RestartPolicy: &containerRestartPolicyAlways,
}},
Containers: []v1.Container{{Name: "container"}},
RestartPolicy: v1.RestartPolicyNever,
},
statuses: []v1.ContainerStatus{
failedState("failed-container"),
},
expectedPhase: v1.PodPending,
},
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerRestartRules, true)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
pod := &v1.Pod{
Spec: *tc.spec,
Status: v1.PodStatus{
ContainerStatuses: tc.statuses,
},
}
phase := getPhase(pod, tc.statuses, tc.podIsTerminal, true)
assert.Equal(t, tc.expectedPhase, phase)
})
}
}
// TestPodPhaseWithRestartOnFailureInitContainers is intentionally omitted:
// init containers need no special handling for RestartPolicyOnFailure, so the
// RestartAlways cases above cover the behavior.
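// TestConvertToAPIContainerStatuses verifies that when the runtime reports no
// current status, previously running containers are surfaced as waiting with
// an "unknown" last termination, and that the restart count is bumped only
// when the pod is not being deleted.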
func TestConvertToAPIContainerStatuses(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
now := metav1.Now()
tests := []struct {
name string
pod *v1.Pod
currentStatus *kubecontainer.PodStatus
previousStatus []v1.ContainerStatus
containers []v1.Container
hasInitContainers bool
isInitContainer bool
expected []v1.ContainerStatus
}{
{
name: "no current status, with previous statuses and deletion",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
},
currentStatus: &kubecontainer.PodStatus{},
previousStatus: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
containers: desiredState.Containers,
// no init containers
// is not an init container
expected: []v1.ContainerStatus{
waitingWithLastTerminationUnknown("containerA", 0),
waitingWithLastTerminationUnknown("containerB", 0),
},
},
{
name: "no current status, with previous statuses and no deletion",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{},
previousStatus: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
containers: desiredState.Containers,
// no init containers
// is not an init container
expected: []v1.ContainerStatus{
waitingWithLastTerminationUnknown("containerA", 1),
waitingWithLastTerminationUnknown("containerB", 1),
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
containerStatuses := kl.convertToAPIContainerStatuses(
test.pod,
test.currentStatus,
test.previousStatus,
test.containers,
test.hasInitContainers,
test.isInitContainer,
)
for i, status := range containerStatuses {
assert.Equal(t, test.expected[i], status, "[test %s]", test.name)
}
})
}
}
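// Test_generateAPIPodStatus verifies how generateAPIPodStatus merges the
// runtime status with the previous API status: terminal phases are sticky,
// reason and message survive only while the phase is unchanged, a disruption
// condition is copied over, and the PodReadyToStartContainers condition is
// prepended when its feature gate is enabled.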
func Test_generateAPIPodStatus(t *testing.T) {
desiredState := v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyAlways,
}
sandboxReadyStatus := &kubecontainer.PodStatus{
SandboxStatuses: []*runtimeapi.PodSandboxStatus{
{
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: "10.0.0.10",
},
Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
State: runtimeapi.PodSandboxState_SANDBOX_READY,
},
},
}
withResources := func(cs v1.ContainerStatus) v1.ContainerStatus {
cs.Resources = &v1.ResourceRequirements{}
return cs
}
now := metav1.Now()
normalized_now := now.Rfc3339Copy()
tests := []struct {
name string
pod *v1.Pod
currentStatus *kubecontainer.PodStatus
unreadyContainer []string
previousStatus v1.PodStatus
isPodTerminal bool
expected v1.PodStatus
expectedPodDisruptionCondition *v1.PodCondition
expectedPodReadyToStartContainersCondition v1.PodCondition
}{
{
name: "pod disruption condition is copied over and the phase is set to failed when deleted",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
Conditions: []v1.PodCondition{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: normalized_now,
}},
},
ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
},
currentStatus: sandboxReadyStatus,
previousStatus: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
Conditions: []v1.PodCondition{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: normalized_now,
}},
},
isPodTerminal: true,
expected: v1.PodStatus{
Phase: v1.PodFailed,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingWithLastTerminationUnknown("containerA", 0)),
ready(waitingWithLastTerminationUnknown("containerB", 0)),
},
},
expectedPodDisruptionCondition: &v1.PodCondition{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: normalized_now,
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
{
name: "current status ready, with previous statuses and deletion",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
},
currentStatus: sandboxReadyStatus,
previousStatus: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodRunning,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionTrue},
{Type: v1.ContainersReady, Status: v1.ConditionTrue},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingWithLastTerminationUnknown("containerA", 0)),
ready(waitingWithLastTerminationUnknown("containerB", 0)),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
{
name: "current status ready, with previous statuses and no deletion",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: sandboxReadyStatus,
previousStatus: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodRunning,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionTrue},
{Type: v1.ContainersReady, Status: v1.ConditionTrue},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingWithLastTerminationUnknown("containerA", 1)),
ready(waitingWithLastTerminationUnknown("containerB", 1)),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
{
name: "terminal phase cannot be changed (apiserver previous is succeeded)",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{},
previousStatus: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodSucceeded,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingWithLastTerminationUnknown("containerA", 1)),
ready(waitingWithLastTerminationUnknown("containerB", 1)),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionFalse,
},
},
{
name: "terminal phase from previous status must remain terminal, restartAlways",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{},
previousStatus: v1.PodStatus{
Phase: v1.PodSucceeded,
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
// Reason and message should be preserved
Reason: "Test",
Message: "test",
},
expected: v1.PodStatus{
Phase: v1.PodSucceeded,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingWithLastTerminationUnknown("containerA", 1)),
ready(waitingWithLastTerminationUnknown("containerB", 1)),
},
Reason: "Test",
Message: "test",
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionFalse,
},
},
{
name: "terminal phase from previous status must remain terminal, restartNever",
pod: &v1.Pod{
Spec: v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: v1.RestartPolicyNever,
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{},
previousStatus: v1.PodStatus{
Phase: v1.PodSucceeded,
ContainerStatuses: []v1.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
// Reason and message should be preserved
Reason: "Test",
Message: "test",
},
expected: v1.PodStatus{
Phase: v1.PodSucceeded,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(succeededState("containerA")),
ready(succeededState("containerB")),
},
Reason: "Test",
Message: "test",
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionFalse,
},
},
{
name: "running can revert to pending",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
currentStatus: sandboxReadyStatus,
previousStatus: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
waitingState("containerA"),
waitingState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodPending,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionTrue},
{Type: v1.ContainersReady, Status: v1.ConditionTrue},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingStateWithReason("containerA", "ContainerCreating")),
ready(waitingStateWithReason("containerB", "ContainerCreating")),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
{
name: "reason and message are preserved when phase doesn't change",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
waitingState("containerA"),
waitingState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{
SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
ContainerStatuses: []*kubecontainer.Status{
{
ID: kubecontainer.ContainerID{ID: "foo"},
Name: "containerB",
StartedAt: time.Unix(1, 0).UTC(),
State: kubecontainer.ContainerStateRunning,
},
},
},
previousStatus: v1.PodStatus{
Phase: v1.PodPending,
Reason: "Test",
Message: "test",
ContainerStatuses: []v1.ContainerStatus{
waitingState("containerA"),
runningState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodPending,
Reason: "Test",
Message: "test",
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionTrue},
{Type: v1.ContainersReady, Status: v1.ConditionTrue},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
ready(waitingStateWithReason("containerA", "ContainerCreating")),
withResources(ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo"))),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
{
name: "reason and message are cleared when phase changes",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
Phase: v1.PodPending,
ContainerStatuses: []v1.ContainerStatus{
waitingState("containerA"),
waitingState("containerB"),
},
},
},
currentStatus: &kubecontainer.PodStatus{
SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
ContainerStatuses: []*kubecontainer.Status{
{
ID: kubecontainer.ContainerID{ID: "c1"},
Name: "containerA",
StartedAt: time.Unix(1, 0).UTC(),
State: kubecontainer.ContainerStateRunning,
},
{
ID: kubecontainer.ContainerID{ID: "c2"},
Name: "containerB",
StartedAt: time.Unix(2, 0).UTC(),
State: kubecontainer.ContainerStateRunning,
},
},
},
previousStatus: v1.PodStatus{
Phase: v1.PodPending,
Reason: "Test",
Message: "test",
ContainerStatuses: []v1.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
expected: v1.PodStatus{
Phase: v1.PodRunning,
HostIP: "127.0.0.1",
HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
QOSClass: v1.PodQOSBestEffort,
Conditions: []v1.PodCondition{
{Type: v1.PodInitialized, Status: v1.ConditionTrue},
{Type: v1.PodReady, Status: v1.ConditionTrue},
{Type: v1.ContainersReady, Status: v1.ConditionTrue},
{Type: v1.PodScheduled, Status: v1.ConditionTrue},
},
ContainerStatuses: []v1.ContainerStatus{
withResources(ready(withID(runningStateWithStartedAt("containerA", time.Unix(1, 0).UTC()), "://c1"))),
withResources(ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2"))),
},
},
expectedPodReadyToStartContainersCondition: v1.PodCondition{
Type: v1.PodReadyToStartContainers,
Status: v1.ConditionTrue,
},
},
}
for _, test := range tests {
for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
t.Run(test.name, func(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kl.statusManager.SetPodStatus(logger, test.pod, test.previousStatus)
for _, name := range test.unreadyContainer {
kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
}
expected := test.expected.DeepCopy()
actual := kl.generateAPIPodStatus(test.pod, test.currentStatus, test.isPodTerminal)
if enablePodReadyToStartContainersCondition {
expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...)
}
if test.expectedPodDisruptionCondition != nil {
expected.Conditions = append([]v1.PodCondition{*test.expectedPodDisruptionCondition}, expected.Conditions...)
}
if !apiequality.Semantic.DeepEqual(*expected, actual) {
t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual))
}
})
}
}
}
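// Test_generateAPIPodStatusForInPlaceVPAEnabled verifies that, with InPlacePodVerticalScaling
// enabled, generateAPIPodStatus reports no PodResizePending or PodResizeInProgress condition
// when the allocated resources (including custom and storage-only resources) match the spec.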
func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
if goruntime.GOOS == "windows" {
t.Skip("InPlacePodVerticalScaling is not currently supported for Windows")
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
testContainerName := "ctr0"
testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
CPU1AndMem1GAndStorage2GAndCustomResource := CPU1AndMem1GAndStorage2G.DeepCopy()
CPU1AndMem1GAndStorage2GAndCustomResource["unknown-resource"] = resource.MustParse("1")
testKubecontainerPodStatus := kubecontainer.PodStatus{
ContainerStatuses: []*kubecontainer.Status{
{
ID: testContainerID,
Name: testContainerName,
Resources: &kubecontainer.ContainerResources{
CPURequest: CPU1AndMem1G.Cpu(),
MemoryRequest: CPU1AndMem1G.Memory(),
CPULimit: CPU1AndMem1G.Cpu(),
MemoryLimit: CPU1AndMem1G.Memory(),
},
},
},
}
tests := []struct {
name string
pod *v1.Pod
oldStatus *v1.PodStatus
}{
{
name: "custom resource in ResourcesAllocated, resize should be null",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2GAndCustomResource, Requests: CPU1AndMem1GAndStorage2GAndCustomResource},
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2GAndCustomResource,
},
},
},
},
},
{
name: "cpu/memory resource in ResourcesAllocated, resize should be null",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "1234560",
Name: "foo0",
Namespace: "bar0",
},
Spec: v1.PodSpec{
NodeName: "machine",
Containers: []v1.Container{
{
Name: testContainerName,
Image: "img",
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: testContainerName,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: CPU1AndMem1GAndStorage2G,
},
},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
oldStatus := test.pod.Status
kl.statusManager.SetPodStatus(logger, test.pod, oldStatus)
actual := kl.generateAPIPodStatus(test.pod, &testKubecontainerPodStatus /* criStatus */, false /* test.isPodTerminal */)
for _, c := range actual.Conditions {
if c.Type == v1.PodResizePending || c.Type == v1.PodResizeInProgress {
t.Fatalf("unexpected resize status: %v", c)
}
}
})
}
}
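// findContainerStatusByName returns a pointer to the status entry with the given name from the
// init, regular, or ephemeral container statuses of the supplied PodStatus, or nil if absent.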
func findContainerStatusByName(status v1.PodStatus, name string) *v1.ContainerStatus {
for i, c := range status.InitContainerStatuses {
if c.Name == name {
return &status.InitContainerStatuses[i]
}
}
for i, c := range status.ContainerStatuses {
if c.Name == name {
return &status.ContainerStatuses[i]
}
}
for i, c := range status.EphemeralContainerStatuses {
if c.Name == name {
return &status.EphemeralContainerStatuses[i]
}
}
return nil
}
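// TestGetExec verifies that GetExec resolves the pod and container and returns a streaming
// redirect from the runtime, returning an error only for unknown pods or containers.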
func TestGetExec(t *testing.T) {
const (
podName = "podFoo"
podNamespace = "nsFoo"
podUID types.UID = "12345678"
containerID = "containerFoo"
tty = true
)
var (
podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace))
)
testcases := []struct {
description string
podFullName string
container string
command []string
expectError bool
}{{
description: "success case",
podFullName: podFullName,
container: containerID,
command: []string{"ls"},
expectError: false,
}, {
description: "no such pod",
podFullName: "bar" + podFullName,
container: containerID,
command: []string{"ls"},
expectError: true,
}, {
description: "no such container",
podFullName: podFullName,
container: "containerBar",
command: []string{"ls"},
expectError: true,
}, {
description: "null exec command",
podFullName: podFullName,
container: containerID,
expectError: false,
}, {
description: "multi exec commands",
podFullName: podFullName,
container: containerID,
command: []string{"bash", "-c", "ls"},
expectError: false,
}}
for _, tc := range testcases {
t.Run(tc.description, func(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: podUID,
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: containerID,
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
},
},
}},
}
description := "streaming - " + tc.description
fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
kubelet.containerRuntime = fakeRuntime
kubelet.streamingRuntime = fakeRuntime
redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
if tc.expectError {
assert.Error(t, err, description)
} else {
assert.NoError(t, err, description)
assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
}
})
}
}
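// TestGetPortForward verifies that GetPortForward returns a streaming redirect for a known pod
// and an error for an unknown one.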
func TestGetPortForward(t *testing.T) {
const (
podName = "podFoo"
podNamespace = "nsFoo"
podUID types.UID = "12345678"
port int32 = 5000
)
testcases := []struct {
description string
podName string
expectError bool
}{{
description: "success case",
podName: podName,
}, {
description: "no such pod",
podName: "bar",
expectError: true,
}}
for _, tc := range testcases {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: podUID,
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: "foo",
ID: kubecontainer.ContainerID{Type: "test", ID: "foo"},
},
},
}},
}
description := "streaming - " + tc.description
fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
kubelet.containerRuntime = fakeRuntime
kubelet.streamingRuntime = fakeRuntime
redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{})
if tc.expectError {
assert.Error(t, err, description)
} else {
assert.NoError(t, err, description)
assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
}
}
}
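// TestTruncatePodHostname verifies that hostnames longer than 63 characters are truncated and
// that a trailing '.' or '-' left behind by the cut is also stripped.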
func TestTruncatePodHostname(t *testing.T) {
for c, test := range map[string]struct {
input string
output string
}{
"valid hostname": {
input: "test.pod.hostname",
output: "test.pod.hostname",
},
"too long hostname": {
input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.", // 8*9=72 chars
output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567", //8*8-1=63 chars
},
"hostname end with .": {
input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456.1234567.", // 8*9-1=71 chars
output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456", //8*8-2=62 chars
},
"hostname end with -": {
input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456-1234567.", // 8*9-1=71 chars
output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456", //8*8-2=62 chars
},
} {
t.Logf("TestCase: %q", c)
output, err := truncatePodHostnameIfNeeded("test-pod", test.input)
assert.NoError(t, err)
assert.Equal(t, test.output, output)
}
}
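// TestGenerateAPIPodStatusHostNetworkPodIPs verifies that host-network pods derive their pod IPs
// from the node addresses, preferring InternalIP over ExternalIP, unless the CRI reports pod IPs
// directly.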
func TestGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
testcases := []struct {
name string
nodeAddresses []v1.NodeAddress
criPodIPs []string
podIPs []v1.PodIP
}{
{
name: "Simple",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "InternalIP is preferred over ExternalIP",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Single-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Multiple single-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "10.0.0.2"},
{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Dual-stack addresses in dual-stack cluster",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "CRI PodIPs override NodeAddresses",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
criPodIPs: []string{"192.168.0.1"},
podIPs: []v1.PodIP{
{IP: "192.168.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "CRI dual-stack PodIPs override NodeAddresses",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
criPodIPs: []string{"192.168.0.1", "2001:db8::2"},
podIPs: []v1.PodIP{
{IP: "192.168.0.1"},
{IP: "2001:db8::2"},
},
},
{
// by default the cluster prefers IPv4
name: "CRI dual-stack PodIPs override NodeAddresses prefer IPv4",
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"},
},
criPodIPs: []string{"2001:db8::2", "192.168.0.1"},
podIPs: []v1.PodIP{
{IP: "192.168.0.1"},
{IP: "2001:db8::2"},
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Addresses: tc.nodeAddresses,
},
},
}}
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
pod.Spec.HostNetwork = true
criStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
IPs: tc.criPodIPs,
}
status := kl.generateAPIPodStatus(pod, criStatus, false)
if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
}
if tc.criPodIPs == nil && status.HostIP != status.PodIPs[0].IP {
t.Fatalf("Expected HostIP %q to equal PodIPs[0].IP %q", status.HostIP, status.PodIPs[0].IP)
}
})
}
}
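// TestNodeAddressUpdatesGenerateAPIPodStatusHostNetworkPodIPs verifies that the pod IPs of a
// running host-network pod stay stable when the node addresses change, while an address of a
// newly available secondary IP family may still be added.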
func TestNodeAddressUpdatesGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
testcases := []struct {
name string
nodeIPs []string
nodeAddresses []v1.NodeAddress
expectedPodIPs []v1.PodIP
}{
{
name: "Immutable after update node addresses single-stack",
nodeIPs: []string{"10.0.0.1"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
},
expectedPodIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Immutable after update node addresses dual-stack - primary address",
nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
},
expectedPodIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "2001:db8::2"},
},
},
{
name: "Immutable after update node addresses dual-stack - secondary address",
nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
},
expectedPodIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "2001:db8::2"},
},
},
{
name: "Immutable after update node addresses dual-stack - primary and secondary address",
nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
{Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
},
expectedPodIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "2001:db8::2"},
},
},
{
name: "Update secondary after new secondary address dual-stack",
nodeIPs: []string{"10.0.0.1"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
},
expectedPodIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "2001:db8::2"},
},
},
{
name: "Update secondary after new secondary address dual-stack - reverse order",
nodeIPs: []string{"2001:db8::2"},
nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
},
expectedPodIPs: []v1.PodIP{
{IP: "2001:db8::2"},
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
for _, ip := range tc.nodeIPs {
kl.nodeIPs = append(kl.nodeIPs, netutils.ParseIPSloppy(ip))
}
kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Addresses: tc.nodeAddresses,
},
},
}}
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
pod.Spec.HostNetwork = true
for _, ip := range tc.nodeIPs {
pod.Status.PodIPs = append(pod.Status.PodIPs, v1.PodIP{IP: ip})
}
if len(pod.Status.PodIPs) > 0 {
pod.Status.PodIP = pod.Status.PodIPs[0].IP
}
// simulate the CRI reporting the pod's previously assigned IPs
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
}
podStatus.IPs = tc.nodeIPs
status := kl.generateAPIPodStatus(pod, podStatus, false)
if !reflect.DeepEqual(status.PodIPs, tc.expectedPodIPs) {
t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedPodIPs, status.PodIPs)
}
if kl.nodeIPs[0].String() != status.PodIPs[0].IP {
t.Fatalf("Expected node IP %q to equal PodIPs[0].IP %q", kl.nodeIPs[0].String(), status.PodIPs[0].IP)
}
})
}
}
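// TestGenerateAPIPodStatusPodIPs verifies that CRI-reported pod IPs are reordered so that the IP
// matching the node's primary IP family comes first, and that PodIP mirrors PodIPs[0].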
func TestGenerateAPIPodStatusPodIPs(t *testing.T) {
testcases := []struct {
name string
nodeIP string
criPodIPs []string
podIPs []v1.PodIP
}{
{
name: "Simple",
nodeIP: "",
criPodIPs: []string{"10.0.0.1"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Dual-stack",
nodeIP: "",
criPodIPs: []string{"10.0.0.1", "fd01::1234"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "Dual-stack with explicit node IP",
nodeIP: "192.168.1.1",
criPodIPs: []string{"10.0.0.1", "fd01::1234"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "Dual-stack with CRI returning wrong family first",
nodeIP: "",
criPodIPs: []string{"fd01::1234", "10.0.0.1"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "Dual-stack with explicit node IP with CRI returning wrong family first",
nodeIP: "192.168.1.1",
criPodIPs: []string{"fd01::1234", "10.0.0.1"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
{
name: "Dual-stack with IPv6 node IP",
nodeIP: "fd00::5678",
criPodIPs: []string{"10.0.0.1", "fd01::1234"},
podIPs: []v1.PodIP{
{IP: "fd01::1234"},
{IP: "10.0.0.1"},
},
},
{
name: "Dual-stack with IPv6 node IP, other CRI order",
nodeIP: "fd00::5678",
criPodIPs: []string{"fd01::1234", "10.0.0.1"},
podIPs: []v1.PodIP{
{IP: "fd01::1234"},
{IP: "10.0.0.1"},
},
},
{
name: "No Pod IP matching Node IP",
nodeIP: "fd00::5678",
criPodIPs: []string{"10.0.0.1"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "No Pod IP matching (unspecified) Node IP",
nodeIP: "",
criPodIPs: []string{"fd01::1234"},
podIPs: []v1.PodIP{
{IP: "fd01::1234"},
},
},
{
name: "Multiple IPv4 IPs",
nodeIP: "",
criPodIPs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
},
},
{
name: "Multiple Dual-Stack IPs",
nodeIP: "",
criPodIPs: []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
podIPs: []v1.PodIP{
{IP: "10.0.0.1"},
{IP: "fd01::1234"},
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
}
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
criStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
IPs: tc.criPodIPs,
}
status := kl.generateAPIPodStatus(pod, criStatus, false)
if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
}
if status.PodIP != status.PodIPs[0].IP {
t.Fatalf("Expected PodIP %q to equal PodIPs[0].IP %q", status.PodIP, status.PodIPs[0].IP)
}
})
}
}
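// TestSortPodIPs verifies that sortPodIPs returns at most one IP per family, ordered by the
// node's primary IP family, and canonicalizes badly-formatted addresses reported by the CRI.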
func TestSortPodIPs(t *testing.T) {
testcases := []struct {
name string
nodeIP string
podIPs []string
expectedIPs []string
}{
{
name: "Simple",
nodeIP: "",
podIPs: []string{"10.0.0.1"},
expectedIPs: []string{"10.0.0.1"},
},
{
name: "Dual-stack",
nodeIP: "",
podIPs: []string{"10.0.0.1", "fd01::1234"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
{
name: "Dual-stack with explicit node IP",
nodeIP: "192.168.1.1",
podIPs: []string{"10.0.0.1", "fd01::1234"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
{
name: "Dual-stack with CRI returning wrong family first",
nodeIP: "",
podIPs: []string{"fd01::1234", "10.0.0.1"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
{
name: "Dual-stack with explicit node IP with CRI returning wrong family first",
nodeIP: "192.168.1.1",
podIPs: []string{"fd01::1234", "10.0.0.1"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
{
name: "Dual-stack with IPv6 node IP",
nodeIP: "fd00::5678",
podIPs: []string{"10.0.0.1", "fd01::1234"},
expectedIPs: []string{"fd01::1234", "10.0.0.1"},
},
{
name: "Dual-stack with IPv6 node IP, other CRI order",
nodeIP: "fd00::5678",
podIPs: []string{"fd01::1234", "10.0.0.1"},
expectedIPs: []string{"fd01::1234", "10.0.0.1"},
},
{
name: "No Pod IP matching Node IP",
nodeIP: "fd00::5678",
podIPs: []string{"10.0.0.1"},
expectedIPs: []string{"10.0.0.1"},
},
{
name: "No Pod IP matching (unspecified) Node IP",
nodeIP: "",
podIPs: []string{"fd01::1234"},
expectedIPs: []string{"fd01::1234"},
},
{
name: "Multiple IPv4 IPs",
nodeIP: "",
podIPs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
expectedIPs: []string{"10.0.0.1"},
},
{
name: "Multiple Dual-Stack IPs",
nodeIP: "",
podIPs: []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
{
name: "Badly-formatted IPs from CRI",
nodeIP: "",
podIPs: []string{"010.000.000.001", "fd01:0:0:0:0:0:0:1234"},
expectedIPs: []string{"10.0.0.1", "fd01::1234"},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
}
podIPs := kl.sortPodIPs(tc.podIPs)
if !reflect.DeepEqual(podIPs, tc.expectedIPs) {
t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedIPs, podIPs)
}
})
}
}
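// TestConvertToAPIContainerStatusesDataRace invokes convertToAPIContainerStatuses from
// concurrent goroutines so that the race detector can flag any shared mutable state.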
func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
testTimestamp := time.Unix(123456789, 987654321)
criStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: []*kubecontainer.Status{
{Name: "containerA", CreatedAt: testTimestamp},
{Name: "containerB", CreatedAt: testTimestamp.Add(1)},
},
}
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
// convertToAPIContainerStatuses is purely transformative and shouldn't alter the state of the kubelet.
// Because there are no synchronization events in that function (no locks, no channels, ...), each test
// goroutine advances its own vector clock independently. The Go race detector uses pure happens-before
// detection, so it would catch a race condition consistently, even though only 2 goroutines are spawned.
for i := 0; i < 2; i++ {
go func() {
kl.convertToAPIContainerStatuses(pod, criStatus, []v1.ContainerStatus{}, []v1.Container{}, false, false)
}()
}
}
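// TestConvertToAPIContainerStatusesForResources verifies how allocated and actual (CRI-reported)
// resources are merged into v1.ContainerStatus when InPlacePodVerticalScaling is enabled.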
func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
if goruntime.GOOS == "windows" {
t.Skip("InPlacePodVerticalScaling is not currently supported for Windows")
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
nowTime := time.Now()
testContainerName := "ctr0"
testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
testContainer := v1.Container{
Name: testContainerName,
Image: "img",
}
testContainerStatus := v1.ContainerStatus{
Name: testContainerName,
}
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "123456",
Name: "foo",
Namespace: "bar",
},
Spec: v1.PodSpec{
Containers: []v1.Container{testContainer},
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
},
}
testPodStatus := func(state kubecontainer.State, resources *kubecontainer.ContainerResources) *kubecontainer.PodStatus {
cStatus := kubecontainer.Status{
Name: testContainerName,
ID: testContainerID,
Image: "img",
ImageID: "1234",
ImageRef: "img1234",
State: state,
Resources: resources,
}
switch state {
case kubecontainer.ContainerStateRunning:
cStatus.StartedAt = nowTime
case kubecontainer.ContainerStateExited:
cStatus.StartedAt = nowTime
cStatus.FinishedAt = nowTime
}
return &kubecontainer.PodStatus{
ID: testPod.UID,
Name: testPod.Name,
Namespace: testPod.Namespace,
ContainerStatuses: []*kubecontainer.Status{&cStatus},
}
}
CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
CPU1AndMem1GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
CPU1AndMem2GAndStorage2G := CPU1AndMem1GAndStorage2G.DeepCopy()
CPU1AndMem2GAndStorage2G[v1.ResourceMemory] = resource.MustParse("2Gi")
CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
CPU2AndMem2GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
addExtendedResource := func(list v1.ResourceList) v1.ResourceList {
const stubCustomResource = v1.ResourceName("dummy.io/dummy")
withExtendedResource := list.DeepCopy()
for _, resourceName := range []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU} {
if _, exists := withExtendedResource[resourceName]; !exists {
withExtendedResource[resourceName] = resource.MustParse("0")
}
}
withExtendedResource[stubCustomResource] = resource.MustParse("1")
return withExtendedResource
}
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
idx := 0
for tdesc, tc := range map[string]struct {
State kubecontainer.State // Defaults to Running
Resources v1.ResourceRequirements
AllocatedResources *v1.ResourceRequirements // Defaults to Resources
ActualResources *kubecontainer.ContainerResources // Defaults to Resources equivalent
OldStatus v1.ContainerStatus
Expected v1.ContainerStatus
}{
"GuaranteedQoSPod with CPU and memory CRI status": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
},
},
"BurstableQoSPod with CPU and memory CRI status": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
},
},
"BurstableQoSPod without CPU": {
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
}},
ActualResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
}},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
},
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
}},
},
},
"BurstableQoSPod with below min CPU": {
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
v1.ResourceCPU: resource.MustParse("1m"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
},
},
ActualResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(10, resource.DecimalSI),
},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
v1.ResourceCPU: resource.MustParse("1m"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
},
},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
v1.ResourceCPU: resource.MustParse("1m"),
},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
v1.ResourceCPU: resource.MustParse("1m"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5m"),
},
},
},
},
"GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BestEffortQoSPod": {
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
Resources: &v1.ResourceRequirements{},
},
},
"BestEffort QoSPod with extended resources": {
Resources: v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(v1.ResourceList{}),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
},
},
"BurstableQoSPod with extended resources": {
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
},
},
"BurstableQoSPod with storage, ephemeral storage and extended resources": {
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
},
},
"GuaranteedQoSPod with extended resources": {
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
},
},
"newly created Pod": {
State: kubecontainer.ContainerStateCreated,
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"newly running Pod": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"newly terminated Pod": {
State: kubecontainer.ContainerStateExited,
// Actual resources were different, but they should be ignored once the container is terminated.
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
ContainerID: testContainerID.String(),
StartedAt: metav1.NewTime(nowTime),
FinishedAt: metav1.NewTime(nowTime),
}},
AllocatedResources: CPU2AndMem2GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
},
},
"resizing Pod": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU2AndMem2GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem2GAndStorage2G},
},
},
} {
t.Run(tdesc, func(t *testing.T) {
tPod := testPod.DeepCopy()
tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
idx++ // give each test case a unique pod name
if tc.AllocatedResources != nil {
tPod.Spec.Containers[0].Resources = *tc.AllocatedResources
} else {
tPod.Spec.Containers[0].Resources = tc.Resources
}
err := kubelet.allocationManager.SetAllocatedResources(tPod)
require.NoError(t, err)
resources := tc.ActualResources
if resources == nil {
resources = &kubecontainer.ContainerResources{
MemoryLimit: tc.Resources.Limits.Memory(),
CPULimit: tc.Resources.Limits.Cpu(),
CPURequest: tc.Resources.Requests.Cpu(),
}
}
state := kubecontainer.ContainerStateRunning
if tc.State != "" {
state = tc.State
}
podStatus := testPodStatus(state, resources)
cStatuses := kubelet.convertToAPIContainerStatuses(tPod, podStatus, []v1.ContainerStatus{tc.OldStatus}, tPod.Spec.Containers, false, false)
assert.Equal(t, tc.Expected, cStatuses[0])
})
}
}
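// TestConvertToAPIContainerStatusesForUser verifies that the runtime-reported container user is
// surfaced in v1.ContainerStatus only when the SupplementalGroupsPolicy feature gate is enabled.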
func TestConvertToAPIContainerStatusesForUser(t *testing.T) {
nowTime := time.Now()
testContainerName := "ctr0"
testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
testContainer := v1.Container{
Name: testContainerName,
Image: "img",
}
testContainerStatus := v1.ContainerStatus{
Name: testContainerName,
}
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "123456",
Name: "foo",
Namespace: "bar",
},
Spec: v1.PodSpec{
Containers: []v1.Container{testContainer},
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
},
}
testPodStatus := func(user *kubecontainer.ContainerUser) *kubecontainer.PodStatus {
testKubeContainerStatus := kubecontainer.Status{
Name: testContainerName,
ID: testContainerID,
Image: "img",
State: kubecontainer.ContainerStateRunning,
StartedAt: nowTime,
User: user,
}
return &kubecontainer.PodStatus{
ID: testPod.UID,
Name: testPod.Name,
Namespace: testPod.Namespace,
ContainerStatuses: []*kubecontainer.Status{&testKubeContainerStatus},
}
}
expectedContainerStatuses := func(user *v1.ContainerUser) []v1.ContainerStatus {
return []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
Resources: &v1.ResourceRequirements{},
User: user,
},
}
}
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
for tdesc, tc := range map[string]struct {
testPodStatus *kubecontainer.PodStatus
featureEnabled bool
expectedContainerStatus []v1.ContainerStatus
}{
"nil user, SupplementalGroupsPolicy is disabled": {
testPodStatus(nil),
false,
expectedContainerStatuses(nil),
},
"empty user, SupplementalGroupsPolicy is disabled": {
testPodStaus(&kubecontainer.ContainerUser{}),
false,
expectedContainerStatuses(nil),
},
"linux user, SupplementalGroupsPolicy is disabled": {
testPodStaus(&kubecontainer.ContainerUser{
Linux: &kubecontainer.LinuxContainerUser{
UID: 0,
GID: 0,
SupplementalGroups: []int64{10},
},
}),
false,
expectedContainerStatuses(nil),
},
"nil user, SupplementalGroupsPolicy is enabled": {
testPodStatus(nil),
true,
expectedContainerStatuses(nil),
},
"empty user, SupplementalGroupsPolicy is enabled": {
testPodStaus(&kubecontainer.ContainerUser{}),
true,
expectedContainerStatuses(&v1.ContainerUser{}),
},
"linux user, SupplementalGroupsPolicy is enabled": {
testPodStaus(&kubecontainer.ContainerUser{
Linux: &kubecontainer.LinuxContainerUser{
UID: 0,
GID: 0,
SupplementalGroups: []int64{10},
},
}),
true,
expectedContainerStatuses(&v1.ContainerUser{
Linux: &v1.LinuxContainerUser{
UID: 0,
GID: 0,
SupplementalGroups: []int64{10},
},
}),
},
} {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SupplementalGroupsPolicy, tc.featureEnabled)
tPod := testPod.DeepCopy()
t.Logf("TestCase: %q", tdesc)
cStatuses := kubelet.convertToAPIContainerStatuses(tPod, tc.testPodStatus, tPod.Status.ContainerStatuses, tPod.Spec.Containers, false, false)
assert.Equal(t, tc.expectedContainerStatus, cStatuses)
}
}
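// TestKubelet_HandlePodCleanups verifies how HandlePodCleanups reconciles the pod worker with
// config and runtime state: orphaned runtime pods are killed, failed terminations are retried,
// and the desired/active/orphaned/working pod metrics are reported accordingly.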
func TestKubelet_HandlePodCleanups(t *testing.T) {
one := int64(1)
two := int64(2)
deleted := metav1.NewTime(time.Unix(2, 0).UTC())
type rejectedPod struct {
uid types.UID
reason string
message string
}
simplePod := func() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container-1"},
},
},
}
}
withPhase := func(pod *v1.Pod, phase v1.PodPhase) *v1.Pod {
pod.Status.Phase = phase
return pod
}
staticPod := func() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
UID: types.UID("1"),
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container-1"},
},
},
}
}
runtimePod := func(pod *v1.Pod) *kubecontainer.Pod {
runningPod := &kubecontainer.Pod{
ID: types.UID(pod.UID),
Name: pod.Name,
Namespace: pod.Namespace,
Containers: []*kubecontainer.Container{
{Name: "container-1", ID: kubecontainer.ContainerID{Type: "test", ID: "c1"}},
},
}
for i, container := range pod.Spec.Containers {
runningPod.Containers = append(runningPod.Containers, &kubecontainer.Container{
Name: container.Name,
ID: kubecontainer.ContainerID{Type: "test", ID: fmt.Sprintf("c%d", i)},
})
}
return runningPod
}
mirrorPod := func(pod *v1.Pod, nodeName string, nodeUID types.UID) *v1.Pod {
copied := pod.DeepCopy()
if copied.Annotations == nil {
copied.Annotations = make(map[string]string)
}
copied.Annotations[kubetypes.ConfigMirrorAnnotationKey] = pod.Annotations[kubetypes.ConfigHashAnnotationKey]
isTrue := true
copied.OwnerReferences = append(copied.OwnerReferences, metav1.OwnerReference{
APIVersion: v1.SchemeGroupVersion.String(),
Kind: "Node",
Name: nodeName,
UID: nodeUID,
Controller: &isTrue,
})
return copied
}
tests := []struct {
name string
pods []*v1.Pod
runtimePods []*containertest.FakePod
rejectedPods []rejectedPod
terminatingErr error
prepareWorker func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
wantWorker func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
wantWorkerAfterRetry func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
wantErr bool
expectMetrics map[string]string
expectMetricsAfterRetry map[string]string
}{
{
name: "missing pod is requested for termination with short grace period",
wantErr: false,
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(staticPod()),
},
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
drainAllWorkers(w)
uid := types.UID("1")
// we expect runtime pods to be cleared from the status history as soon as they
// reach completion
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
r, ok := records[uid]
if !ok || len(r) != 1 || r[0].updateType != kubetypes.SyncPodKill || r[0].terminated || r[0].runningPod == nil || r[0].gracePeriod != nil {
t.Fatalf("unexpected pod sync records: %#v", r)
}
},
expectMetrics: map[string]string{
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 1
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 1
`,
},
},
{
name: "terminating pod that errored and is not in config is notified by the cleanup",
wantErr: false,
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(simplePod()),
},
},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create
pod := simplePod()
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
// send a delete update
two := int64(2)
deleted := metav1.NewTime(time.Unix(2, 0).UTC())
updatedPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
UID: types.UID("1"),
DeletionGracePeriodSeconds: &two,
DeletionTimestamp: &deleted,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &two,
Containers: []v1.Container{
{Name: "container-1"},
},
},
}
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
StartTime: time.Unix(3, 0).UTC(),
Pod: updatedPod,
})
drainAllWorkers(w)
r, ok := records[updatedPod.UID]
if !ok || len(r) != 2 || r[1].gracePeriod == nil || *r[1].gracePeriod != 2 {
t.Fatalf("unexpected records: %#v", records)
}
// pod worker thinks pod1 exists, but the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
// expect a pod sync record for the kill with the same grace period as before (2), but no
// running pod because the SyncKnownPods method killed it
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 0
kubelet_desired_pods{static="true"} 0
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 0
kubelet_active_pods{static="true"} 0
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 1
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
// expect a pod sync record for the kill with the same grace period as before (2), but no
// running pod because the SyncKnownPods method killed it
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
// after the second attempt
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
// from termination
{name: "pod1", terminated: true},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "terminating pod that errored and is not in config or worker is force killed by the cleanup",
wantErr: false,
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(simplePod()),
},
},
terminatingErr: errors.New("unable to terminate"),
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
// ensure that we recorded the appropriate state for replays
expectedRunningPod := runtimePod(simplePod())
if actual, expected := s.activeUpdate, (&UpdatePodOptions{
RunningPod: expectedRunningPod,
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
}); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
}
// expect that a pod the pod worker does not recognize is force killed with grace period 1
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
// expect that a pod the pod worker does not recognize is force killed with grace period 1
expectedRunningPod := runtimePod(simplePod())
if actual, expected := records[uid], []syncPodRecord{
// first attempt, did not succeed
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
// second attempt, should succeed
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
// because this is a runtime pod, we don't have enough info to invoke syncTerminatedPod and so
// we exit after the retry succeeds
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "pod is added to worker by sync method",
wantErr: false,
pods: []*v1.Pod{
simplePod(),
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
// pod was synced once
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 1
kubelet_desired_pods{static="true"} 0
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 1
kubelet_active_pods{static="true"} 0
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
// Note that this test simulates a net-new pod being discovered during HandlePodCleanups that was not
// delivered to the pod worker via HandlePodAdditions - there is no *known* scenario in which that can
// happen, but we want to capture it in the metric. The more likely scenario is that a static pod with a
// predefined UID is updated, which causes pod config to deliver DELETE -> ADD while the old pod is still
// shutting down, and causes the pod worker to ignore the ADD. HandlePodCleanups is then responsible for
// syncing that pod to the pod worker so that it restarts.
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 1
kubelet_restarted_pods_total{static="true"} 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
},
{
name: "pod is not added to worker by sync method because it is in a terminal phase",
wantErr: false,
pods: []*v1.Pod{
withPhase(simplePod(), v1.PodFailed),
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
// no pod sync record was delivered
if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 1
kubelet_desired_pods{static="true"} 0
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 0
kubelet_active_pods{static="true"} 0
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 0
kubelet_restarted_pods_total{static="true"} 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
},
{
name: "pod is not added to worker by sync method because it has been rejected",
wantErr: false,
pods: []*v1.Pod{
simplePod(),
},
rejectedPods: []rejectedPod{
{uid: "1", reason: "Test", message: "rejected"},
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
// no pod sync record was delivered
if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 1
kubelet_desired_pods{static="true"} 0
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 0
kubelet_active_pods{static="true"} 0
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 0
kubelet_restarted_pods_total{static="true"} 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
},
{
name: "terminating pod that is known to the config gets no update during pod cleanup",
wantErr: false,
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
UID: types.UID("1"),
DeletionGracePeriodSeconds: &two,
DeletionTimestamp: &deleted,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &two,
Containers: []v1.Container{
{Name: "container-1"},
},
},
},
},
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(simplePod()),
},
},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container-1"},
},
},
}
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
// send a delete update
updatedPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
UID: types.UID("1"),
DeletionGracePeriodSeconds: &two,
DeletionTimestamp: &deleted,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &two,
Containers: []v1.Container{
{Name: "container-1"},
},
},
}
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
StartTime: time.Unix(3, 0).UTC(),
Pod: updatedPod,
})
drainAllWorkers(w)
// the pod worker now considers pod1 terminated, and pod1 is still visible to config
if actual, expected := records[updatedPod.UID], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
// no new pod sync record was delivered during cleanup
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "pod that could not start and is not in config is force terminated during pod cleanup",
wantErr: false,
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(simplePod()),
},
},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
// block startup of the static pod due to full name collision
w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
if _, ok := records[pod.UID]; ok {
t.Fatalf("unexpected records: %#v", records)
}
// pod worker is unaware of pod1 yet, and the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// the pod is not started and is cleaned, but the runtime state causes us to reenter
// and perform a direct termination (we never observed the pod as being started by
// us, and so it is safe to completely tear down)
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// ensure that we recorded the appropriate state for replays
expectedRunningPod := runtimePod(simplePod())
if actual, expected := s.activeUpdate, (&UpdatePodOptions{
RunningPod: expectedRunningPod,
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
}); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
}
// sync is never invoked
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
// this pod is detected as an orphaned running pod and will exit
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
// expect we get a pod sync record for kill that should have the default grace period
expectedRunningPod := runtimePod(simplePod())
if actual, expected := records[uid], []syncPodRecord{
// first attempt, syncTerminatingPod failed with an error
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
// second attempt
{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "pod that could not start still has a pending update and is tracked in metrics",
wantErr: false,
pods: []*v1.Pod{
staticPod(),
},
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
// block startup of the static pod due to full name collision
w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
if _, ok := records[pod.UID]; ok {
t.Fatalf("unexpected records: %#v", records)
}
// pod worker is unaware of pod1 yet
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || s.restartRequested || s.activeUpdate != nil || s.pendingUpdate == nil {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// expect that no sync calls are made, since the pod never starts
if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 0
kubelet_desired_pods{static="true"} 1
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 1
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
},
{
name: "pod that could not start and is not in config is force terminated without runtime during pod cleanup",
wantErr: false,
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
// block startup of the static pod due to full name collision
w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
if _, ok := records[pod.UID]; ok {
t.Fatalf("unexpected records: %#v", records)
}
// pod worker is unaware of pod1 yet, and the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 0 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
// expect that no sync calls are made, since the pod never starts
if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "pod that is terminating is recreated by config with the same UID",
wantErr: false,
pods: []*v1.Pod{
func() *v1.Pod {
pod := staticPod()
pod.Annotations["version"] = "2"
return pod
}(),
},
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(staticPod()),
},
},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
// terminate the pod (which won't complete) and then deliver a recreate by that same UID
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
StartTime: time.Unix(2, 0).UTC(),
Pod: pod,
})
pod = staticPod()
pod.Annotations["version"] = "2"
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(3, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
// expect we get a pod sync record for kill that should have the default grace period
if actual, expected := records[pod.UID], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
// pod worker is aware of pod1, but the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || !s.restartRequested {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// expect we get a pod sync record for kill that should have the default grace period
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 0
kubelet_desired_pods{static="true"} 1
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 0
kubelet_active_pods{static="true"} 1
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 0
kubelet_restarted_pods_total{static="true"} 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 1
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
expectMetricsAfterRetry: map[string]string{
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 0
kubelet_restarted_pods_total{static="true"} 1
`,
},
},
{
name: "started pod that is not in config is force terminated during pod cleanup",
wantErr: false,
runtimePods: []*containertest.FakePod{
{
Pod: runtimePod(simplePod()),
},
},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
// expect only the create record so far; the kill is delivered during cleanup
if actual, expected := records[pod.UID], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
// pod worker is aware of pod1, but the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// expect we get a pod sync record for kill that should have the default grace period
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "started pod that is not in config or runtime is force terminated during pod cleanup",
wantErr: false,
runtimePods: []*containertest.FakePod{},
terminatingErr: errors.New("unable to terminate"),
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// send a create of a static pod
pod := staticPod()
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
MirrorPod: mirrorPod(pod, "node-1", "node-uid-1"),
})
drainAllWorkers(w)
// expect only the create record so far; the kill is delivered during cleanup
if actual, expected := records[pod.UID], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
// pod worker is aware of pod1, but the kubelet will not have it in the pod manager
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// ensure that we recorded the appropriate state for replays
expectedPod := staticPod()
if actual, expected := s.activeUpdate, (&UpdatePodOptions{
Pod: expectedPod,
MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
}); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
}
// expect we get a pod sync record for kill that should have the default grace period
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
t.Errorf("unexpected requested pod termination: %#v", s)
}
// ensure that we recorded the appropriate state for replays
expectedPod := staticPod()
if actual, expected := s.activeUpdate, (&UpdatePodOptions{
Pod: expectedPod,
MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
}); !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
}
// expect we get a pod sync record for kill that should have the default grace period
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill},
// second attempt at kill
{name: "pod1", updateType: kubetypes.SyncPodKill},
{name: "pod1", terminated: true},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
},
{
name: "terminated pod is restarted in the same invocation that it is detected",
wantErr: false,
pods: []*v1.Pod{
func() *v1.Pod {
pod := staticPod()
pod.Annotations = map[string]string{"version": "2"}
return pod
}(),
},
prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
// simulate a delete and recreate of the pod with the same UID
pod := simplePod()
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
StartTime: time.Unix(1, 0).UTC(),
Pod: pod,
})
drainAllWorkers(w)
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
Pod: pod,
})
pod2 := simplePod()
pod2.Annotations = map[string]string{"version": "2"}
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: pod2,
})
drainAllWorkers(w)
},
wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
uid := types.UID("1")
if len(w.podSyncStatuses) != 1 {
t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
}
s, ok := w.podSyncStatuses[uid]
if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
t.Fatalf("unexpected requested pod termination: %#v", s)
}
if s.pendingUpdate != nil || s.activeUpdate == nil || s.activeUpdate.Pod == nil || s.activeUpdate.Pod.Annotations["version"] != "2" {
t.Fatalf("unexpected restarted pod: %#v", s.activeUpdate.Pod)
}
// expect we get a pod sync record for kill that should have the same grace period as before, but no
// running pod because the SyncKnownPods method killed it
if actual, expected := records[uid], []syncPodRecord{
{name: "pod1", updateType: kubetypes.SyncPodCreate},
{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
{name: "pod1", terminated: true},
{name: "pod1", updateType: kubetypes.SyncPodCreate},
}; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
}
},
expectMetrics: map[string]string{
metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
# TYPE kubelet_desired_pods gauge
kubelet_desired_pods{static=""} 1
kubelet_desired_pods{static="true"} 0
`,
metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
# TYPE kubelet_active_pods gauge
kubelet_active_pods{static=""} 1
kubelet_active_pods{static="true"} 0
`,
metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
# TYPE kubelet_orphaned_runtime_pods_total counter
kubelet_orphaned_runtime_pods_total 0
`,
metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
# TYPE kubelet_restarted_pods_total counter
kubelet_restarted_pods_total{static=""} 1
kubelet_restarted_pods_total{static="true"} 0
`,
metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
# TYPE kubelet_working_pods gauge
kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
`,
},
},
}
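// Each scenario below resets the kubelet metrics, primes the pod worker via
// prepareWorker, runs HandlePodCleanups, and then asserts on worker state, the
// recorded sync calls, and the gathered metrics. When wantWorkerAfterRetry is
// set, HandlePodCleanups is invoked a second time with the original
// (non-failing) syncer to verify recovery after the terminating error clears.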
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// clear the metrics for testing
metrics.Register()
for _, metric := range []interface{ Reset() }{
metrics.DesiredPodCount,
metrics.ActivePodCount,
metrics.RestartedPodTotal,
metrics.OrphanedRuntimePodTotal,
metrics.WorkingPodCount,
} {
metric.Reset()
}
metrics.MirrorPodCount.Set(0)
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
podWorkers, _, processed := createPodWorkers()
kl.podWorkers = podWorkers
originalPodSyncer := podWorkers.podSyncer
syncFuncs := newPodSyncerFuncs(originalPodSyncer)
podWorkers.podSyncer = &syncFuncs
if tt.terminatingErr != nil {
syncFuncs.syncTerminatingPod = func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
t.Logf("called syncTerminatingPod")
if err := originalPodSyncer.SyncTerminatingPod(ctx, pod, podStatus, gracePeriod, podStatusFn); err != nil {
t.Fatalf("unexpected error in syncTerminatingPodFn: %v", err)
}
return tt.terminatingErr
}
syncFuncs.syncTerminatingRuntimePod = func(ctx context.Context, runningPod *kubecontainer.Pod) error {
if err := originalPodSyncer.SyncTerminatingRuntimePod(ctx, runningPod); err != nil {
t.Fatalf("unexpected error in syncTerminatingRuntimePodFn: %v", err)
}
return tt.terminatingErr
}
}
if tt.prepareWorker != nil {
tt.prepareWorker(t, podWorkers, processed)
}
testKubelet.fakeRuntime.PodList = tt.runtimePods
kl.podManager.SetPods(tt.pods)
for _, reject := range tt.rejectedPods {
pod, ok := kl.podManager.GetPodByUID(reject.uid)
if !ok {
t.Fatalf("unable to reject pod by UID %v", reject.uid)
}
kl.rejectPod(pod, reject.reason, reject.message)
}
if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
t.Errorf("Kubelet.HandlePodCleanups() error = %v, wantErr %v", err, tt.wantErr)
}
drainAllWorkers(podWorkers)
if tt.wantWorker != nil {
tt.wantWorker(t, podWorkers, processed)
}
for k, v := range tt.expectMetrics {
testMetric(t, k, v)
}
// check after the terminating error clears
if tt.wantWorkerAfterRetry != nil {
podWorkers.podSyncer = originalPodSyncer
if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
t.Errorf("Kubelet.HandlePodCleanups() second error = %v, wantErr %v", err, tt.wantErr)
}
drainAllWorkers(podWorkers)
tt.wantWorkerAfterRetry(t, podWorkers, processed)
for k, v := range tt.expectMetricsAfterRetry {
testMetric(t, k, v)
}
}
})
}
}
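// testMetric gathers the named metric from the kubelet metrics registry and
// compares it against the expected Prometheus text exposition (the HELP, TYPE,
// and sample lines used in the expectMetrics maps above).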
func testMetric(t *testing.T, metricName string, expectedMetric string) {
t.Helper()
err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(expectedMetric), metricName)
if err != nil {
t.Error(err)
}
}
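// TestGetNonExistentImagePullSecret verifies that referencing an image pull
// secret that cannot be retrieved does not fail pod processing, but emits a
// single FailedToRetrieveImagePullSecret warning event.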
func TestGetNonExistentImagePullSecret(t *testing.T) {
secrets := make([]*v1.Secret, 0)
fakeRecorder := record.NewFakeRecorder(1)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.kubelet.recorder = fakeRecorder
testKubelet.kubelet.secretManager = secret.NewFakeManagerWithSecrets(secrets)
defer testKubelet.Cleanup()
expectedEvent := "Warning FailedToRetrieveImagePullSecret Unable to retrieve some image pull secrets (secretFoo); attempting to pull the image may not succeed."
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: "nsFoo",
Name: "podFoo",
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
ImagePullSecrets: []v1.LocalObjectReference{
{Name: "secretFoo"},
},
},
}
pullSecrets := testKubelet.kubelet.getPullSecretsForPod(testPod)
assert.Empty(t, pullSecrets)
assert.Len(t, fakeRecorder.Events, 1)
event := <-fakeRecorder.Events
assert.Equal(t, expectedEvent, event)
}
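// TestParseGetSubIdsOutput exercises parsing of `getsubids` output, where each
// line has the form "<index>: <user> <firstID> <rangeLength>", for example:
//
//	0: kubelet 65536 2147483648
//
// Exactly one mapping is expected, and both IDs must fit in a uint32.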
func TestParseGetSubIdsOutput(t *testing.T) {
tests := []struct {
name string
input string
wantFirstID uint32
wantRangeLen uint32
wantErr bool
}{
{
name: "valid",
input: "0: kubelet 65536 2147483648",
wantFirstID: 65536,
wantRangeLen: 2147483648,
},
{
name: "multiple lines",
input: "0: kubelet 1 2\n1: kubelet 3 4\n",
wantErr: true,
},
{
name: "wrong format",
input: "0: kubelet 65536",
wantErr: true,
},
{
name: "non numeric 1",
input: "0: kubelet Foo 65536",
wantErr: true,
},
{
name: "non numeric 2",
input: "0: kubelet 0 Bar",
wantErr: true,
},
{
name: "overflow 1",
input: "0: kubelet 4294967296 2147483648",
wantErr: true,
},
{
name: "overflow 2",
input: "0: kubelet 65536 4294967296",
wantErr: true,
},
{
name: "negative value 1",
input: "0: kubelet -1 2147483648",
wantErr: true,
},
{
name: "negative value 2",
input: "0: kubelet 65536 -1",
wantErr: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotFirstID, gotRangeLen, err := parseGetSubIdsOutput(tc.input)
if tc.wantErr {
if err == nil {
t.Errorf("%s: expected error, got nil", tc.name)
}
} else {
if err != nil {
t.Errorf("%s: unexpected error: %v", tc.name, err)
}
if gotFirstID != tc.wantFirstID || gotRangeLen != tc.wantRangeLen {
t.Errorf("%s: got (%d, %d), want (%d, %d)", tc.name, gotFirstID, gotRangeLen, tc.wantFirstID, tc.wantRangeLen)
}
}
})
}
}
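// TestResolveRecursiveReadOnly checks when a volume mount resolves to a
// recursive read-only (RRO) mount: the mount must be read-only and leave mount
// propagation unset or set to None; RecursiveReadOnlyEnabled additionally
// requires runtime support, and unknown modes are rejected.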
func TestResolveRecursiveReadOnly(t *testing.T) {
testCases := []struct {
m v1.VolumeMount
runtimeSupportsRRO bool
expected bool
expectedErr string
}{
{
m: v1.VolumeMount{Name: "rw"},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "ro", ReadOnly: true},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "ro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyDisabled)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
runtimeSupportsRRO: true,
expected: true,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
MountPropagation: ptr.To(v1.MountPropagationNone)},
runtimeSupportsRRO: true,
expected: true,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
MountPropagation: ptr.To(v1.MountPropagationHostToContainer)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not compatible with propagation",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
MountPropagation: ptr.To(v1.MountPropagationBidirectional)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not compatible with propagation",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: false, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not read-only",
},
{
m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: false, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
runtimeSupportsRRO: false,
expected: false,
expectedErr: "not read-only",
},
{
m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
runtimeSupportsRRO: true,
expected: true,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
MountPropagation: ptr.To(v1.MountPropagationNone)},
runtimeSupportsRRO: true,
expected: true,
expectedErr: "",
},
{
m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
MountPropagation: ptr.To(v1.MountPropagationHostToContainer)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not compatible with propagation",
},
{
m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
MountPropagation: ptr.To(v1.MountPropagationBidirectional)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not compatible with propagation",
},
{
m: v1.VolumeMount{Name: "rro", RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "not read-only",
},
{
m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
runtimeSupportsRRO: false,
expected: false,
expectedErr: "not supported by the runtime",
},
{
m: v1.VolumeMount{Name: "invalid", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyMode("foo"))},
runtimeSupportsRRO: true,
expected: false,
expectedErr: "unknown recursive read-only mode",
},
}
for _, tc := range testCases {
got, err := resolveRecursiveReadOnly(tc.m, tc.runtimeSupportsRRO)
t.Logf("resolveRecursiveReadOnly(%+v, %v) = (%v, %v)", tc.m, tc.runtimeSupportsRRO, got, err)
if tc.expectedErr == "" {
assert.Equal(t, tc.expected, got)
assert.NoError(t, err)
} else {
assert.ErrorContains(t, err, tc.expectedErr)
}
}
}
// testVolumeMounter is a mock volume mounter for testing FileKeyRef functionality;
// only GetPath is meaningful here, the remaining methods exist to satisfy volume.Mounter.
type testVolumeMounter struct {
path string
}
func (tvm *testVolumeMounter) GetMetrics() (*volume.Metrics, error) {
return &volume.Metrics{}, nil
}
func (tvm *testVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
return nil
}
func (tvm *testVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
return nil
}
func (tvm *testVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{}
}
func (tvm *testVolumeMounter) GetPath() string {
return tvm.path
}
func TestMakeEnvironmentVariablesWithFileKeyRef(t *testing.T) {
// Create a temporary directory for test files
tmpDir, err := os.MkdirTemp("", "filekeyref-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tmpDir)
}()
// Create test environment variable files
createEnvFile := func(filename, content string) string {
filePath := filepath.Join(tmpDir, filename)
err := os.WriteFile(filePath, []byte(content), 0644)
require.NoError(t, err)
return filePath
}
// Test cases for the FileKeyRef environment variable source (gated by the EnvFiles feature gate).
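// Each referenced file is parsed as KEY=VALUE lines; blank lines and "#"
// comments are ignored and whitespace around "=" is trimmed. Keys are limited
// to 128 characters and values to 32KiB, as the error cases below demonstrate.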
testCases := []struct {
name string
container *v1.Container
podVolumes kubecontainer.VolumeMap
expectedEnvs []kubecontainer.EnvVar
expectedError bool
errorContains string
setupFiles func() []string // returns created file paths
}{
{
name: "successful file key reference",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DATABASE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "database.env",
Key: "DATABASE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "DATABASE", Value: "mydb"},
},
setupFiles: func() []string {
content := "DATABASE=mydb\nAPI_KEY=secret123\n"
return []string{createEnvFile("database.env", content)}
},
},
{
name: "file key reference with comments and empty lines",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "API_KEY",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "config.env",
Key: "API_KEY",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "API_KEY", Value: "secret123"},
},
setupFiles: func() []string {
content := "# This is a comment\n\nDATABASE=mydb\nAPI_KEY=secret123\n\n# Another comment\n"
return []string{createEnvFile("config.env", content)}
},
},
{
name: "file key reference with spaces around equals sign",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DEBUG_MODE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "debug.env",
Key: "DEBUG_MODE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "DEBUG_MODE", Value: "true"},
},
setupFiles: func() []string {
content := "DEBUG_MODE = true\nLOG_LEVEL = info\n"
return []string{createEnvFile("debug.env", content)}
},
},
{
name: "key not found in file",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "MISSING_KEY",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "config.env",
Key: "MISSING_KEY",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedError: true,
errorContains: "environment variable key \"MISSING_KEY\" not found in file",
setupFiles: func() []string {
content := "EXISTING_KEY=value\n"
return []string{createEnvFile("config.env", content)}
},
},
{
name: "key not found in file with optional flag",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "OPTIONAL_KEY",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "config.env",
Key: "OPTIONAL_KEY",
Optional: ptr.To(true),
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: nil,
setupFiles: func() []string {
content := "EXISTING_KEY=value\n"
return []string{createEnvFile("config.env", content)}
},
},
{
name: "file does not exist",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DATABASE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "nonexistent.env",
Key: "DATABASE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedError: true,
errorContains: "couldn't parse env file",
setupFiles: func() []string {
return []string{} // No files created
},
},
{
name: "key length exceeds 128 characters",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "LONG_KEY",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "config.env",
Key: strings.Repeat("A", 129),
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedError: true,
errorContains: "exceeds maximum length of 128 characters",
setupFiles: func() []string {
content := "EXISTING_KEY=value\n"
return []string{createEnvFile("config.env", content)}
},
},
{
name: "value size exceeds 32KB",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "LARGE_VALUE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "large.env",
Key: "LARGE_VALUE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedError: true,
errorContains: "environment variable value for key \"LARGE_VALUE\" exceeds maximum size of 32KB",
setupFiles: func() []string {
largeValue := strings.Repeat("A", 33*1024) // 33KB
content := fmt.Sprintf("LARGE_VALUE=%s\n", largeValue)
return []string{createEnvFile("large.env", content)}
},
},
{
name: "volume not found",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DATABASE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "nonexistent-volume",
Path: "database.env",
Key: "DATABASE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedError: true,
errorContains: "cannot find the volume \"nonexistent-volume\" referenced by FileKeyRef",
setupFiles: func() []string {
return []string{}
},
},
{
name: "volume mounter is nil",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DATABASE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "database.env",
Key: "DATABASE",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: nil,
},
},
expectedError: true,
errorContains: "cannot find the volume \"config-volume\" referenced by FileKeyRef",
setupFiles: func() []string {
return []string{}
},
},
{
name: "multiple file key references",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "DATABASE",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "database.env",
Key: "DATABASE",
},
},
},
{
Name: "API_KEY",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "api.env",
Key: "API_KEY",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "DATABASE", Value: "mydb"},
{Name: "API_KEY", Value: "secret123"},
},
setupFiles: func() []string {
dbContent := "DATABASE=mydb\n"
apiContent := "API_KEY=secret123\n"
return []string{
createEnvFile("database.env", dbContent),
createEnvFile("api.env", apiContent),
}
},
},
{
name: "mixed environment variable sources",
container: &v1.Container{
Env: []v1.EnvVar{
{
Name: "STATIC_VAR",
Value: "static_value",
},
{
Name: "FILE_VAR",
ValueFrom: &v1.EnvVarSource{
FileKeyRef: &v1.FileKeySelector{
VolumeName: "config-volume",
Path: "config.env",
Key: "FILE_VAR",
},
},
},
},
},
podVolumes: map[string]kubecontainer.VolumeInfo{
"config-volume": {
Mounter: &testVolumeMounter{path: tmpDir},
},
},
expectedEnvs: []kubecontainer.EnvVar{
{Name: "STATIC_VAR", Value: "static_value"},
{Name: "FILE_VAR", Value: "file_value"},
},
setupFiles: func() []string {
content := "FILE_VAR=file_value\n"
return []string{createEnvFile("config.env", content)}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EnvFiles, true)
if tc.setupFiles != nil {
tc.setupFiles()
}
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "test-namespace",
UID: "test-pod-uid",
},
Spec: v1.PodSpec{
Containers: []v1.Container{*tc.container},
EnableServiceLinks: ptr.To(false),
},
}
envs, err := kl.makeEnvironmentVariables(pod, tc.container, "192.168.1.1", []string{"192.168.1.1"}, tc.podVolumes)
if tc.expectedError {
require.Error(t, err)
if tc.errorContains != "" {
assert.Contains(t, err.Error(), tc.errorContains)
}
} else {
require.NoError(t, err)
// Sort both slices for comparison
sort.Sort(Envs(envs))
sort.Sort(Envs(tc.expectedEnvs))
assert.Equal(t, tc.expectedEnvs, envs)
}
})
}
}
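// Envs implements sort.Interface over kubecontainer.EnvVar, ordering by Name,
// so expected and actual environment slices can be compared order-independently.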
type Envs []kubecontainer.EnvVar
func (e Envs) Len() int { return len(e) }
func (e Envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e Envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
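// TestGeneratePodHostNameAndDomain covers hostname selection precedence:
// pod.Spec.HostnameOverride (only when the HostnameOverride feature gate is
// enabled) wins over pod.Spec.Hostname, which wins over the pod name truncated
// to the 63-character DNS label limit. When a subdomain is set, the domain is
// <subdomain>.<namespace>.svc.<cluster-domain>.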
func TestGeneratePodHostNameAndDomain(t *testing.T) {
kubelet := &Kubelet{}
kubelet.dnsConfigurer = &dns.Configurer{
ClusterDomain: "cluster.local",
}
testCases := []struct {
name string
podName string
podHostname string
podSubdomain string
podHostnameOverride *string
featureGateEnabled bool
expectedHostname string
expectedDomain string
expectError bool
errorContains string
}{
{
name: "Default behavior - pod name as hostname",
podName: "test-pod",
podHostname: "",
podSubdomain: "",
expectedHostname: "test-pod",
expectedDomain: "",
},
{
name: "Custom Hostname - uses pod.Spec.Hostname",
podName: "test-pod",
podHostname: "custom-hostname",
podSubdomain: "",
expectedHostname: "custom-hostname",
expectedDomain: "",
},
{
name: "Custom Subdomain - constructs FQDN",
podName: "test-pod",
podHostname: "",
podSubdomain: "my-subdomain",
expectedHostname: "test-pod",
expectedDomain: "my-subdomain.default.svc.cluster.local",
},
{
name: "Custom Hostname and Subdomain - uses both",
podName: "test-pod",
podHostname: "custom-hostname",
podSubdomain: "my-subdomain",
expectedHostname: "custom-hostname",
expectedDomain: "my-subdomain.default.svc.cluster.local",
},
{
name: "HostnameOverride - enabled - overrides all",
podName: "test-pod",
podHostname: "custom-hostname",
podSubdomain: "my-subdomain",
podHostnameOverride: ptr.To("override-hostname"),
featureGateEnabled: true,
expectedHostname: "override-hostname",
expectedDomain: "",
},
{
name: "HostnameOverride - enabled - overrides all - invalid hostname",
podName: "test-pod",
podHostname: "custom-hostname",
podSubdomain: "my-subdomain",
podHostnameOverride: ptr.To("Invalid-Hostname-!"),
featureGateEnabled: true,
expectError: true,
errorContains: "pod HostnameOverride \"Invalid-Hostname-!\" is not a valid DNS subdomain",
},
{
name: "HostnameOverride - enabled - overrides all - valid DNS hostname",
podName: "test-pod",
podHostnameOverride: ptr.To("valid.hostname"),
expectedHostname: "valid.hostname",
featureGateEnabled: true,
errorContains: "",
},
{
name: "HostnameOverride - disabled - is ignored",
podName: "test-pod",
podHostname: "custom-hostname",
podSubdomain: "my-subdomain",
podHostnameOverride: ptr.To("override-hostname"),
featureGateEnabled: false,
expectedHostname: "custom-hostname",
expectedDomain: "my-subdomain.default.svc.cluster.local",
},
{
name: "Hostname Truncation - pod name is too long",
podName: strings.Repeat("a", 65),
podHostname: "",
podSubdomain: "",
expectedHostname: strings.Repeat("a", 63),
expectedDomain: "",
},
{
name: "Validation - invalid hostname",
podName: "test-pod",
podHostname: "Invalid-Hostname-!",
expectError: true,
errorContains: "pod Hostname \"Invalid-Hostname-!\" is not a valid DNS label",
},
{
name: "Validation - invalid subdomain",
podName: "test-pod",
podSubdomain: "invalid_subdomain",
expectError: true,
errorContains: "pod Subdomain \"invalid_subdomain\" is not a valid DNS label",
},
{
name: "Validation - too long hostname",
podName: "test-pod",
podHostname: strings.Repeat("a", 64),
expectError: true,
errorContains: "must be no more than 63 characters",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HostnameOverride, tc.featureGateEnabled)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: tc.podName,
Namespace: "default",
},
Spec: v1.PodSpec{
Hostname: tc.podHostname,
Subdomain: tc.podSubdomain,
HostnameOverride: tc.podHostnameOverride,
},
}
hostname, domain, err := kubelet.GeneratePodHostNameAndDomain(pod)
if tc.expectError {
if err == nil {
t.Errorf("expected an error but got none")
return
}
if tc.errorContains != "" && !strings.Contains(err.Error(), tc.errorContains) {
t.Errorf("expected error to contain %q, but got %q", tc.errorContains, err.Error())
}
return
}
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if hostname != tc.expectedHostname {
t.Errorf("expected hostname %q, but got %q", tc.expectedHostname, hostname)
}
if domain != tc.expectedDomain {
t.Errorf("expected domain %q, but got %q", tc.expectedDomain, domain)
}
})
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/api/v1/resource"
kubefeatures "k8s.io/kubernetes/pkg/features"
)
// defaultPodLimitsForDownwardAPI copies the input pod, and optional container,
// and applies default resource limits. It returns a copy of the input pod,
// and a copy of the input container (if specified), with default limits
// applied.
// If a container has no limits specified, it defaults to the pod-level limits.
// If neither container-level nor pod-level resource limits are specified, it
// defaults to the node's allocatable resources.
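// A minimal usage sketch (assumed values; illustrative only, not a code path
// in this file):
//
//	outPod, outCtr, err := kl.defaultPodLimitsForDownwardAPI(pod, &pod.Spec.Containers[0])
//	// with node allocatable cpu=2 and no cpu limit set on the container or pod,
//	// outCtr.Resources.Limits[corev1.ResourceCPU] is "2", which is what a
//	// Downward API resourceFieldRef for limits.cpu then reports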
func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *corev1.Pod, container *corev1.Container) (*corev1.Pod, *corev1.Container, error) {
if pod == nil {
return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
}
node, err := kl.getNodeAnyWay()
if err != nil {
return nil, nil, fmt.Errorf("failed to find node object, expected a node")
}
allocatable := node.Status.Allocatable
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelLimitsSet(pod) {
allocatable = allocatable.DeepCopy()
// Resources supported by the Downward API
for _, resource := range []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage} {
// Skip resources not supported by Pod Level Resources
if !resourcehelper.IsSupportedPodLevelResource(resource) {
continue
}
if val, exists := pod.Spec.Resources.Limits[resource]; exists && !val.IsZero() {
if _, exists := allocatable[resource]; exists {
allocatable[resource] = val.DeepCopy()
}
}
}
}
klog.InfoS("Allocatable", "allocatable", allocatable)
outputPod := pod.DeepCopy()
for idx := range outputPod.Spec.Containers {
resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
}
var outputContainer *corev1.Container
if container != nil {
outputContainer = container.DeepCopy()
resource.MergeContainerResourceLimits(outputContainer, allocatable)
}
return outputPod, outputContainer, nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"reflect"
"regexp"
"regexp/syntax"
"runtime"
"strconv"
"strings"
"time"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
const (
dateLayout = "2006-1-2 15:4:5"
maxTailLines = 100000
maxServiceLength = 256
maxServices = 4
nodeLogDir = "/var/log/"
)
var (
journal = journalServer{}
// The set of known safe characters to pass to journalctl / GetWinEvent flags - only add to this list if the
// character cannot be used to create invalid sequences. This is intended as a broad defense against malformed
// input that could cause an escape.
reServiceNameUnsafeCharacters = regexp.MustCompile(`[^a-zA-Z\-_.:0-9@]+`)
)
// journalServer streams text output from the OS specific service logger for
// the client to view. It runs with the privileges of the calling process
// (the kubelet) and should only be invocable by a root user.
type journalServer struct{}
// ServeHTTP translates HTTP query parameters into arguments to be passed
// to journalctl on the current system. It supports content-encoding of
// gzip to reduce total content size.
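// An illustrative client invocation (node name assumed):
//
//	kubectl get --raw "/api/v1/nodes/$NODE/proxy/logs?query=kubelet&tailLines=100"
//
// The response is plain text, and is gzip-encoded when the client sends
// Accept-Encoding: gzip.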
func (journalServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
var out io.Writer = w
nlq, errs := newNodeLogQuery(req.URL.Query())
if len(errs) > 0 {
http.Error(w, errs.ToAggregate().Error(), http.StatusBadRequest)
return
}
// TODO: Also set a response header that indicates how the request's query was resolved,
// e.g. "kube-log-source: journal://foobar?arg1=value" or "kube-log-source: file:///var/log/foobar.log"
w.Header().Set("Content-Type", "text/plain;charset=UTF-8")
if req.Header.Get("Accept-Encoding") == "gzip" {
w.Header().Set("Content-Encoding", "gzip")
gz, err := gzip.NewWriterLevel(out, gzip.BestSpeed)
if err != nil {
fmt.Fprintf(w, "\nfailed to get gzip writer: %v\n", err)
return
}
defer gz.Close()
out = gz
}
nlq.Copy(out)
}
// nodeLogQuery encapsulates the log query request
type nodeLogQuery struct {
// Services are the list of services to be queried
Services []string
// Files are the list of log files to be queried
Files []string
options
}
// options encapsulates the query options for services
type options struct {
// SinceTime is an RFC3339 timestamp from which to show logs.
SinceTime *time.Time
// UntilTime is an RFC3339 timestamp until which to show logs.
UntilTime *time.Time
// TailLines is used to retrieve the specified number of lines (not more than 100k) from the end of the log.
// Support for this is implementation specific and only available for service logs.
TailLines *int
// Boot shows messages from a specific boot. Allowed values are less than 1. Passing an invalid boot offset will fail
// retrieving logs and return an error. Support for this is implementation specific.
Boot *int
// Pattern filters log entries by the provided regex pattern. On Linux nodes, this pattern will be read as a
// PCRE2 regex, on Windows nodes it will be read as a PowerShell regex. Support for this is implementation specific.
Pattern string
}
// newNodeLogQuery parses query values and converts all known options into nodeLogQuery
func newNodeLogQuery(query url.Values) (*nodeLogQuery, field.ErrorList) {
allErrs := field.ErrorList{}
var nlq nodeLogQuery
var err error
queries, ok := query["query"]
if len(queries) > 0 {
for _, q := range queries {
// The presence of / or \ is a hint that the query is for a log file. If the query is for foo.log without a
// slash prefix, the heuristics will still return the file contents.
if strings.ContainsAny(q, `/\`) {
nlq.Files = append(nlq.Files, q)
} else if strings.TrimSpace(q) != "" { // Prevent queries with just spaces
nlq.Services = append(nlq.Services, q)
}
}
}
// Prevent specifying an empty or blank space query.
// Example: kubectl get --raw /api/v1/nodes/$node/proxy/logs?query=" "
if ok && (len(nlq.Files) == 0 && len(nlq.Services) == 0) {
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), queries, "may not be empty"))
}
var sinceTime time.Time
sinceTimeValue := query.Get("sinceTime")
if len(sinceTimeValue) > 0 {
sinceTime, err = time.Parse(time.RFC3339, sinceTimeValue)
if err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("sinceTime"), sinceTimeValue, "invalid time format"))
} else {
nlq.SinceTime = &sinceTime
}
}
var untilTime time.Time
untilTimeValue := query.Get("untilTime")
if len(untilTimeValue) > 0 {
untilTime, err = time.Parse(time.RFC3339, untilTimeValue)
if err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("untilTime"), untilTimeValue, "invalid time format"))
} else {
nlq.UntilTime = &untilTime
}
}
var boot int
bootValue := query.Get("boot")
if len(bootValue) > 0 {
boot, err = strconv.Atoi(bootValue)
if err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("boot"), bootValue, err.Error()))
} else {
nlq.Boot = &boot
}
}
var tailLines int
tailLinesValue := query.Get("tailLines")
if len(tailLinesValue) > 0 {
tailLines, err = strconv.Atoi(tailLinesValue)
if err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), tailLinesValue, err.Error()))
} else {
nlq.TailLines = &tailLines
}
}
pattern := query.Get("pattern")
if len(pattern) > 0 {
nlq.Pattern = pattern
}
if len(allErrs) > 0 {
return nil, allErrs
}
if reflect.DeepEqual(nlq, nodeLogQuery{}) {
return nil, allErrs
}
return &nlq, allErrs
}
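// newNodeLogQueryExample is an illustrative sketch (not called anywhere in this
// package) showing how URL query parameters map onto a nodeLogQuery.
func newNodeLogQueryExample() {
q := url.Values{}
q.Set("query", "kubelet") // no '/' or '\', so treated as a service, not a file
q.Set("tailLines", "100")
nlq, errs := newNodeLogQuery(q)
if len(errs) > 0 || nlq == nil {
return
}
// nlq.Services == []string{"kubelet"}, *nlq.TailLines == 100
fmt.Println(nlq.Services, *nlq.TailLines)
}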
func validateServices(services []string) field.ErrorList {
allErrs := field.ErrorList{}
for _, s := range services {
if err := safeServiceName(s); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), s, err.Error()))
}
}
if len(services) > maxServices {
allErrs = append(allErrs, field.TooMany(field.NewPath("query"), len(services), maxServices))
}
return allErrs
}
func (n *nodeLogQuery) validate() field.ErrorList {
allErrs := validateServices(n.Services)
switch {
case len(n.Files) == 0 && len(n.Services) == 0:
allErrs = append(allErrs, field.Required(field.NewPath("query"), "cannot be empty with options"))
case len(n.Files) > 0 && len(n.Services) > 0:
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), fmt.Sprintf("%v, %v", n.Files, n.Services),
"cannot specify a file and service"))
case len(n.Files) > 1:
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, "cannot specify more than one file"))
case len(n.Files) == 1 && n.options != (options{}):
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, "cannot specify file with options"))
case len(n.Files) == 1:
if root, err := os.OpenRoot(nodeLogDir); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, err.Error()))
} else {
// root.Close() never returns errors
defer func() { _ = root.Close() }()
if _, err := root.Stat(n.Files[0]); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("query"), n.Files, err.Error()))
}
}
}
if n.SinceTime != nil && n.UntilTime != nil && (n.SinceTime.After(*n.UntilTime)) {
allErrs = append(allErrs, field.Invalid(field.NewPath("untilTime"), n.UntilTime, "must be after `sinceTime`"))
}
if n.Boot != nil && runtime.GOOS == "windows" {
allErrs = append(allErrs, field.Invalid(field.NewPath("boot"), *n.Boot, "not supported on Windows"))
}
if n.Boot != nil && *n.Boot > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("boot"), *n.Boot, "must be less than 1"))
}
if n.TailLines != nil {
if err := utilvalidation.IsInRange(*n.TailLines, 0, maxTailLines); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *n.TailLines, err[0]))
}
}
if _, err := syntax.Parse(n.Pattern, syntax.Perl); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("pattern"), n.Pattern, err.Error()))
}
return allErrs
}
// Copy streams the contents of the OS specific logging command executed with the current args to the provided
// writer. If an error occurs, a line describing it is written to the output.
func (n *nodeLogQuery) Copy(w io.Writer) {
// set a single deadline bounding the total time spent collecting logs
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
defer cancel()
boot := 0
if n.Boot != nil {
boot = *n.Boot
}
n.copyForBoot(ctx, w, boot)
}
// copyForBoot invokes the OS specific logging command with the provided args
func (n *nodeLogQuery) copyForBoot(ctx context.Context, w io.Writer, previousBoot int) {
if ctx.Err() != nil {
return
}
nativeLoggers, fileLoggers := n.splitNativeVsFileLoggers(ctx)
if len(nativeLoggers) > 0 {
n.copyServiceLogs(ctx, w, nativeLoggers, previousBoot)
}
if len(fileLoggers) > 0 && n.options != (options{}) {
fmt.Fprintf(w, "\noptions present and query resolved to log files for %v\ntry without specifying options\n",
fileLoggers)
return
}
if len(fileLoggers) > 0 {
copyFileLogs(ctx, w, fileLoggers)
}
}
// splitNativeVsFileLoggers checks if each service logs to native OS logs or to a file and returns the services
// that log natively and those that may log to a file
func (n *nodeLogQuery) splitNativeVsFileLoggers(ctx context.Context) ([]string, []string) {
var nativeLoggers []string
var fileLoggers []string
for _, service := range n.Services {
// Check the journalctl output to determine whether the service is using journald. This is not needed in the
// Get-WinEvent case, as that command returns an error if a service is not logging to the Application provider.
if checkForNativeLogger(ctx, service) {
nativeLoggers = append(nativeLoggers, service)
} else {
fileLoggers = append(fileLoggers, service)
}
}
return nativeLoggers, fileLoggers
}
// copyServiceLogs invokes journalctl or Get-WinEvent with the provided args. Note that
// services are explicitly passed here to account for the heuristics.
func (n *nodeLogQuery) copyServiceLogs(ctx context.Context, w io.Writer, services []string, previousBoot int) {
cmdStr, args, cmdEnv, err := getLoggingCmd(n, services)
if err != nil {
fmt.Fprintf(w, "\nfailed to get logging cmd: %v\n", err)
return
}
cmd := exec.CommandContext(ctx, cmdStr, args...)
cmd.Stdout = w
cmd.Stderr = w
cmd.Env = append(os.Environ(), cmdEnv...)
if err := cmd.Run(); err != nil {
if _, ok := err.(*exec.ExitError); ok {
return
}
if previousBoot == 0 {
fmt.Fprintf(w, "\nerror: journal output not available\n")
}
}
}
// copyFileLogs loops over all the services and attempts to collect the file logs of each service
func copyFileLogs(ctx context.Context, w io.Writer, services []string) {
if ctx.Err() != nil {
fmt.Fprintf(w, "\ncontext error: %v\n", ctx.Err())
return
}
for _, service := range services {
heuristicsCopyFileLogs(ctx, w, nodeLogDir, service)
}
}
// heuristicsCopyFileLogs attempts to collect logs from
// /var/log/service,
// /var/log/service.log, or
// /var/log/service/service.log,
// in that order, stopping on the first success.
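// For example (illustrative), service "kubelet" is looked up as /var/log/kubelet,
// then /var/log/kubelet.log, then /var/log/kubelet/kubelet.log.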
func heuristicsCopyFileLogs(ctx context.Context, w io.Writer, logDir, service string) {
logFileNames := [3]string{
service,
fmt.Sprintf("%s.log", service),
fmt.Sprintf("%s/%s.log", service, service),
}
var err error
for _, logFileName := range logFileNames {
err = heuristicsCopyFileLog(ctx, w, logDir, logFileName)
if err == nil {
break
} else if errors.Is(err, os.ErrNotExist) {
continue
} else {
break
}
}
if err != nil {
// If the last error was file not found it implies that no log file was found for the service
if errors.Is(err, os.ErrNotExist) {
fmt.Fprintf(w, "\nlog not found for %s\n", service)
return
}
fmt.Fprintf(w, "\nerror getting log for %s: %v\n", service, err)
}
}
// readerCtx is the interface that wraps io.Reader with a context
type readerCtx struct {
ctx context.Context
io.Reader
}
func (r *readerCtx) Read(p []byte) (n int, err error) {
if err := r.ctx.Err(); err != nil {
return 0, err
}
return r.Reader.Read(p)
}
// newReaderCtx wraps r in a context-aware io.Reader so that a long copy aborts
// promptly once the deadline set in Copy expires
func newReaderCtx(ctx context.Context, r io.Reader) io.Reader {
return &readerCtx{
ctx: ctx,
Reader: r,
}
}
// heuristicsCopyFileLog returns the contents of the given logFile
func heuristicsCopyFileLog(ctx context.Context, w io.Writer, logDir, logFileName string) error {
f, err := os.OpenInRoot(logDir, logFileName)
if err != nil {
return err
}
// Errors from closing a file opened read-only can be safely ignored, since doing so cannot cause data loss
defer func() { _ = f.Close() }()
fInfo, err := f.Stat()
if err != nil {
return err
}
// This is to account for the heuristics where logs for service foo
// could be in /var/log/foo/
if fInfo.IsDir() {
return os.ErrNotExist
}
if _, err := io.Copy(w, newReaderCtx(ctx, f)); err != nil {
return err
}
return nil
}
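// safeServiceName rejects service names that are too long or that contain
// characters outside the allow-list in reServiceNameUnsafeCharacters. For
// example (illustrative), "kubelet" and "crio.service" pass, while
// "kubelet; rm -rf /" fails because ';', ' ' and '/' are not allowed.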
func safeServiceName(s string) error {
// Max length of a service name is 256 across supported OSes
if len(s) > maxServiceLength {
return fmt.Errorf("length must be less than 100")
}
if reServiceNameUnsafeCharacters.MatchString(s) {
return fmt.Errorf("input contains unsupported characters")
}
return nil
}
//go:build linux
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"fmt"
"os/exec"
"strings"
)
// getLoggingCmd returns the journalctl cmd and arguments for the given nodeLogQuery and boot. Note that
// services are explicitly passed here to account for the heuristics.
// The return values are:
// - cmd: the command to be executed
// - args: arguments to the command
// - cmdEnv: environment variables to set when the command is executed
func getLoggingCmd(n *nodeLogQuery, services []string) (cmd string, args []string, cmdEnv []string, err error) {
args = []string{
"--utc",
"--no-pager",
"--output=short-precise",
}
if n.SinceTime != nil {
args = append(args, fmt.Sprintf("--since=%s", n.SinceTime.Format(dateLayout)))
}
if n.UntilTime != nil {
args = append(args, fmt.Sprintf("--until=%s", n.UntilTime.Format(dateLayout)))
}
if n.TailLines != nil {
args = append(args, "--pager-end", fmt.Sprintf("--lines=%d", *n.TailLines))
}
for _, service := range services {
if len(service) > 0 {
args = append(args, "--unit="+service)
}
}
if len(n.Pattern) > 0 {
args = append(args, "--grep="+n.Pattern)
}
if n.Boot != nil {
args = append(args, "--boot", fmt.Sprintf("%d", *n.Boot))
}
return "journalctl", args, nil, nil
}
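// getLoggingCmdExample is an illustrative sketch (assumed values; not invoked
// by the kubelet) showing the journalctl invocation built for a query that
// tails the last 100 lines of the kubelet unit.
func getLoggingCmdExample() {
tail := 100
n := &nodeLogQuery{options: options{TailLines: &tail}}
cmd, args, _, err := getLoggingCmd(n, []string{"kubelet"})
if err != nil {
return
}
// Prints: journalctl --utc --no-pager --output=short-precise --pager-end --lines=100 --unit=kubelet
fmt.Println(cmd, strings.Join(args, " "))
}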
// checkForNativeLogger checks journalctl output for a service
func checkForNativeLogger(ctx context.Context, service string) bool {
// This will return all the journald units
cmd := exec.CommandContext(ctx, "journalctl", "--field", "_SYSTEMD_UNIT")
output, err := cmd.CombinedOutput()
if err != nil {
// Returning false to allow checking if the service is logging to a file
return false
}
// journalctl won't return an error if we try to fetch logs for a non-existent service,
// hence we search for it in the list of services known to journalctl
return strings.Contains(string(output), service+".service")
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
oteltrace "go.opentelemetry.io/otel/trace"
noopoteltrace "go.opentelemetry.io/otel/trace/noop"
"k8s.io/component-base/metrics/legacyregistry"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
core "k8s.io/client-go/testing"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/metrics/testutil"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
remote "k8s.io/cri-client/pkg"
fakeremote "k8s.io/cri-client/pkg/fake"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
"k8s.io/kubernetes/pkg/kubelet/allocation/state"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown"
"k8s.io/kubernetes/pkg/kubelet/pleg"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
"k8s.io/kubernetes/pkg/kubelet/podcertificate"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/userns"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/hostpath"
volumesecret "k8s.io/kubernetes/pkg/volume/secret"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/ptr"
)
const (
testKubeletHostname = "127.0.0.1"
testKubeletHostIP = "127.0.0.1"
testKubeletHostIPv6 = "::1"
// TODO(harry) any global place for these two?
// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
minImgSize int64 = 23 * 1024 * 1024
maxImgSize int64 = 1000 * 1024 * 1024
)
// fakeImageGCManager is a fake image gc manager for testing. It returns the image
// list from the fake runtime directly instead of caching it.
type fakeImageGCManager struct {
fakeImageService kubecontainer.ImageService
images.ImageGCManager
}
func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
return f.fakeImageService.ListImages(context.Background())
}
type TestKubelet struct {
kubelet *Kubelet
fakeRuntime *containertest.FakeRuntime
fakeContainerManager *cm.FakeContainerManager
fakeKubeClient *fake.Clientset
fakeMirrorClient *podtest.FakeMirrorClient
fakeClock *testingclock.FakeClock
mounter mount.Interface
volumePlugin *volumetest.FakeVolumePlugin
}
func (tk *TestKubelet) Cleanup() {
if tk.kubelet != nil {
os.RemoveAll(tk.kubelet.rootDirectory)
tk.kubelet = nil
}
}
// newTestKubelet returns a test kubelet with two images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
Size: 123,
},
{
ID: "efg",
RepoTags: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
Size: 456,
},
}
return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
}
func newTestKubeletExcludeAdmitHandlers(t *testing.T, controllerAttachDetachEnabled, sourcesReady bool) *TestKubelet {
imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
Size: 123,
},
{
ID: "efg",
RepoTags: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
Size: 456,
},
}
return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, true /*excludePodAdmitHandlers*/, sourcesReady)
}
func newTestKubeletWithImageList(
t *testing.T,
imageList []kubecontainer.Image,
controllerAttachDetachEnabled bool,
initFakeVolumePlugin bool,
localStorageCapacityIsolation bool,
excludeAdmitHandlers bool,
enableResizing bool,
) *TestKubelet {
logger, _ := ktesting.NewTestContext(t)
fakeRuntime := &containertest.FakeRuntime{
ImageList: imageList,
// Set ready conditions by default.
RuntimeStatus: &kubecontainer.RuntimeStatus{
Conditions: []kubecontainer.RuntimeCondition{
{Type: "RuntimeReady", Status: true},
{Type: "NetworkReady", Status: true},
},
},
VersionInfo: "1.5.0",
RuntimeType: "test",
T: t,
}
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &fake.Clientset{}
kubelet := &Kubelet{}
kubelet.recorder = fakeRecorder
kubelet.kubeClient = fakeKubeClient
kubelet.heartbeatClient = fakeKubeClient
kubelet.os = &containertest.FakeOS{}
kubelet.mounter = mount.NewFakeMounter(nil)
kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
kubelet.subpather = &subpath.FakeSubpath{}
kubelet.hostname = testKubeletHostname
kubelet.nodeName = types.NodeName(testKubeletHostname)
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
kubelet.runtimeState.setNetworkState(nil)
kubelet.rootDirectory = t.TempDir()
kubelet.podLogsDirectory = t.TempDir()
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return true })
kubelet.serviceLister = testServiceLister{}
kubelet.serviceHasSynced = func() bool { return true }
kubelet.nodeHasSynced = func() bool { return true }
kubelet.nodeLister = testNodeLister{
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: string(kubelet.nodeName),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "Ready",
Message: "Node ready",
},
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: testKubeletHostIP,
},
{
Type: v1.NodeInternalIP,
Address: testKubeletHostIPv6,
},
},
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake/fake-device",
DevicePath: "fake/path",
},
},
},
},
},
}
kubelet.recorder = fakeRecorder
if err := kubelet.setupDataDirs(); err != nil {
t.Fatalf("can't initialize kubelet data dirs: %v", err)
}
kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}
kubelet.cadvisor = &cadvisortest.Fake{}
machineInfo, _ := kubelet.cadvisor.MachineInfo()
kubelet.setCachedMachineInfo(machineInfo)
kubelet.tracer = noopoteltrace.NewTracerProvider().Tracer("")
fakeMirrorClient := podtest.NewFakeMirrorClient()
secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
kubelet.secretManager = secretManager
configMapManager := configmap.NewSimpleConfigMapManager(kubelet.kubeClient)
kubelet.configMapManager = configMapManager
kubelet.mirrorPodClient = fakeMirrorClient
kubelet.podManager = kubepod.NewBasicPodManager()
podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker()
kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker)
kubelet.nodeStartupLatencyTracker = kubeletutil.NewNodeStartupLatencyTracker()
kubelet.podCertificateManager = &podcertificate.NoOpManager{}
kubelet.containerRuntime = fakeRuntime
kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
kubelet.reasonCache = NewReasonCache()
kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: kubelet.SyncPod,
cache: kubelet.podCache,
t: t,
}
kubelet.probeManager = probetest.FakeManager{}
kubelet.livenessManager = proberesults.NewManager()
kubelet.readinessManager = proberesults.NewManager()
kubelet.startupManager = proberesults.NewManager()
fakeContainerManager := cm.NewFakeContainerManager()
kubelet.containerManager = fakeContainerManager
fakeNodeRef := &v1.ObjectReference{
Kind: "Node",
Name: testKubeletHostname,
UID: types.UID(testKubeletHostname),
Namespace: "",
}
kubelet.allocationManager = allocation.NewInMemoryManager(
kubelet.containerManager.GetNodeConfig(),
kubelet.containerManager.GetNodeAllocatableAbsolute(),
kubelet.statusManager,
func(pod *v1.Pod) { kubelet.HandlePodSyncs([]*v1.Pod{pod}) },
kubelet.GetActivePods,
kubelet.podManager.GetPodByUID,
config.NewSourcesReady(func(_ sets.Set[string]) bool { return enableResizing }),
)
kubelet.allocationManager.SetContainerRuntime(fakeRuntime)
volumeStatsAggPeriod := time.Second * 10
kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.recorder)
fakeHostStatsProvider := stats.NewFakeHostStatsProvider(&containertest.FakeOS{})
kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
kubelet.cadvisor,
kubelet.resourceAnalyzer,
kubelet.podManager,
fakeRuntime,
kubelet.statusManager,
fakeHostStatsProvider,
kubelet.containerManager,
)
fakeImageGCPolicy := images.ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, nil, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, noopoteltrace.NewTracerProvider())
assert.NoError(t, err)
kubelet.imageManager = &fakeImageGCManager{
fakeImageService: fakeRuntime,
ImageGCManager: imageGCManager,
}
kubelet.containerLogManager = logs.NewStubContainerLogManager()
containerGCPolicy := kubecontainer.GCPolicy{
MinAge: time.Duration(0),
MaxPerPodContainer: 1,
MaxContainers: -1,
}
containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
assert.NoError(t, err)
kubelet.containerGC = containerGC
fakeClock := testingclock.NewFakeClock(time.Now())
kubelet.crashLoopBackOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.crashLoopBackOff.Clock = fakeClock
kubelet.resyncInterval = 10 * time.Second
kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
// Relist period does not affect the tests.
kubelet.pleg = pleg.NewGenericPLEG(logger, fakeRuntime, make(chan *pleg.PodLifecycleEvent, 100), &pleg.RelistDuration{RelistPeriod: time.Hour, RelistThreshold: genericPlegRelistThreshold}, kubelet.podCache, clock.RealClock{})
kubelet.clock = fakeClock
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string(kubelet.nodeName),
UID: types.UID(kubelet.nodeName),
Namespace: "",
}
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{},
killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock, kubelet.supportLocalStorageCapacityIsolation())
kubelet.evictionManager = evictionManager
handlers := []lifecycle.PodAdmitHandler{}
handlers = append(handlers, evictionAdmitHandler)
// setup shutdown manager
shutdownManager := nodeshutdown.NewManager(&nodeshutdown.Config{
Logger: logger,
Recorder: fakeRecorder,
NodeRef: nodeRef,
GetPodsFunc: kubelet.podManager.GetPods,
KillPodFunc: killPodNow(kubelet.podWorkers, fakeRecorder),
SyncNodeStatusFunc: func() {},
ShutdownGracePeriodRequested: 0,
ShutdownGracePeriodCriticalPods: 0,
})
kubelet.shutdownManager = shutdownManager
kubelet.usernsManager, err = userns.MakeUserNsManager(logger, kubelet)
if err != nil {
t.Fatalf("Failed to create UserNsManager: %v", err)
}
handlers = append(handlers, shutdownManager)
// Add this as a cleanup predicate pod admitter
handlers = append(handlers, lifecycle.NewPredicateAdmitHandler(kubelet.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), kubelet.containerManager.UpdatePluginResources))
if !excludeAdmitHandlers {
kubelet.allocationManager.AddPodAdmitHandlers(handlers)
}
allPlugins := []volume.VolumePlugin{}
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
if initFakeVolumePlugin {
allPlugins = append(allPlugins, plug)
} else {
allPlugins = append(allPlugins, volumesecret.ProbeVolumePlugins()...)
}
var prober volume.DynamicPluginProber // TODO (#51147) inject mock
kubelet.volumePluginMgr, err =
NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), &clustertrustbundle.NoopManager{}, allPlugins, prober)
require.NoError(t, err, "Failed to initialize VolumePluginMgr")
kubelet.volumeManager = kubeletvolume.NewVolumeManager(
controllerAttachDetachEnabled,
kubelet.nodeName,
kubelet.podManager,
kubelet.podWorkers,
fakeKubeClient,
kubelet.volumePluginMgr,
kubelet.mounter,
kubelet.hostutil,
kubelet.getPodsDir(),
kubelet.recorder,
volumetest.NewBlockVolumePathHandler())
kubelet.pluginManager = pluginmanager.NewPluginManager(
kubelet.getPluginsRegistrationDir(), /* sockDir */
kubelet.recorder,
)
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
// enable active deadline handler
activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
require.NoError(t, err, "Can't initialize active deadline handler")
kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
kubelet.AddPodSyncHandler(activeDeadlineHandler)
kubelet.kubeletConfiguration.LocalStorageCapacityIsolation = localStorageCapacityIsolation
return &TestKubelet{kubelet, fakeRuntime, fakeContainerManager, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}
func newTestPods(count int) []*v1.Pod {
pods := make([]*v1.Pod, count)
for i := 0; i < count; i++ {
pods[i] = &v1.Pod{
Spec: v1.PodSpec{
HostNetwork: true,
},
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(strconv.Itoa(10000 + i)),
Name: fmt.Sprintf("pod%d", i),
},
}
}
return pods
}
func TestSyncLoopAbort(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.runtimeState.setRuntimeSync(time.Now())
// The syncLoop waits on time.After(resyncInterval), so set it very large to
// avoid racing with the channel close below
kubelet.resyncInterval = time.Second * 30
ch := make(chan kubetypes.PodUpdate)
close(ch)
// sanity check (also prevent this test from hanging in the next step)
ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")
// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
kubelet.syncLoop(ctx, ch, kubelet)
}
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*v1.Pod{
podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
}),
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(pods)
fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}
func TestHandlePodCleanupsPerQOS(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
pod := &kubecontainer.Pod{
ID: "12345678",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
}
fakeRuntime := testKubelet.fakeRuntime
fakeContainerManager := testKubelet.fakeContainerManager
fakeContainerManager.PodContainerManager.AddPodFromCgroups(pod) // add pod to mock cgroup
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: pod},
}
kubelet := testKubelet.kubelet
kubelet.cgroupsPerQOS = true // enable cgroupsPerQOS to turn on the cgroups cleanup
// HandlePodCleanups gets called every 2 seconds within the Kubelet's
// housekeeping routine. This test registers the pod, removes the unwanted pod, then calls into
// HandlePodCleanups a few more times. We expect at least one Destroy() event. podKiller runs
// within a goroutine, so a two second delay should be enough time to
// mark the pod as killed (within this test case).
kubelet.HandlePodCleanups(ctx)
// assert that unwanted pods were killed
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
t.Fatalf("expected %v to be deleted, got %v", expected, actual)
}
fakeRuntime.AssertKilledPods([]string(nil))
// simulate Runtime.KillPod
fakeRuntime.PodList = nil
kubelet.HandlePodCleanups(ctx)
kubelet.HandlePodCleanups(ctx)
kubelet.HandlePodCleanups(ctx)
destroyCount := 0
err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
fakeContainerManager.PodContainerManager.Lock()
defer fakeContainerManager.PodContainerManager.Unlock()
destroyCount = 0
for _, functionName := range fakeContainerManager.PodContainerManager.CalledFunctions {
if functionName == "Destroy" {
destroyCount = destroyCount + 1
}
}
return destroyCount >= 1, nil
})
assert.NoError(t, err, "wait should not return error")
// housekeeping can get called multiple times. The cgroup Destroy() is
// done within a goroutine and can get called multiple times, so the
// Destroy() count is not deterministic.
// https://github.com/kubernetes/kubernetes/blob/29fdbb065b5e0d195299eb2d260b975cbc554673/pkg/kubelet/kubelet_pods.go#L2006
assert.GreaterOrEqual(t, destroyCount, 1, "Expect 1 or more destroys")
}
func TestDispatchWorkOfCompletedPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
var got bool
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
got = true
return false, nil
},
cache: kubelet.podCache,
t: t,
}
now := metav1.NewTime(time.Now())
pods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "1",
Name: "completed-pod1",
Namespace: "ns",
Annotations: make(map[string]string),
},
Status: v1.PodStatus{
Phase: v1.PodFailed,
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "2",
Name: "completed-pod2",
Namespace: "ns",
Annotations: make(map[string]string),
},
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "3",
Name: "completed-pod3",
Namespace: "ns",
Annotations: make(map[string]string),
DeletionTimestamp: &now,
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
},
},
}
for _, pod := range pods {
kubelet.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodSync,
StartTime: time.Now(),
})
if !got {
t.Errorf("Should not skip completed pod %q", pod.Name)
}
got = false
}
}
func TestDispatchWorkOfActivePod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
var got bool
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
got = true
return false, nil
},
cache: kubelet.podCache,
t: t,
}
pods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "1",
Name: "active-pod1",
Namespace: "ns",
Annotations: make(map[string]string),
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "2",
Name: "active-pod2",
Namespace: "ns",
Annotations: make(map[string]string),
},
Status: v1.PodStatus{
Phase: v1.PodFailed,
ContainerStatuses: []v1.ContainerStatus{
{
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
},
},
},
},
}
for _, pod := range pods {
kubelet.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodSync,
StartTime: time.Now(),
})
if !got {
t.Errorf("Should not skip active pod %q", pod.Name)
}
got = false
}
}
func TestHandlePodCleanups(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
pod := &kubecontainer.Pod{
ID: "12345678",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
}
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: pod},
}
kubelet := testKubelet.kubelet
kubelet.HandlePodCleanups(ctx)
// assert that unwanted pods were queued to kill
if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
t.Fatalf("expected %v to be deleted, got %v", expected, actual)
}
fakeRuntime.AssertKilledPods([]string(nil))
}
func TestVolumeAttachLimitExceededCleanup(t *testing.T) {
const podCount = 500
tk := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
defer tk.Cleanup()
kl := tk.kubelet
kl.nodeLister = testNodeLister{nodes: []*v1.Node{{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(1000, resource.DecimalSI),
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("20Gi"),
},
},
}}}
kl.workQueue = queue.NewBasicWorkQueue(clock.RealClock{})
kl.podWorkers = newPodWorkers(
kl, kl.recorder, kl.workQueue,
kl.resyncInterval, backOffPeriod,
kl.podCache, kl.allocationManager,
)
kl.volumeManager = kubeletvolume.NewFakeVolumeManager(nil, 0, nil, true /* volumeAttachLimitExceededError */)
pods, _ := newTestPodsWithResources(podCount)
kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
ctx := context.Background()
// all pods must reach a terminal, Failed state due to VolumeAttachmentLimitExceeded.
if err := wait.PollUntilContextTimeout(
ctx, 200*time.Millisecond, 30*time.Second, true,
func(ctx context.Context) (bool, error) {
for _, p := range pods {
st, ok := kl.statusManager.GetPodStatus(p.UID)
if !ok || st.Phase != v1.PodFailed || st.Reason != "VolumeAttachmentLimitExceeded" {
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("pods did not reach a terminal, Failed state: %v", err)
}
// validate that SyncTerminatedPod completed successfully for each pod.
if err := wait.PollUntilContextTimeout(
ctx, 200*time.Millisecond, 30*time.Second, true,
func(ctx context.Context) (bool, error) {
for _, p := range pods {
if !kl.podWorkers.ShouldPodBeFinished(p.UID) {
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("pod workers did not finish cleanup: %v", err)
}
// validate container-level resource allocations are released.
for _, p := range pods {
cn := p.Spec.Containers[0].Name
if _, still := kl.allocationManager.GetContainerResourceAllocation(p.UID, cn); still {
t.Fatalf("allocation for pod %q container %q not released", p.Name, cn)
}
}
}
func newTestPodsWithResources(count int) (pods []*v1.Pod, containerNames []string) {
pods = make([]*v1.Pod, count)
containerNames = make([]string, count)
for i := 0; i < count; i++ {
containerName := fmt.Sprintf("container%d", i)
containerNames[i] = containerName
pods[i] = &v1.Pod{
Spec: v1.PodSpec{
HostNetwork: true,
Containers: []v1.Container{{
Name: containerName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1m"),
v1.ResourceMemory: resource.MustParse("1Mi"),
},
},
}},
},
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(strconv.Itoa(10000 + i)),
Name: fmt.Sprintf("pod%d", i),
},
}
}
return pods, containerNames
}
func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
ready := false
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakePod := &kubecontainer.Pod{
ID: "1",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
}
pods := []*v1.Pod{
podWithUIDNameNs("1", "foo", "new"),
}
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: fakePod},
}
kubelet := testKubelet.kubelet
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return ready })
kubelet.HandlePodRemoves(pods)
time.Sleep(2 * time.Second)
// Sources are not ready yet. Don't remove any pods.
if expect, actual := []types.UID(nil), kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
t.Fatalf("expected %v kills, got %v", expect, actual)
}
ready = true
kubelet.HandlePodRemoves(pods)
time.Sleep(2 * time.Second)
// Sources are ready. Remove unwanted pods.
if expect, actual := []types.UID{"1"}, kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
t.Fatalf("expected %v kills, got %v", expect, actual)
}
}
type testNodeLister struct {
nodes []*v1.Node
}
func (nl testNodeLister) Get(name string) (*v1.Node, error) {
for _, node := range nl.nodes {
if node.Name == name {
return node, nil
}
}
return nil, fmt.Errorf("Node with name: %s does not exist", name)
}
func (nl testNodeLister) List(_ labels.Selector) (ret []*v1.Node, err error) {
return nl.nodes, nil
}
func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
t.Helper()
status, found := kl.statusManager.GetPodStatus(pod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
require.Equal(t, phase, status.Phase)
}
// Tests that we handle port conflicts correctly by setting the failed status in status map.
func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = metav1.NewTime(time.Now())
pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
pods[0].UID: true,
pods[1].UID: true,
}
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle host name conflicts correctly by setting the failed status in status map.
func TestHandleHostNameConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
// default NodeName in test is 127.0.0.1
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
}
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle not matching labels selector correctly by setting the failed status in status map.
func TestHandleNodeSelector(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}
kl.nodeLister = testNodeLister{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
}
// The first pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle not matching labels selector correctly by setting the failed status in status map.
func TestHandleNodeSelectorBasedOnOS(t *testing.T) {
tests := []struct {
name string
nodeLabels map[string]string
podSelector map[string]string
podStatus v1.PodPhase
}{
{
name: "correct OS label, wrong pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: "dummyOS"},
podStatus: v1.PodFailed,
},
{
name: "correct OS label, correct pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: goruntime.GOOS},
podStatus: v1.PodPending,
},
{
// Expect no patching to happen, label B should be preserved and can be used for nodeAffinity.
name: "new node label, correct pod selector, admitted",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH, "key": "B"},
podSelector: map[string]string{"key": "B"},
podStatus: v1.PodPending,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: test.nodeLabels},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}
kl.nodeLister = testNodeLister{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
pod := podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: test.podSelector})
kl.HandlePodAdditions([]*v1.Pod{pod})
// Check pod status stored in the status map.
checkPodStatus(t, kl, pod, test.podStatus)
})
}
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
}}},
}
kl.nodeLister = testNodeLister{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
spec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
}}},
}
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = metav1.NewTime(time.Now())
pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
pods[0].UID: true,
pods[1].UID: true,
}
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that the result of the UpdatePluginResources admission interface is handled
// correctly by setting the corresponding status in the status map.
func TestHandlePluginResources(t *testing.T) {
testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, false /*enableResizing*/)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
emptyResource := v1.ResourceName("domain2.com/emptyResource")
missingResource := v1.ResourceName("domain2.com/missingResource")
failedResource := v1.ResourceName("domain2.com/failedResource")
resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
allowedPodQuantity := *resource.NewQuantity(int64(10), resource.DecimalSI)
nodes := []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
adjustedResource: resourceQuantity1,
emptyResource: resourceQuantity0,
v1.ResourcePods: allowedPodQuantity,
}}},
}
kl.nodeLister = testNodeLister{nodes: nodes}
updatePluginResourcesFunc := func(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
// Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
// A resource with invalid value (< 0) causes the function to return an error
// to emulate resource Allocation failure.
// Resources not contained in this map will have their node.allocatableResource
// quantity unchanged.
updateResourceMap := map[v1.ResourceName]resource.Quantity{
adjustedResource: resourceQuantity2,
emptyResource: resourceQuantity0,
failedResource: resourceQuantityInvalid,
}
pod := attrs.Pod
newAllocatableResource := node.Allocatable.Clone()
for _, container := range pod.Spec.Containers {
for resource := range container.Resources.Requests {
newQuantity, exist := updateResourceMap[resource]
if !exist {
continue
}
if newQuantity.Value() < 0 {
return fmt.Errorf("Allocation failed")
}
newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
}
}
node.Allocatable = newAllocatableResource
return nil
}
// Add updatePluginResourcesFunc to the admission handlers to test its behavior.
kl.allocationManager.AddPodAdmitHandlers(lifecycle.PodAdmitHandlers{lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc)})
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
// A pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
// adjusts node.allocatableResource for this resource to a sufficient value.
fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
adjustedResource: resourceQuantity2,
},
Requests: v1.ResourceList{
adjustedResource: resourceQuantity2,
},
}}},
}
// A pod requiring emptyResource (an extended resource with 0 allocatable) will
// not pass PredicateAdmit.
emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
emptyResource: resourceQuantity2,
},
Requests: v1.ResourceList{
emptyResource: resourceQuantity2,
},
}}},
}
// A pod requiring missingResource will pass PredicateAdmit.
//
// Extended resources missing in node status are ignored in PredicateAdmit.
// This is required to support extended resources that are not managed by
// device plugin, such as cluster-level resources.
missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
missingResource: resourceQuantity2,
},
Requests: v1.ResourceList{
missingResource: resourceQuantity2,
},
}}},
}
// A pod requiring failedResource will be rejected because allocation of that resource fails.
failedPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
failedResource: resourceQuantity1,
},
Requests: v1.ResourceList{
failedResource: resourceQuantity1,
},
}}},
}
fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)
kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})
// Check pod status stored in the status map.
checkPodStatus(t, kl, fittingPod, v1.PodPending)
checkPodStatus(t, kl, emptyPod, v1.PodFailed)
checkPodStatus(t, kl, missingPod, v1.PodPending)
checkPodStatus(t, kl, failedPod, v1.PodFailed)
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
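// TestPurgingObsoleteStatusMapEntries verifies that status map entries for pods
// removed from the pod manager are purged during pod cleanup.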
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pods := []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
}
podToTest := pods[1]
// Run once to populate the status map.
kl.HandlePodAdditions(pods)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
t.Fatalf("expected to have status cached for pod2")
}
// Sync with an empty pod list so that the entry in the status map is removed.
kl.podManager.SetPods([]*v1.Pod{})
kl.HandlePodCleanups(ctx)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
t.Fatalf("expected to not have status cached for pod2")
}
}
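// TestValidateContainerLogStatus verifies which container states permit reading the
// current and previous container logs; waiting containers and terminated containers
// without a usable container ID are rejected.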
func TestValidateContainerLogStatus(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
containerName := "x"
testCases := []struct {
statuses []v1.ContainerStatus
success bool // whether getting logs for the container should succeed.
pSuccess bool // whether getting logs for the previous container should succeed.
}{
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
success: true,
pSuccess: true,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Running: &v1.ContainerStateRunning{},
},
},
},
success: true,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
success: true,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
success: true,
pSuccess: true,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{},
},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
},
},
success: false,
pSuccess: false,
},
}
for i, tc := range testCases {
// Access the log of the most recent container
previous := false
podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses}
_, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
if !tc.success {
assert.Errorf(t, err, "[case %d] error", i)
} else {
assert.NoErrorf(t, err, "[case %d] error", i)
}
// Access the log of the previous, terminated container
previous = true
_, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
if !tc.pSuccess {
assert.Errorf(t, err, "[case %d] error", i)
} else {
assert.NoErrorf(t, err, "[case %d] error", i)
}
// Access the log of a container that's not in the pod
_, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
assert.Errorf(t, err, "[case %d] invalid container name should cause an error", i)
}
}
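// TestCreateMirrorPod verifies that syncing a static pod creates exactly one mirror
// pod, for both SyncPodCreate and SyncPodUpdate.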
func TestCreateMirrorPod(t *testing.T) {
tests := []struct {
name string
updateType kubetypes.SyncPodType
}{
{
name: "SyncPodCreate",
updateType: kubetypes.SyncPodCreate,
},
{
name: "SyncPodUpdate",
updateType: kubetypes.SyncPodUpdate,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := podWithUIDNameNs("12345678", "bar", "foo")
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
pods := []*v1.Pod{pod}
kl.podManager.SetPods(pods)
isTerminal, err := kl.SyncPod(context.Background(), tt.updateType, pod, nil, &kubecontainer.PodStatus{})
assert.NoError(t, err)
if isTerminal {
t.Fatalf("pod should not be terminal: %#v", pod)
}
podFullName := kubecontainer.GetPodFullName(pod)
assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
})
}
}
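// TestDeleteOutdatedMirrorPod verifies that a mirror pod whose spec no longer matches
// its static pod is deleted and recreated.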
func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
Containers: []v1.Container{
{Name: "1234", Image: "foo"},
},
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
// Mirror pod has an outdated spec.
mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
Containers: []v1.Container{
{Name: "1234", Image: "bar"},
},
})
mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
pods := []*v1.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
isTerminal, err := kl.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, &kubecontainer.PodStatus{})
assert.NoError(t, err)
if isTerminal {
t.Fatalf("pod should not be terminal: %#v", pod)
}
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 1 || deletes != 1 {
t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
}
}
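// TestDeleteOrphanedMirrorPods verifies that mirror pods without a corresponding
// static pod are deleted during cleanup, except for static pods that are still terminating.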
func TestDeleteOrphanedMirrorPods(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
orphanPods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345670",
Name: "pod3",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
}
kl.podManager.SetPods(orphanPods)
// a static pod that is terminating will not be deleted
kl.podWorkers.(*fakePodWorkers).terminatingStaticPods = map[string]bool{
kubecontainer.GetPodFullName(orphanPods[2]): true,
}
// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups(ctx)
assert.Empty(t, manager.GetPods(), "Expected no mirror pods")
for i, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
switch i {
case 2:
if creates != 0 || deletes != 0 {
t.Errorf("expected 0 creation and 0 deletion of %q, got %d, %d", name, creates, deletes)
}
default:
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
}
}
}
}
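// TestNetworkErrorsWithoutHostNetwork verifies that when the runtime network is in an
// error state, syncing fails for pods without host networking but succeeds for
// host-network static pods.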
func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
HostNetwork: false,
Containers: []v1.Container{
{Name: "foo"},
},
})
kubelet.podManager.SetPods([]*v1.Pod{pod})
isTerminal, err := kubelet.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")
if isTerminal {
t.Fatalf("pod should not be terminal: %#v", pod)
}
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
pod.Spec.HostNetwork = true
isTerminal, err = kubelet.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
if isTerminal {
t.Fatalf("pod should not be terminal: %#v", pod)
}
}
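// TestFilterOutInactivePods verifies that terminal pods and pods known to be
// terminated are filtered out, while pending, running, deleted-but-still-active, and
// still-terminating pods are kept.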
func TestFilterOutInactivePods(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pods := newTestPods(8)
now := metav1.NewTime(time.Now())
// terminal pods are excluded
pods[0].Status.Phase = v1.PodFailed
pods[1].Status.Phase = v1.PodSucceeded
// deleted pod is included unless it's known to be terminated
pods[2].Status.Phase = v1.PodRunning
pods[2].DeletionTimestamp = &now
pods[2].Status.ContainerStatuses = []v1.ContainerStatus{
{State: v1.ContainerState{
Running: &v1.ContainerStateRunning{
StartedAt: now,
},
}},
}
// pending and running pods are included
pods[3].Status.Phase = v1.PodPending
pods[4].Status.Phase = v1.PodRunning
// pod that is running but has been rejected by admission is excluded
pods[5].Status.Phase = v1.PodRunning
kubelet.statusManager.SetPodStatus(logger, pods[5], v1.PodStatus{Phase: v1.PodFailed})
// pod that is running according to the api but is known terminated is excluded
pods[6].Status.Phase = v1.PodRunning
kubelet.podWorkers.(*fakePodWorkers).terminated = map[types.UID]bool{
pods[6].UID: true,
}
// pod that is failed but still terminating is included (it may still be consuming
// resources)
pods[7].Status.Phase = v1.PodFailed
kubelet.podWorkers.(*fakePodWorkers).terminationRequested = map[types.UID]bool{
pods[7].UID: true,
}
expected := []*v1.Pod{pods[2], pods[3], pods[4], pods[7]}
kubelet.podManager.SetPods(pods)
actual := kubelet.filterOutInactivePods(pods)
assert.Equal(t, expected, actual)
}
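// TestCheckpointContainer verifies the error returned for an unknown container name
// and that the checkpoint archive location is always derived from the kubelet's
// checkpoints directory, ignoring any caller-provided location.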
func TestCheckpointContainer(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
containerID := kubecontainer.ContainerID{
Type: "test",
ID: "abc1234",
}
fakePod := &containertest.FakePod{
Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "podFoo",
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{
Name: "containerFoo",
ID: containerID,
},
},
},
}
fakeRuntime.PodList = []*containertest.FakePod{fakePod}
wrongContainerName := "wrongContainerName"
tests := []struct {
name string
containerName string
checkpointLocation string
expectedStatus error
expectedLocation string
}{
{
name: "Checkpoint with wrong container name",
containerName: wrongContainerName,
checkpointLocation: "",
expectedStatus: fmt.Errorf("container %s not found", wrongContainerName),
expectedLocation: "",
},
{
name: "Checkpoint with default checkpoint location",
containerName: fakePod.Pod.Containers[0].Name,
checkpointLocation: "",
expectedStatus: nil,
expectedLocation: filepath.Join(
kubelet.getCheckpointsDir(),
fmt.Sprintf(
"checkpoint-%s_%s-%s",
fakePod.Pod.Name,
fakePod.Pod.Namespace,
fakePod.Pod.Containers[0].Name,
),
),
},
{
name: "Checkpoint with ignored location",
containerName: fakePod.Pod.Containers[0].Name,
checkpointLocation: "somethingThatWillBeIgnored",
expectedStatus: nil,
expectedLocation: filepath.Join(
kubelet.getCheckpointsDir(),
fmt.Sprintf(
"checkpoint-%s_%s-%s",
fakePod.Pod.Name,
fakePod.Pod.Namespace,
fakePod.Pod.Containers[0].Name,
),
),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
options := &runtimeapi.CheckpointContainerRequest{}
if test.checkpointLocation != "" {
options.Location = test.checkpointLocation
}
status := kubelet.CheckpointContainer(
ctx,
fakePod.Pod.ID,
fmt.Sprintf(
"%s_%s",
fakePod.Pod.Name,
fakePod.Pod.Namespace,
),
test.containerName,
options,
)
require.Equal(t, test.expectedStatus, status)
if status != nil {
return
}
require.True(
t,
strings.HasPrefix(
options.Location,
test.expectedLocation,
),
)
require.Equal(
t,
options.ContainerId,
containerID.ID,
)
})
}
}
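// TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod whose
// ActiveDeadlineSeconds has been exceeded is marked Failed.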
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
now := metav1.Now()
startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(30)
pods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: v1.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
}},
}
// Let the pod worker set the status to failed after this sync.
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
assert.Equal(t, v1.PodFailed, status.Phase)
// check pod status contains ContainerStatuses, etc.
assert.NotNil(t, status.ContainerStatuses)
}
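// TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a pod still
// within its ActiveDeadlineSeconds is not marked Failed.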
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
now := metav1.Now()
startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(300)
pods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: v1.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
}},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
assert.NotEqual(t, v1.PodFailed, status.Phase)
}
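// podWithUIDNameNs returns an otherwise-empty pod with the given UID, name, and
// namespace, and a non-nil Annotations map.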
func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: uid,
Name: name,
Namespace: namespace,
Annotations: map[string]string{},
},
}
}
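// podWithUIDNameNsSpec returns a pod with the given UID, name, namespace, and spec.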
func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
pod := podWithUIDNameNs(uid, name, namespace)
pod.Spec = spec
return pod
}
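// TestDeletePodDirsForDeletedPods verifies that a pod's directory is removed once the
// pod is no longer known to the pod manager.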
func TestDeletePodDirsForDeletedPods(t *testing.T) {
ctx := context.Background()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pods := []*v1.Pod{
podWithUIDNameNs("12345678", "pod1", "ns"),
podWithUIDNameNs("12345679", "pod2", "ns"),
}
kl.podManager.SetPods(pods)
// Sync to create pod directories.
kl.HandlePodSyncs(kl.podManager.GetPods())
for i := range pods {
assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
}
// Pod 1 has been deleted and no longer exists.
kl.podManager.SetPods([]*v1.Pod{pods[0]})
kl.HandlePodCleanups(ctx)
assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
}
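// syncAndVerifyPodDir sets the given pods, runs a sync and a cleanup, and then asserts
// whether the pod directory exists for each pod in podsToCheck.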
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
ctx := context.Background()
t.Helper()
kl := testKubelet.kubelet
kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups(ctx)
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
}
}
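// TestDoesNotDeletePodDirsForTerminatedPods verifies that directories of failed or
// succeeded pods are kept as long as the pods are still known to the pod manager.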
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pods := []*v1.Pod{
podWithUIDNameNs("12345678", "pod1", "ns"),
podWithUIDNameNs("12345679", "pod2", "ns"),
podWithUIDNameNs("12345680", "pod3", "ns"),
}
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
// deleted.
kl.statusManager.SetPodStatus(logger, pods[1], v1.PodStatus{Phase: v1.PodFailed})
kl.statusManager.SetPodStatus(logger, pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}
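// TestDoesNotDeletePodDirsIfContainerIsRunning verifies that the directory of a pod
// deleted from the apiserver is kept while the pod is still active on the node, and
// removed once it is not.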
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
runningPod := &kubecontainer.Pod{
ID: "12345678",
Name: "pod1",
Namespace: "ns",
}
apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*v1.Pod{apiPod}
testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{apiPod.UID: true}
syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
// Pretend the pod is deleted from apiserver, but is still active on the node.
// The pod directory should not be removed.
pods = []*v1.Pod{}
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{Pod: runningPod, NetnsPath: ""}}
syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
// The pod is deleted and also not active on the node. The pod directory
// should be removed.
pods = []*v1.Pod{}
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = nil
syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
}
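// TestGetPodsToSync verifies that getPodsToSync returns pods whose work-queue entries
// are due as well as pods that have exceeded their active deadline.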
func TestGetPodsToSync(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
pods := newTestPods(5)
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
startTime := metav1.NewTime(clock.Now())
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
pods[2].Status.StartTime = &startTime
pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
kubelet.podManager.SetPods(pods)
kubelet.workQueue.Enqueue(pods[2].UID, 0)
kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
clock.Step(1 * time.Minute)
expected := []*v1.Pod{pods[2], pods[3], pods[0]}
podsToSync := kubelet.getPodsToSync()
sort.Sort(podsByUID(expected))
sort.Sort(podsByUID(podsToSync))
assert.Equal(t, expected, podsToSync)
}
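// TestGenerateAPIPodStatusWithSortedContainers verifies that container statuses in the
// generated API pod status follow the order of containers in the pod spec, regardless
// of the order of the runtime statuses.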
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
numContainers := 10
expectedOrder := []string{}
cStatuses := []*kubecontainer.Status{}
specContainerList := []v1.Container{}
for i := 0; i < numContainers; i++ {
id := fmt.Sprintf("%v", i)
containerName := fmt.Sprintf("%vcontainer", id)
expectedOrder = append(expectedOrder, containerName)
cStatus := &kubecontainer.Status{
ID: kubecontainer.BuildContainerID("test", id),
Name: containerName,
}
// Rearrange container statuses
if i%2 == 0 {
cStatuses = append(cStatuses, cStatus)
} else {
cStatuses = append([]*kubecontainer.Status{cStatus}, cStatuses...)
}
specContainerList = append(specContainerList, v1.Container{Name: containerName})
}
pod := podWithUIDNameNs("uid1", "foo", "test")
pod.Spec = v1.PodSpec{
Containers: specContainerList,
}
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: cStatuses,
}
for attempt := 0; attempt < 5; attempt++ {
apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
for i, c := range apiStatus.ContainerStatuses {
if expectedOrder[i] != c.Name {
t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
}
}
}
}
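// verifyContainerStatuses asserts that each container status matches the expected
// state and last termination state.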
func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, expectedState, expectedLastTerminationState map[string]v1.ContainerState, message string) {
for _, s := range statuses {
assert.Equal(t, expectedState[s.Name], s.State, "%s: state", message)
assert.Equal(t, expectedLastTerminationState[s.Name], s.LastTerminationState, "%s: last terminated state", message)
}
}
// Test generateAPIPodStatus with different reason cache and old api pod status.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
// The following waiting reason and message are generated in convertStatusToAPIStatus()
testTimestamp := time.Unix(123456789, 987654321)
testErrorReason := fmt.Errorf("test-error")
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "foo", "new")
pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
}
tests := []struct {
containers []v1.Container
statuses []*kubecontainer.Status
reasons map[string]error
oldStatuses []v1.ContainerStatus
expectedState map[string]v1.ContainerState
// Only set expectedInitState when it is different from expectedState
expectedInitState map[string]v1.ContainerState
expectedLastTerminationState map[string]v1.ContainerState
}{
// For container with no historical record, State should be Waiting, LastTerminationState should be retrieved from
// old status from apiserver.
{
containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
statuses: []*kubecontainer.Status{},
reasons: map[string]error{},
oldStatuses: []v1.ContainerStatus{{
Name: "with-old-record",
LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}},
}},
expectedState: map[string]v1.ContainerState{
"without-old-record": {Waiting: &v1.ContainerStateWaiting{
Reason: ContainerCreating,
}},
"with-old-record": {Waiting: &v1.ContainerStateWaiting{
Reason: ContainerCreating,
}},
},
expectedInitState: map[string]v1.ContainerState{
"without-old-record": {Waiting: &v1.ContainerStateWaiting{
Reason: PodInitializing,
}},
"with-old-record": {Waiting: &v1.ContainerStateWaiting{
Reason: PodInitializing,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"with-old-record": {Terminated: &v1.ContainerStateTerminated{}},
},
},
// For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
{
containers: []v1.Container{{Name: "running"}},
statuses: []*kubecontainer.Status{
{
Name: "running",
State: kubecontainer.ContainerStateRunning,
StartedAt: testTimestamp,
},
{
Name: "running",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
},
reasons: map[string]error{},
oldStatuses: []v1.ContainerStatus{},
expectedState: map[string]v1.ContainerState{
"running": {Running: &v1.ContainerStateRunning{
StartedAt: metav1.NewTime(testTimestamp),
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"running": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
// For terminated container:
// * If there is no recent start error record, State should be Terminated, LastTerminationState should be retrieved from
// second latest terminated status;
// * If there is recent start error record, State should be Waiting, LastTerminationState should be retrieved from latest
// terminated status;
// * If ExitCode = 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be restarted. Whether or not
// there is a recent start error, State should be Terminated, and LastTerminationState should be retrieved from the
// second latest terminated status.
{
containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
statuses: []*kubecontainer.Status{
{
Name: "without-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
{
Name: "with-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 2,
},
{
Name: "without-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 3,
},
{
Name: "with-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 4,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 5,
},
},
reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
oldStatuses: []v1.ContainerStatus{},
expectedState: map[string]v1.ContainerState{
"without-reason": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
"with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"without-reason": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
"with-reason": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 5,
ContainerID: emptyContainerID,
}},
},
},
// For Unknown Container Status:
// In certain situations a container can be running and fail to retrieve the status, which results in
// a transition to the Unknown state. Prior to this fix, a container would make an invalid transition
// from Running->Waiting. This test validates the correct behavior of transitioning from Running->Terminated.
{
containers: []v1.Container{{Name: "unknown"}},
statuses: []*kubecontainer.Status{
{
Name: "unknown",
State: kubecontainer.ContainerStateUnknown,
},
{
Name: "unknown",
State: kubecontainer.ContainerStateRunning,
},
},
reasons: map[string]error{},
oldStatuses: []v1.ContainerStatus{{
Name: "unknown",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
}},
expectedState: map[string]v1.ContainerState{
"unknown": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 137,
Message: "The container could not be located when the pod was terminated",
Reason: kubecontainer.ContainerReasonStatusUnknown,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"unknown": {Running: &v1.ContainerStateRunning{}},
},
},
}
for i, test := range tests {
kubelet.reasonCache = NewReasonCache()
for n, e := range test.reasons {
kubelet.reasonCache.add(pod.UID, n, e, "")
}
pod.Spec.Containers = test.containers
pod.Status.ContainerStatuses = test.oldStatuses
podStatus.ContainerStatuses = test.statuses
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
}
// Everything should be the same for init containers
for i, test := range tests {
kubelet.reasonCache = NewReasonCache()
for n, e := range test.reasons {
kubelet.reasonCache.add(pod.UID, n, e, "")
}
pod.Spec.InitContainers = test.containers
pod.Status.InitContainerStatuses = test.oldStatuses
podStatus.ContainerStatuses = test.statuses
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
expectedState := test.expectedState
if test.expectedInitState != nil {
expectedState = test.expectedInitState
}
verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
}
}
// Test generateAPIPodStatus with different restart policies.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
testErrorReason := fmt.Errorf("test-error")
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "foo", "new")
containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: []*kubecontainer.Status{
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 2,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 3,
},
},
}
kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
for c, test := range []struct {
restartPolicy v1.RestartPolicy
expectedState map[string]v1.ContainerState
expectedLastTerminationState map[string]v1.ContainerState
// Only set expectedInitState when it is different from expectedState
expectedInitState map[string]v1.ContainerState
// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
expectedInitLastTerminationState map[string]v1.ContainerState
}{
{
restartPolicy: v1.RestartPolicyNever,
expectedState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
},
},
{
restartPolicy: v1.RestartPolicyOnFailure,
expectedState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
{
restartPolicy: v1.RestartPolicyAlways,
expectedState: map[string]v1.ContainerState{
"succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
// If the init container is terminated with exit code 0, it won't be restarted even when the
// restart policy is RestartPolicyAlways.
expectedInitState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedInitLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
} {
pod.Spec.RestartPolicy = test.restartPolicy
// Test normal containers
pod.Spec.Containers = containers
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
pod.Spec.Containers = nil
// Test init containers
pod.Spec.InitContainers = containers
apiStatus = kubelet.generateAPIPodStatus(pod, podStatus, false)
if test.expectedInitState != nil {
expectedState = test.expectedInitState
}
if test.expectedInitLastTerminationState != nil {
expectedLastTerminationState = test.expectedInitLastTerminationState
}
verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
pod.Spec.InitContainers = nil
}
}
// Test generateAPIPodStatus with different container-level restart policies.
func TestGenerateAPIPodStatusWithContainerRestartPolicies(t *testing.T) {
var (
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
containerRestartPolicyOnFailure = v1.ContainerRestartPolicyOnFailure
containerRestartPolicyNever = v1.ContainerRestartPolicyNever
)
testErrorReason := fmt.Errorf("test-error")
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "foo", "new")
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: []*kubecontainer.Status{
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 2,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 3,
},
},
}
kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerRestartRules, true)
for _, test := range []struct {
desc string
containers []v1.Container
expectedState map[string]v1.ContainerState
expectedLastTerminationState map[string]v1.ContainerState
}{
{
desc: "container restart policy rules match",
containers: []v1.Container{
{
Name: "failed",
RestartPolicy: &containerRestartPolicyNever,
RestartPolicyRules: []v1.ContainerRestartRule{{
Action: v1.ContainerRestartRuleActionRestart,
ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
Values: []int32{1},
},
}},
},
},
expectedState: map[string]v1.ContainerState{
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
{
desc: "container restart policy rules not match",
containers: []v1.Container{
{
Name: "failed",
RestartPolicy: &containerRestartPolicyNever,
RestartPolicyRules: []v1.ContainerRestartRule{{
Action: v1.ContainerRestartRuleActionRestart,
ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
Values: []int32{2},
},
}},
},
},
expectedState: map[string]v1.ContainerState{
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
},
},
{
desc: "container restart policy never",
containers: []v1.Container{
{
Name: "succeed",
RestartPolicy: &containerRestartPolicyNever,
},
{
Name: "failed",
RestartPolicy: &containerRestartPolicyNever,
},
},
expectedState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
},
},
{
desc: "container restart policy OnFailure",
containers: []v1.Container{
{
Name: "succeed",
RestartPolicy: &containerRestartPolicyOnFailure,
},
{
Name: "failed",
RestartPolicy: &containerRestartPolicyOnFailure,
},
},
expectedState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
{
desc: "container restart policy Always",
containers: []v1.Container{
{
Name: "succeed",
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "failed",
RestartPolicy: &containerRestartPolicyAlways,
},
},
expectedState: map[string]v1.ContainerState{
"succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]v1.ContainerState{
"succeed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &v1.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
} {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
// Test normal containers
pod.Spec.Containers = test.containers
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, test.desc)
pod.Spec.Containers = nil
}
}
// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
// list of pods to reject.
podsToReject []*v1.Pod
}
// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
for _, podToReject := range a.podsToReject {
if podToReject.UID == attrs.Pod.UID {
return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
}
}
return lifecycle.PodAdmitResult{Admit: true}
}
// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
pods := []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
UID: "123456789",
Name: "podA",
Namespace: "foo",
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "987654321",
Name: "podB",
Namespace: "foo",
},
},
}
podToReject := pods[0]
podToAdmit := pods[1]
podsToReject := []*v1.Pod{podToReject}
kl.allocationManager.AddPodAdmitHandlers(lifecycle.PodAdmitHandlers{&testPodAdmitHandler{podsToReject: podsToReject}})
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
checkPodStatus(t, kl, podToReject, v1.PodFailed)
checkPodStatus(t, kl, podToAdmit, v1.PodPending)
}
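// TestPodResourceAllocationReset verifies that pod admission records container
// resource allocations, preserving any allocation that already exists (e.g. across a
// kubelet restart) over the values in the pod spec.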
func TestPodResourceAllocationReset(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
// fakePodWorkers triggers syncPodFn synchronously on update. We overwrite it here to
// avoid calling kubelet.SyncPod, which performs resize resource allocation.
kubelet.podWorkers.(*fakePodWorkers).syncPodFn =
func(_ context.Context, _ kubetypes.SyncPodType, _, _ *v1.Pod, _ *kubecontainer.PodStatus) (bool, error) {
return false, nil
}
nodes := []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
},
},
},
}
kubelet.nodeLister = testNodeLister{nodes: nodes}
cpu500m := resource.MustParse("500m")
cpu800m := resource.MustParse("800m")
mem500M := resource.MustParse("500Mi")
mem800M := resource.MustParse("800Mi")
cpu500mMem500MPodSpec := &v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
},
},
},
}
cpu800mMem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
cpu800mMem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m, v1.ResourceMemory: mem800M}
cpu800mPodSpec := cpu500mMem500MPodSpec.DeepCopy()
cpu800mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m}
mem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
mem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem800M}
cpu500mPodSpec := cpu500mMem500MPodSpec.DeepCopy()
cpu500mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu500m}
mem500MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
mem500MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem500M}
emptyPodSpec := cpu500mMem500MPodSpec.DeepCopy()
emptyPodSpec.Containers[0].Resources.Requests = v1.ResourceList{}
tests := []struct {
name string
pod *v1.Pod
existingPodAllocation *v1.Pod
expectedPodResourceInfoMap state.PodResourceInfoMap
}{
{
name: "Having both memory and cpu, resource allocation not exists",
pod: podWithUIDNameNsSpec("1", "pod1", "foo", *cpu500mMem500MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"1": {
ContainerResources: map[string]v1.ResourceRequirements{
cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Having both memory and cpu, resource allocation exists",
pod: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"2": {
ContainerResources: map[string]v1.ResourceRequirements{
cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Having both memory and cpu, resource allocation exists (with different value)",
pod: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu500mMem500MPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu800mMem800MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"3": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has cpu, resource allocation not exists",
pod: podWithUIDNameNsSpec("4", "pod5", "foo", *cpu500mPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"4": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has cpu, resource allocation exists",
pod: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"5": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has cpu, resource allocation exists (with different value)",
pod: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu500mPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu800mPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"6": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has memory, resource allocation not exists",
pod: podWithUIDNameNsSpec("7", "pod7", "foo", *mem500MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"7": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has memory, resource allocation exists",
pod: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"8": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "Only has memory, resource allocation exists (with different value)",
pod: podWithUIDNameNsSpec("9", "pod9", "foo", *mem500MPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("9", "pod9", "foo", *mem800MPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"9": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "No CPU and memory, resource allocation not exists",
pod: podWithUIDNameNsSpec("10", "pod10", "foo", *emptyPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"10": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
},
},
},
},
{
name: "No CPU and memory, resource allocation exists",
pod: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
existingPodAllocation: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
expectedPodResourceInfoMap: state.PodResourceInfoMap{
"11": state.PodResourceInfo{
ContainerResources: map[string]v1.ResourceRequirements{
emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
},
},
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if tc.existingPodAllocation != nil {
// When the kubelet restarts, the allocated resources already exist before the pod is added.
err := kubelet.allocationManager.SetAllocatedResources(tc.existingPodAllocation)
if err != nil {
t.Fatalf("failed to set pod allocation: %v", err)
}
}
kubelet.HandlePodAdditions([]*v1.Pod{tc.pod})
allocatedResources, found := kubelet.allocationManager.GetContainerResourceAllocation(tc.pod.UID, tc.pod.Spec.Containers[0].Name)
if !found {
t.Fatalf("resource allocation should exist: (pod: %#v, container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name)
}
assert.Equal(t, tc.expectedPodResourceInfoMap[tc.pod.UID].ContainerResources[tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
})
}
}
// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
// list of pods to sync
podsToSync []*v1.Pod
}
// ShouldSync reports whether the pod should be synced by the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
for _, podToSync := range a.podsToSync {
if podToSync.UID == pod.UID {
return true
}
}
return false
}
// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pods := newTestPods(5)
expected := []*v1.Pod{pods[0]}
kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
kubelet.podManager.SetPods(pods)
podsToSync := kubelet.getPodsToSync()
sort.Sort(podsByUID(expected))
sort.Sort(podsByUID(podsToSync))
assert.Equal(t, expected, podsToSync)
}
// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
// list of pods to evict.
podsToEvict []*v1.Pod
// the reason for the eviction
reason string
// the message for the eviction
message string
}
// ShouldEvict reports whether the pod should be evicted by the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
for _, podToEvict := range a.podsToEvict {
if podToEvict.UID == pod.UID {
return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
}
}
return lifecycle.ShouldEvictResponse{Evict: false}
}
// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that generateAPIPodStatus invokes the registered pod sync handlers and reflects the eviction decision in the generated status.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pod := newTestPods(1)[0]
podsToEvict := []*v1.Pod{pod}
kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
}
apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
require.Equal(t, v1.PodFailed, apiStatus.Phase)
require.Equal(t, "Evicted", apiStatus.Reason)
require.Equal(t, "because", apiStatus.Message)
}
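// TestSyncTerminatingPodKillPod verifies that SyncTerminatingPod succeeds with a zero
// grace period override and that the terminal status set by the callback is recorded
// in the status manager.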
func TestSyncTerminatingPodKillPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
},
}
pods := []*v1.Pod{pod}
kl.podManager.SetPods(pods)
podStatus := &kubecontainer.PodStatus{ID: pod.UID}
gracePeriodOverride := int64(0)
err := kl.SyncTerminatingPod(context.Background(), pod, podStatus, &gracePeriodOverride, func(podStatus *v1.PodStatus) {
podStatus.Phase = v1.PodFailed
podStatus.Reason = "reason"
podStatus.Message = "message"
})
require.NoError(t, err)
// Check pod status stored in the status map.
checkPodStatus(t, kl, pod, v1.PodFailed)
}
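// TestSyncLabels verifies that syncNodeStatus patches the node's kubernetes.io/os and
// kubernetes.io/arch labels to match the kubelet's runtime values when they are missing
// or incorrect, and leaves the node untouched when they already match.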
func TestSyncLabels(t *testing.T) {
tests := []struct {
name string
existingNode *v1.Node
isPatchingNeeded bool
}{
{
name: "no labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}},
isPatchingNeeded: true,
},
{
name: "wrong labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS", v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
{
name: "correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
isPatchingNeeded: false,
},
{
name: "partially correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
test.existingNode.Name = string(kl.nodeName)
kl.nodeLister = testNodeLister{nodes: []*v1.Node{test.existingNode}}
go func() { kl.syncNodeStatus() }()
err := retryWithExponentialBackOff(
100*time.Millisecond,
func() (bool, error) {
var savedNode *v1.Node
if test.isPatchingNeeded {
actions := kubeClient.Actions()
if len(actions) == 0 {
t.Logf("No action yet")
return false, nil
}
for _, action := range actions {
if action.GetVerb() == "patch" {
var (
err error
patchAction = action.(core.PatchActionImpl)
patchContent = patchAction.GetPatch()
)
savedNode, err = applyNodeStatusPatch(test.existingNode, patchContent)
if err != nil {
t.Logf("node patching failed, %v", err)
return false, nil
}
}
}
} else {
savedNode = test.existingNode
}
if savedNode == nil || savedNode.Labels == nil {
t.Logf("savedNode.Labels should not be nil")
return false, nil
}
val, ok := savedNode.Labels[v1.LabelOSStable]
if !ok {
t.Logf("expected kubernetes.io/os label to be present")
return false, nil
}
if val != goruntime.GOOS {
t.Logf("expected kubernetes.io/os to match runtime.GOOS but got %v", val)
return false, nil
}
val, ok = savedNode.Labels[v1.LabelArchStable]
if !ok {
t.Logf("expected kubernetes.io/arch label to be present")
return false, nil
}
if val != goruntime.GOARCH {
t.Logf("expected kubernetes.io/arch to match runtime.GOARCH but got %v", val)
return false, nil
}
return true, nil
},
)
if err != nil {
t.Fatalf("expected labels to be reconciled but it failed with %v", err)
}
})
}
}
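// waitForVolumeUnmount waits, with exponential backoff, until the volume manager no
// longer reports any (possibly) mounted volumes for the given pod.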
func waitForVolumeUnmount(
volumeManager kubeletvolume.VolumeManager,
pod *v1.Pod) error {
err := retryWithExponentialBackOff(
50*time.Millisecond,
func() (bool, error) {
// Verify that the pod's volumes are unmounted
hasVolumes := volumeManager.HasPossiblyMountedVolumesForPod(
util.GetUniquePodName(pod))
return !hasVolumes, nil
},
)
if err != nil {
return fmt.Errorf(
"expected volumes to be unmounted, but some volumes are still mounted")
}
return nil
}
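// waitForVolumeDetach waits, with exponential backoff, until the volume manager no
// longer reports the given volume as attached.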
func waitForVolumeDetach(
volumeName v1.UniqueVolumeName,
volumeManager kubeletvolume.VolumeManager) error {
err := retryWithExponentialBackOff(
50*time.Millisecond,
func() (bool, error) {
// Verify that the volume is no longer attached
volumeAttached := volumeManager.VolumeIsAttached(volumeName)
return !volumeAttached, nil
},
)
if err != nil {
return fmt.Errorf(
"expected volume %q to be detached, but it is still attached", volumeName)
}
return nil
}
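// retryWithExponentialBackOff retries fn with exponential backoff, starting at
// initialDuration and growing by a factor of 3 for up to 6 steps.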
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 6,
}
return wait.ExponentialBackoff(backoff, fn)
}
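// simulateVolumeInUseUpdate periodically marks the given volume as reported in use,
// every 100ms, until stopCh is closed.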
func simulateVolumeInUseUpdate(
volumeName v1.UniqueVolumeName,
stopCh <-chan struct{},
volumeManager kubeletvolume.VolumeManager) {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
volumeManager.MarkVolumesAsReportedInUse(
[]v1.UniqueVolumeName{volumeName})
case <-stopCh:
return
}
}
}
// dirExists returns true if the path exists and represents a directory.
func dirExists(path string) bool {
s, err := os.Stat(path)
if err != nil {
return false
}
return s.IsDir()
}
// podsByUID implements sort.Interface to sort pods by UID.
type podsByUID []*v1.Pod
func (p podsByUID) Len() int { return len(p) }
func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }
// createAndStartFakeRemoteRuntime creates and starts a fakeremote.RemoteRuntime.
// It returns the RemoteRuntime and its endpoint on success.
// Callers should invoke fakeRuntime.Stop() to clean up the server.
func createAndStartFakeRemoteRuntime(t *testing.T) (*fakeremote.RemoteRuntime, string) {
endpoint, err := fakeremote.GenerateEndpoint()
require.NoError(t, err)
fakeRuntime := fakeremote.NewFakeRemoteRuntime()
fakeRuntime.Start(endpoint)
return fakeRuntime, endpoint
}
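// createRemoteRuntimeService returns a RuntimeService client connected to the given
// endpoint, instrumented with the supplied tracer provider.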
func createRemoteRuntimeService(endpoint string, t *testing.T, tp oteltrace.TracerProvider) internalapi.RuntimeService {
logger := klog.Background()
runtimeService, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second, tp, &logger)
require.NoError(t, err)
return runtimeService
}
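// TestNewMainKubeletStandAlone verifies that NewMainKubelet succeeds in standalone
// mode (nil API-server clients) and leaves the configmap and secret managers nil.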
func TestNewMainKubeletStandAlone(t *testing.T) {
tCtx := ktesting.Init(t)
tempDir, err := os.MkdirTemp("", "logs")
assert.NoError(t, err)
ContainerLogsDir = tempDir
defer os.RemoveAll(ContainerLogsDir)
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
SyncFrequency: metav1.Duration{Duration: time.Minute},
ConfigMapAndSecretChangeDetectionStrategy: kubeletconfiginternal.WatchChangeDetectionStrategy,
ContainerLogMaxSize: "10Mi",
ContainerLogMaxFiles: 5,
MemoryThrottlingFactor: ptr.To[float64](0),
}
var prober volume.DynamicPluginProber
tp := noopoteltrace.NewTracerProvider()
cadvisor := cadvisortest.NewMockInterface(t)
cadvisor.EXPECT().MachineInfo().Return(&cadvisorapi.MachineInfo{}, nil).Maybe()
cadvisor.EXPECT().ImagesFsInfo(tCtx).Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 1000,
Available: 600,
}, nil).Maybe()
tlsOptions := &server.TLSOptions{
Config: &tls.Config{
MinVersion: 0,
},
}
fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
defer func() {
fakeRuntime.Stop()
}()
fakeRecorder := &record.FakeRecorder{}
rtSvc := createRemoteRuntimeService(endpoint, t, noopoteltrace.NewTracerProvider())
kubeDep := &Dependencies{
Auth: nil,
CAdvisorInterface: cadvisor,
ContainerManager: cm.NewStubContainerManager(),
KubeClient: nil, // standalone mode
HeartbeatClient: nil,
EventClient: nil,
TracerProvider: tp,
HostUtil: hostutil.NewFakeHostUtil(nil),
Mounter: mount.NewFakeMounter(nil),
Recorder: fakeRecorder,
RemoteRuntimeService: rtSvc,
RemoteImageService: fakeRuntime.ImageService,
Subpather: &subpath.FakeSubpath{},
OOMAdjuster: oom.NewOOMAdjuster(),
OSInterface: kubecontainer.RealOS{},
DynamicPluginProber: prober,
TLSOptions: tlsOptions,
}
crOptions := &config.ContainerRuntimeOptions{}
testMainKubelet, err := NewMainKubelet(
tCtx,
kubeCfg,
kubeDep,
crOptions,
"hostname",
"hostname",
[]net.IP{},
"",
"external",
"/tmp/cert",
"/tmp/rootdir",
tempDir,
"",
"",
false,
[]v1.Taint{},
[]string{},
"",
false,
false,
metav1.Duration{Duration: time.Minute},
1024,
110,
map[string]string{},
1024,
false,
)
assert.NoError(t, err, "NewMainKubelet should succeed")
assert.NotNil(t, testMainKubelet, "testMainKubelet should not be nil")
testMainKubelet.BirthCry()
testMainKubelet.StartGarbageCollection()
// A nil-pointer panic can be reproduced if the configmap manager is not nil
// in standalone mode. See https://github.com/kubernetes/kubernetes/issues/113492
// pod := &v1.Pod{
// ObjectMeta: metav1.ObjectMeta{
// UID: "12345678",
// Name: "bar",
// Namespace: "foo",
// },
// Spec: v1.PodSpec{
// Containers: []v1.Container{{
// EnvFrom: []v1.EnvFromSource{{
// ConfigMapRef: &v1.ConfigMapEnvSource{
// LocalObjectReference: v1.LocalObjectReference{Name: "config-map"}}},
// }}},
// Volumes: []v1.Volume{{
// VolumeSource: v1.VolumeSource{
// ConfigMap: &v1.ConfigMapVolumeSource{
// LocalObjectReference: v1.LocalObjectReference{
// Name: "config-map"}}}}},
// },
// }
// testMainKubelet.configMapManager.RegisterPod(pod)
// testMainKubelet.secretManager.RegisterPod(pod)
assert.Nil(t, testMainKubelet.configMapManager, "configmap manager should be nil if kubelet is in standalone mode")
assert.Nil(t, testMainKubelet.secretManager, "secret manager should be nil if kubelet is in standalone mode")
}
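// TestSyncPodSpans verifies that SyncPod produces a root "syncPod" trace span and that
// all image-service and runtime-service spans in the same trace are its children.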
func TestSyncPodSpans(t *testing.T) {
tCtx := ktesting.Init(t)
testKubelet := newTestKubelet(t, false)
kubelet := testKubelet.kubelet
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: "testNode",
UID: types.UID("testNode"),
Namespace: "",
}
kubelet.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, "TEST", "")
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
SyncFrequency: metav1.Duration{Duration: time.Minute},
ConfigMapAndSecretChangeDetectionStrategy: kubeletconfiginternal.WatchChangeDetectionStrategy,
ContainerLogMaxSize: "10Mi",
ContainerLogMaxFiles: 5,
MemoryThrottlingFactor: ptr.To[float64](0),
}
exp := tracetest.NewInMemoryExporter()
tp := sdktrace.NewTracerProvider(
sdktrace.WithSyncer(exp),
)
kubelet.tracer = tp.Tracer(instrumentationScope)
fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
defer func() {
fakeRuntime.Stop()
}()
runtimeSvc := createRemoteRuntimeService(endpoint, t, tp)
kubelet.runtimeService = runtimeSvc
fakeRuntime.ImageService.SetFakeImageSize(100)
fakeRuntime.ImageService.SetFakeImages([]string{"test:latest"})
logger := klog.Background()
imageSvc, err := remote.NewRemoteImageService(endpoint, 15*time.Second, tp, &logger)
assert.NoError(t, err)
kubelet.containerRuntime, _, err = kuberuntime.NewKubeGenericRuntimeManager(
tCtx,
kubelet.recorder,
kubelet.livenessManager,
kubelet.readinessManager,
kubelet.startupManager,
kubelet.rootDirectory,
kubelet.podLogsDirectory,
kubelet.machineInfo,
kubelet.podWorkers,
kubeCfg.MaxPods,
kubelet.os,
kubelet,
nil,
kubelet.crashLoopBackOff,
kubeCfg.SerializeImagePulls,
kubeCfg.MaxParallelImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
string(kubeletconfiginternal.NeverVerify),
nil,
"",
"",
nil,
kubeCfg.CPUCFSQuota,
kubeCfg.CPUCFSQuotaPeriod,
runtimeSvc,
imageSvc,
kubelet.containerManager,
kubelet.containerLogManager,
kubelet.runtimeClassManager,
kubelet.allocationManager,
false,
kubeCfg.MemorySwap.SwapBehavior,
kubelet.containerManager.GetNodeAllocatableAbsolute,
*kubeCfg.MemoryThrottlingFactor,
kubeletutil.NewPodStartupLatencyTracker(),
tp,
token.NewManager(kubelet.kubeClient),
func(string, string) (*v1.ServiceAccount, error) { return nil, nil },
)
assert.NoError(t, err)
kubelet.allocationManager.SetContainerRuntime(kubelet.containerRuntime)
pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
Containers: []v1.Container{
{
Name: "bar",
Image: "test:latest",
ImagePullPolicy: v1.PullAlways,
},
},
EnableServiceLinks: ptr.To(false),
})
_, err = kubelet.SyncPod(context.Background(), kubetypes.SyncPodCreate, pod, nil, &kubecontainer.PodStatus{})
require.NoError(t, err)
assert.NotEmpty(t, exp.GetSpans())
// find root span for syncPod
var rootSpan *tracetest.SpanStub
spans := exp.GetSpans()
for i, span := range spans {
if span.Name == "syncPod" {
rootSpan = &spans[i]
break
}
}
assert.NotNil(t, rootSpan)
imageServiceSpans := make([]tracetest.SpanStub, 0)
runtimeServiceSpans := make([]tracetest.SpanStub, 0)
for _, span := range exp.GetSpans() {
if span.SpanContext.TraceID() == rootSpan.SpanContext.TraceID() {
switch {
case strings.HasPrefix(span.Name, "runtime.v1.ImageService"):
imageServiceSpans = append(imageServiceSpans, span)
case strings.HasPrefix(span.Name, "runtime.v1.RuntimeService"):
runtimeServiceSpans = append(runtimeServiceSpans, span)
}
}
}
assert.NotEmpty(t, imageServiceSpans, "syncPod trace should have image service spans")
assert.NotEmpty(t, runtimeServiceSpans, "syncPod trace should have runtime service spans")
for _, span := range imageServiceSpans {
assert.Equalf(t, span.Parent.SpanID(), rootSpan.SpanContext.SpanID(), "image service span %s %s should be child of root span", span.Name, span.Parent.SpanID())
}
for _, span := range runtimeServiceSpans {
assert.Equalf(t, span.Parent.SpanID(), rootSpan.SpanContext.SpanID(), "runtime service span %s %s should be child of root span", span.Name, span.Parent.SpanID())
}
}
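// TestRecordAdmissionRejection verifies that recordAdmissionRejection increments
// kubelet_admission_rejections_total with the expected reason label for each
// admission rejection reason.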
func TestRecordAdmissionRejection(t *testing.T) {
metrics.Register()
testCases := []struct {
name string
reason string
wants string
}{
{
name: "AppArmor",
reason: lifecycle.AppArmorNotAdmittedReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="AppArmor"} 1
`,
},
{
name: "VolumeAttachmentLimitExceeded",
reason: kubeletvolume.VolumeAttachmentLimitExceededReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="VolumeAttachmentLimitExceeded"} 1
`,
},
{
name: "PodOSSelectorNodeLabelDoesNotMatch",
reason: lifecycle.PodOSSelectorNodeLabelDoesNotMatch,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="PodOSSelectorNodeLabelDoesNotMatch"} 1
`,
},
{
name: "PodOSNotSupported",
reason: lifecycle.PodOSNotSupported,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="PodOSNotSupported"} 1
`,
},
{
name: "InvalidNodeInfo",
reason: lifecycle.InvalidNodeInfo,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="InvalidNodeInfo"} 1
`,
},
{
name: "InitContainerRestartPolicyForbidden",
reason: lifecycle.InitContainerRestartPolicyForbidden,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="InitContainerRestartPolicyForbidden"} 1
`,
},
{
name: "SupplementalGroupsPolicyNotSupported",
reason: lifecycle.SupplementalGroupsPolicyNotSupported,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="SupplementalGroupsPolicyNotSupported"} 1
`,
},
{
name: "UnexpectedAdmissionError",
reason: lifecycle.UnexpectedAdmissionError,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="UnexpectedAdmissionError"} 1
`,
},
{
name: "UnknownReason",
reason: lifecycle.UnknownReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="UnknownReason"} 1
`,
},
{
name: "UnexpectedPredicateFailureType",
reason: lifecycle.UnexpectedPredicateFailureType,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="UnexpectedPredicateFailureType"} 1
`,
},
{
name: "node(s) had taints that the pod didn't tolerate",
reason: tainttoleration.ErrReasonNotMatch,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="node(s) had taints that the pod didn't tolerate"} 1
`,
},
{
name: "Evicted",
reason: eviction.Reason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="Evicted"} 1
`,
},
{
name: "SysctlForbidden",
reason: sysctl.ForbiddenReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="SysctlForbidden"} 1
`,
},
{
name: "TopologyAffinityError",
reason: topologymanager.ErrorTopologyAffinity,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="TopologyAffinityError"} 1
`,
},
{
name: "NodeShutdown",
reason: nodeshutdown.NodeShutdownNotAdmittedReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="NodeShutdown"} 1
`,
},
{
name: "OutOfcpu",
reason: "OutOfcpu",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="OutOfcpu"} 1
`,
},
{
name: "OutOfmemory",
reason: "OutOfmemory",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="OutOfmemory"} 1
`,
},
{
name: "OutOfephemeral-storage",
reason: "OutOfephemeral-storage",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="OutOfephemeral-storage"} 1
`,
},
{
name: "OutOfpods",
reason: "OutOfpods",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="OutOfpods"} 1
`,
},
{
name: "OutOfgpu",
reason: "OutOfgpu",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="OutOfExtendedResources"} 1
`,
},
{
name: "PodLevelResources",
reason: lifecycle.PodLevelResourcesNotAdmittedReason,
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="PodLevelResourcesNotSupported"} 1
`,
},
{
name: "OtherReason",
reason: "OtherReason",
wants: `
# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
# TYPE kubelet_admission_rejections_total counter
kubelet_admission_rejections_total{reason="Other"} 1
`,
},
}
// Run tests.
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Reset the metric before each test case.
metrics.AdmissionRejectionsTotal.Reset()
// Call the function.
recordAdmissionRejection(tc.reason)
if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(tc.wants), "kubelet_admission_rejections_total"); err != nil {
t.Error(err)
}
})
}
}
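// TestCrashLoopBackOffConfiguration verifies the initial and maximum crash-loop backoff
// durations computed by newCrashLoopBackOff under different feature gates and per-node
// configuration.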
func TestCrashLoopBackOffConfiguration(t *testing.T) {
testCases := []struct {
name string
featureGates []featuregate.Feature
nodeDecay metav1.Duration
expectedInitial time.Duration
expectedMax time.Duration
}{
{
name: "Prior behavior",
expectedMax: time.Duration(300 * time.Second),
expectedInitial: time.Duration(10 * time.Second),
},
{
name: "New default only",
featureGates: []featuregate.Feature{features.ReduceDefaultCrashLoopBackOffDecay},
expectedMax: time.Duration(60 * time.Second),
expectedInitial: time.Duration(1 * time.Second),
},
{
name: "Faster per node config; only node config configured",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax},
nodeDecay: metav1.Duration{Duration: 2 * time.Second},
expectedMax: time.Duration(2 * time.Second),
expectedInitial: time.Duration(2 * time.Second),
},
{
name: "Faster per node config; new default and node config configured",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax, features.ReduceDefaultCrashLoopBackOffDecay},
nodeDecay: metav1.Duration{Duration: 2 * time.Second},
expectedMax: time.Duration(2 * time.Second),
expectedInitial: time.Duration(1 * time.Second),
},
{
name: "Slower per node config; new default and node config configured, set A",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax, features.ReduceDefaultCrashLoopBackOffDecay},
nodeDecay: metav1.Duration{Duration: 10 * time.Second},
expectedMax: time.Duration(10 * time.Second),
expectedInitial: time.Duration(1 * time.Second),
},
{
name: "Slower per node config; new default and node config configured, set B",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax, features.ReduceDefaultCrashLoopBackOffDecay},
nodeDecay: metav1.Duration{Duration: 300 * time.Second},
expectedMax: time.Duration(300 * time.Second),
expectedInitial: time.Duration(1 * time.Second),
},
{
name: "Slower per node config; only node config configured, set A",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax},
nodeDecay: metav1.Duration{Duration: 11 * time.Second},
expectedMax: time.Duration(11 * time.Second),
expectedInitial: time.Duration(10 * time.Second),
},
{
name: "Slower per node config; only node config configured, set B",
featureGates: []featuregate.Feature{features.KubeletCrashLoopBackOffMax},
nodeDecay: metav1.Duration{Duration: 300 * time.Second},
expectedMax: time.Duration(300 * time.Second),
expectedInitial: time.Duration(10 * time.Second),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{}
for _, f := range tc.featureGates {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, f, true)
}
if tc.nodeDecay.Duration > 0 {
kubeCfg.CrashLoopBackOff.MaxContainerRestartPeriod = &tc.nodeDecay
}
resultMax, resultInitial := newCrashLoopBackOff(kubeCfg)
assert.Equalf(t, tc.expectedMax, resultMax, "wrong max calculated, want: %v, got %v", tc.expectedMax, resultMax)
assert.Equalf(t, tc.expectedInitial, resultInitial, "wrong base calculated, want: %v, got %v", tc.expectedInitial, resultInitial)
})
}
}
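// TestSyncPodWithErrorsDuringInPlacePodResize verifies that a resize error from the
// runtime is surfaced as a PodResizeInProgress condition with reason Error, and that
// the condition is cleared when the sync results carry no resize error.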
func TestSyncPodWithErrorsDuringInPlacePodResize(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
testCases := []struct {
name string
syncResults *kubecontainer.PodSyncResult
expectedErr string
expectedResizeConditions []*v1.PodCondition
}{
{
name: "pod resize error returned from the runtime",
syncResults: &kubecontainer.PodSyncResult{
SyncResults: []*kubecontainer.SyncResult{{
Action: kubecontainer.ResizePodInPlace,
Target: pod.UID,
Error: kubecontainer.ErrResizePodInPlace,
Message: "could not resize pod",
}},
},
expectedErr: "failed to \"ResizePodInPlace\" for \"12345678\" with ResizePodInPlaceError: \"could not resize pod\"",
expectedResizeConditions: []*v1.PodCondition{{
Type: v1.PodResizeInProgress,
Status: v1.ConditionTrue,
Reason: v1.PodReasonError,
Message: "could not resize pod",
}},
},
{
name: "pod resize error cleared upon successful run",
syncResults: &kubecontainer.PodSyncResult{
SyncResults: []*kubecontainer.SyncResult{{
Action: kubecontainer.ResizePodInPlace,
Target: pod.UID,
}},
},
expectedResizeConditions: nil,
},
{
name: "sync results have a non-resize error",
syncResults: &kubecontainer.PodSyncResult{
SyncResults: []*kubecontainer.SyncResult{{
Action: kubecontainer.CreatePodSandbox,
Target: pod.UID,
Error: kubecontainer.ErrCreatePodSandbox,
Message: "could not create pod sandbox",
}},
},
expectedErr: "failed to \"CreatePodSandbox\" for \"12345678\" with CreatePodSandboxError: \"could not create pod sandbox\"",
expectedResizeConditions: nil,
},
{
name: "sync results have a non-resize error and a successful pod resize action",
syncResults: &kubecontainer.PodSyncResult{
SyncResults: []*kubecontainer.SyncResult{
{
Action: kubecontainer.CreatePodSandbox,
Target: pod.UID,
Error: kubecontainer.ErrCreatePodSandbox,
Message: "could not create pod sandbox",
},
{
Action: kubecontainer.ResizePodInPlace,
Target: pod.UID,
},
},
},
expectedErr: "failed to \"CreatePodSandbox\" for \"12345678\" with CreatePodSandboxError: \"could not create pod sandbox\"",
expectedResizeConditions: nil,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
testKubelet.fakeRuntime.SyncResults = tc.syncResults
kubelet.podManager.SetPods([]*v1.Pod{pod})
isTerminal, err := kubelet.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
require.False(t, isTerminal)
if tc.expectedErr == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedErr, err.Error())
}
gotResizeConditions := kubelet.statusManager.GetPodResizeConditions(pod.UID)
for _, c := range gotResizeConditions {
// ignore last probe and transition times for comparison
c.LastProbeTime = metav1.Time{}
c.LastTransitionTime = metav1.Time{}
}
require.Equal(t, tc.expectedResizeConditions, gotResizeConditions)
})
}
}
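// TestHandlePodUpdates_RecordContainerRequestedResizes verifies that HandlePodUpdates
// increments kubelet_container_requested_resizes_total once per added, removed,
// increased, or decreased container resource requirement.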
func TestHandlePodUpdates_RecordContainerRequestedResizes(t *testing.T) {
metrics.Register()
metrics.ContainerRequestedResizes.Reset()
type expectedMetricsStruct struct {
memoryLimitsCounter map[string]int
memoryRequestsCounter map[string]int
cpuLimitsCounter map[string]int
cpuRequestsCounter map[string]int
}
expectedMetrics := expectedMetricsStruct{
memoryLimitsCounter: make(map[string]int),
memoryRequestsCounter: make(map[string]int),
cpuLimitsCounter: make(map[string]int),
cpuRequestsCounter: make(map[string]int),
}
for _, tc := range []struct {
name string
initialAllocation *v1.Pod
updatedPod *v1.Pod
updateExpectedFunc func(*expectedMetricsStruct)
}{
// Memory requests
{
name: "add memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryRequestsCounter["add"]++
},
},
{
name: "remove memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryRequestsCounter["remove"]++
},
},
{
name: "increase memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
}},
},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryRequestsCounter["increase"]++
},
},
{
name: "decrease memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryRequestsCounter["decrease"]++
},
},
// Memory limits
{
name: "add memory limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryLimitsCounter["add"]++
},
},
{
name: "remove memory limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryLimitsCounter["remove"]++
},
},
{
name: "increase memory limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryLimitsCounter["increase"]++
},
},
{
name: "decrease memory limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryLimitsCounter["decrease"]++
},
},
// CPU requests
{
name: "add cpu requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuRequestsCounter["add"]++
},
},
{
name: "remove cpu requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuRequestsCounter["remove"]++
},
},
{
name: "increase cpu requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuRequestsCounter["increase"]++
},
},
{
name: "decrease cpu requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuRequestsCounter["decrease"]++
},
},
// CPU limits
{
name: "add cpu limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["add"]++
},
},
{
name: "remove cpu limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["remove"]++
},
},
{
name: "increase cpu limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["increase"]++
},
},
{
name: "decrease cpu limits",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["decrease"]++
},
},
// Some combinations of things
{
name: "add cpu limits + increase memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["add"]++
e.memoryRequestsCounter["increase"]++
},
},
{
name: "remove memory limits + decrease cpu requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("90"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.memoryLimitsCounter["remove"]++
e.cpuRequestsCounter["decrease"]++
},
},
{
name: "increase cpu requests + memory requests",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuRequestsCounter["increase"]++
e.memoryRequestsCounter["increase"]++
},
},
{
name: "decrease all possible values",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
v1.ResourceMemory: resource.MustParse("110"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("110"),
v1.ResourceMemory: resource.MustParse("110"),
},
},
}},
},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
},
}},
},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {
e.cpuLimitsCounter["decrease"]++
e.cpuRequestsCounter["decrease"]++
e.memoryLimitsCounter["decrease"]++
e.memoryRequestsCounter["decrease"]++
},
},
{
name: "no resize request",
initialAllocation: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updatedPod: &v1.Pod{
Spec: v1.PodSpec{Containers: []v1.Container{{}}},
},
updateExpectedFunc: func(e *expectedMetricsStruct) {},
},
} {
t.Run(tc.name, func(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
UID: "12345",
},
}
initialPod := testPod.DeepCopy()
updatedPod := testPod.DeepCopy()
initialPod.Spec = tc.initialAllocation.Spec
updatedPod.Spec = tc.updatedPod.Spec
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
t.Cleanup(func() { testKubelet.Cleanup() })
kubelet := testKubelet.kubelet
kubelet.podManager.AddPod(initialPod)
require.NoError(t, kubelet.allocationManager.SetAllocatedResources(initialPod))
kubelet.HandlePodUpdates([]*v1.Pod{updatedPod})
tc.updateExpectedFunc(&expectedMetrics)
expectedFormat := `
# HELP kubelet_container_requested_resizes_total [ALPHA] Number of requested resizes, counted at the container level. Different resources on the same container are counted separately. The 'requirement' label refers to 'memory' or 'limits'; the 'operation' label can be one of 'add', 'remove', 'increase' or 'decrease'.
# TYPE kubelet_container_requested_resizes_total counter
kubelet_container_requested_resizes_total{operation="add",requirement="requests",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="add",requirement="requests",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="add",requirement="limits",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="add",requirement="limits",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="decrease",requirement="requests",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="decrease",requirement="requests",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="decrease",requirement="limits",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="decrease",requirement="limits",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="increase",requirement="requests",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="increase",requirement="requests",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="increase",requirement="limits",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="increase",requirement="limits",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="remove",requirement="requests",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="remove",requirement="requests",resource="cpu"} %d
kubelet_container_requested_resizes_total{operation="remove",requirement="limits",resource="memory"} %d
kubelet_container_requested_resizes_total{operation="remove",requirement="limits",resource="cpu"} %d
`
expected := fmt.Sprintf(expectedFormat,
expectedMetrics.memoryRequestsCounter["add"],
expectedMetrics.cpuRequestsCounter["add"],
expectedMetrics.memoryLimitsCounter["add"],
expectedMetrics.cpuLimitsCounter["add"],
expectedMetrics.memoryRequestsCounter["decrease"],
expectedMetrics.cpuRequestsCounter["decrease"],
expectedMetrics.memoryLimitsCounter["decrease"],
expectedMetrics.cpuLimitsCounter["decrease"],
expectedMetrics.memoryRequestsCounter["increase"],
expectedMetrics.cpuRequestsCounter["increase"],
expectedMetrics.memoryLimitsCounter["increase"],
expectedMetrics.cpuLimitsCounter["increase"],
expectedMetrics.memoryRequestsCounter["remove"],
expectedMetrics.cpuRequestsCounter["remove"],
expectedMetrics.memoryLimitsCounter["remove"],
expectedMetrics.cpuLimitsCounter["remove"],
)
// Omit lines from the expected metrics string where the count is "0"
re := regexp.MustCompile("(?m)[\r\n]+^.*} 0.*$")
expected = re.ReplaceAllString(expected, "")
require.NoError(t, testutil.GatherAndCompare(
legacyregistry.DefaultGatherer, strings.NewReader(expected), "kubelet_container_requested_resizes_total",
))
})
}
}
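// TestHandlePodReconcile_RetryPendingResizes verifies that a deferred pending resize is
// retried only when HandlePodReconcile observes a pod's actuated resources decreasing.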
func TestHandlePodReconcile_RetryPendingResizes(t *testing.T) {
if goruntime.GOOS == "windows" {
t.Skip("InPlacePodVerticalScaling is not currently supported for Windows")
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, true /*enableResizing*/)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
lowCPU := resource.MustParse("500m")
highCPU := resource.MustParse("1")
lowMem := resource.MustParse("500Mi")
highMem := resource.MustParse("1Gi")
// Set desired resources to some huge value to verify that they are being ignored in the aggregate check.
enormousCPU := resource.MustParse("2000m")
enormousMem := resource.MustParse("2000Mi")
makePodWithResources := func(name string, requests v1.ResourceList, status v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
UID: types.UID(name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "i1",
Resources: v1.ResourceRequirements{
Requests: requests,
},
},
},
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: "c1",
Resources: &v1.ResourceRequirements{
Requests: status,
},
},
},
},
}
}
pendingResizeAllocated := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-pending-resize",
UID: types.UID("pod-pending-resize"),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "i1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem},
},
},
},
},
}
pendingResizeDesired := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-pending-resize",
UID: types.UID("pod-pending-resize"),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "i1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem},
},
},
},
},
}
testCases := []struct {
name string
oldPod *v1.Pod
newPod *v1.Pod
shouldRetryPendingResize bool
}{
{
name: "requests are increasing",
oldPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
newPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}),
shouldRetryPendingResize: false,
},
{
name: "requests are unchanged",
oldPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
newPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
shouldRetryPendingResize: false,
},
{
name: "requests are decreasing",
oldPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}, v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}),
newPod: makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
shouldRetryPendingResize: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Reject resizes for the pending-resize pod so that its resize remains pending.
handler := &testPodAdmitHandler{podsToReject: []*v1.Pod{pendingResizeAllocated}}
kubelet.allocationManager.AddPodAdmitHandlers(lifecycle.PodAdmitHandlers{handler})
require.NoError(t, kubelet.allocationManager.SetAllocatedResources(pendingResizeAllocated))
require.NoError(t, kubelet.allocationManager.SetAllocatedResources(tc.oldPod))
// We only expect status resources to change in HandlePodReconcile.
tc.oldPod.Spec = tc.newPod.Spec
kubelet.podManager.AddPod(pendingResizeDesired)
kubelet.podManager.AddPod(tc.oldPod)
kubelet.allocationManager.PushPendingResize(pendingResizeDesired.UID)
kubelet.statusManager.ClearPodResizePendingCondition(pendingResizeDesired.UID)
kubelet.HandlePodReconcile([]*v1.Pod{tc.newPod})
require.Equal(t, tc.shouldRetryPendingResize, kubelet.statusManager.IsPodResizeDeferred(pendingResizeDesired.UID))
kubelet.allocationManager.RemovePod(pendingResizeDesired.UID)
kubelet.podManager.RemovePod(pendingResizeDesired)
kubelet.podManager.RemovePod(tc.oldPod)
})
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"os"
"path/filepath"
"syscall"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/util/removeall"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// ListVolumesForPod returns a map of the mounted volumes for the given pod.
// The key in the map is the OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name)
func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
volumesToReturn := make(map[string]volume.Volume)
podVolumes := kl.volumeManager.GetMountedVolumesForPod(
volumetypes.UniquePodName(podUID))
for outerVolumeSpecName, volume := range podVolumes {
// TODO: volume.Mounter could be nil if volume object is recovered
// from reconciler's sync state process. PR 33616 will fix this problem
// to create Mounter object when recovering volume state.
if volume.Mounter == nil {
continue
}
volumesToReturn[outerVolumeSpecName] = volume.Mounter
}
return volumesToReturn, len(volumesToReturn) > 0
}
// ListBlockVolumesForPod returns a map of the mapped block volumes for the
// given pod. The key in the map is the OuterVolumeSpecName (i.e.
// pod.Spec.Volumes[x].Name)
func (kl *Kubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) {
volumesToReturn := make(map[string]volume.BlockVolume)
podVolumes := kl.volumeManager.GetMountedVolumesForPod(
volumetypes.UniquePodName(podUID))
for outerVolumeSpecName, volume := range podVolumes {
// TODO: volume.Mounter could be nil if volume object is recovered
// from reconciler's sync state process. PR 33616 will fix this problem
// to create Mounter object when recovering volume state.
if volume.BlockVolumeMapper == nil {
continue
}
volumesToReturn[outerVolumeSpecName] = volume.BlockVolumeMapper
}
return volumesToReturn, len(volumesToReturn) > 0
}
// podVolumesExist checks with the volume manager and returns true if any of
// the volumes for the specified pod are mounted or in an uncertain state.
func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
if kl.volumeManager.HasPossiblyMountedVolumesForPod(volumetypes.UniquePodName(podUID)) {
return true
}
// TODO: This checks pod volume paths and whether they are mounted. If the check returns an error,
// podVolumesExist will return true, meaning the volumes might still exist and require
// further checking. Some volume plugins, such as flexvolume, might not have mounts. See issue #61229.
volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
if err != nil {
klog.ErrorS(err, "Pod found, but error occurred during checking mounted volumes from disk", "podUID", podUID)
return true
}
if len(volumePaths) > 0 {
klog.V(4).InfoS("Pod found, but volumes are still mounted on disk", "podUID", podUID, "paths", volumePaths)
return true
}
return false
}
// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
if err != nil {
return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
}
physicalMounter, err := plugin.NewMounter(spec, pod)
if err != nil {
return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
}
klog.V(10).InfoS("Using volume plugin for mount", "volumePluginName", plugin.GetPluginName(), "volumeName", spec.Name())
return physicalMounter, nil
}
// removeOrphanedPodVolumeDirs attempts to remove the pod volumes directory and
// its subdirectories. There should be no files left under normal conditions
// when this is called, so it effectively does a recursive rmdir instead of
// RemoveAll to ensure it only removes empty directories and files that were
// used as mount points, but not content of the mount points.
func (kl *Kubelet) removeOrphanedPodVolumeDirs(uid types.UID) []error {
orphanVolumeErrors := []error{}
// If there are still volume directories, attempt to rmdir them
volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
if err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error occurred during reading volume dir from disk: %v", uid, err))
return orphanVolumeErrors
}
if len(volumePaths) > 0 {
for _, volumePath := range volumePaths {
if err := syscall.Rmdir(volumePath); err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err))
} else {
klog.InfoS("Cleaned up orphaned volume from pod", "podUID", uid, "path", volumePath)
}
}
}
// If there are any volume-subpaths, attempt to remove them
subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid)
if err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error occurred during reading of volume-subpaths dir from disk: %v", uid, err))
return orphanVolumeErrors
}
if len(subpathVolumePaths) > 0 {
for _, subpathVolumePath := range subpathVolumePaths {
// Remove both files and empty directories here, as the subpath may have been a bind-mount of a file or a directory.
if err := os.Remove(subpathVolumePath); err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err))
} else {
klog.InfoS("Cleaned up orphaned volume subpath from pod", "podUID", uid, "path", subpathVolumePath)
}
}
}
// Remove any remaining subdirectories along with the volumes directory itself.
// Fail if any regular files are encountered.
podVolDir := kl.getPodVolumesDir(uid)
if err := removeall.RemoveDirsOneFilesystem(kl.mounter, podVolDir); err != nil {
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error occurred when trying to remove the volumes dir: %v", uid, err))
} else {
klog.InfoS("Cleaned up orphaned pod volumes dir", "podUID", uid, "path", podVolDir)
}
return orphanVolumeErrors
}
// cleanupOrphanedPodDirs removes the volumes of pods that should not be
// running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
allPods := sets.New[string]()
for _, pod := range pods {
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
allPods.Insert(string(pod.ID))
}
found, err := kl.listPodsFromDisk()
if err != nil {
return err
}
orphanRemovalErrors := []error{}
orphanVolumeErrors := []error{}
var totalPods, errorPods int
for _, uid := range found {
if allPods.Has(string(uid)) {
continue
}
totalPods++
// If volumes have not been unmounted/detached, do not delete directory.
// Doing so may result in corruption of data.
// TODO: getMountedVolumePathListFromDisk() call may be redundant with
// kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
errorPods++
klog.V(3).InfoS("Orphaned pod found, but volumes are not cleaned up", "podUID", uid)
continue
}
// Attempt to remove the pod volumes directory and its subdirs
podVolumeErrors := kl.removeOrphanedPodVolumeDirs(uid)
if len(podVolumeErrors) > 0 {
errorPods++
orphanVolumeErrors = append(orphanVolumeErrors, podVolumeErrors...)
// Not all volumes were removed, so don't clean up the pod directory yet. It is likely
// that there are still mountpoints or files left which could cause removal of the pod
// directory to fail below.
// Errors for all removal operations have already been recorded, so don't add another
// one here.
continue
}
// Call RemoveAllOneFilesystem for remaining subdirs under the pod directory
podDir := kl.getPodDir(uid)
podSubdirs, err := os.ReadDir(podDir)
if err != nil {
errorPods++
klog.ErrorS(err, "Could not read directory", "path", podDir)
orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error occurred during reading the pod dir from disk: %v", uid, err))
continue
}
var cleanupFailed bool
for _, podSubdir := range podSubdirs {
podSubdirName := podSubdir.Name()
podSubdirPath := filepath.Join(podDir, podSubdirName)
// Never attempt RemoveAllOneFilesystem on the volumes directory,
// as this could lead to data loss in some situations. The volumes
// directory should have been removed by removeOrphanedPodVolumeDirs.
if podSubdirName == "volumes" {
cleanupFailed = true
err := fmt.Errorf("volumes subdir was found after it was removed")
klog.ErrorS(err, "Orphaned pod found, but failed to remove volumes subdir", "podUID", uid, "path", podSubdirPath)
continue
}
if err := removeall.RemoveAllOneFilesystem(kl.mounter, podSubdirPath); err != nil {
cleanupFailed = true
klog.ErrorS(err, "Failed to remove orphaned pod subdir", "podUID", uid, "path", podSubdirPath)
orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error occurred when trying to remove subdir %q: %v", uid, podSubdirPath, err))
}
}
// Rmdir the pod dir, which should be empty if everything above was successful
klog.V(3).InfoS("Orphaned pod found, removing", "podUID", uid)
if err := syscall.Rmdir(podDir); err != nil {
cleanupFailed = true
klog.ErrorS(err, "Failed to remove orphaned pod dir", "podUID", uid)
orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error occurred when trying to remove the pod directory: %v", uid, err))
}
if cleanupFailed {
errorPods++
}
}
logSpew := func(errs []error) {
if len(errs) > 0 {
klog.ErrorS(errs[0], "There were many similar errors. Turn up verbosity to see them.", "numErrs", len(errs))
for _, err := range errs {
klog.V(5).InfoS("Orphan pod", "err", err)
}
}
}
logSpew(orphanVolumeErrors)
logSpew(orphanRemovalErrors)
metrics.OrphanPodCleanedVolumes.Set(float64(totalPods))
metrics.OrphanPodCleanedVolumesErrors.Set(float64(errorPods))
return utilerrors.NewAggregate(orphanRemovalErrors)
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"sort"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// This file contains helper functions for converting kuberuntime types to CRI runtime API types, or vice versa.
func toKubeContainerImageSpec(image *runtimeapi.Image) kubecontainer.ImageSpec {
var annotations []kubecontainer.Annotation
if image.Spec != nil && len(image.Spec.Annotations) > 0 {
annotationKeys := make([]string, 0, len(image.Spec.Annotations))
for k := range image.Spec.Annotations {
annotationKeys = append(annotationKeys, k)
}
sort.Strings(annotationKeys)
for _, k := range annotationKeys {
annotations = append(annotations, kubecontainer.Annotation{
Name: k,
Value: image.Spec.Annotations[k],
})
}
}
spec := kubecontainer.ImageSpec{
Image: image.Id,
Annotations: annotations,
}
// if RuntimeClassInImageCriAPI feature gate is enabled, set runtimeHandler CRI field
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
runtimeHandler := ""
if image.Spec != nil {
runtimeHandler = image.Spec.RuntimeHandler
}
spec.RuntimeHandler = runtimeHandler
}
return spec
}
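// Illustrative sketch (not part of the original source): annotation keys are
// sorted before conversion, so the resulting ImageSpec is deterministic even
// though Go map iteration order is not:
//
//	img := &runtimeapi.Image{
//		Id: "sha256:abc123",
//		Spec: &runtimeapi.ImageSpec{
//			Annotations: map[string]string{"zeta": "2", "alpha": "1"},
//		},
//	}
//	spec := toKubeContainerImageSpec(img)
//	// spec.Annotations is always [{alpha 1} {zeta 2}].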
func toRuntimeAPIImageSpec(imageSpec kubecontainer.ImageSpec) *runtimeapi.ImageSpec {
var annotations = make(map[string]string)
if imageSpec.Annotations != nil {
for _, a := range imageSpec.Annotations {
annotations[a.Name] = a.Value
}
}
spec := runtimeapi.ImageSpec{
Image: imageSpec.Image,
Annotations: annotations,
}
// if RuntimeClassInImageCriAPI feature gate is enabled, set runtimeHandler CRI field
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
spec.RuntimeHandler = imageSpec.RuntimeHandler
}
return &spec
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"net/http"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/component-base/logs/logreduction"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/kubelet/allocation"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/images"
imagepullmanager "k8s.io/kubernetes/pkg/kubelet/images/pullmanager"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/utils/ptr"
)
const (
fakeSeccompProfileRoot = "/fakeSeccompProfileRoot"
fakeNodeAllocatableMemory = "32Gi"
fakeNodeAllocatableCPU = "16"
fakePodLogsDirectory = "/var/log/pods"
)
type fakeHTTP struct {
req *http.Request
err error
}
func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) {
f.req = req
return nil, f.err
}
type fakePodStateProvider struct {
terminated map[types.UID]struct{}
removed map[types.UID]struct{}
}
func newFakePodStateProvider() *fakePodStateProvider {
return &fakePodStateProvider{
terminated: make(map[types.UID]struct{}),
removed: make(map[types.UID]struct{}),
}
}
func (f *fakePodStateProvider) IsPodTerminationRequested(uid types.UID) bool {
_, found := f.removed[uid]
return found
}
func (f *fakePodStateProvider) ShouldPodRuntimeBeRemoved(uid types.UID) bool {
_, found := f.terminated[uid]
return found
}
func (f *fakePodStateProvider) ShouldPodContentBeRemoved(uid types.UID) bool {
_, found := f.removed[uid]
return found
}
type fakePodPullingTimeRecorder struct{}
func (f *fakePodPullingTimeRecorder) RecordImageStartedPulling(podUID types.UID) {}
func (f *fakePodPullingTimeRecorder) RecordImageFinishedPulling(podUID types.UID) {}
func newFakeKubeRuntimeManager(ctx context.Context, runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, tracer trace.Tracer) (*kubeGenericRuntimeManager, error) {
recorder := &record.FakeRecorder{}
logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2, 10, metav1.Duration{Duration: 10 * time.Second})
if err != nil {
return nil, err
}
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
cpuCFSQuota: false,
cpuCFSQuotaPeriod: metav1.Duration{Duration: time.Millisecond * 100},
livenessManager: proberesults.NewManager(),
startupManager: proberesults.NewManager(),
machineInfo: machineInfo,
osInterface: osInterface,
containerManager: cm.NewFakeContainerManager(),
runtimeHelper: runtimeHelper,
runtimeService: runtimeService,
imageService: imageService,
seccompProfileRoot: fakeSeccompProfileRoot,
internalLifecycle: cm.NewFakeInternalContainerLifecycle(),
logReduction: logreduction.NewLogReduction(identicalErrorDelay),
logManager: logManager,
memoryThrottlingFactor: 0.9,
podLogsDirectory: fakePodLogsDirectory,
allocationManager: allocation.NewInMemoryManager(cm.NodeConfig{}, nil, nil, nil, nil, nil, nil),
}
// Initialize swap controller availability check (always false for tests)
kubeRuntimeManager.getSwapControllerAvailable = func() bool { return false }
typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil {
return nil, err
}
podStateProvider := newFakePodStateProvider()
kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager, tracer)
kubeRuntimeManager.podStateProvider = podStateProvider
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
&credentialprovider.BasicDockerKeyring{},
kubeRuntimeManager,
&imagepullmanager.NoopImagePullManager{},
flowcontrol.NewBackOff(time.Second, 300*time.Second),
false,
ptr.To[int32](0), // No limit on max parallel image pulls,
0, // Disable image pull throttling by setting QPS to 0,
0,
&fakePodPullingTimeRecorder{},
)
kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
&fakeHTTP{},
kubeRuntimeManager,
kubeRuntimeManager,
recorder)
kubeRuntimeManager.getNodeAllocatable = func() v1.ResourceList {
return v1.ResourceList{
v1.ResourceMemory: resource.MustParse(fakeNodeAllocatableMemory),
v1.ResourceCPU: resource.MustParse(fakeNodeAllocatableCPU),
}
}
return kubeRuntimeManager, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"hash/fnv"
"path/filepath"
"strconv"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/security/apparmor"
)
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
// Newest first.
type podSandboxByCreated []*runtimeapi.PodSandbox
func (p podSandboxByCreated) Len() int { return len(p) }
func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podSandboxByCreated) Less(i, j int) bool {
if p[i].Metadata == nil || p[j].Metadata == nil {
return p[i].CreatedAt > p[j].CreatedAt
}
return p[i].Metadata.Attempt > p[j].Metadata.Attempt
}
type containerStatusByCreated []*kubecontainer.Status
func (c containerStatusByCreated) Len() int { return len(c) }
func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.State.
func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.State {
switch state {
case runtimeapi.ContainerState_CONTAINER_CREATED:
return kubecontainer.ContainerStateCreated
case runtimeapi.ContainerState_CONTAINER_RUNNING:
return kubecontainer.ContainerStateRunning
case runtimeapi.ContainerState_CONTAINER_EXITED:
return kubecontainer.ContainerStateExited
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
return kubecontainer.ContainerStateUnknown
}
return kubecontainer.ContainerStateUnknown
}
// toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol.
func toRuntimeProtocol(logger klog.Logger, protocol v1.Protocol) runtimeapi.Protocol {
switch protocol {
case v1.ProtocolTCP:
return runtimeapi.Protocol_TCP
case v1.ProtocolUDP:
return runtimeapi.Protocol_UDP
case v1.ProtocolSCTP:
return runtimeapi.Protocol_SCTP
}
logger.Info("Unknown protocol, defaulting to TCP", "protocol", protocol)
return runtimeapi.Protocol_TCP
}
// toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(ctx context.Context, c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == "" || c.Image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
// Keep backwards compatibility with older runtimes: c.ImageId was added in v1.30.
imageID := c.ImageRef
if c.ImageId != "" {
imageID = c.ImageId
}
annotatedInfo := getContainerInfoFromAnnotations(ctx, c.Annotations)
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.Id},
Name: c.GetMetadata().GetName(),
ImageID: imageID,
ImageRef: c.ImageRef,
ImageRuntimeHandler: c.Image.RuntimeHandler,
Image: c.Image.Image,
Hash: annotatedInfo.Hash,
State: toKubeContainerState(c.State),
}, nil
}
// sandboxToKubeContainer converts runtimeapi.PodSandbox to kubecontainer.Container.
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
if s == nil || s.Id == "" {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.Id},
State: kubecontainer.SandboxToContainerState(s.State),
}, nil
}
// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(ctx context.Context, image string) (*int64, string, error) {
resp, err := m.imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: image}, false)
if err != nil {
return nil, "", err
}
imageStatus := resp.GetImage()
if imageStatus != nil {
if imageStatus.Uid != nil {
return &imageStatus.GetUid().Value, "", nil
}
if imageStatus.Username != "" {
return nil, imageStatus.Username, nil
}
}
// If none of them is set, treat it as root.
return new(int64), "", nil
}
// isInitContainerFailed returns true under the following conditions:
// 1. container has exited and exitcode is not zero.
// 2. container is in unknown state.
// 3. container gets OOMKilled.
func isInitContainerFailed(status *kubecontainer.Status) bool {
// When an OOM kill occurs, the init container should be considered failed.
if status.Reason == "OOMKilled" {
return true
}
if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
return true
}
if status.State == kubecontainer.ContainerStateUnknown {
return true
}
return false
}
// GetBackoffKey generates a key (string) to uniquely identify a (pod, container) tuple for tracking
// container backoff. The key should include any content of the container that is tied to the
// backoff, so that any change generates a new key.
func GetBackoffKey(pod *v1.Pod, container *v1.Container) string {
// Include stable identifiers (name, namespace, uid) as well as any
// fields that should reset the backoff when changed.
key := []string{
pod.Name,
pod.Namespace,
string(pod.UID),
container.Name,
container.Image,
container.Resources.String(),
}
hash := fnv.New64a()
hash.Write([]byte(strings.Join(key, "/")))
return strconv.FormatUint(hash.Sum64(), 16)
}
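// Illustrative sketch (not part of the original source): changing any keyed
// field, such as the image, produces a different key, which resets the
// tracked backoff for the (pod, container) tuple:
//
//	c := &v1.Container{Name: "app", Image: "nginx:1.25"} // hypothetical container
//	k1 := GetBackoffKey(pod, c)
//	c.Image = "nginx:1.26"
//	k2 := GetBackoffKey(pod, c)
//	// k1 != k2, so the updated container does not inherit the old backoff.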
// logPathDelimiter is the delimiter used in the log path.
const logPathDelimiter = "_"
// buildContainerLogsPath builds log path for container relative to pod logs directory.
func buildContainerLogsPath(containerName string, restartCount int) string {
return filepath.Join(containerName, fmt.Sprintf("%d.log", restartCount))
}
// BuildContainerLogsDirectory builds absolute log directory path for a container in pod.
func BuildContainerLogsDirectory(podLogsDir, podNamespace, podName string, podUID types.UID, containerName string) string {
return filepath.Join(BuildPodLogsDirectory(podLogsDir, podNamespace, podName, podUID), containerName)
}
// BuildPodLogsDirectory builds absolute log directory path for a pod sandbox.
func BuildPodLogsDirectory(podLogsDir, podNamespace, podName string, podUID types.UID) string {
return filepath.Join(podLogsDir, strings.Join([]string{podNamespace, podName,
string(podUID)}, logPathDelimiter))
}
// parsePodUIDFromLogsDirectory parses pod logs directory name and returns the pod UID.
// It supports both the old pod log directory /var/log/pods/UID, and the new pod log
// directory /var/log/pods/NAMESPACE_NAME_UID.
func parsePodUIDFromLogsDirectory(name string) types.UID {
parts := strings.Split(name, logPathDelimiter)
return types.UID(parts[len(parts)-1])
}
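// Illustrative sketch (not part of the original source): the helpers above
// compose into the full per-container log path, and parsing recovers the pod
// UID from the directory name (values below are hypothetical):
//
//	dir := BuildPodLogsDirectory("/var/log/pods", "default", "nginx", types.UID("8dbef743"))
//	// dir == "/var/log/pods/default_nginx_8dbef743"
//	logFile := filepath.Join(dir, buildContainerLogsPath("web", 0))
//	// logFile == "/var/log/pods/default_nginx_8dbef743/web/0.log"
//	uid := parsePodUIDFromLogsDirectory("default_nginx_8dbef743")
//	// uid == types.UID("8dbef743")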
// toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus.
func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus, handlers []*runtimeapi.RuntimeHandler, features *runtimeapi.RuntimeFeatures) *kubecontainer.RuntimeStatus {
conditions := []kubecontainer.RuntimeCondition{}
for _, c := range status.GetConditions() {
conditions = append(conditions, kubecontainer.RuntimeCondition{
Type: kubecontainer.RuntimeConditionType(c.Type),
Status: c.Status,
Reason: c.Reason,
Message: c.Message,
})
}
retHandlers := make([]kubecontainer.RuntimeHandler, len(handlers))
for i, h := range handlers {
supportsRRO := false
supportsUserns := false
if h.Features != nil {
supportsRRO = h.Features.RecursiveReadOnlyMounts
supportsUserns = h.Features.UserNamespaces
}
retHandlers[i] = kubecontainer.RuntimeHandler{
Name: h.Name,
SupportsRecursiveReadOnlyMounts: supportsRRO,
SupportsUserNamespaces: supportsUserns,
}
}
var retFeatures *kubecontainer.RuntimeFeatures
if features != nil {
retFeatures = &kubecontainer.RuntimeFeatures{
SupplementalGroupsPolicy: features.SupplementalGroupsPolicy,
}
}
return &kubecontainer.RuntimeStatus{Conditions: conditions, Handlers: retHandlers, Features: retFeatures}
}
func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) {
if scmp == nil {
if fallbackToRuntimeDefault {
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
}, nil
}
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Unconfined,
}, nil
}
if scmp.Type == v1.SeccompProfileTypeRuntimeDefault {
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
}, nil
}
if scmp.Type == v1.SeccompProfileTypeLocalhost {
if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 {
fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile)
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Localhost,
LocalhostRef: fname,
}, nil
} else {
return nil, fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.")
}
}
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Unconfined,
}, nil
}
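// Illustrative sketch (not part of the original source): a Localhost profile
// is resolved relative to the given profile root (example path below):
//
//	name := "audit.json"
//	p, _ := fieldSeccompProfile(&v1.SeccompProfile{
//		Type:             v1.SeccompProfileTypeLocalhost,
//		LocalhostProfile: &name,
//	}, "/var/lib/kubelet/seccomp", false)
//	// p.ProfileType == runtimeapi.SecurityProfile_Localhost
//	// p.LocalhostRef == "/var/lib/kubelet/seccomp/audit.json"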
func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]string, containerName string,
podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) {
// container fields are applied first
if containerSecContext != nil && containerSecContext.SeccompProfile != nil {
return fieldSeccompProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault)
}
// when container seccomp is not defined, try to apply from pod field
if podSecContext != nil && podSecContext.SeccompProfile != nil {
return fieldSeccompProfile(podSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault)
}
if fallbackToRuntimeDefault {
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
}, nil
}
return &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Unconfined,
}, nil
}
func getAppArmorProfile(pod *v1.Pod, container *v1.Container) (*runtimeapi.SecurityProfile, string, error) {
profile := apparmor.GetProfile(pod, container)
if profile == nil {
return nil, "", nil
}
var (
securityProfile *runtimeapi.SecurityProfile
deprecatedProfile string // Deprecated apparmor profile format, still provided for backwards compatibility with older runtimes.
)
switch profile.Type {
case v1.AppArmorProfileTypeRuntimeDefault:
securityProfile = &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
}
deprecatedProfile = v1.DeprecatedAppArmorBetaProfileRuntimeDefault
case v1.AppArmorProfileTypeUnconfined:
securityProfile = &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Unconfined,
}
deprecatedProfile = v1.DeprecatedAppArmorBetaProfileNameUnconfined
case v1.AppArmorProfileTypeLocalhost:
if profile.LocalhostProfile == nil {
return nil, "", errors.New("missing localhost apparmor profile name")
}
securityProfile = &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_Localhost,
LocalhostRef: *profile.LocalhostProfile,
}
deprecatedProfile = v1.DeprecatedAppArmorBetaProfileNamePrefix + *profile.LocalhostProfile
default:
// Shouldn't happen.
return nil, "", fmt.Errorf("unknown apparmor profile type: %q", profile.Type)
}
return securityProfile, deprecatedProfile, nil
}
func mergeResourceConfig(source, update *cm.ResourceConfig) *cm.ResourceConfig {
if source == nil {
return update
}
if update == nil {
return source
}
merged := *source
if update.Memory != nil {
merged.Memory = update.Memory
}
if update.CPUSet.Size() > 0 {
merged.CPUSet = update.CPUSet
}
if update.CPUShares != nil {
merged.CPUShares = update.CPUShares
}
if update.CPUQuota != nil {
merged.CPUQuota = update.CPUQuota
}
if update.CPUPeriod != nil {
merged.CPUPeriod = update.CPUPeriod
}
if update.PidsLimit != nil {
merged.PidsLimit = update.PidsLimit
}
if update.HugePageLimit != nil {
if merged.HugePageLimit == nil {
merged.HugePageLimit = make(map[int64]int64)
}
for k, v := range update.HugePageLimit {
merged.HugePageLimit[k] = v
}
}
if update.Unified != nil {
if merged.Unified == nil {
merged.Unified = make(map[string]string)
}
for k, v := range update.Unified {
merged.Unified[k] = v
}
}
return &merged
}
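// Illustrative sketch (not part of the original source): fields set in update
// win, fields left nil in update fall through to source, and map entries are
// merged key by key:
//
//	mem, shares := int64(512<<20), uint64(1024)
//	newMem := int64(1 << 30)
//	src := &cm.ResourceConfig{Memory: &mem, CPUShares: &shares}
//	upd := &cm.ResourceConfig{Memory: &newMem}
//	merged := mergeResourceConfig(src, upd)
//	// *merged.Memory == 1<<30 (taken from upd)
//	// *merged.CPUShares == 1024 (kept from src)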
func convertResourceConfigToLinuxContainerResources(rc *cm.ResourceConfig) *runtimeapi.LinuxContainerResources {
if rc == nil {
return nil
}
lcr := &runtimeapi.LinuxContainerResources{}
if rc.CPUPeriod != nil {
lcr.CpuPeriod = int64(*rc.CPUPeriod)
}
if rc.CPUQuota != nil {
lcr.CpuQuota = *rc.CPUQuota
}
if rc.CPUShares != nil {
lcr.CpuShares = int64(*rc.CPUShares)
}
if rc.Memory != nil {
lcr.MemoryLimitInBytes = *rc.Memory
}
if rc.CPUSet.Size() > 0 {
lcr.CpusetCpus = rc.CPUSet.String()
}
if rc.Unified != nil {
lcr.Unified = make(map[string]string, len(rc.Unified))
for k, v := range rc.Unified {
lcr.Unified[k] = v
}
}
return lcr
}
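// Illustrative sketch (not part of the original source): only non-nil fields
// are copied into the CRI message; everything else keeps its zero value:
//
//	quota := int64(50000)
//	period := uint64(100000)
//	lcr := convertResourceConfigToLinuxContainerResources(&cm.ResourceConfig{
//		CPUQuota:  &quota,
//		CPUPeriod: &period,
//	})
//	// lcr.CpuQuota == 50000, lcr.CpuPeriod == 100000,
//	// lcr.MemoryLimitInBytes == 0 because Memory was nil.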
var signalNameToRuntimeEnum = map[string]runtimeapi.Signal{
"SIGABRT": runtimeapi.Signal_SIGABRT,
"SIGALRM": runtimeapi.Signal_SIGALRM,
"SIGBUS": runtimeapi.Signal_SIGBUS,
"SIGCHLD": runtimeapi.Signal_SIGCHLD,
"SIGCLD": runtimeapi.Signal_SIGCLD,
"SIGCONT": runtimeapi.Signal_SIGCONT,
"SIGFPE": runtimeapi.Signal_SIGFPE,
"SIGHUP": runtimeapi.Signal_SIGHUP,
"SIGILL": runtimeapi.Signal_SIGILL,
"SIGINT": runtimeapi.Signal_SIGINT,
"SIGIO": runtimeapi.Signal_SIGIO,
"SIGIOT": runtimeapi.Signal_SIGIOT,
"SIGKILL": runtimeapi.Signal_SIGKILL,
"SIGPIPE": runtimeapi.Signal_SIGPIPE,
"SIGPOLL": runtimeapi.Signal_SIGPOLL,
"SIGPROF": runtimeapi.Signal_SIGPROF,
"SIGPWR": runtimeapi.Signal_SIGPWR,
"SIGQUIT": runtimeapi.Signal_SIGQUIT,
"SIGSEGV": runtimeapi.Signal_SIGSEGV,
"SIGSTKFLT": runtimeapi.Signal_SIGSTKFLT,
"SIGSTOP": runtimeapi.Signal_SIGSTOP,
"SIGSYS": runtimeapi.Signal_SIGSYS,
"SIGTERM": runtimeapi.Signal_SIGTERM,
"SIGTRAP": runtimeapi.Signal_SIGTRAP,
"SIGTSTP": runtimeapi.Signal_SIGTSTP,
"SIGTTIN": runtimeapi.Signal_SIGTTIN,
"SIGTTOU": runtimeapi.Signal_SIGTTOU,
"SIGURG": runtimeapi.Signal_SIGURG,
"SIGUSR1": runtimeapi.Signal_SIGUSR1,
"SIGUSR2": runtimeapi.Signal_SIGUSR2,
"SIGVTALRM": runtimeapi.Signal_SIGVTALRM,
"SIGWINCH": runtimeapi.Signal_SIGWINCH,
"SIGXCPU": runtimeapi.Signal_SIGXCPU,
"SIGXFSZ": runtimeapi.Signal_SIGXFSZ,
"SIGRTMIN": runtimeapi.Signal_SIGRTMIN,
"SIGRTMIN+1": runtimeapi.Signal_SIGRTMINPLUS1,
"SIGRTMIN+2": runtimeapi.Signal_SIGRTMINPLUS2,
"SIGRTMIN+3": runtimeapi.Signal_SIGRTMINPLUS3,
"SIGRTMIN+4": runtimeapi.Signal_SIGRTMINPLUS4,
"SIGRTMIN+5": runtimeapi.Signal_SIGRTMINPLUS5,
"SIGRTMIN+6": runtimeapi.Signal_SIGRTMINPLUS6,
"SIGRTMIN+7": runtimeapi.Signal_SIGRTMINPLUS7,
"SIGRTMIN+8": runtimeapi.Signal_SIGRTMINPLUS8,
"SIGRTMIN+9": runtimeapi.Signal_SIGRTMINPLUS9,
"SIGRTMIN+10": runtimeapi.Signal_SIGRTMINPLUS10,
"SIGRTMIN+11": runtimeapi.Signal_SIGRTMINPLUS11,
"SIGRTMIN+12": runtimeapi.Signal_SIGRTMINPLUS12,
"SIGRTMIN+13": runtimeapi.Signal_SIGRTMINPLUS13,
"SIGRTMIN+14": runtimeapi.Signal_SIGRTMINPLUS14,
"SIGRTMIN+15": runtimeapi.Signal_SIGRTMINPLUS15,
"SIGRTMAX-14": runtimeapi.Signal_SIGRTMAXMINUS14,
"SIGRTMAX-13": runtimeapi.Signal_SIGRTMAXMINUS13,
"SIGRTMAX-12": runtimeapi.Signal_SIGRTMAXMINUS12,
"SIGRTMAX-11": runtimeapi.Signal_SIGRTMAXMINUS11,
"SIGRTMAX-10": runtimeapi.Signal_SIGRTMAXMINUS10,
"SIGRTMAX-9": runtimeapi.Signal_SIGRTMAXMINUS9,
"SIGRTMAX-8": runtimeapi.Signal_SIGRTMAXMINUS8,
"SIGRTMAX-7": runtimeapi.Signal_SIGRTMAXMINUS7,
"SIGRTMAX-6": runtimeapi.Signal_SIGRTMAXMINUS6,
"SIGRTMAX-5": runtimeapi.Signal_SIGRTMAXMINUS5,
"SIGRTMAX-4": runtimeapi.Signal_SIGRTMAXMINUS4,
"SIGRTMAX-3": runtimeapi.Signal_SIGRTMAXMINUS3,
"SIGRTMAX-2": runtimeapi.Signal_SIGRTMAXMINUS2,
"SIGRTMAX-1": runtimeapi.Signal_SIGRTMAXMINUS1,
"SIGRTMAX": runtimeapi.Signal_SIGRTMAX,
}
func getContainerConfigStopSignal(container *v1.Container) (stopsignal *runtimeapi.Signal) {
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerStopSignals) {
if container.Lifecycle != nil && container.Lifecycle.StopSignal != nil {
var signalValue runtimeapi.Signal
signalStr := string(*container.Lifecycle.StopSignal)
signalValue = signalNameToRuntimeEnum[signalStr]
return &signalValue
} else {
return nil
}
}
return nil
}
func runtimeSignalToString(signal runtimeapi.Signal) *v1.Signal {
var convertedSignal v1.Signal
for key, value := range signalNameToRuntimeEnum {
if value == signal {
convertedSignal = v1.Signal(key)
}
}
return &convertedSignal
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
)
// sharesToMilliCPU converts CpuShares (cpu.shares) to milli-CPU value
func sharesToMilliCPU(shares int64) int64 {
milliCPU := int64(0)
if shares >= int64(cm.MinShares) {
milliCPU = int64(math.Ceil(float64(shares*cm.MilliCPUToCPU) / float64(cm.SharesPerCPU)))
}
return milliCPU
}
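// Illustrative sketch (not part of the original source): with cm.SharesPerCPU
// shares (1024) representing one CPU and cm.MilliCPUToCPU == 1000, the
// conversion rounds up:
//
//	sharesToMilliCPU(1024) // 1000m, exactly one CPU
//	sharesToMilliCPU(512)  // 500m
//	sharesToMilliCPU(1)    // 0m, below cm.MinShares and treated as unset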
// quotaToMilliCPU converts cpu.cfs_quota_us and cpu.cfs_period_us to milli-CPU value
func quotaToMilliCPU(quota int64, period int64) int64 {
if quota == -1 {
return int64(0)
}
return (quota * cm.MilliCPUToCPU) / period
}
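// Illustrative sketch (not part of the original source): a quota of -1 means
// "unlimited" in the CFS interface and maps to 0 milli-CPU; otherwise the
// quota/period ratio gives the CPU fraction:
//
//	quotaToMilliCPU(-1, 100000)     // 0, unlimited
//	quotaToMilliCPU(50000, 100000)  // 500m, half a CPU
//	quotaToMilliCPU(200000, 100000) // 2000m, two CPUs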
func subtractOverheadFromResourceConfig(resCfg *cm.ResourceConfig, pod *v1.Pod) *cm.ResourceConfig {
if resCfg == nil {
return nil
}
rc := *resCfg
if pod.Spec.Overhead != nil {
if cpu, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
if rc.CPUPeriod != nil {
cpuPeriod := int64(*rc.CPUPeriod)
cpuQuota := *rc.CPUQuota - cm.MilliCPUToQuota(cpu.MilliValue(), cpuPeriod)
rc.CPUQuota = &cpuQuota
}
if rc.CPUShares != nil {
totalCPUMilli := sharesToMilliCPU(int64(*rc.CPUShares))
cpuShares := cm.MilliCPUToShares(totalCPUMilli - cpu.MilliValue())
rc.CPUShares = &cpuShares
}
}
if memory, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
if rc.Memory != nil {
currMemory := *rc.Memory
if mem, ok := memory.AsInt64(); ok {
currMemory -= mem
}
rc.Memory = &currMemory
}
}
}
return &rc
}
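// Illustrative sketch (not part of the original source): for a pod declaring
// 100m of CPU overhead with a 100000us CFS period, the quota is reduced by
// cm.MilliCPUToQuota(100, 100000) == 10000us:
//
//	period := uint64(100000)
//	quota := int64(60000)
//	rc := &cm.ResourceConfig{CPUPeriod: &period, CPUQuota: &quota}
//	adjusted := subtractOverheadFromResourceConfig(rc, pod) // pod is hypothetical
//	// *adjusted.CPUQuota == 50000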
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"time"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
// instrumentedRuntimeService wraps the RuntimeService and records the operations
// and errors metrics.
type instrumentedRuntimeService struct {
service internalapi.RuntimeService
}
// newInstrumentedRuntimeService creates an instrumented RuntimeService from an existing RuntimeService.
func newInstrumentedRuntimeService(service internalapi.RuntimeService) internalapi.RuntimeService {
return &instrumentedRuntimeService{service: service}
}
// instrumentedImageManagerService wraps the ImageManagerService and records the operations
// and errors metrics.
type instrumentedImageManagerService struct {
service internalapi.ImageManagerService
}
// newInstrumentedImageManagerService creates an instrumented ImageManagerService from an existing ImageManagerService.
func newInstrumentedImageManagerService(service internalapi.ImageManagerService) internalapi.ImageManagerService {
return &instrumentedImageManagerService{service: service}
}
// recordOperation records the duration of the operation.
func recordOperation(operation string, start time.Time) {
metrics.RuntimeOperations.WithLabelValues(operation).Inc()
metrics.RuntimeOperationsDuration.WithLabelValues(operation).Observe(metrics.SinceInSeconds(start))
}
// recordError records error for metric if an error occurred.
func recordError(operation string, err error) {
if err != nil {
metrics.RuntimeOperationsErrors.WithLabelValues(operation).Inc()
}
}
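// Illustrative sketch (not part of the original source): every wrapper method
// below follows the same pattern built from these two helpers: count and time
// the operation, delegate to the wrapped service, then record any error:
//
//	func (in instrumentedRuntimeService) SomeOp(ctx context.Context) error {
//		const operation = "some_op" // hypothetical operation name
//		defer recordOperation(operation, time.Now())
//		err := in.service.SomeOp(ctx) // hypothetical delegate call
//		recordError(operation, err)
//		return err
//	}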
func (in instrumentedRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) {
const operation = "version"
defer recordOperation(operation, time.Now())
out, err := in.service.Version(ctx, apiVersion)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
const operation = "status"
defer recordOperation(operation, time.Now())
out, err := in.service.Status(ctx, verbose)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) CreateContainer(ctx context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "create_container"
defer recordOperation(operation, time.Now())
out, err := in.service.CreateContainer(ctx, podSandboxID, config, sandboxConfig)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) StartContainer(ctx context.Context, containerID string) error {
const operation = "start_container"
defer recordOperation(operation, time.Now())
err := in.service.StartContainer(ctx, containerID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) error {
const operation = "stop_container"
defer recordOperation(operation, time.Now())
err := in.service.StopContainer(ctx, containerID, timeout)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) RemoveContainer(ctx context.Context, containerID string) error {
const operation = "remove_container"
defer recordOperation(operation, time.Now())
err := in.service.RemoveContainer(ctx, containerID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
const operation = "list_containers"
defer recordOperation(operation, time.Now())
out, err := in.service.ListContainers(ctx, filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
const operation = "container_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ContainerStatus(ctx, containerID, verbose)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) error {
const operation = "update_container"
defer recordOperation(operation, time.Now())
err := in.service.UpdateContainerResources(ctx, containerID, resources)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) error {
const operation = "reopen_container_log"
defer recordOperation(operation, time.Now())
err := in.service.ReopenContainerLog(ctx, containerID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) {
const operation = "exec_sync"
defer recordOperation(operation, time.Now())
stdout, stderr, err := in.service.ExecSync(ctx, containerID, cmd, timeout)
recordError(operation, err)
return stdout, stderr, err
}
func (in instrumentedRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
const operation = "exec"
defer recordOperation(operation, time.Now())
resp, err := in.service.Exec(ctx, req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach"
defer recordOperation(operation, time.Now())
resp, err := in.service.Attach(ctx, req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
const operation = "run_podsandbox"
startTime := time.Now()
defer recordOperation(operation, startTime)
defer metrics.RunPodSandboxDuration.WithLabelValues(runtimeHandler).Observe(metrics.SinceInSeconds(startTime))
out, err := in.service.RunPodSandbox(ctx, config, runtimeHandler)
recordError(operation, err)
if err != nil {
metrics.RunPodSandboxErrors.WithLabelValues(runtimeHandler).Inc()
}
return out, err
}
func (in instrumentedRuntimeService) StopPodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "stop_podsandbox"
defer recordOperation(operation, time.Now())
err := in.service.StopPodSandbox(ctx, podSandboxID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) RemovePodSandbox(ctx context.Context, podSandboxID string) error {
const operation = "remove_podsandbox"
defer recordOperation(operation, time.Now())
err := in.service.RemovePodSandbox(ctx, podSandboxID)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) PodSandboxStatus(ctx context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
const operation = "podsandbox_status"
defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStatus(ctx, podSandboxID, verbose)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox"
defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandbox(ctx, filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) {
const operation = "container_stats"
defer recordOperation(operation, time.Now())
out, err := in.service.ContainerStats(ctx, containerID)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
const operation = "list_container_stats"
defer recordOperation(operation, time.Now())
out, err := in.service.ListContainerStats(ctx, filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) {
const operation = "podsandbox_stats"
defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStats(ctx, podSandboxID)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) {
const operation = "list_podsandbox_stats"
defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandboxStats(ctx, filter)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward"
defer recordOperation(operation, time.Now())
resp, err := in.service.PortForward(ctx, req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) UpdatePodSandboxResources(ctx context.Context, req *runtimeapi.UpdatePodSandboxResourcesRequest) (*runtimeapi.UpdatePodSandboxResourcesResponse, error) {
const operation = "update_podsandbox_resources"
defer recordOperation(operation, time.Now())
resp, err := in.service.UpdatePodSandboxResources(ctx, req)
recordError(operation, err)
return resp, err
}
func (in instrumentedRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config"
defer recordOperation(operation, time.Now())
err := in.service.UpdateRuntimeConfig(ctx, runtimeConfig)
recordError(operation, err)
return err
}
func (in instrumentedImageManagerService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images"
defer recordOperation(operation, time.Now())
out, err := in.service.ListImages(ctx, filter)
recordError(operation, err)
return out, err
}
func (in instrumentedImageManagerService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
const operation = "image_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ImageStatus(ctx, image, verbose)
recordError(operation, err)
return out, err
}
func (in instrumentedImageManagerService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "pull_image"
defer recordOperation(operation, time.Now())
imageRef, err := in.service.PullImage(ctx, image, auth, podSandboxConfig)
recordError(operation, err)
return imageRef, err
}
func (in instrumentedImageManagerService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error {
const operation = "remove_image"
defer recordOperation(operation, time.Now())
err := in.service.RemoveImage(ctx, image)
recordError(operation, err)
return err
}
func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
const operation = "image_fs_info"
defer recordOperation(operation, time.Now())
fsInfo, err := in.service.ImageFsInfo(ctx)
recordError(operation, err)
return fsInfo, err
}
func (in instrumentedImageManagerService) Close() error {
const operation = "close"
defer recordOperation(operation, time.Now())
err := in.service.Close()
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
const operation = "checkpoint_container"
defer recordOperation(operation, time.Now())
err := in.service.CheckpointContainer(ctx, options)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) GetContainerEvents(ctx context.Context, containerEventsCh chan *runtimeapi.ContainerEventResponse, connectionEstablishedCallback func(runtimeapi.RuntimeService_GetContainerEventsClient)) error {
const operation = "get_container_events"
defer recordOperation(operation, time.Now())
err := in.service.GetContainerEvents(ctx, containerEventsCh, connectionEstablishedCallback)
recordError(operation, err)
return err
}
func (in instrumentedRuntimeService) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
const operation = "list_metric_descriptors"
defer recordOperation(operation, time.Now())
out, err := in.service.ListMetricDescriptors(ctx)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
const operation = "list_podsandbox_metrics"
defer recordOperation(operation, time.Now())
out, err := in.service.ListPodSandboxMetrics(ctx)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) RuntimeConfig(ctx context.Context) (*runtimeapi.RuntimeConfigResponse, error) {
const operation = "runtime_config"
defer recordOperation(operation, time.Now())
out, err := in.service.RuntimeConfig(ctx)
recordError(operation, err)
return out, err
}
func (in instrumentedRuntimeService) Close() error {
const operation = "close"
defer recordOperation(operation, time.Now())
err := in.service.Close()
recordError(operation, err)
return err
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package kuberuntime
import (
"context"
v1 "k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
func FuzzKubeRuntime(data []byte) int {
f := fuzz.NewConsumer(data)
pod := &v1.Pod{}
status := &kubecontainer.PodStatus{}
err := f.GenerateStruct(pod)
if err != nil {
return 0
}
err = f.GenerateStruct(status)
if err != nil {
return 0
}
_, _, m, err := createTestRuntimeManager(context.Background())
if err != nil {
return 0
}
_ = m.computePodActions(context.Background(), pod, status)
return 1
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/url"
"os"
"path/filepath"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
codes "google.golang.org/grpc/codes"
crierror "k8s.io/cri-api/pkg/errors"
"github.com/opencontainers/selinux/go-selinux"
grpcstatus "google.golang.org/grpc/status"
"github.com/armon/circbuf"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
remote "k8s.io/cri-client/pkg"
kubelettypes "k8s.io/kubelet/pkg/types"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/tail"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
var (
// ErrCreateContainerConfig - failed to create container config
ErrCreateContainerConfig = errors.New("CreateContainerConfigError")
// ErrPreCreateHook - failed to execute PreCreateHook
ErrPreCreateHook = errors.New("PreCreateHookError")
// ErrCreateContainer - failed to create container
ErrCreateContainer = errors.New("CreateContainerError")
// ErrPreStartHook - failed to execute PreStartHook
ErrPreStartHook = errors.New("PreStartHookError")
// ErrPostStartHook - failed to execute PostStartHook
ErrPostStartHook = errors.New("PostStartHookError")
)
// recordContainerEvent should be used by the runtime manager for all container related events.
// It has sanity checks to ensure that we do not write events that could overwhelm the control plane.
// In particular, it ensures that a containerID never appears in an event message, as that
// is prone to producing many distinct events that do not deduplicate well.
// It replaces any reference to a containerID with the containerName, which is stable and is what users know.
func (m *kubeGenericRuntimeManager) recordContainerEvent(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) {
logger := klog.FromContext(ctx)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
logger.Error(err, "Can't make a container ref", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
return
}
eventMessage := message
if len(args) > 0 {
eventMessage = fmt.Sprintf(message, args...)
}
// this is a hack, but often the error from the runtime includes the containerID
// which kills our ability to deduplicate events. this protection makes a huge
// difference in the number of unique events
if containerID != "" {
eventMessage = strings.Replace(eventMessage, containerID, container.Name, -1)
}
m.recorder.Event(ref, eventType, reason, eventMessage)
}
// startSpec wraps the spec required to start a container, either a regular/init container
// or an ephemeral container. Ephemeral containers contain all the fields of regular/init
// containers, plus some additional fields. In both cases startSpec.container will be set.
type startSpec struct {
container *v1.Container
ephemeralContainer *v1.EphemeralContainer
}
func containerStartSpec(c *v1.Container) *startSpec {
return &startSpec{container: c}
}
func ephemeralContainerStartSpec(ec *v1.EphemeralContainer) *startSpec {
return &startSpec{
container: (*v1.Container)(&ec.EphemeralContainerCommon),
ephemeralContainer: ec,
}
}
// getTargetID returns the kubecontainer.ContainerID for ephemeral container namespace
// targeting. The target is stored as EphemeralContainer.TargetContainerName, which must be
// resolved to a ContainerID using podStatus. The target container must already exist, which
// usually isn't a problem since ephemeral containers aren't allowed at pod creation time.
func (s *startSpec) getTargetID(podStatus *kubecontainer.PodStatus) (*kubecontainer.ContainerID, error) {
if s.ephemeralContainer == nil || s.ephemeralContainer.TargetContainerName == "" {
return nil, nil
}
targetStatus := podStatus.FindContainerStatusByName(s.ephemeralContainer.TargetContainerName)
if targetStatus == nil {
return nil, fmt.Errorf("unable to find target container %v", s.ephemeralContainer.TargetContainerName)
}
return &targetStatus.ID, nil
}
func calcRestartCountByLogDir(path string) (int, error) {
// if the path doesn't exist then it's not an error
if _, err := os.Stat(path); err != nil {
return 0, nil
}
files, err := os.ReadDir(path)
if err != nil {
return 0, err
}
if len(files) == 0 {
return 0, nil
}
restartCount := 0
restartCountLogFileRegex := regexp.MustCompile(`^(\d+)\.log(\..*)?`)
for _, file := range files {
if file.IsDir() {
continue
}
matches := restartCountLogFileRegex.FindStringSubmatch(file.Name())
if len(matches) == 0 {
continue
}
count, err := strconv.Atoi(matches[1])
if err != nil {
// the kubelet is unlikely to have created this file;
// it is more likely a custom file whose name happens to match the pattern
continue
}
count++
if count > restartCount {
restartCount = count
}
}
return restartCount, nil
}
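// Illustrative sketch (not part of the original source): given a log directory
// surviving from a previous container instance that contains
//
//	0.log  1.log  2.log.20230101-000000.gz
//
// the highest parsed index is 2, so calcRestartCountByLogDir returns 3 and the
// next instance logs to 3.log instead of clobbering 0.log.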
func (m *kubeGenericRuntimeManager) getPodRuntimeHandler(pod *v1.Pod) (podRuntimeHandler string, err error) {
// If the RuntimeClassInImageCriAPI feature gate is enabled, pass the runtime
// handler for the specified runtime class. If no runtime class is specified,
// pass "".
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
if pod.Spec.RuntimeClassName != nil && *pod.Spec.RuntimeClassName != "" {
podRuntimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
msg := fmt.Sprintf("Failed to lookup runtimeHandler for runtimeClassName %v", pod.Spec.RuntimeClassName)
return msg, err
}
}
}
return podRuntimeHandler, nil
}
// startContainer starts a container and, on error, returns a message indicating why it failed.
// It starts the container through the following steps:
// * pull the image
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (string, error) {
logger := klog.FromContext(ctx)
container := spec.container
// Step 1: pull the image.
podRuntimeHandler, err := m.getPodRuntimeHandler(pod)
if err != nil {
return "", err
}
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
logger.Error(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod), "containerName", container.Name)
}
imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, ref, pod, container.Image, pullSecrets, podSandboxConfig, podRuntimeHandler, container.ImagePullPolicy)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return msg, err
}
// Step 2: create the container.
// For a new container, the RestartCount should be 0
restartCount := 0
containerStatus := podStatus.FindContainerStatusByName(container.Name)
if containerStatus != nil {
restartCount = containerStatus.RestartCount + 1
} else {
// The container runtime keeps state on container statuses and
// what the container restart count is. When nodes are rebooted
// some container runtimes clear their state which causes the
// restartCount to be reset to 0. This causes the logfile to
// start at 0.log, which either overwrites or appends to the
// already existing log.
//
// We are checking to see if the log directory exists, and find
// the latest restartCount by checking the log name -
// {restartCount}.log - and adding 1 to it.
logDir := BuildContainerLogsDirectory(m.podLogsDirectory, pod.Namespace, pod.Name, pod.UID, container.Name)
restartCount, err = calcRestartCountByLogDir(logDir)
if err != nil {
logger.Info("Cannot calculate restartCount from the log directory", "logDir", logDir, "err", err)
restartCount = 0
}
}
target, err := spec.getTargetID(podStatus)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainerConfig
}
containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target, imageVolumes)
if cleanupAction != nil {
defer cleanupAction()
}
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainerConfig
}
// When creating a container, mark the resources as actuated.
if err := m.allocationManager.SetActuatedResources(pod, container); err != nil {
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", err)
return err.Error(), ErrCreateContainerConfig
}
err = m.internalLifecycle.PreCreateContainer(pod, container, containerConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Internal PreCreateContainer hook failed: %v", s.Message())
return s.Message(), ErrPreCreateHook
}
containerID, err := m.runtimeService.CreateContainer(ctx, podSandboxID, containerConfig, podSandboxConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainer
}
err = m.internalLifecycle.PreStartContainer(pod, container, containerID)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
return s.Message(), ErrPreStartHook
}
m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)
// Step 3: start the container.
err = m.runtimeService.StartContainer(ctx, containerID)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
return s.Message(), kubecontainer.ErrRunContainer
}
m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)
// Symlink container logs to the legacy container log location for cluster logging
// support.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
containerMeta := containerConfig.GetMetadata()
sandboxMeta := podSandboxConfig.GetMetadata()
legacySymlink := legacyLogSymlink(containerID, containerMeta.Name, sandboxMeta.Name,
sandboxMeta.Namespace)
containerLog := filepath.Join(podSandboxConfig.LogDirectory, containerConfig.LogPath)
// Only create the legacy symlink if the containerLog path exists (or the error is not IsNotExist).
// If the containerLog path does not exist, only a dangling legacySymlink would be created;
// that dangling symlink is later removed by container GC, so it makes no sense to create it
// in the first place. This happens when the journald logging driver is used with docker.
if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) {
if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
logger.Error(err, "Failed to create legacy symbolic link", "path", legacySymlink,
"containerID", containerID, "containerLogPath", containerLog)
}
}
// Step 4: execute the post start hook.
if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
kubeContainerID := kubecontainer.ContainerID{
Type: m.runtimeName,
ID: containerID,
}
msg, handlerErr := m.runner.Run(ctx, kubeContainerID, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
logger.Error(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
// do not record the message in the event so that secrets won't leak from the server.
m.recordContainerEvent(ctx, pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed")
if err := m.killContainer(ctx, pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil, nil); err != nil {
logger.Error(err, "Failed to kill container", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
}
return msg, ErrPostStartHook
}
}
return "", nil
}
// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID, imageVolumes kubecontainer.ImageVolumes) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs, imageVolumes)
if err != nil {
return nil, nil, err
}
uid, username, err := m.getImageUser(ctx, container.Image)
if err != nil {
return nil, cleanupAction, err
}
// Verify RunAsNonRoot. Non-root verification only supports numeric user.
if err := verifyRunAsNonRoot(ctx, pod, container, uid, username); err != nil {
return nil, cleanupAction, err
}
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
logDir := BuildContainerLogsDirectory(m.podLogsDirectory, pod.Namespace, pod.Name, pod.UID, container.Name)
err = m.osInterface.MkdirAll(logDir, 0755)
if err != nil {
return nil, cleanupAction, fmt.Errorf("create container log directory for container %s failed: %v", container.Name, err)
}
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
stopsignal := getContainerConfigStopSignal(container)
restartCountUint32 := uint32(restartCount)
config := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: container.Name,
Attempt: restartCountUint32,
},
Image: &runtimeapi.ImageSpec{Image: imageRef, UserSpecifiedImage: container.Image},
Command: command,
Args: args,
WorkingDir: container.WorkingDir,
Labels: newContainerLabels(container, pod),
Annotations: newContainerAnnotations(ctx, container, pod, restartCount, opts),
Devices: makeDevices(opts),
CDIDevices: makeCDIDevices(opts),
Mounts: m.makeMounts(opts, container),
LogPath: containerLogsPath,
Stdin: container.Stdin,
StdinOnce: container.StdinOnce,
Tty: container.TTY,
}
if stopsignal != nil {
config.StopSignal = *stopsignal
}
// set platform specific configurations.
if err := m.applyPlatformSpecificContainerConfig(ctx, config, container, pod, uid, username, nsTarget); err != nil {
return nil, cleanupAction, err
}
// set environment variables
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
for idx := range opts.Envs {
e := opts.Envs[idx]
envs[idx] = &runtimeapi.KeyValue{
Key: e.Name,
Value: e.Value,
}
}
config.Envs = envs
return config, cleanupAction, nil
}
func (m *kubeGenericRuntimeManager) updateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID kubecontainer.ContainerID) error {
containerResources := m.generateContainerResources(ctx, pod, container)
if containerResources == nil {
return fmt.Errorf("container %q updateContainerResources failed: cannot generate resources config", containerID.String())
}
logger := klog.FromContext(ctx)
err := m.runtimeService.UpdateContainerResources(ctx, containerID.ID, containerResources)
if err == nil {
err = m.allocationManager.SetActuatedResources(pod, container)
} else {
logger.Error(err, "UpdateContainerResources failed", "container", containerID.String())
}
return err
}
func (m *kubeGenericRuntimeManager) updatePodSandboxResources(ctx context.Context, sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) error {
logger := klog.FromContext(ctx)
podResourcesRequest := m.generateUpdatePodSandboxResourcesRequest(sandboxID, pod, podResources)
if podResourcesRequest == nil {
return fmt.Errorf("sandboxID %q updatePodSandboxResources failed: cannot generate resources config", sandboxID)
}
_, err := m.runtimeService.UpdatePodSandboxResources(ctx, podResourcesRequest)
if err != nil {
stat, _ := grpcstatus.FromError(err)
if stat.Code() == codes.Unimplemented {
logger.V(3).Info("updatePodSandboxResources failed: unimplemented; this call is best-effort: proceeding with resize", "sandboxID", sandboxID)
return nil
}
return fmt.Errorf("updatePodSandboxResources failed for sanboxID %q: %w", sandboxID, err)
}
return nil
}
// makeDevices generates container devices for kubelet runtime v1.
func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device {
devices := make([]*runtimeapi.Device, len(opts.Devices))
for idx := range opts.Devices {
device := opts.Devices[idx]
devices[idx] = &runtimeapi.Device{
HostPath: device.PathOnHost,
ContainerPath: device.PathInContainer,
Permissions: device.Permissions,
}
}
return devices
}
// makeCDIDevices generates container CDIDevices for kubelet runtime v1.
func makeCDIDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.CDIDevice {
devices := make([]*runtimeapi.CDIDevice, len(opts.CDIDevices))
for i, device := range opts.CDIDevices {
devices[i] = &runtimeapi.CDIDevice{
Name: device.Name,
}
}
return devices
}
// makeMounts generates container volume mounts for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeapi.Mount {
volumeMounts := []*runtimeapi.Mount{}
for idx := range opts.Mounts {
v := opts.Mounts[idx]
selinuxRelabel := v.SELinuxRelabel && selinux.GetEnabled()
mount := &runtimeapi.Mount{
HostPath: v.HostPath,
ContainerPath: v.ContainerPath,
Readonly: v.ReadOnly,
SelinuxRelabel: selinuxRelabel,
Propagation: v.Propagation,
RecursiveReadOnly: v.RecursiveReadOnly,
Image: v.Image,
ImageSubPath: v.ImageSubPath,
}
volumeMounts = append(volumeMounts, mount)
}
// The reason we create and mount the log file here (not in kubelet) is because
// the file's location depends on the ID of the container, and we need to create and
// mount the file before actually starting the container.
if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 {
// Because the PodContainerDir contains pod uid and container name which is unique enough,
// here we just add a random id to make the path unique for different instances
// of the same container.
cid := makeUID()
containerLogPath := filepath.Join(opts.PodContainerDir, cid)
fs, err := m.osInterface.Create(containerLogPath)
if err != nil {
utilruntime.HandleError(fmt.Errorf("error on creating termination-log file %q: %v", containerLogPath, err))
} else {
fs.Close()
// Chmod is needed because os.Create() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
if err := m.osInterface.Chmod(containerLogPath, 0666); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to set termination-log file permissions %q: %v", containerLogPath, err))
}
// Volume mounts fail on Windows if the path is not of the form C:/
containerLogPath = volumeutil.MakeAbsolutePath(goruntime.GOOS, containerLogPath)
terminationMessagePath := volumeutil.MakeAbsolutePath(goruntime.GOOS, container.TerminationMessagePath)
selinuxRelabel := selinux.GetEnabled()
volumeMounts = append(volumeMounts, &runtimeapi.Mount{
HostPath: containerLogPath,
ContainerPath: terminationMessagePath,
SelinuxRelabel: selinuxRelabel,
})
}
}
return volumeMounts
}
// getKubeletContainers lists containers managed by kubelet.
// The boolean parameter specifies whether to return all containers, including
// those already exited and dead (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, allContainers bool) ([]*runtimeapi.Container, error) {
logger := klog.FromContext(ctx)
filter := &runtimeapi.ContainerFilter{}
if !allContainers {
filter.State = &runtimeapi.ContainerStateValue{
State: runtimeapi.ContainerState_CONTAINER_RUNNING,
}
}
containers, err := m.runtimeService.ListContainers(ctx, filter)
if err != nil {
logger.Error(err, "ListContainers failed")
return nil, err
}
return containers, nil
}
// makeUID returns a randomly generated 8-character hexadecimal string.
func makeUID() string {
return fmt.Sprintf("%08x", rand.Uint32())
}
// getTerminationMessage looks on the filesystem for the provided termination message path,
// returning a limited amount of those bytes, or true if the logs should be checked instead.
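// For example, with the default terminationMessagePath /dev/termination-log, the
// container's mounts are scanned for a matching ContainerPath and the corresponding
// HostPath file is tailed, capped at kubecontainer.MaxContainerTerminationMessageLength bytes.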
func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessagePath string, fallbackToLogs bool) (string, bool) {
if len(terminationMessagePath) == 0 {
return "", fallbackToLogs
}
// Volume mounts fail on Windows if the path is not of the form C:/
terminationMessagePath = volumeutil.MakeAbsolutePath(goruntime.GOOS, terminationMessagePath)
for _, mount := range status.Mounts {
if mount.ContainerPath != terminationMessagePath {
continue
}
path := mount.HostPath
data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength)
if err != nil {
if os.IsNotExist(err) {
return "", fallbackToLogs
}
return fmt.Sprintf("Error on reading termination log %s: %v", path, err), false
}
return string(data), (fallbackToLogs && len(data) == 0)
}
return "", fallbackToLogs
}
// readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented
// by path. It reads up to max log lines.
func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(ctx context.Context, path string) string {
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
if err := m.ReadLogs(ctx, path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
}
return buf.String()
}
func (m *kubeGenericRuntimeManager) convertToKubeContainerStatus(ctx context.Context, status *runtimeapi.ContainerStatus) (cStatus *kubecontainer.Status) {
cStatus = toKubeContainerStatus(ctx, status, m.runtimeName)
if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
// Populate the termination message if needed.
annotatedInfo := getContainerInfoFromAnnotations(ctx, status.Annotations)
// If a container cannot even be started, it certainly does not have logs, so no need to fallbackToLogs.
fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError &&
cStatus.ExitCode != 0 && cStatus.Reason != "ContainerCannotRun"
tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
if checkLogs {
tMessage = m.readLastStringFromContainerLogs(ctx, status.GetLogPath())
}
// Enrich the status message with the termination message if the application wrote one.
if len(tMessage) != 0 {
if len(cStatus.Message) != 0 {
cStatus.Message += ": "
}
cStatus.Message += tMessage
}
}
return cStatus
}
// getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace, activePodSandboxID string) ([]*kubecontainer.Status, []*kubecontainer.Status, error) {
logger := klog.FromContext(ctx)
// Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{kubelettypes.KubernetesPodUIDLabel: string(uid)},
})
if err != nil {
logger.Error(err, "ListContainers error")
return nil, nil, err
}
statuses := []*kubecontainer.Status{}
activeContainerStatuses := []*kubecontainer.Status{}
// TODO: optimization: set maximum number of containers per container name to examine.
for _, c := range containers {
resp, err := m.runtimeService.ContainerStatus(ctx, c.Id, false)
// Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal.
// The previous call (ListContainers) never fails due to a pod container not existing.
// Therefore, this method should not either, but instead act as if the previous call failed,
// which means the error should be ignored.
if crierror.IsNotFound(err) {
continue
}
if err != nil {
// Merely log this here; GetPodStatus will actually report the error out.
logger.V(4).Info("ContainerStatus return error", "containerID", c.Id, "err", err)
return nil, nil, err
}
status := resp.GetStatus()
if status == nil {
return nil, nil, remote.ErrContainerStatusNil
}
cStatus := m.convertToKubeContainerStatus(ctx, status)
statuses = append(statuses, cStatus)
if c.PodSandboxId == activePodSandboxID {
activeContainerStatuses = append(activeContainerStatuses, cStatus)
}
}
sort.Sort(containerStatusByCreated(statuses))
sort.Sort(containerStatusByCreated(activeContainerStatuses))
return statuses, activeContainerStatuses, nil
}
func toKubeContainerStatus(ctx context.Context, status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
annotatedInfo := getContainerInfoFromAnnotations(ctx, status.Annotations)
labeledInfo := getContainerInfoFromLabels(ctx, status.Labels)
var cStatusResources *kubecontainer.ContainerResources
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// If runtime reports cpu & memory resources info, add it to container status
cStatusResources = toKubeContainerResources(status.Resources)
}
// Keep backwards compatibility with older runtimes: status.ImageId was added in v1.30.
imageID := status.ImageRef
if status.ImageId != "" {
imageID = status.ImageId
}
var cStatusUser *kubecontainer.ContainerUser
if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
cStatusUser = toKubeContainerUser(status.User)
}
var cStatusStopSignal *v1.Signal
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerStopSignals) {
signal := status.GetStopSignal().String()
// Here Signal_RUNTIME_DEFAULT means that the runtime is not returning any StopSignal.
// This happens only when the container runtime version doesn't support StopSignal yet.
if signal != "" && signal != "RUNTIME_DEFAULT" {
cStatusStopSignal = runtimeSignalToString(status.GetStopSignal())
}
}
cStatus := &kubecontainer.Status{
ID: kubecontainer.ContainerID{
Type: runtimeName,
ID: status.Id,
},
Name: labeledInfo.ContainerName,
Image: status.Image.Image,
ImageID: imageID,
ImageRef: status.ImageRef,
ImageRuntimeHandler: status.Image.RuntimeHandler,
Hash: annotatedInfo.Hash,
RestartCount: annotatedInfo.RestartCount,
State: toKubeContainerState(status.State),
CreatedAt: time.Unix(0, status.CreatedAt),
Resources: cStatusResources,
User: cStatusUser,
StopSignal: cStatusStopSignal,
}
if status.State != runtimeapi.ContainerState_CONTAINER_CREATED {
// If container is not in the created state, we have tried and
// started the container. Set the StartedAt time.
cStatus.StartedAt = time.Unix(0, status.StartedAt)
}
if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
cStatus.Reason = status.Reason
cStatus.Message = status.Message
cStatus.ExitCode = int(status.ExitCode)
cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
}
for _, mount := range status.Mounts {
cStatus.Mounts = append(cStatus.Mounts, kubecontainer.Mount{
HostPath: mount.HostPath,
ContainerPath: mount.ContainerPath,
ReadOnly: mount.Readonly,
RecursiveReadOnly: mount.RecursiveReadOnly,
SELinuxRelabel: mount.SelinuxRelabel,
Propagation: mount.Propagation,
Image: mount.Image,
ImageSubPath: mount.ImageSubPath,
})
}
return cStatus
}
// executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
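// For example, with gracePeriod=30, a hook that completes in 12s returns 12, leaving the
// caller roughly 18s for the subsequent StopContainer call; a hook that never completes
// returns only after the full 30s timeout has elapsed.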
func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
logger := klog.FromContext(ctx)
logger.V(3).Info("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String())
start := metav1.Now()
done := make(chan struct{})
go func() {
defer close(done)
defer utilruntime.HandleCrash()
if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
logger.Error(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String())
// do not record the message in the event so that secrets won't leak from the server.
m.recordContainerEvent(ctx, pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, "PreStopHook failed")
}
}()
select {
case <-time.After(time.Duration(gracePeriod) * time.Second):
logger.V(2).Info("PreStop hook not completed in grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String(), "gracePeriod", gracePeriod)
case <-done:
logger.V(3).Info("PreStop hook completed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String())
}
return int64(metav1.Now().Sub(start.Time).Seconds())
}
// restoreSpecsFromContainerLabels restores all information needed for killing a container. In some
// cases we may not have the pod and container spec when killing a container, e.g. the pod is
// deleted during kubelet restart.
// To solve this problem, we've already written the necessary information into container labels.
// Here we just need to retrieve it from the container labels and restore the specs.
// TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to just accept the information needed, so that we
// can pass the needed information instead of creating a fake object.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.Context, containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
var pod *v1.Pod
var container *v1.Container
resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil {
return nil, nil, err
}
s := resp.GetStatus()
if s == nil {
return nil, nil, remote.ErrContainerStatusNil
}
l := getContainerInfoFromLabels(ctx, s.Labels)
a := getContainerInfoFromAnnotations(ctx, s.Annotations)
// Notice that the following are not full specs. The container killing code should not use
// un-restored fields.
pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: l.PodUID,
Name: l.PodName,
Namespace: l.PodNamespace,
DeletionGracePeriodSeconds: a.PodDeletionGracePeriod,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: a.PodTerminationGracePeriod,
},
}
container = &v1.Container{
Name: l.ContainerName,
Ports: a.ContainerPorts,
TerminationMessagePath: a.TerminationMessagePath,
}
if a.PreStopHandler != nil {
container.Lifecycle = &v1.Lifecycle{
PreStop: a.PreStopHandler,
}
}
return pod, container, nil
}
// killContainer kills a container through the following steps:
// * Run the pre-stop lifecycle hooks (if applicable).
// * Stop the container.
func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64, ordering *terminationOrdering) error {
logger := klog.FromContext(ctx)
var containerSpec *v1.Container
if pod != nil {
if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil {
return fmt.Errorf("failed to get containerSpec %q (id=%q) in pod %q when killing container for reason %q",
containerName, containerID.String(), format.Pod(pod), message)
}
} else {
// Restore necessary information if one of the specs is nil.
restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(ctx, containerID)
if err != nil {
return err
}
pod, containerSpec = restoredPod, restoredContainer
}
// From this point, pod and container must be non-nil.
gracePeriod := setTerminationGracePeriod(ctx, pod, containerSpec, containerName, containerID, reason)
if len(message) == 0 {
message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
}
m.recordContainerEvent(ctx, pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)
if gracePeriodOverride != nil {
gracePeriod = *gracePeriodOverride
logger.V(3).Info("Killing container with a grace period override", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
}
// Run the pre-stop lifecycle hooks if applicable and if there is enough time to run it
if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 {
gracePeriod = gracePeriod - m.executePreStopHook(ctx, pod, containerID, containerSpec, gracePeriod)
}
// if we care about termination ordering, then wait for this container's turn to exit if there is
// time remaining
if ordering != nil && gracePeriod > 0 {
// grace period is only in seconds, so the time we've waited gets truncated downward
gracePeriod -= int64(ordering.waitForTurn(containerName, gracePeriod))
}
// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
if gracePeriod < minimumGracePeriodInSeconds {
gracePeriod = minimumGracePeriodInSeconds
}
logger.V(2).Info("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
err := m.runtimeService.StopContainer(ctx, containerID.ID, gracePeriod)
if err != nil && !crierror.IsNotFound(err) {
logger.Error(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
return err
}
logger.V(3).Info("Container exited normally", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String())
if ordering != nil {
ordering.containerTerminated(containerName)
}
return nil
}
// killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
logger := klog.FromContext(ctx)
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
wg := sync.WaitGroup{}
wg.Add(len(runningPod.Containers))
var termOrdering *terminationOrdering
if types.HasRestartableInitContainer(pod) {
var runningContainerNames []string
for _, container := range runningPod.Containers {
runningContainerNames = append(runningContainerNames, container.Name)
}
termOrdering = newTerminationOrdering(pod, runningContainerNames)
}
for _, container := range runningPod.Containers {
go func(container *kubecontainer.Container) {
defer utilruntime.HandleCrash()
defer wg.Done()
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
if err := m.killContainer(ctx, pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride, termOrdering); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
// Use runningPod for logging as the pod passed in could be *nil*.
logger.Error(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
"containerName", container.Name, "containerID", container.ID)
}
containerResults <- killContainerResult
}(container)
}
wg.Wait()
close(containerResults)
for containerResult := range containerResults {
syncResults = append(syncResults, containerResult)
}
return
}
// pruneInitContainersBeforeStart ensures that before we begin creating init
// containers, we have reduced the number of outstanding init containers still
// present. This reduces load on the container garbage collector by only
// preserving the most recent terminated init container.
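// For example, if an init container named "setup" (hypothetical) has three exited attempts
// recorded in the pod status, only the newest one is kept; the two older containers are
// removed from the runtime.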
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
logger := klog.FromContext(ctx)
// only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep.
initContainerNames := sets.New[string]()
for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name)
}
for name := range initContainerNames {
count := 0
for _, status := range podStatus.ContainerStatuses {
if status.Name != name ||
(status.State != kubecontainer.ContainerStateExited &&
status.State != kubecontainer.ContainerStateUnknown) {
continue
}
// Remove init containers in unknown state. They should have
// been stopped before pruneInitContainersBeforeStart is
// called.
count++
// keep the first init container for this name
if count == 1 {
continue
}
// prune all other init containers that match this container name
logger.V(4).Info("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue
}
}
}
}
// Remove all init containers. Note that this function does not check the state
// of the container because it assumes all init containers have been stopped
// before the call happens.
func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
logger := klog.FromContext(ctx)
initContainerNames := sets.New[string]()
for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name)
}
for name := range initContainerNames {
count := 0
for _, status := range podStatus.ContainerStatuses {
if status.Name != name {
continue
}
count++
// Purge all init containers that match this container name
logger.V(4).Info("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue
}
}
}
}
// hasAnyRegularContainerCreated returns true if any regular container has been
// created, which indicates all init containers have been initialized.
func hasAnyRegularContainerCreated(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
for _, container := range pod.Spec.Containers {
status := podStatus.FindContainerStatusByName(container.Name)
if status == nil {
continue
}
switch status.State {
case kubecontainer.ContainerStateCreated,
kubecontainer.ContainerStateRunning,
kubecontainer.ContainerStateExited:
return true
default:
// Ignore other states
}
}
return false
}
// computeInitContainerActions sets the actions on the given changes that need
// to be taken for the init containers. This includes actions to initialize the
// init containers and actions to keep restartable init containers running.
// computeInitContainerActions returns true if pod has been initialized.
//
// The actions include:
// - Start the first init container that has not been started.
// - Restart all restartable init containers that have started but are not running.
// - Kill the restartable init containers that are not alive or started.
func (m *kubeGenericRuntimeManager) computeInitContainerActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, changes *podActions) bool {
logger := klog.FromContext(ctx)
if len(pod.Spec.InitContainers) == 0 {
return true
}
// If any of the main containers have status and are Running, then all init containers must
// have been executed at some point in the past. However, they could have been removed
// from the container runtime by now, and if we proceed, it would appear as if they
// never ran and they would re-execute improperly, except for the restartable init containers.
podHasInitialized := false
for _, container := range pod.Spec.Containers {
status := podStatus.FindContainerStatusByName(container.Name)
if status == nil {
continue
}
switch status.State {
case kubecontainer.ContainerStateCreated,
kubecontainer.ContainerStateRunning:
podHasInitialized = true
case kubecontainer.ContainerStateExited:
// This is a workaround for the issue that the kubelet cannot
// differentiate the container statuses of the previous podSandbox
// from the current one.
// If the node is rebooted, all containers will be in the exited
// state and the kubelet will try to recreate a new podSandbox.
// In this case, the kubelet should not mistakenly think that
// the newly created podSandbox has been initialized.
default:
// Ignore other states
}
if podHasInitialized {
break
}
}
// isPreviouslyInitialized indicates whether the current init container was
// previously initialized.
isPreviouslyInitialized := podHasInitialized
// Feature gate ContainerRestartRules makes it possible for individual init
// containers to restart even if pod-level restart policy is Never, which is
// checked at container-level. This `restartOnFailure` is overridden if feature
// gate ContainerRestartRules is enabled.
restartOnFailure := shouldRestartOnFailure(pod)
// Note that we iterate through the init containers in reverse order to find
// the next init container to run, as the completed init containers may get
// removed from container runtime for various reasons. Therefore the kubelet
// should rely on the minimal number of init containers - the last one.
//
// Once we find the next init container to run, iterate through the rest to
// find the restartable init containers to restart.
for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- {
container := &pod.Spec.InitContainers[i]
status := podStatus.FindContainerStatusByName(container.Name)
logger.V(4).Info("Computing init container action", "pod", klog.KObj(pod), "container", container.Name, "status", status)
if status == nil {
// If the container is previously initialized but its status is not
// found, it means its last status is removed for some reason.
// Restart it if it is a restartable init container.
if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
continue
}
if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) {
// after initialization, only restartable init containers need to be kept
// running
continue
}
switch status.State {
case kubecontainer.ContainerStateCreated:
// The main sync loop should have created and started the container
// in one step. If the init container is in the 'created' state,
// it is likely that the container runtime failed to start it. To
// prevent the container from getting stuck in the 'created' state,
// restart it.
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
case kubecontainer.ContainerStateRunning:
if podutil.IsRestartableInitContainer(container) {
if container.StartupProbe != nil {
startup, found := m.startupManager.Get(status.ID)
if !found {
// If the startup probe has not been run, wait for it.
break
}
if startup != proberesults.Success {
if startup == proberesults.Failure {
// If the restartable init container failed the startup probe,
// restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s failed startup probe", container.Name),
reason: reasonStartupProbe,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
break
}
}
logger.V(4).Info("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
if i == (len(pod.Spec.InitContainers) - 1) {
podHasInitialized = true
} else if !isPreviouslyInitialized {
// this init container is initialized for the first time, start the next one
changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
}
// Restart running sidecar containers which have had their definition changed.
if _, _, changed := containerChanged(container, status); changed {
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s definition changed", container.Name),
reason: "",
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
break
}
// A restartable init container does not have to take into account its
// liveness probe when it determines to start the next init container.
if container.LivenessProbe != nil {
liveness, found := m.livenessManager.Get(status.ID)
if !found {
// If the liveness probe has not been run, wait for it.
break
}
if liveness == proberesults.Failure {
// If the restartable init container failed the liveness probe,
// restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container %s failed liveness probe", container.Name),
reason: reasonLivenessProbe,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
// The container is restarting, so no other actions need to be taken.
break
}
}
if !m.computePodResizeAction(ctx, pod, i, true, status, changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
break
}
} else { // init container
// nothing to do but wait for it to finish
break
}
// If the init container failed and the restart policy is Never, the pod is terminal.
// Otherwise, restart the init container.
case kubecontainer.ContainerStateExited:
if podutil.IsRestartableInitContainer(container) {
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
} else { // init container
if isInitContainerFailed(status) {
restartOnFailure := restartOnFailure
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
restartOnFailure = kubecontainer.ShouldContainerBeRestarted(logger, container, pod, podStatus)
}
if !restartOnFailure {
changes.KillPod = true
changes.InitContainersToStart = nil
return false
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
break
}
logger.V(4).Info("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
if i == (len(pod.Spec.InitContainers) - 1) {
podHasInitialized = true
} else {
// this init container is initialized for the first time, start the next one
changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
}
}
default: // kubecontainer.ContainerStateUnknown or other unknown states
if podutil.IsRestartableInitContainer(container) {
// If the restartable init container is in unknown state, restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container is in %q state, try killing it before restart",
status.State),
reason: reasonUnknown,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
} else { // init container
if !isInitContainerFailed(status) {
logger.V(4).Info("This should not happen, init container is in unknown state but not failed", "pod", klog.KObj(pod), "containerStatus", status)
}
restartOnFailure := restartOnFailure
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
// Only container-level restart policy is used. The container-level restart
// rules are not evaluated because the container might not have exited, so
// there is no exit code on which the rules can be used.
if container.RestartPolicy != nil {
restartOnFailure = *container.RestartPolicy != v1.ContainerRestartPolicyNever
}
}
if !restartOnFailure {
changes.KillPod = true
changes.InitContainersToStart = nil
return false
}
// If the init container is in unknown state, restart it.
changes.ContainersToKill[status.ID] = containerToKillInfo{
name: container.Name,
container: container,
message: fmt.Sprintf("Init container is in %q state, try killing it before restart",
status.State),
reason: reasonUnknown,
}
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
}
}
if !isPreviouslyInitialized {
// the one before this init container has been initialized
isPreviouslyInitialized = true
}
}
// this means no init containers have been started,
// start the first one
if !isPreviouslyInitialized {
changes.InitContainersToStart = append(changes.InitContainersToStart, 0)
}
// reverse the InitContainersToStart, as the above loop iterated through the
// init containers backwards, but we want to start them as per the order in
// the pod spec.
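// e.g. indices collected as [3, 1, 0] become [0, 1, 3].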
l := len(changes.InitContainersToStart)
for i := 0; i < l/2; i++ {
changes.InitContainersToStart[i], changes.InitContainersToStart[l-1-i] =
changes.InitContainersToStart[l-1-i], changes.InitContainersToStart[i]
}
return podHasInitialized
}
// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
logger := klog.FromContext(ctx)
resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil {
logger.V(4).Info("Failed to get container status", "containerID", containerID.String(), "err", err)
return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
}
status := resp.GetStatus()
if status == nil {
return remote.ErrContainerStatusNil
}
return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
}
// GetExec gets the endpoint the runtime will serve the exec request from.
func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.ExecRequest{
ContainerId: id.ID,
Cmd: cmd,
Tty: tty,
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
}
resp, err := m.runtimeService.Exec(ctx, req)
if err != nil {
return nil, err
}
return url.Parse(resp.Url)
}
// GetAttach gets the endpoint the runtime will serve the attach request from.
func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeapi.AttachRequest{
ContainerId: id.ID,
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
Tty: tty,
}
resp, err := m.runtimeService.Attach(ctx, req)
if err != nil {
return nil, err
}
return url.Parse(resp.Url)
}
// RunInContainer synchronously executes the command in the container, and returns the output.
func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
stdout, stderr, err := m.runtimeService.ExecSync(ctx, id.ID, cmd, timeout)
// NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient
// for logging purposes. A combined output option will need to be added to the ExecSyncRequest
// if more precise output ordering is ever required.
return append(stdout, stderr...), err
}
// removeContainer removes the container and the container logs.
// Notice that we remove the container logs first, so that the container will not be removed if
// removing the container logs fails, and the kubelet will retry this later. This guarantees
// that the container logs are removed together with the container.
// Notice that we assume the container will only be removed in a non-running state, and that
// it no longer writes container logs in that state.
func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, containerID string) error {
logger := klog.FromContext(ctx)
logger.V(4).Info("Removing container", "containerID", containerID)
// Call internal container post-stop lifecycle hook.
if err := m.internalLifecycle.PostStopContainer(containerID); err != nil {
return err
}
// Remove the container log.
// TODO: Separate log and container lifecycle management.
if err := m.removeContainerLog(ctx, containerID); err != nil {
return err
}
// Remove the container.
return m.runtimeService.RemoveContainer(ctx, containerID)
}
// removeContainerLog removes the container log.
func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, containerID string) error {
// Use log manager to remove rotated logs.
err := m.logManager.Clean(ctx, containerID)
if err != nil {
return err
}
resp, err := m.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
status := resp.GetStatus()
if status == nil {
return remote.ErrContainerStatusNil
}
// Remove the legacy container log symlink.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
labeledInfo := getContainerInfoFromLabels(ctx, status.Labels)
legacySymlink := legacyLogSymlink(containerID, labeledInfo.ContainerName, labeledInfo.PodName,
labeledInfo.PodNamespace)
if err := m.osInterface.Remove(legacySymlink); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove container %q log legacy symbolic link %q: %v",
containerID, legacySymlink, err)
}
return nil
}
// DeleteContainer removes a container.
func (m *kubeGenericRuntimeManager) DeleteContainer(ctx context.Context, containerID kubecontainer.ContainerID) error {
return m.removeContainer(ctx, containerID.ID)
}
// setTerminationGracePeriod determines the grace period to use when killing a container
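// The precedence is: the pod's DeletionGracePeriodSeconds; then, when the pod-level
// TerminationGracePeriodSeconds is set, a probe-level TerminationGracePeriodSeconds for kills
// triggered by that probe; then the pod-level value; and finally the kubelet minimum.
// Illustrative example: with spec.terminationGracePeriodSeconds=30 and
// livenessProbe.terminationGracePeriodSeconds=10, a liveness-probe kill uses 10s while an
// ordinary stop uses 30s.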
func setTerminationGracePeriod(ctx context.Context, pod *v1.Pod, containerSpec *v1.Container, containerName string, containerID kubecontainer.ContainerID, reason containerKillReason) int64 {
gracePeriod := int64(minimumGracePeriodInSeconds)
switch {
case pod.DeletionGracePeriodSeconds != nil:
return *pod.DeletionGracePeriodSeconds
case pod.Spec.TerminationGracePeriodSeconds != nil:
switch reason {
case reasonStartupProbe:
if isProbeTerminationGracePeriodSecondsSet(ctx, pod, containerSpec, containerSpec.StartupProbe, containerName, containerID, "StartupProbe") {
return *containerSpec.StartupProbe.TerminationGracePeriodSeconds
}
case reasonLivenessProbe:
if isProbeTerminationGracePeriodSecondsSet(ctx, pod, containerSpec, containerSpec.LivenessProbe, containerName, containerID, "LivenessProbe") {
return *containerSpec.LivenessProbe.TerminationGracePeriodSeconds
}
}
return *pod.Spec.TerminationGracePeriodSeconds
}
return gracePeriod
}
func isProbeTerminationGracePeriodSecondsSet(ctx context.Context, pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool {
logger := klog.FromContext(ctx)
if probe != nil && probe.TerminationGracePeriodSeconds != nil {
if *probe.TerminationGracePeriodSeconds > *pod.Spec.TerminationGracePeriodSeconds {
logger.V(4).Info("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds)
}
return true
}
return false
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"strconv"
"sync"
"time"
cadvisorv1 "github.com/google/cadvisor/info/v1"
libcontainercgroups "github.com/opencontainers/cgroups"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
kubeapiqos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
kubefeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/utils/ptr"
)
var defaultPageSize = int64(os.Getpagesize())
// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(ctx context.Context, config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
enforceMemoryQoS := false
// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
isCgroup2UnifiedMode() {
enforceMemoryQoS = true
}
cl, err := m.generateLinuxContainerConfig(ctx, container, pod, uid, username, nsTarget, enforceMemoryQoS)
if err != nil {
return err
}
config.Linux = cl
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.UserNamespacesSupport) {
if cl.SecurityContext.NamespaceOptions.UsernsOptions != nil {
for _, mount := range config.Mounts {
mount.UidMappings = cl.SecurityContext.NamespaceOptions.UsernsOptions.Uids
mount.GidMappings = cl.SecurityContext.NamespaceOptions.UsernsOptions.Gids
}
}
}
return nil
}
// generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) (*runtimeapi.LinuxContainerConfig, error) {
sc, err := m.determineEffectiveSecurityContext(pod, container, uid, username)
if err != nil {
return nil, err
}
lc := &runtimeapi.LinuxContainerConfig{
Resources: m.generateLinuxContainerResources(ctx, pod, container, enforceMemoryQoS),
SecurityContext: sc,
}
if nsTarget != nil && lc.SecurityContext.NamespaceOptions.Pid == runtimeapi.NamespaceMode_CONTAINER {
lc.SecurityContext.NamespaceOptions.Pid = runtimeapi.NamespaceMode_TARGET
lc.SecurityContext.NamespaceOptions.TargetId = nsTarget.ID
}
return lc, nil
}
// getCPULimit returns the CPU limit for the container to be used to calculate
// Linux Container Resources.
func getCPULimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
// When container-level CPU limit is not set, the pod-level
// limit is used in the calculation for components relying on linux resource limits
// to be set.
if container.Resources.Limits.Cpu().IsZero() {
return pod.Spec.Resources.Limits.Cpu()
}
}
return container.Resources.Limits.Cpu()
}
// getMemoryLimit returns the memory limit for the container to be used to calculate
// Linux Container Resources.
func getMemoryLimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
// When container-level memory limit is not set, the pod-level
// limit is used in the calculation for components relying on linux resource limits
// to be set.
if container.Resources.Limits.Memory().IsZero() {
return pod.Spec.Resources.Limits.Memory()
}
}
return container.Resources.Limits.Memory()
}
// generateLinuxContainerResources generates linux container resources config for runtime
func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container, enforceMemoryQoS bool) *runtimeapi.LinuxContainerResources {
logger := klog.FromContext(ctx)
// set linux container resources
var cpuRequest *resource.Quantity
if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
cpuRequest = container.Resources.Requests.Cpu()
}
memoryLimit := getMemoryLimit(pod, container)
cpuLimit := getCPULimit(pod, container)
// If the pod has exclusive CPUs and the container in question has integer CPU requests,
// the CFS quota will not be enforced
disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.ContainerHasExclusiveCPUs(pod, container)
logger.V(5).Info("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit, disableCPUQuota)
lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
int64(m.machineInfo.MemoryCapacity)))
lcr.HugepageLimits = GetHugepageLimitsFromResources(ctx, pod, container.Resources)
// Configure swap for the container
m.configureContainerSwapResources(ctx, lcr, pod, container)
// Set memory.min and memory.high to enforce MemoryQoS
if enforceMemoryQoS {
unified := map[string]string{}
memoryRequest := container.Resources.Requests.Memory().Value()
memoryLimit := container.Resources.Limits.Memory().Value()
if memoryRequest != 0 {
unified[cm.Cgroup2MemoryMin] = strconv.FormatInt(memoryRequest, 10)
}
// Guaranteed pods, by their QoS definition, require that memory request equals memory limit
// and cpu request equals cpu limit.
// Here, we only check from the memory perspective. Hence the MemoryQoS feature is disabled
// on those QoS pods by not setting memory.high.
if memoryRequest != memoryLimit {
// The formula for memory.high for container cgroup is modified in Alpha stage of the feature in K8s v1.27.
// It will be set based on formula:
// `memory.high=floor[(requests.memory + memory throttling factor * (limits.memory or node allocatable memory - requests.memory))/pageSize] * pageSize`
// where default value of memory throttling factor is set to 0.9
// More info: https://git.k8s.io/enhancements/keps/sig-node/2570-memory-qos
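// Illustrative example, assuming the default throttling factor 0.9 and a 4KiB page size:
// requests.memory=100Mi, limits.memory=200Mi gives
// memory.high = floor((104857600 + 0.9*(209715200-104857600))/4096)*4096
//             = 199229440 bytes (190Mi).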
memoryHigh := int64(0)
if memoryLimit != 0 {
memoryHigh = int64(math.Floor(
float64(memoryRequest)+
(float64(memoryLimit)-float64(memoryRequest))*float64(m.memoryThrottlingFactor))/float64(defaultPageSize)) * defaultPageSize
} else {
allocatable := m.getNodeAllocatable()
allocatableMemory, ok := allocatable[v1.ResourceMemory]
if ok && allocatableMemory.Value() > 0 {
memoryHigh = int64(math.Floor(
float64(memoryRequest)+
(float64(allocatableMemory.Value())-float64(memoryRequest))*float64(m.memoryThrottlingFactor))/float64(defaultPageSize)) * defaultPageSize
}
}
if memoryHigh != 0 && memoryHigh > memoryRequest {
unified[cm.Cgroup2MemoryHigh] = strconv.FormatInt(memoryHigh, 10)
}
}
if len(unified) > 0 {
if lcr.Unified == nil {
lcr.Unified = unified
} else {
for k, v := range unified {
lcr.Unified[k] = v
}
}
logger.V(4).Info("MemoryQoS config for container", "pod", klog.KObj(pod), "containerName", container.Name, "unified", unified)
}
}
return lcr
}
// configureContainerSwapResources configures the swap resources for a specified (linux) container.
// Swap is only configured if a swap cgroup controller is available and the NodeSwap feature gate is enabled.
func (m *kubeGenericRuntimeManager) configureContainerSwapResources(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
if !m.getSwapControllerAvailable() {
return
}
swapConfigurationHelper := newSwapConfigurationHelper(*m.machineInfo, m.getSwapControllerAvailable)
// NOTE(ehashman): Behavior is defined in the opencontainers runtime spec:
// https://github.com/opencontainers/runtime-spec/blob/1c3f411f041711bbeecf35ff7e93461ea6789220/config-linux.md#memory
switch m.GetContainerSwapBehavior(pod, container) {
case types.NoSwap:
swapConfigurationHelper.ConfigureNoSwap(ctx, lcr)
case types.LimitedSwap:
swapConfigurationHelper.ConfigureLimitedSwap(ctx, lcr, pod, container)
default:
swapConfigurationHelper.ConfigureNoSwap(ctx, lcr)
}
}
// GetContainerSwapBehavior checks what swap behavior should be configured for a container,
// considering the requirements for enabling swap.
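// In short (sketching the checks below): LimitedSwap only takes effect for Burstable QoS
// pods on cgroup v2 nodes with the NodeSwap feature gate enabled and a swap controller
// available; critical pods, and containers with no memory request/limit or with request
// equal to limit, fall back to NoSwap.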
func (m *kubeGenericRuntimeManager) GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) types.SwapBehavior {
c := types.SwapBehavior(m.memorySwapBehavior)
if c == types.LimitedSwap {
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeSwap) || !m.getSwapControllerAvailable() {
return types.NoSwap
}
if !isCgroup2UnifiedMode() {
return types.NoSwap
}
if types.IsCriticalPod(pod) {
return types.NoSwap
}
podQos := kubeapiqos.GetPodQOS(pod)
containerDoesNotRequestMemory := container.Resources.Requests.Memory().IsZero() && container.Resources.Limits.Memory().IsZero()
memoryRequestEqualsToLimit := container.Resources.Requests.Memory().Cmp(*container.Resources.Limits.Memory()) == 0
if podQos != v1.PodQOSBurstable || containerDoesNotRequestMemory || memoryRequestEqualsToLimit {
return types.NoSwap
}
return c
}
return types.NoSwap
}
// generateContainerResources generates platform specific (linux) container resources config for runtime
func (m *kubeGenericRuntimeManager) generateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
enforceMemoryQoS := false
// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
isCgroup2UnifiedMode() {
enforceMemoryQoS = true
}
return &runtimeapi.ContainerResources{
Linux: m.generateLinuxContainerResources(ctx, pod, container, enforceMemoryQoS),
}
}
// generateUpdatePodSandboxResourcesRequest generates platform specific (linux) podsandbox resources config for runtime
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) *runtimeapi.UpdatePodSandboxResourcesRequest {
podResourcesWithoutOverhead := subtractOverheadFromResourceConfig(podResources, pod)
return &runtimeapi.UpdatePodSandboxResourcesRequest{
PodSandboxId: sandboxID,
Overhead: m.convertOverheadToLinuxResources(pod),
Resources: convertResourceConfigToLinuxContainerResources(podResourcesWithoutOverhead),
}
}
// calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests and limits
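// Illustrative example: cpuRequest=250m yields CpuShares = 250*1024/1000 = 256;
// cpuLimit=500m with the default 100ms CFS period yields
// CpuQuota = 500*100000/1000 = 50000.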
func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity, disableCPUQuota bool) *runtimeapi.LinuxContainerResources {
resources := runtimeapi.LinuxContainerResources{}
var cpuShares int64
memLimit := memoryLimit.Value()
// If request is not specified, but limit is, we want request to default to limit.
// API server does this for new containers, but we repeat this logic in Kubelet
// for containers running on existing Kubernetes clusters.
if cpuRequest == nil && cpuLimit != nil {
cpuShares = int64(cm.MilliCPUToShares(cpuLimit.MilliValue()))
} else {
// if cpuRequest.Amount is nil, then MilliCPUToShares will return the minimal number
// of CPU shares.
cpuShares = int64(cm.MilliCPUToShares(cpuRequest.MilliValue()))
}
resources.CpuShares = cpuShares
if memLimit != 0 {
resources.MemoryLimitInBytes = memLimit
}
if m.cpuCFSQuota {
// if cpuLimit.Amount is nil, then the appropriate default value is returned
// to allow full usage of cpu resource.
cpuPeriod := int64(cm.QuotaPeriod)
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
// kubeGenericRuntimeManager.cpuCFSQuotaPeriod is provided in time.Duration,
// but we need to convert it to number of microseconds which is used by kernel.
cpuPeriod = int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
}
cpuQuota := cm.MilliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
resources.CpuQuota = cpuQuota
if disableCPUQuota {
resources.CpuQuota = int64(-1)
}
resources.CpuPeriod = cpuPeriod
}
// runc requires cgroupv2 for unified mode
if isCgroup2UnifiedMode() && !ptr.Deref(m.singleProcessOOMKill, true) {
resources.Unified = map[string]string{
// Ask the kernel to kill all processes in the container cgroup in case of OOM.
// See memory.oom.group in https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html for
// more info.
"memory.oom.group": "1",
}
}
return &resources
}
// GetHugepageLimitsFromResources returns the limit for each hugepage size found in the resources.
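// Illustrative example: a container limit of hugepages-2Mi: "100Mi" results in a
// HugepageLimit{PageSize: "2MB", Limit: 104857600}; every other supported page size is
// explicitly set to 0 so stale limits are cleared on update.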
func GetHugepageLimitsFromResources(ctx context.Context, pod *v1.Pod, containerResources v1.ResourceRequirements) []*runtimeapi.HugepageLimit {
var hugepageLimits []*runtimeapi.HugepageLimit
// For each page size, limit to 0.
for _, pageSize := range libcontainercgroups.HugePageSizes() {
hugepageLimits = append(hugepageLimits, &runtimeapi.HugepageLimit{
PageSize: pageSize,
Limit: uint64(0),
})
}
requiredHugepageLimits := map[string]uint64{}
// When hugepage limits are specified at pod level and no hugepage limits are
// specified at container level, the container's cgroup will reflect the pod level limit.
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
for limitName, limitAmount := range pod.Spec.Resources.Limits {
readAndDefineRequiredHugepageLimit(ctx, requiredHugepageLimits, limitName, limitAmount)
}
}
// If both the pod level and the container level specify hugepages, the container
// level will have precedence, so the container's hugepages limit will be reflected
// in the container's cgroup values.
for resourceObj, amountObj := range containerResources.Limits {
readAndDefineRequiredHugepageLimit(ctx, requiredHugepageLimits, resourceObj, amountObj)
}
for _, hugepageLimit := range hugepageLimits {
if limit, exists := requiredHugepageLimits[hugepageLimit.PageSize]; exists {
hugepageLimit.Limit = limit
}
}
return hugepageLimits
}
func readAndDefineRequiredHugepageLimit(ctx context.Context, requiredHugepageLimits map[string]uint64, resourceObj v1.ResourceName, amountObj resource.Quantity) {
logger := klog.FromContext(ctx)
if !v1helper.IsHugePageResourceName(resourceObj) {
return
}
pageSize, err := v1helper.HugePageSizeFromResourceName(resourceObj)
if err != nil {
logger.Info("Failed to get hugepage size from resource", "object", resourceObj, "err", err)
return
}
sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize.Value())
if err != nil {
logger.Info("Size is invalid", "object", resourceObj, "err", err)
return
}
requiredHugepageLimits[sizeString] = uint64(amountObj.Value())
}
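// exampleHugepageLimitMapping is an illustrative sketch (not part of kubelet)
// of how a container hugepage limit flows through
// readAndDefineRequiredHugepageLimit. It assumes the conventional naming where
// the v1 resource "hugepages-2Mi" maps to the libcontainer-style page size key
// "2MB", so a 512Mi limit would surface as requiredHugepageLimits["2MB"] = 536870912.
func exampleHugepageLimitMapping(ctx context.Context) map[string]uint64 {
	required := map[string]uint64{}
	readAndDefineRequiredHugepageLimit(ctx, required, v1.ResourceName("hugepages-2Mi"), resource.MustParse("512Mi"))
	return required
}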
func toKubeContainerResources(statusResources *runtimeapi.ContainerResources) *kubecontainer.ContainerResources {
var cStatusResources *kubecontainer.ContainerResources
runtimeStatusResources := statusResources.GetLinux()
if runtimeStatusResources != nil {
var cpuLimit, memLimit, cpuRequest *resource.Quantity
if runtimeStatusResources.CpuPeriod > 0 {
milliCPU := quotaToMilliCPU(runtimeStatusResources.CpuQuota, runtimeStatusResources.CpuPeriod)
if milliCPU > 0 {
cpuLimit = resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
}
}
if runtimeStatusResources.CpuShares > 0 {
milliCPU := sharesToMilliCPU(runtimeStatusResources.CpuShares)
if milliCPU > 0 {
cpuRequest = resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
}
}
if runtimeStatusResources.MemoryLimitInBytes > 0 {
memLimit = resource.NewQuantity(runtimeStatusResources.MemoryLimitInBytes, resource.BinarySI)
}
if cpuLimit != nil || memLimit != nil || cpuRequest != nil {
cStatusResources = &kubecontainer.ContainerResources{
CPULimit: cpuLimit,
CPURequest: cpuRequest,
MemoryLimit: memLimit,
}
}
}
return cStatusResources
}
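// exampleRuntimeResourcesRoundTrip is an illustrative sketch (not part of
// kubelet) of the reverse mapping performed by toKubeContainerResources,
// assuming the usual CFS arithmetic: CpuQuota=50000 over CpuPeriod=100000
// recovers a 500m CPU limit, and CpuShares=512 recovers a 500m CPU request.
func exampleRuntimeResourcesRoundTrip() *kubecontainer.ContainerResources {
	status := &runtimeapi.ContainerResources{
		Linux: &runtimeapi.LinuxContainerResources{
			CpuQuota:           50000,
			CpuPeriod:          100000,
			CpuShares:          512,
			MemoryLimitInBytes: 256 * 1024 * 1024,
		},
	}
	// Expected (under the assumptions above): CPULimit=500m, CPURequest=500m, MemoryLimit=256Mi.
	return toKubeContainerResources(status)
}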
// Note: this function variable exists so that the cgroup version can be mocked
// in unit tests by assigning a new function to it. Without it, the cgroup
// version would depend solely on the environment running the test.
var isCgroup2UnifiedMode = func() bool {
return libcontainercgroups.IsCgroup2UnifiedMode()
}
// checkSwapControllerAvailability checks if swap controller is available.
// It returns true if the swap controller is available, false otherwise.
func checkSwapControllerAvailability(ctx context.Context) bool {
// See https://github.com/containerd/containerd/pull/7838/
logger := klog.FromContext(ctx)
const warn = "Failed to detect the availability of the swap controller, assuming not available"
p := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"
if isCgroup2UnifiedMode() {
// memory.swap.max does not exist in the cgroup root, so we check /sys/fs/cgroup/<SELF>/memory.swap.max
cm, err := libcontainercgroups.ParseCgroupFile("/proc/self/cgroup")
if err != nil {
logger.V(5).Error(fmt.Errorf("failed to parse /proc/self/cgroup: %w", err), warn)
return false
}
// For cgroup v2 unified hierarchy, there are no per-controller
// cgroup paths, so the cm map returned by ParseCgroupFile above
// has a single element where the key is empty string ("") and
// the value is the cgroup path the <pid> is in.
p = filepath.Join("/sys/fs/cgroup", cm[""], "memory.swap.max")
}
if _, err := os.Stat(p); err != nil {
if !errors.Is(err, os.ErrNotExist) {
logger.V(5).Error(err, warn)
}
return false
}
return true
}
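// exampleSwapControllerProbePath is an illustrative sketch (not part of
// kubelet): on a cgroup v2 host, a /proc/self/cgroup line such as
// "0::/system.slice/kubelet.service" parses to a single map entry keyed by the
// empty string, so checkSwapControllerAvailability would probe
// /sys/fs/cgroup/system.slice/kubelet.service/memory.swap.max.
func exampleSwapControllerProbePath() string {
	selfCgroup := map[string]string{"": "/system.slice/kubelet.service"} // shape assumed for ParseCgroupFile on v2
	return filepath.Join("/sys/fs/cgroup", selfCgroup[""], "memory.swap.max")
}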
// initSwapControllerAvailabilityCheck returns a function that checks swap controller availability
// with lazy initialization using sync.OnceValue
func initSwapControllerAvailabilityCheck(ctx context.Context) func() bool {
return sync.OnceValue(func() bool {
return checkSwapControllerAvailability(ctx)
})
}
type swapConfigurationHelper struct {
machineInfo cadvisorv1.MachineInfo
getSwapControllerAvailable func() bool
}
func newSwapConfigurationHelper(machineInfo cadvisorv1.MachineInfo, getSwapControllerAvailable func() bool) *swapConfigurationHelper {
return &swapConfigurationHelper{
machineInfo: machineInfo,
getSwapControllerAvailable: getSwapControllerAvailable,
}
}
func (m swapConfigurationHelper) ConfigureLimitedSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
logger := klog.FromContext(ctx)
containerMemoryRequest := container.Resources.Requests.Memory()
swapLimit, err := calcSwapForBurstablePods(containerMemoryRequest.Value(), int64(m.machineInfo.MemoryCapacity), int64(m.machineInfo.SwapCapacity))
if err != nil {
logger.Error(err, "Cannot calculate swap allocation amount; disallowing swap")
m.ConfigureNoSwap(ctx, lcr)
return
}
m.configureSwap(ctx, lcr, swapLimit)
}
func (m swapConfigurationHelper) ConfigureNoSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources) {
if !isCgroup2UnifiedMode() {
if m.getSwapControllerAvailable() {
// memorySwapLimit = total permitted memory+swap; if equal to memory limit, => 0 swap above memory limit
// Some swapping is still possible.
// Note that if memory limit is 0, memory swap limit is ignored.
lcr.MemorySwapLimitInBytes = lcr.MemoryLimitInBytes
}
return
}
m.configureSwap(ctx, lcr, 0)
}
func (m swapConfigurationHelper) configureSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, swapMemory int64) {
logger := klog.FromContext(ctx)
if !isCgroup2UnifiedMode() {
logger.Error(fmt.Errorf("swap configuration is not supported with cgroup v1"), "Swap configuration under cgroup v1 is unexpected")
return
}
if lcr.Unified == nil {
lcr.Unified = map[string]string{}
}
lcr.Unified[cm.Cgroup2MaxSwapFilename] = fmt.Sprintf("%d", swapMemory)
}
// The swap limit is calculated as (<containerMemoryRequest>/<nodeTotalMemory>)*<totalPodsSwapAvailable>.
// For more info, please look at the following KEP: https://kep.k8s.io/2400
func calcSwapForBurstablePods(containerMemoryRequest, nodeTotalMemory, totalPodsSwapAvailable int64) (int64, error) {
if nodeTotalMemory <= 0 {
return 0, fmt.Errorf("total node memory is 0")
}
if containerMemoryRequest > nodeTotalMemory {
return 0, fmt.Errorf("container request %d is larger than total node memory %d", containerMemoryRequest, nodeTotalMemory)
}
containerMemoryProportion := float64(containerMemoryRequest) / float64(nodeTotalMemory)
swapAllocation := containerMemoryProportion * float64(totalPodsSwapAvailable)
return int64(swapAllocation), nil
}
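// exampleBurstableSwapAllocation is an illustrative sketch (not part of
// kubelet) of the proportional split performed by calcSwapForBurstablePods:
// a container requesting 1GiB on a node with 8GiB of memory and 2GiB of swap
// available to pods is granted (1/8) * 2GiB = 256MiB of swap.
func exampleBurstableSwapAllocation() (int64, error) {
	const gib = int64(1) << 30
	return calcSwapForBurstablePods(1*gib, 8*gib, 2*gib) // 268435456 (256MiB), nil
}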
func toKubeContainerUser(statusUser *runtimeapi.ContainerUser) *kubecontainer.ContainerUser {
if statusUser == nil {
return nil
}
user := &kubecontainer.ContainerUser{}
if statusUser.GetLinux() != nil {
user.Linux = &kubecontainer.LinuxContainerUser{
UID: statusUser.GetLinux().GetUid(),
GID: statusUser.GetLinux().GetGid(),
SupplementalGroups: statusUser.GetLinux().GetSupplementalGroups(),
}
}
return user
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"time"
"go.opentelemetry.io/otel/trace"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// containerGC is the manager of garbage collection.
type containerGC struct {
client internalapi.RuntimeService
manager *kubeGenericRuntimeManager
podStateProvider podStateProvider
tracer trace.Tracer
}
// NewContainerGC creates a new containerGC.
func newContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager, tracer trace.Tracer) *containerGC {
return &containerGC{
client: client,
manager: manager,
podStateProvider: podStateProvider,
tracer: tracer,
}
}
// containerGCInfo is the internal information kept for containers being considered for GC.
type containerGCInfo struct {
// The ID of the container.
id string
// The name of the container.
name string
// Creation time for the container.
createTime time.Time
// If true, the container is in unknown state. Garbage collector should try
// to stop containers before removal.
unknown bool
}
// sandboxGCInfo is the internal information kept for sandboxes being considered for GC.
type sandboxGCInfo struct {
// The ID of the sandbox.
id string
// Creation time for the sandbox.
createTime time.Time
// If true, the sandbox is ready or still has containers.
active bool
}
// evictUnit is considered for eviction as units of (UID, container name) pair.
type evictUnit struct {
// UID of the pod.
uid types.UID
// Name of the container in the pod.
name string
}
type containersByEvictUnit map[evictUnit][]containerGCInfo
type sandboxesByPodUID map[types.UID][]sandboxGCInfo
// NumContainers returns the number of containers in this map.
func (cu containersByEvictUnit) NumContainers() int {
num := 0
for key := range cu {
num += len(cu[key])
}
return num
}
// NumEvictUnits returns the number of pods in this map.
func (cu containersByEvictUnit) NumEvictUnits() int {
return len(cu)
}
// Newest first.
type byCreated []containerGCInfo
func (a byCreated) Len() int { return len(a) }
func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
// Newest first.
type sandboxByCreated []sandboxGCInfo
func (a sandboxByCreated) Len() int { return len(a) }
func (a sandboxByCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sandboxByCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
// enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit.
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(ctx context.Context, evictUnits containersByEvictUnit, MaxContainers int) {
for key := range evictUnits {
toRemove := len(evictUnits[key]) - MaxContainers
if toRemove > 0 {
evictUnits[key] = cgc.removeOldestN(ctx, evictUnits[key], toRemove)
}
}
}
// removeOldestN removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containerGCInfo, toRemove int) []containerGCInfo {
logger := klog.FromContext(ctx)
// Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove
if numToKeep > 0 {
sort.Sort(byCreated(containers))
}
for i := len(containers) - 1; i >= numToKeep; i-- {
if containers[i].unknown {
// Containers in unknown state could be running, so we should try
// to stop them before removal.
id := kubecontainer.ContainerID{
Type: cgc.manager.runtimeName,
ID: containers[i].id,
}
message := "Container is in unknown state, try killing it before removal"
if err := cgc.manager.killContainer(ctx, nil, id, containers[i].name, message, reasonUnknown, nil, nil); err != nil {
logger.Error(err, "Failed to stop container", "containerID", containers[i].id)
continue
}
}
if err := cgc.manager.removeContainer(ctx, containers[i].id); err != nil {
logger.Error(err, "Failed to remove container", "containerID", containers[i].id)
}
}
// Assume we removed the containers so that we're not too aggressive.
return containers[:numToKeep]
}
// removeOldestNSandboxes removes the oldest inactive toRemove sandboxes.
func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes []sandboxGCInfo, toRemove int) {
numToKeep := len(sandboxes) - toRemove
if numToKeep > 0 {
sort.Sort(sandboxByCreated(sandboxes))
}
// Remove from oldest to newest (last to first).
for i := len(sandboxes) - 1; i >= numToKeep; i-- {
if !sandboxes[i].active {
cgc.removeSandbox(ctx, sandboxes[i].id)
}
}
}
// removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(ctx context.Context, sandboxID string) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Removing sandbox", "sandboxID", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(ctx, sandboxID); err != nil {
logger.Error(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID)
return
}
if err := cgc.client.RemovePodSandbox(ctx, sandboxID); err != nil {
logger.Error(err, "Failed to remove sandbox", "sandboxID", sandboxID)
}
}
// evictableContainers gets all containers that are evictable. Evictable containers are
// those that are not running and were created more than MinAge ago.
func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Duration) (containersByEvictUnit, error) {
containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil {
return containersByEvictUnit{}, err
}
evictUnits := make(containersByEvictUnit)
newestGCTime := time.Now().Add(-minAge)
for _, container := range containers {
// Prune out running containers.
if container.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
continue
}
createdAt := time.Unix(0, container.CreatedAt)
if newestGCTime.Before(createdAt) {
continue
}
labeledInfo := getContainerInfoFromLabels(ctx, container.Labels)
containerInfo := containerGCInfo{
id: container.Id,
name: container.Metadata.Name,
createTime: createdAt,
unknown: container.State == runtimeapi.ContainerState_CONTAINER_UNKNOWN,
}
key := evictUnit{
uid: labeledInfo.PodUID,
name: containerInfo.name,
}
evictUnits[key] = append(evictUnits[key], containerInfo)
}
return evictUnits, nil
}
// evictContainers evicts all containers that are evictable.
func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
// Separate containers by evict units.
evictUnits, err := cgc.evictableContainers(ctx, gcPolicy.MinAge)
if err != nil {
return err
}
// Remove deleted pod containers if all sources are ready.
if allSourcesReady {
for key, unit := range evictUnits {
if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) {
cgc.removeOldestN(ctx, unit, len(unit)) // Remove all.
delete(evictUnits, key)
}
}
}
// Enforce max containers per evict unit.
if gcPolicy.MaxPerPodContainer >= 0 {
cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, gcPolicy.MaxPerPodContainer)
}
// Enforce max total number of containers.
if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
// Leave an equal number of containers per evict unit (min: 1).
numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
if numContainersPerEvictUnit < 1 {
numContainersPerEvictUnit = 1
}
cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, numContainersPerEvictUnit)
// If we still need to evict, evict oldest first.
numContainers := evictUnits.NumContainers()
if numContainers > gcPolicy.MaxContainers {
flattened := make([]containerGCInfo, 0, numContainers)
for key := range evictUnits {
flattened = append(flattened, evictUnits[key]...)
}
sort.Sort(byCreated(flattened))
cgc.removeOldestN(ctx, flattened, numContainers-gcPolicy.MaxContainers)
}
}
return nil
}
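// exampleContainersPerEvictUnit is an illustrative sketch (not part of
// kubelet) of the per-evict-unit cap computed in evictContainers: with
// gcPolicy.MaxContainers=10 and 4 evict units, each unit is first trimmed to
// 10/4 = 2 containers (integer division, minimum 1); any remaining overage is
// then evicted globally, oldest first.
func exampleContainersPerEvictUnit(maxContainers, numEvictUnits int) int {
	perUnit := maxContainers / numEvictUnits
	if perUnit < 1 {
		perUnit = 1
	}
	return perUnit // exampleContainersPerEvictUnit(10, 4) == 2
}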
// evictSandboxes removes all evictable sandboxes. An evictable sandbox must
// meet the following requirements:
// 1. not in ready state
// 2. contains no containers.
// 3. belongs to a non-existent (i.e., already removed) pod, or is not the
// most recently created sandbox for the pod.
func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods bool) error {
containers, err := cgc.manager.getKubeletContainers(ctx, true)
if err != nil {
return err
}
sandboxes, err := cgc.manager.getKubeletSandboxes(ctx, true)
if err != nil {
return err
}
// collect the PodSandboxIds of all containers
sandboxIDs := sets.New[string]()
for _, container := range containers {
sandboxIDs.Insert(container.PodSandboxId)
}
sandboxesByPod := make(sandboxesByPodUID, len(sandboxes))
for _, sandbox := range sandboxes {
podUID := types.UID(sandbox.Metadata.Uid)
sandboxInfo := sandboxGCInfo{
id: sandbox.Id,
createTime: time.Unix(0, sandbox.CreatedAt),
}
// Set ready sandboxes and sandboxes that still have containers to be active.
if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY || sandboxIDs.Has(sandbox.Id) {
sandboxInfo.active = true
}
sandboxesByPod[podUID] = append(sandboxesByPod[podUID], sandboxInfo)
}
for podUID, sandboxes := range sandboxesByPod {
if cgc.podStateProvider.ShouldPodContentBeRemoved(podUID) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(podUID)) {
// Remove all evictable sandboxes if the pod has been removed.
// Note that the latest dead sandbox is also removed if there is
// already an active one.
cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes))
} else {
// Keep latest one if the pod still exists.
cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes)-1)
}
}
return nil
}
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesReady bool) error {
logger := klog.FromContext(ctx)
osInterface := cgc.manager.osInterface
podLogsDirectory := cgc.manager.podLogsDirectory
if allSourcesReady {
// Only remove pod logs directories when all sources are ready.
dirs, err := osInterface.ReadDir(podLogsDirectory)
if err != nil {
return fmt.Errorf("failed to read podLogsDirectory %q: %w", podLogsDirectory, err)
}
for _, dir := range dirs {
name := dir.Name()
podUID := parsePodUIDFromLogsDirectory(name)
if !cgc.podStateProvider.ShouldPodContentBeRemoved(podUID) {
continue
}
logger.V(4).Info("Removing pod logs", "podUID", podUID)
err := osInterface.RemoveAll(filepath.Join(podLogsDirectory, name))
if err != nil {
logger.Error(err, "Failed to remove pod logs directory", "path", name)
}
}
}
// Remove dead container log symlinks.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
logSymlinks, _ := osInterface.Glob(filepath.Join(legacyContainerLogsDir, fmt.Sprintf("*.%s", legacyLogSuffix)))
for _, logSymlink := range logSymlinks {
if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil {
resp, err := cgc.manager.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
// TODO: we should handle container not found (i.e. container was deleted) case differently
// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
logger.Info("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err)
} else {
status := resp.GetStatus()
if status == nil {
logger.V(4).Info("Container status is nil")
continue
}
if status.State != runtimeapi.ContainerState_CONTAINER_EXITED {
// Here is how container log rotation works (see containerLogManager#rotateLatestLog):
//
// 1. rename current log to rotated log file whose filename contains current timestamp (fmt.Sprintf("%s.%s", log, timestamp))
// 2. reopen the container log
// 3. if #2 fails, rename rotated log file back to container log
//
// There is a small but nondeterministic window during which the log file doesn't exist (between steps #1 and #2, or between #1 and #3).
// Hence the symlink may be deemed unhealthy during that period.
// See https://github.com/kubernetes/kubernetes/issues/52172
//
// We only remove unhealthy symlinks for dead containers
logger.V(5).Info("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
continue
}
}
} else {
logger.V(4).Info("Unable to obtain container ID", "err", err)
}
err := osInterface.Remove(logSymlink)
if err != nil {
logger.Error(err, "Failed to remove container log dead symlink", "path", logSymlink)
} else {
logger.V(4).Info("Removed symlink", "path", logSymlink)
}
}
}
return nil
}
// GarbageCollect removes dead containers using the specified container gc policy.
// Note that gc policy is not applied to sandboxes. Sandboxes are only removed when they are
// not ready and contain no containers.
//
// GarbageCollect consists of the following steps:
// * gets evictable containers which are not active and were created more than gcPolicy.MinAge ago.
// * removes oldest dead containers for each pod by enforcing gcPolicy.MaxPerPodContainer.
// * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
// * gets evictable sandboxes which are not ready and contain no containers.
// * removes evictable sandboxes.
func (cgc *containerGC) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
ctx, otelSpan := cgc.tracer.Start(ctx, "Containers/GarbageCollect")
defer otelSpan.End()
errors := []error{}
// Remove evictable containers
if err := cgc.evictContainers(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil {
errors = append(errors, err)
}
// Remove sandboxes with zero containers
if err := cgc.evictSandboxes(ctx, evictNonDeletedPods); err != nil {
errors = append(errors, err)
}
// Remove pod sandbox log directory
if err := cgc.evictPodLogsDirectories(ctx, allSourcesReady); err != nil {
errors = append(errors, err)
}
return utilerrors.NewAggregate(errors)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
crededentialprovider "k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecontainer.ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error) {
logger := klog.FromContext(ctx)
img := image.Image
imgSpec := toRuntimeAPIImageSpec(image)
if len(credentials) == 0 {
logger.V(3).Info("Pulling image without credentials", "image", img)
imageRef, err := m.imageService.PullImage(ctx, imgSpec, nil, podSandboxConfig)
if err != nil {
logger.Error(err, "Failed to pull image", "image", img)
return "", nil, err
}
return imageRef, nil, nil
}
var pullErrs []error
for _, currentCreds := range credentials {
auth := &runtimeapi.AuthConfig{
Username: currentCreds.Username,
Password: currentCreds.Password,
Auth: currentCreds.Auth,
ServerAddress: currentCreds.ServerAddress,
IdentityToken: currentCreds.IdentityToken,
RegistryToken: currentCreds.RegistryToken,
}
imageRef, err := m.imageService.PullImage(ctx, imgSpec, auth, podSandboxConfig)
// If there was no error, return success
if err == nil {
return imageRef, &currentCreds, nil
}
pullErrs = append(pullErrs, err)
}
return "", nil, utilerrors.NewAggregate(pullErrs)
}
// GetImageRef gets the ID of the image which is already in
// local storage. It returns ("", nil) if the image isn't in local storage.
func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubecontainer.ImageSpec) (string, error) {
logger := klog.FromContext(ctx)
resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
if err != nil {
logger.Error(err, "Failed to get image status", "image", image.Image)
return "", err
}
if resp.Image == nil {
return "", nil
}
return resp.Image.Id, nil
}
func (m *kubeGenericRuntimeManager) GetImageSize(ctx context.Context, image kubecontainer.ImageSpec) (uint64, error) {
logger := klog.FromContext(ctx)
resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
if err != nil {
logger.Error(err, "Failed to get image status", "image", image.Image)
return 0, err
}
if resp.Image == nil {
return 0, nil
}
return resp.Image.Size, nil
}
// ListImages gets all images currently on the machine.
func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) {
logger := klog.FromContext(ctx)
var images []kubecontainer.Image
allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil {
logger.Error(err, "Failed to list images")
return nil, err
}
for _, img := range allImages {
// Container runtimes may choose not to implement changes needed for KEP 4216. If
// the changes are not implemented by a container runtime, the existing behavior
// of not populating the runtimeHandler CRI field in ImageSpec struct is preserved.
// Therefore, when RuntimeClassInImageCriAPI feature gate is set, check to see if this
// field is empty and log a warning message.
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
if img.Spec == nil || (img.Spec != nil && img.Spec.RuntimeHandler == "") {
logger.V(2).Info("WARNING: RuntimeHandler is empty", "ImageID", img.Id)
}
}
images = append(images, kubecontainer.Image{
ID: img.Id,
Size: int64(img.Size),
RepoTags: img.RepoTags,
RepoDigests: img.RepoDigests,
Spec: toKubeContainerImageSpec(img),
Pinned: img.Pinned,
})
}
return images, nil
}
// RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubecontainer.ImageSpec) error {
logger := klog.FromContext(ctx)
err := m.imageService.RemoveImage(ctx, &runtimeapi.ImageSpec{Image: image.Image})
if err != nil {
logger.Error(err, "Failed to remove image", "image", image.Image)
return err
}
return nil
}
// ImageStats returns the statistics of the image.
// Notice that the current logic doesn't really work for images which share layers (e.g. docker images);
// this is a known issue, and we'll address this by getting imagefs stats directly from CRI.
// TODO: Get imagefs stats directly from CRI.
func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontainer.ImageStats, error) {
logger := klog.FromContext(ctx)
allImages, err := m.imageService.ListImages(ctx, nil)
if err != nil {
logger.Error(err, "Failed to list images")
return nil, err
}
stats := &kubecontainer.ImageStats{}
for _, img := range allImages {
stats.TotalStorageBytes += img.Size
}
return stats, nil
}
func (m *kubeGenericRuntimeManager) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
logger := klog.FromContext(ctx)
allImages, err := m.imageService.ImageFsInfo(ctx)
if err != nil {
logger.Error(err, "Failed to get image filesystem")
return nil, err
}
return allImages, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"io"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/cri-client/pkg/logs"
"k8s.io/klog/v2"
)
// ReadLogs reads the container log and redirects it into stdout and stderr.
// Note that containerID is only needed when following the log; otherwise
// just pass in an empty string "".
func (m *kubeGenericRuntimeManager) ReadLogs(ctx context.Context, path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Convert v1.PodLogOptions into internal log options.
opts := logs.NewLogOptions(apiOpts, time.Now())
logger := klog.FromContext(ctx)
return logs.ReadLogs(ctx, &logger, path, containerID, opts, m.runtimeService, stdout, stderr)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"go.opentelemetry.io/otel/trace"
grpcstatus "google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/component-base/logs/logreduction"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
crierror "k8s.io/cri-api/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/credentialprovider/plugin"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images"
imagepullmanager "k8s.io/kubernetes/pkg/kubelet/images/pullmanager"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/kubelet/util/format"
sc "k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/utils/ptr"
)
const (
// The api version of kubelet runtime api
kubeRuntimeAPIVersion = "0.1.0"
// A minimal shutdown window for avoiding unnecessary SIGKILLs
minimumGracePeriodInSeconds = 2
// The expiration time of version cache.
versionCacheTTL = 60 * time.Second
// How frequently to report identical errors
identicalErrorDelay = 1 * time.Minute
// OpenTelemetry instrumentation scope name
instrumentationScope = "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
)
var (
// ErrVersionNotSupported is returned when the api version of runtime interface is not supported
ErrVersionNotSupported = errors.New("runtime api version is not supported")
)
// podStateProvider can determine whether none of the pod's elements need to be retained (pod content)
// or whether none of its runtime elements need to be retained (containers)
type podStateProvider interface {
IsPodTerminationRequested(kubetypes.UID) bool
ShouldPodContentBeRemoved(kubetypes.UID) bool
ShouldPodRuntimeBeRemoved(kubetypes.UID) bool
}
type kubeGenericRuntimeManager struct {
runtimeName string
recorder record.EventRecorder
osInterface kubecontainer.OSInterface
// machineInfo contains the machine information.
machineInfo *cadvisorapi.MachineInfo
// Container GC manager
containerGC *containerGC
// Runner of lifecycle events.
runner kubecontainer.HandlerRunner
// RuntimeHelper that wraps kubelet to generate runtime container options.
runtimeHelper kubecontainer.RuntimeHelper
// Health check results.
livenessManager proberesults.Manager
readinessManager proberesults.Manager
startupManager proberesults.Manager
// If false, pass "memory.oom.group" to container cgroups when using cgroups v2 to cause processes
// in those cgroups to be killed as a unit by the OOM killer.
// It must be nil except for linux
singleProcessOOMKill *bool
// If true, enforce container cpu limits with CFS quota support
cpuCFSQuota bool
// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
cpuCFSQuotaPeriod metav1.Duration
// wrapped image puller.
imagePuller images.ImageManager
// gRPC service clients
runtimeService internalapi.RuntimeService
imageService internalapi.ImageManagerService
// The version cache of runtime daemon.
versionCache *cache.ObjectCache
// The directory path for seccomp profiles.
seccompProfileRoot string
// Container management interface for pod container.
containerManager cm.ContainerManager
// Internal lifecycle event handlers for container resource management.
internalLifecycle cm.InternalContainerLifecycle
// Manage container logs.
logManager logs.ContainerLogManager
// Manage RuntimeClass resources.
runtimeClassManager *runtimeclass.Manager
// Manager allocated & actuated resources.
allocationManager allocation.Manager
// Cache last per-container error message to reduce log spam
logReduction *logreduction.LogReduction
// PodState provider instance
podStateProvider podStateProvider
// Use RuntimeDefault as the default seccomp profile for all workloads.
seccompDefault bool
// MemorySwapBehavior defines how swap is used
memorySwapBehavior string
// Function to get node allocatable resources
getNodeAllocatable func() v1.ResourceList
// Memory throttling factor for MemoryQoS
memoryThrottlingFactor float64
// Root directory used to store pod logs
podLogsDirectory string
// Swap controller availability check function (Linux only)
// Uses sync.OnceValue for lazy initialization
getSwapControllerAvailable func() bool
}
// KubeGenericRuntime is an interface that contains the interfaces for container runtime and command execution.
type KubeGenericRuntime interface {
kubecontainer.Runtime
kubecontainer.StreamingRuntime
kubecontainer.CommandRunner
}
// NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager
func NewKubeGenericRuntimeManager(
ctx context.Context,
recorder record.EventRecorder,
livenessManager proberesults.Manager,
readinessManager proberesults.Manager,
startupManager proberesults.Manager,
rootDirectory string,
podLogsDirectory string,
machineInfo *cadvisorapi.MachineInfo,
podStateProvider podStateProvider,
maxPods int32,
osInterface kubecontainer.OSInterface,
runtimeHelper kubecontainer.RuntimeHelper,
insecureContainerLifecycleHTTPClient types.HTTPDoer,
imageBackOff *flowcontrol.Backoff,
serializeImagePulls bool,
maxParallelImagePulls *int32,
imagePullQPS float32,
imagePullBurst int,
imagePullsCredentialVerificationPolicy string,
preloadedImagesCredentialVerificationWhitelist []string,
imageCredentialProviderConfigPath string,
imageCredentialProviderBinDir string,
singleProcessOOMKill *bool,
cpuCFSQuota bool,
cpuCFSQuotaPeriod metav1.Duration,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
containerManager cm.ContainerManager,
logManager logs.ContainerLogManager,
runtimeClassManager *runtimeclass.Manager,
allocationManager allocation.Manager,
seccompDefault bool,
memorySwapBehavior string,
getNodeAllocatable func() v1.ResourceList,
memoryThrottlingFactor float64,
podPullingTimeRecorder images.ImagePodPullingTimeRecorder,
tracerProvider trace.TracerProvider,
tokenManager *token.Manager,
getServiceAccount plugin.GetServiceAccountFunc,
) (KubeGenericRuntime, []images.PostImageGCHook, error) {
logger := klog.FromContext(ctx)
runtimeService = newInstrumentedRuntimeService(runtimeService)
imageService = newInstrumentedImageManagerService(imageService)
tracer := tracerProvider.Tracer(instrumentationScope)
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
singleProcessOOMKill: singleProcessOOMKill,
cpuCFSQuota: cpuCFSQuota,
cpuCFSQuotaPeriod: cpuCFSQuotaPeriod,
seccompProfileRoot: filepath.Join(rootDirectory, "seccomp"),
livenessManager: livenessManager,
readinessManager: readinessManager,
startupManager: startupManager,
machineInfo: machineInfo,
osInterface: osInterface,
runtimeHelper: runtimeHelper,
runtimeService: runtimeService,
imageService: imageService,
containerManager: containerManager,
internalLifecycle: containerManager.InternalContainerLifecycle(),
logManager: logManager,
runtimeClassManager: runtimeClassManager,
allocationManager: allocationManager,
logReduction: logreduction.NewLogReduction(identicalErrorDelay),
seccompDefault: seccompDefault,
memorySwapBehavior: memorySwapBehavior,
getNodeAllocatable: getNodeAllocatable,
memoryThrottlingFactor: memoryThrottlingFactor,
podLogsDirectory: podLogsDirectory,
}
// Initialize swap controller availability check with lazy evaluation
kubeRuntimeManager.getSwapControllerAvailable = initSwapControllerAvailabilityCheck(ctx)
typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx)
if err != nil {
logger.Error(err, "Get runtime version failed")
return nil, nil, err
}
// Only matching kubeRuntimeAPIVersion is supported now
// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
if typedVersion.Version != kubeRuntimeAPIVersion {
logger.Error(err, "This runtime api version is not supported",
"apiVersion", typedVersion.Version,
"supportedAPIVersion", kubeRuntimeAPIVersion)
return nil, nil, ErrVersionNotSupported
}
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
logger.Info("Container runtime initialized",
"containerRuntime", typedVersion.RuntimeName,
"version", typedVersion.RuntimeVersion,
"apiVersion", typedVersion.RuntimeApiVersion)
if imageCredentialProviderConfigPath != "" || imageCredentialProviderBinDir != "" {
if err := plugin.RegisterCredentialProviderPlugins(imageCredentialProviderConfigPath, imageCredentialProviderBinDir, tokenManager.GetServiceAccountToken, getServiceAccount); err != nil {
logger.Error(err, "Failed to register CRI auth plugins")
os.Exit(1)
}
}
var imageGCHooks []images.PostImageGCHook
var imagePullManager imagepullmanager.ImagePullManager = &imagepullmanager.NoopImagePullManager{}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
imagePullCredentialsVerificationPolicy, err := imagepullmanager.NewImagePullCredentialVerificationPolicy(
kubeletconfiginternal.ImagePullCredentialsVerificationPolicy(imagePullsCredentialVerificationPolicy),
preloadedImagesCredentialVerificationWhitelist)
if err != nil {
return nil, nil, err
}
fsRecordAccessor, err := imagepullmanager.NewFSPullRecordsAccessor(rootDirectory)
if err != nil {
return nil, nil, fmt.Errorf("failed to setup the FSPullRecordsAccessor: %w", err)
}
var ( // variables used to determine cache/lock set sizes
maxParallelPulls = ptr.Deref(maxParallelImagePulls, 0)
intentCacheSize = max(2*maxPods, 2*maxParallelPulls)
pullRecordsCacheSize = 5 * maxPods
)
memCacheRecordsAccessor := imagepullmanager.NewCachedPullRecordsAccessor(fsRecordAccessor, intentCacheSize, pullRecordsCacheSize, maxParallelPulls)
imagePullManager, err = imagepullmanager.NewImagePullManager(ctx, memCacheRecordsAccessor, imagePullCredentialsVerificationPolicy, kubeRuntimeManager, maxParallelPulls)
if err != nil {
return nil, nil, fmt.Errorf("failed to create image pull manager: %w", err)
}
imageGCHooks = append(imageGCHooks, imagePullManager.PruneUnknownRecords)
}
nodeKeyring := credentialprovider.NewDefaultDockerKeyring()
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
nodeKeyring,
kubeRuntimeManager,
imagePullManager,
imageBackOff,
serializeImagePulls,
maxParallelImagePulls,
imagePullQPS,
imagePullBurst,
podPullingTimeRecorder)
kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(insecureContainerLifecycleHTTPClient, kubeRuntimeManager, kubeRuntimeManager, recorder)
kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager, tracer)
kubeRuntimeManager.podStateProvider = podStateProvider
kubeRuntimeManager.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return kubeRuntimeManager.getTypedVersion(ctx)
},
versionCacheTTL,
)
return kubeRuntimeManager, imageGCHooks, nil
}
// Type returns the type of the container runtime.
func (m *kubeGenericRuntimeManager) Type() string {
return m.runtimeName
}
func newRuntimeVersion(version string) (*utilversion.Version, error) {
if ver, err := utilversion.ParseSemantic(version); err == nil {
return ver, err
}
return utilversion.ParseGeneric(version)
}
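// exampleRuntimeVersionParsing is an illustrative sketch (not part of kubelet)
// of the fallback in newRuntimeVersion: a strict semantic version such as
// "1.7.2" is parsed by ParseSemantic, while a looser string such as "1.7"
// fails semantic parsing and falls through to ParseGeneric.
func exampleRuntimeVersionParsing() error {
	if _, err := newRuntimeVersion("1.7.2"); err != nil { // semantic parse succeeds
		return err
	}
	_, err := newRuntimeVersion("1.7") // generic fallback succeeds
	return err
}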
func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil {
return nil, fmt.Errorf("get remote runtime typed version failed: %v", err)
}
return typedVersion, nil
}
// Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version(ctx context.Context) (kubecontainer.Version, error) {
typedVersion, err := m.getTypedVersion(ctx)
if err != nil {
return nil, err
}
return newRuntimeVersion(typedVersion.RuntimeVersion)
}
// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.
// This may be different from the runtime engine's version.
func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) {
versionObject, err := m.versionCache.Get(m.machineInfo.MachineID)
if err != nil {
return nil, err
}
typedVersion := versionObject.(*runtimeapi.VersionResponse)
return newRuntimeVersion(typedVersion.RuntimeApiVersion)
}
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.RuntimeStatus, error) {
resp, err := m.runtimeService.Status(ctx, false)
if err != nil {
return nil, err
}
if resp.GetStatus() == nil {
return nil, errors.New("runtime status is nil")
}
return toKubeRuntimeStatus(resp.GetStatus(), resp.GetRuntimeHandlers(), resp.GetFeatures()), nil
}
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
logger := klog.FromContext(ctx)
pods := make(map[kubetypes.UID]*kubecontainer.Pod)
sandboxes, err := m.getKubeletSandboxes(ctx, all)
if err != nil {
return nil, err
}
for i := range sandboxes {
s := sandboxes[i]
if s.Metadata == nil {
logger.V(4).Info("Sandbox does not have metadata", "sandbox", s)
continue
}
podUID := kubetypes.UID(s.Metadata.Uid)
if _, ok := pods[podUID]; !ok {
pods[podUID] = &kubecontainer.Pod{
ID: podUID,
Name: s.Metadata.Name,
Namespace: s.Metadata.Namespace,
}
}
p := pods[podUID]
converted, err := m.sandboxToKubeContainer(s)
if err != nil {
logger.V(4).Info("Convert sandbox of pod failed", "runtimeName", m.runtimeName, "sandbox", s, "podUID", podUID, "err", err)
continue
}
p.Sandboxes = append(p.Sandboxes, converted)
p.CreatedAt = uint64(s.GetCreatedAt())
}
containers, err := m.getKubeletContainers(ctx, all)
if err != nil {
return nil, err
}
for i := range containers {
c := containers[i]
if c.Metadata == nil {
logger.V(4).Info("Container does not have metadata", "container", c)
continue
}
labelledInfo := getContainerInfoFromLabels(ctx, c.Labels)
pod, found := pods[labelledInfo.PodUID]
if !found {
pod = &kubecontainer.Pod{
ID: labelledInfo.PodUID,
Name: labelledInfo.PodName,
Namespace: labelledInfo.PodNamespace,
}
pods[labelledInfo.PodUID] = pod
}
converted, err := m.toKubeContainer(ctx, c)
if err != nil {
logger.V(4).Info("Convert container of pod failed", "runtimeName", m.runtimeName, "container", c, "podUID", labelledInfo.PodUID, "err", err)
continue
}
pod.Containers = append(pod.Containers, converted)
}
// Convert map to list.
var result []*kubecontainer.Pod
for _, pod := range pods {
result = append(result, pod)
}
// There are scenarios where multiple pods are running in parallel having
// the same name, because one of them has not been fully terminated yet.
// To avoid unexpected behavior on container name based search (for example
// by calling *Kubelet.findContainer() without specifying a pod ID), we now
// return the list of pods ordered by their creation time.
sort.SliceStable(result, func(i, j int) bool {
return result[i].CreatedAt > result[j].CreatedAt
})
logger.V(4).Info("Retrieved pods from runtime", "all", all)
return result, nil
}
// containerKillReason explains what killed a given container
type containerKillReason string
const (
reasonStartupProbe containerKillReason = "StartupProbe"
reasonLivenessProbe containerKillReason = "LivenessProbe"
reasonFailedPostStartHook containerKillReason = "FailedPostStartHook"
reasonUnknown containerKillReason = "Unknown"
)
// containerToKillInfo contains necessary information to kill a container.
type containerToKillInfo struct {
// The spec of the container.
container *v1.Container
// The name of the container.
name string
// The message indicates why the container will be killed.
message string
// The reason is a clearer source of info on why a container will be killed
// TODO: replace message with reason?
reason containerKillReason
}
// containerResources holds the set of resources applicable to the running container
type containerResources struct {
memoryLimit int64
memoryRequest int64
cpuLimit int64
cpuRequest int64
}
// containerToUpdateInfo contains necessary information to update a container's resources.
type containerToUpdateInfo struct {
// The spec of the container.
container *v1.Container
// ID of the runtime container that needs resource update
kubeContainerID kubecontainer.ContainerID
// Desired resources for the running container
desiredContainerResources containerResources
// Most recently configured resources on the running container
currentContainerResources *containerResources
}
// podActions keeps information what to do for a pod.
type podActions struct {
// Stop all running (regular, init and ephemeral) containers and the sandbox for the pod.
KillPod bool
// Whether we need to create a new sandbox. If we need to kill the pod and create
// a new pod sandbox, all init containers need to be purged (i.e., removed).
CreateSandbox bool
// The id of existing sandbox. It is used for starting containers in ContainersToStart.
SandboxID string
// The attempt number of creating sandboxes for the pod.
Attempt uint32
// InitContainersToStart keeps a list of indexes for the init containers to
// start, where the index is the index of the specific init container in the
// pod spec (pod.Spec.InitContainers).
InitContainersToStart []int
// ContainersToStart keeps a list of indexes for the containers to start,
// where the index is the index of the specific container in the pod spec (
// pod.Spec.Containers).
ContainersToStart []int
// ContainersToKill keeps a map of containers that need to be killed, note that
// the key is the container ID of the container, while
// the value contains necessary information to kill a container.
ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
// EphemeralContainersToStart is a list of indexes for the ephemeral containers to start,
// where the index is the index of the specific container in pod.Spec.EphemeralContainers.
EphemeralContainersToStart []int
// ContainersToUpdate keeps a list of containers needing resource update.
// Container resource update is applicable only for CPU and memory.
ContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
// UpdatePodResources is true if container(s) need resource update with restart
UpdatePodResources bool
}
func (p podActions) String() string {
return fmt.Sprintf("KillPod: %t, CreateSandbox: %t, UpdatePodResources: %t, Attempt: %d, InitContainersToStart: %v, ContainersToStart: %v, EphemeralContainersToStart: %v, ContainersToUpdate: %v, ContainersToKill: %v",
p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.ContainersToUpdate, p.ContainersToKill)
}
// containerChanged will determine whether the container has changed based on the fields that will affect the running of the container.
// Currently, only the `image` and `name` fields are considered.
// We don't need to consider the pod UID here, because we find the containerStatus through the pod UID.
// If the pod UID changes, we will not be able to find the containerStatus to compare against.
func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
expectedHash := kubecontainer.HashContainer(container)
return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash
}
func shouldRestartOnFailure(pod *v1.Pod) bool {
return pod.Spec.RestartPolicy != v1.RestartPolicyNever
}
func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
cStatus := podStatus.FindContainerStatusByName(c.Name)
if cStatus == nil {
return false
}
// Container has exited, with an exit code of 0.
return cStatus.State == kubecontainer.ContainerStateExited && cStatus.ExitCode == 0
}
func containerResourcesFromRequirements(requirements *v1.ResourceRequirements) containerResources {
return containerResources{
memoryLimit: requirements.Limits.Memory().Value(),
memoryRequest: requirements.Requests.Memory().Value(),
cpuLimit: requirements.Limits.Cpu().MilliValue(),
cpuRequest: requirements.Requests.Cpu().MilliValue(),
}
}
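// exampleContainerResourcesFromRequirements is an illustrative sketch (not
// part of kubelet) of how v1 resource requirements collapse into the internal
// containerResources struct above: memory is kept in bytes, CPU in milli-CPU.
func exampleContainerResourcesFromRequirements() containerResources {
	req := v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("500m"),
			v1.ResourceMemory: resource.MustParse("256Mi"),
		},
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("250m"),
			v1.ResourceMemory: resource.MustParse("128Mi"),
		},
	}
	// Expected: {memoryLimit: 268435456, memoryRequest: 134217728, cpuLimit: 500, cpuRequest: 250}
	return containerResourcesFromRequirements(&req)
}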
// computePodResizeAction determines the actions required (if any) to resize the given container.
// Returns whether to keep (true) or restart (false) the container.
// TODO(vibansal): Make this function to be agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
func (m *kubeGenericRuntimeManager) computePodResizeAction(ctx context.Context, pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
logger := klog.FromContext(ctx)
if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); !resizable {
return true
}
var container v1.Container
if isRestartableInitContainer {
container = pod.Spec.InitContainers[containerIdx]
} else {
container = pod.Spec.Containers[containerIdx]
}
// Determine if the *running* container needs resource update by comparing v1.Spec.Resources (desired)
// with v1.Status.Resources / runtime.Status.Resources (last known actual).
// Proceed only when the kubelet has accepted the resize, i.e. v1.Spec.Resources.Requests == v1.Status.AllocatedResources.
// Skip if runtime containerID doesn't match pod.Status containerID (container is restarting)
if kubeContainerStatus.State != kubecontainer.ContainerStateRunning {
return true
}
actuatedResources, found := m.allocationManager.GetActuatedResources(pod.UID, container.Name)
if !found {
logger.Error(nil, "Missing actuated resource record", "pod", klog.KObj(pod), "container", container.Name)
// Proceed with the zero-value actuated resources. For restart NotRequired, this may
// result in an extra call to UpdateContainerResources, but that call should be idempotent.
// For RestartContainer, this may trigger a container restart.
}
desiredResources := containerResourcesFromRequirements(&container.Resources)
currentResources := containerResourcesFromRequirements(&actuatedResources)
if currentResources == desiredResources {
// No resize required.
return true
}
determineContainerResize := func(rName v1.ResourceName, desiredValue, currentValue int64) (resize, restart bool) {
if desiredValue == currentValue {
return false, false
}
for _, policy := range container.ResizePolicy {
if policy.ResourceName == rName {
return true, policy.RestartPolicy == v1.RestartContainer
}
}
// If a resource policy isn't set, the implicit default is NotRequired.
return true, false
}
markContainerForUpdate := func(rName v1.ResourceName, desiredValue, currentValue int64) {
cUpdateInfo := containerToUpdateInfo{
container: &container,
kubeContainerID: kubeContainerStatus.ID,
desiredContainerResources: desiredResources,
currentContainerResources: &currentResources,
}
// Order the container updates such that resource decreases are applied before increases
switch {
case desiredValue > currentValue: // append
changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
case desiredValue < currentValue: // prepend
changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
changes.ContainersToUpdate[rName][0] = cUpdateInfo
}
}
resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
resizeMemReq, restartMemReq := determineContainerResize(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
resizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
resizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
if restartCPULim || restartCPUReq || restartMemLim || restartMemReq {
// resize policy requires this container to restart
changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
name: kubeContainerStatus.Name,
container: &container,
message: fmt.Sprintf("Container %s resize requires restart", container.Name),
}
if isRestartableInitContainer {
changes.InitContainersToStart = append(changes.InitContainersToStart, containerIdx)
} else {
changes.ContainersToStart = append(changes.ContainersToStart, containerIdx)
}
changes.UpdatePodResources = true
return false
} else {
if resizeMemLim {
markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
} else if resizeMemReq {
markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
}
if resizeCPULim {
markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
} else if resizeCPUReq {
markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
}
}
return true
}
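// exampleResizeUpdateOrdering is an illustrative sketch (not part of kubelet)
// of the ordering rule used by markContainerForUpdate in
// computePodResizeAction: resource decreases are prepended and increases are
// appended, so decreases are always applied before increases.
func exampleResizeUpdateOrdering(desired, current []int64) []int64 {
	var ordered []int64
	for i := range desired {
		switch {
		case desired[i] > current[i]: // increase: append
			ordered = append(ordered, desired[i])
		case desired[i] < current[i]: // decrease: prepend
			ordered = append([]int64{desired[i]}, ordered...)
		}
	}
	return ordered
}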
func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, podContainerChanges podActions) *kubecontainer.SyncResult {
logger := klog.FromContext(ctx)
start := time.Now()
success := false
defer func() {
metrics.PodResizeDurationMilliseconds.WithLabelValues(strconv.FormatBool(success)).Observe(float64(time.Since(start).Milliseconds()))
}()
resizeResult := kubecontainer.NewSyncResult(kubecontainer.ResizePodInPlace, format.Pod(pod))
pcm := m.containerManager.NewPodContainerManager()
//TODO(vinaykul,InPlacePodVerticalScaling): Figure out best way to get enforceMemoryQoS value (parameter #4 below) in platform-agnostic way
enforceCPULimits := m.cpuCFSQuota
if utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod) {
enforceCPULimits = false
logger.V(2).Info("Disabled CFS quota", "pod", klog.KObj(pod))
}
podResources := cm.ResourceConfigForPod(pod, enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
if podResources == nil {
logger.Error(nil, "Unable to get resource configuration", "pod", klog.KObj(pod))
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get resource configuration processing resize for pod %q", format.Pod(pod)))
return resizeResult
}
currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
if err != nil {
logger.Error(err, "Unable to get pod cgroup memory config", "pod", klog.KObj(pod))
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup memory config for pod %q", format.Pod(pod)))
return resizeResult
}
currentPodCPUConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
if err != nil {
logger.Error(err, "Unable to get pod cgroup cpu config", "pod", klog.KObj(pod))
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup cpu config for pod %q", format.Pod(pod)))
return resizeResult
}
currentPodResources := podResources
currentPodResources = mergeResourceConfig(currentPodResources, currentPodMemoryConfig)
currentPodResources = mergeResourceConfig(currentPodResources, currentPodCPUConfig)
// Before proceeding with the resize, perform a best-effort check to catch potential resize
// errors in order to avoid a partial-resize state.
if err := m.validatePodResizeAction(ctx, pod, podStatus, currentPodResources, podResources, podContainerChanges); err != nil {
logger.Error(err, "Allocated pod resize is not currently feasible", "pod", klog.KObj(pod))
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, err.Error())
return resizeResult
}
setPodCgroupConfig := func(rName v1.ResourceName, setLimitValue bool) error {
var err error
resizedResources := &cm.ResourceConfig{}
switch rName {
case v1.ResourceCPU:
if setLimitValue {
resizedResources.CPUPeriod = podResources.CPUPeriod
resizedResources.CPUQuota = podResources.CPUQuota
} else {
resizedResources.CPUShares = podResources.CPUShares
}
case v1.ResourceMemory:
if !setLimitValue {
// Memory requests aren't written to cgroups.
return nil
}
resizedResources.Memory = podResources.Memory
}
err = pcm.SetPodCgroupConfig(pod, resizedResources)
if err != nil {
logger.Error(err, "Failed to set cgroup config", "resource", rName, "pod", klog.KObj(pod))
return err
}
currentPodResources = mergeResourceConfig(currentPodResources, resizedResources)
if err = m.updatePodSandboxResources(ctx, podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
logger.Error(err, "Failed to notify runtime for UpdatePodSandboxResources", "resource", rName, "pod", klog.KObj(pod))
// Don't propagate the error since the updatePodSandboxResources call is best-effort.
}
return nil
}
// Memory and CPU are updated separately because memory resizes may be ordered differently than CPU resizes.
// If the resize results in a net pod resource increase, set the pod cgroup config before resizing containers.
// If the resize results in a net pod resource decrease, set the pod cgroup config after resizing containers.
// If an error occurs at any point, abort and let future SyncPod iterations retry the unfinished work.
resizeContainers := func(rName v1.ResourceName, currPodCgLimValue, newPodCgLimValue, currPodCgReqValue, newPodCgReqValue int64) error {
var err error
// When sizing up, limits must grow before requests in order to preserve "requests <= limits".
if newPodCgLimValue > currPodCgLimValue {
if err = setPodCgroupConfig(rName, true); err != nil {
return err
}
}
if newPodCgReqValue > currPodCgReqValue {
if err = setPodCgroupConfig(rName, false); err != nil {
return err
}
}
if len(podContainerChanges.ContainersToUpdate[rName]) > 0 {
if err = m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.ContainersToUpdate[rName]); err != nil {
logger.Error(err, "updatePodContainerResources failed", "pod", format.Pod(pod), "resource", rName)
return err
}
}
// When sizing down, requests must shrink before limits in order to preserve "requests <= limits".
if newPodCgReqValue < currPodCgReqValue {
if err = setPodCgroupConfig(rName, false); err != nil {
return err
}
}
if newPodCgLimValue < currPodCgLimValue {
if err = setPodCgroupConfig(rName, true); err != nil {
return err
}
}
return err
}
// Always update the pod status once. Even if there was a resize error, the resize may have been
// partially actuated.
defer m.runtimeHelper.SetPodWatchCondition(pod.UID, "doPodResizeAction", func(*kubecontainer.PodStatus) bool { return true })
if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources {
if podResources.Memory == nil {
// Default pod memory limit to the current memory limit if unset to prevent it from updating.
// TODO(#128675): This does not support removing limits.
podResources.Memory = currentPodMemoryConfig.Memory
}
if errResize := resizeContainers(v1.ResourceMemory, int64(*currentPodMemoryConfig.Memory), *podResources.Memory, 0, 0); errResize != nil {
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, errResize.Error())
return resizeResult
}
}
if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources {
if podResources.CPUShares == nil {
// This shouldn't happen: ResourceConfigForPod always returns a non-nil value for CPUShares.
logger.Error(nil, "podResources.CPUShares is nil", "pod", pod.Name)
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("podResources.CPUShares is nil for pod %s", pod.Name))
return resizeResult
}
// Default pod CPUQuota to the current CPUQuota if no limit is set to prevent the pod limit
// from updating.
// TODO(#128675): This does not support removing limits.
if podResources.CPUQuota == nil {
podResources.CPUQuota = currentPodCPUConfig.CPUQuota
}
if errResize := resizeContainers(v1.ResourceCPU, *currentPodCPUConfig.CPUQuota, *podResources.CPUQuota,
int64(*currentPodCPUConfig.CPUShares), int64(*podResources.CPUShares)); errResize != nil {
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, errResize.Error())
return resizeResult
}
}
success = true
return resizeResult
}
// validatePodResizeAction checks whether the proposed resize actions are currently viable.
func (m *kubeGenericRuntimeManager) validatePodResizeAction(
ctx context.Context,
pod *v1.Pod,
podStatus *kubecontainer.PodStatus,
currentPodResources, desiredPodResources *cm.ResourceConfig,
podContainerChanges podActions,
) error {
if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources {
return m.validateMemoryResizeAction(ctx, pod, podStatus, currentPodResources, desiredPodResources, podContainerChanges)
}
return nil
}
func (m *kubeGenericRuntimeManager) validateMemoryResizeAction(
ctx context.Context,
pod *v1.Pod,
podStatus *kubecontainer.PodStatus,
currentPodResources, desiredPodResources *cm.ResourceConfig,
podContainerChanges podActions,
) error {
// Determine which memory limits are decreasing.
podLimitDecreasing := desiredPodResources.Memory != nil &&
(currentPodResources.Memory == nil || // Pod memory limit added
*desiredPodResources.Memory < *currentPodResources.Memory) // Pod memory limit decreasing
decreasingContainerLimits := map[string]int64{} // Map of container name to desired memory limit.
for _, cUpdate := range podContainerChanges.ContainersToUpdate[v1.ResourceMemory] {
if cUpdate.desiredContainerResources.memoryLimit != 0 {
if cUpdate.currentContainerResources == nil || cUpdate.currentContainerResources.memoryLimit == 0 || // Limit added
cUpdate.desiredContainerResources.memoryLimit < cUpdate.currentContainerResources.memoryLimit { // Limit decreasing
decreasingContainerLimits[cUpdate.container.Name] = cUpdate.desiredContainerResources.memoryLimit
}
}
}
if !podLimitDecreasing && len(decreasingContainerLimits) == 0 {
// No memory limits are decreasing: nothing else to check here.
return nil
}
// Check whether any of the new memory limits are below current memory usage.
podUsageStats, err := m.runtimeHelper.PodCPUAndMemoryStats(ctx, pod, podStatus)
if err != nil {
return fmt.Errorf("unable to read memory usage for pod %q", format.Pod(pod))
}
var errs []error
if podLimitDecreasing {
if podUsageStats.Memory == nil || podUsageStats.Memory.UsageBytes == nil {
errs = append(errs, fmt.Errorf("missing pod memory usage"))
} else if *podUsageStats.Memory.UsageBytes >= uint64(*desiredPodResources.Memory) {
errs = append(errs, fmt.Errorf("attempting to set pod memory limit (%d) below current usage (%d)",
*desiredPodResources.Memory, *podUsageStats.Memory.UsageBytes))
}
}
for _, cStats := range podUsageStats.Containers {
if desiredLimit, ok := decreasingContainerLimits[cStats.Name]; ok {
if cStats.Memory == nil || cStats.Memory.UsageBytes == nil {
errs = append(errs, fmt.Errorf("missing container %q memory usage", cStats.Name))
} else if *cStats.Memory.UsageBytes >= uint64(desiredLimit) {
errs = append(errs, fmt.Errorf("attempting to set container %q memory limit (%d) below current usage (%d)",
cStats.Name, desiredLimit, *cStats.Memory.UsageBytes))
}
}
}
if len(errs) > 0 {
agg := utilerrors.NewAggregate(errs)
return fmt.Errorf("cannot decrease memory limits: %w", agg)
}
return nil
}
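// updatePodContainerResources issues an UpdateContainerResources CRI call for each
// container in containersToUpdate. Only the named resource is changed; the most
// recently configured values of the other resource are carried over unchanged.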
func (m *kubeGenericRuntimeManager) updatePodContainerResources(ctx context.Context, pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo) error {
logger := klog.FromContext(ctx)
logger.V(5).Info("Updating container resources", "pod", klog.KObj(pod))
for _, cInfo := range containersToUpdate {
container := cInfo.container.DeepCopy()
// If updating memory limit, use most recently configured CPU request and limit values.
// If updating CPU request and limit, use most recently configured memory request and limit values.
switch resourceName {
case v1.ResourceMemory:
container.Resources.Limits = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cInfo.currentContainerResources.cpuLimit, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(cInfo.desiredContainerResources.memoryLimit, resource.BinarySI),
}
container.Resources.Requests = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cInfo.currentContainerResources.cpuRequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(cInfo.desiredContainerResources.memoryRequest, resource.BinarySI),
}
case v1.ResourceCPU:
container.Resources.Limits = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cInfo.desiredContainerResources.cpuLimit, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(cInfo.currentContainerResources.memoryLimit, resource.BinarySI),
}
container.Resources.Requests = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cInfo.desiredContainerResources.cpuRequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(cInfo.currentContainerResources.memoryRequest, resource.BinarySI),
}
}
if err := m.updateContainerResources(ctx, pod, container, cInfo.kubeContainerID); err != nil {
// Log error and abort as container updates need to succeed in the order determined by computePodResizeAction.
// The recovery path is for SyncPod to keep retrying at later times until it succeeds.
logger.Error(err, "updateContainerResources failed", "container", container.Name, "cID", cInfo.kubeContainerID,
"pod", format.Pod(pod), "resourceName", resourceName)
return err
}
// If UpdateContainerResources returned without error, the runtime accepted the desired values for 'resourceName'.
// Update currentContainerResources for 'resourceName' accordingly; it is our view of the most recently configured resources.
// Note: We can't rely on GetPodStatus, as the runtime may lag in actuating the resource values it just accepted.
switch resourceName {
case v1.ResourceMemory:
cInfo.currentContainerResources.memoryLimit = cInfo.desiredContainerResources.memoryLimit
cInfo.currentContainerResources.memoryRequest = cInfo.desiredContainerResources.memoryRequest
case v1.ResourceCPU:
cInfo.currentContainerResources.cpuLimit = cInfo.desiredContainerResources.cpuLimit
cInfo.currentContainerResources.cpuRequest = cInfo.desiredContainerResources.cpuRequest
}
}
return nil
}
// computePodActions checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
logger := klog.FromContext(ctx)
logger.V(5).Info("Syncing Pod", "pod", klog.KObj(pod))
createPodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)
changes := podActions{
KillPod: createPodSandbox,
CreateSandbox: createPodSandbox,
SandboxID: sandboxID,
Attempt: attempt,
ContainersToStart: []int{},
ContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),
}
// If we need to (re-)create the pod sandbox, everything will need to be
// killed and recreated, and init containers should be purged.
if createPodSandbox {
if !shouldRestartOnFailure(pod) && attempt != 0 && len(podStatus.ContainerStatuses) != 0 {
// Should not restart the pod, just return.
// We should not create a sandbox, and should just kill the pod if it is already done.
// If all containers are done and should not be started, there is no need to create a new sandbox.
// This avoids confusing logs on pods whose containers all have exit codes, where we would otherwise
// recreate a sandbox just before terminating it.
//
// If ContainerStatuses is empty, we assume that we've never
// successfully created any containers. In this case, we should
// retry creating the sandbox.
changes.CreateSandbox = false
return changes
}
// Get the containers to start, excluding the ones that succeeded if RestartPolicy is OnFailure.
var containersToStart []int
for idx, c := range pod.Spec.Containers {
runOnce := pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) && c.RestartPolicy != nil {
// When restart rules are enabled, a container-level restart policy takes precedence.
runOnce = *c.RestartPolicy == v1.ContainerRestartPolicyOnFailure
}
if runOnce && containerSucceeded(&c, podStatus) {
continue
}
containersToStart = append(containersToStart, idx)
}
// We should not create a sandbox, and just kill the pod if initialization
// is done and there is no container to start.
if len(containersToStart) == 0 {
// If any regular container has been created, all init containers must
// have finished initializing.
hasInitialized := hasAnyRegularContainerCreated(pod, podStatus)
if hasInitialized {
changes.CreateSandbox = false
return changes
}
}
// If we are creating a pod sandbox, we should restart from the initial
// state.
if len(pod.Spec.InitContainers) != 0 {
// Pod has init containers, return the first one.
changes.InitContainersToStart = []int{0}
return changes
}
changes.ContainersToStart = containersToStart
return changes
}
// Ephemeral containers may be started even if initialization is not yet complete.
for i := range pod.Spec.EphemeralContainers {
c := (*v1.Container)(&pod.Spec.EphemeralContainers[i].EphemeralContainerCommon)
// Ephemeral Containers are never restarted
if podStatus.FindContainerStatusByName(c.Name) == nil {
changes.EphemeralContainersToStart = append(changes.EphemeralContainersToStart, i)
}
}
if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); resizable {
changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
}
// Check initialization progress.
// TODO: Remove this code path as logically it is the subset of the next
// code path.
hasInitialized := m.computeInitContainerActions(ctx, pod, podStatus, &changes)
if changes.KillPod || !hasInitialized {
// Initialization failed or still in progress. Skip inspecting non-init
// containers.
return changes
}
// Number of running containers to keep.
keepCount := 0
// check the status of containers.
for idx, container := range pod.Spec.Containers {
containerStatus := podStatus.FindContainerStatusByName(container.Name)
// Call internal container post-stop lifecycle hook for any non-running container so that any
// allocated cpus are released immediately. If the container is restarted, cpus will be re-allocated
// to it.
if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {
logger.Error(err, "Internal container post-stop lifecycle hook failed for container in pod with error",
"containerName", container.Name, "pod", klog.KObj(pod))
}
}
// If container does not exist, or is not running, check whether we
// need to restart it.
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
if kubecontainer.ShouldContainerBeRestarted(logger, &container, pod, podStatus) {
logger.V(3).Info("Container of pod is not in the desired state and shall be started", "containerName", container.Name, "pod", klog.KObj(pod))
changes.ContainersToStart = append(changes.ContainersToStart, idx)
if containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {
// If container is in unknown state, we don't know whether it
// is actually running or not, always try killing it before
// restart to avoid having 2 running instances of the same container.
changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
name: containerStatus.Name,
container: &pod.Spec.Containers[idx],
message: fmt.Sprintf("Container is in %q state, try killing it before restart",
containerStatus.State),
reason: reasonUnknown,
}
}
}
continue
}
// The container is running, but kill the container if any of the following condition is met.
var message string
var reason containerKillReason
restart := shouldRestartOnFailure(pod)
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
// For probe failures, use container-level restart policy only. Container-level restart
// rules are not evaluated because the container is still running.
if container.RestartPolicy != nil {
restart = *container.RestartPolicy != v1.ContainerRestartPolicyNever
}
}
if _, _, changed := containerChanged(&container, containerStatus); changed {
message = fmt.Sprintf("Container %s definition changed", container.Name)
// Restart regardless of the restart policy because the container
// spec changed.
restart = true
} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {
// If the container failed the liveness probe, we should kill it.
message = fmt.Sprintf("Container %s failed liveness probe", container.Name)
reason = reasonLivenessProbe
} else if startup, found := m.startupManager.Get(containerStatus.ID); found && startup == proberesults.Failure {
// If the container failed the startup probe, we should kill it.
message = fmt.Sprintf("Container %s failed startup probe", container.Name)
reason = reasonStartupProbe
} else if !m.computePodResizeAction(ctx, pod, idx, false, containerStatus, &changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
continue
} else {
// Keep the container.
keepCount++
continue
}
// We need to kill the container, but if we also want to restart the
// container afterwards, make the intent clear in the message. Also do
// not kill the entire pod since we expect container to be running eventually.
if restart {
message = fmt.Sprintf("%s, will be restarted", message)
changes.ContainersToStart = append(changes.ContainersToStart, idx)
}
changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
name: containerStatus.Name,
container: &pod.Spec.Containers[idx],
message: message,
reason: reason,
}
logger.V(2).Info("Message for Container of pod", "containerName", container.Name, "containerStatusID", containerStatus.ID, "pod", klog.KObj(pod), "containerMessage", message)
}
if keepCount == 0 && len(changes.ContainersToStart) == 0 {
changes.KillPod = true
// To prevent restartable init containers from keeping the pod alive, we should
// not restart them.
changes.InitContainersToStart = nil
}
return changes
}
// SyncPod syncs the running pod into the desired pod by executing following steps:
//
// 1. Compute sandbox and container changes.
// 2. Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
// 5. Create ephemeral containers.
// 6. Create init containers.
// 7. Resize running containers (if InPlacePodVerticalScaling==true)
// 8. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
logger := klog.FromContext(ctx)
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodActions(ctx, pod, podStatus)
logger.V(3).Info("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
if podContainerChanges.CreateSandbox {
ref, err := ref.GetReference(legacyscheme.Scheme, pod)
if err != nil {
logger.Error(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
}
if podContainerChanges.SandboxID != "" {
m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.")
} else {
logger.V(4).Info("SyncPod received new pod, will create a sandbox for it", "pod", klog.KObj(pod))
}
}
// Step 2: Kill the pod if the sandbox has changed.
if podContainerChanges.KillPod {
if podContainerChanges.CreateSandbox {
logger.V(4).Info("Stopping PodSandbox for pod, will start new one", "pod", klog.KObj(pod))
} else {
logger.V(4).Info("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
}
killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
result.AddPodSyncResult(killResult)
if killResult.Error() != nil {
logger.Error(killResult.Error(), "killPodWithSyncResult failed")
return
}
if podContainerChanges.CreateSandbox {
m.purgeInitContainers(ctx, pod, podStatus)
}
} else {
// Step 3: kill any running containers in this pod which are not to keep.
for containerID, containerInfo := range podContainerChanges.ContainersToKill {
logger.V(3).Info("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
result.AddSyncResult(killContainerResult)
if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil, nil); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
logger.Error(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
return
}
}
}
// Keep terminated init containers fairly aggressively controlled.
// This is an optimization, because container removals are typically handled
// by the container garbage collector.
m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
// We pass the value of the PRIMARY podIP and list of podIPs down to
// generatePodSandboxConfig and generateContainerConfig, which in turn
// passes it to various other functions, in order to facilitate functionality
// that requires this value (hosts file and downward API) and avoid races determining
// the pod IP in cases where a container requires restart but the
// podIP isn't in the status manager yet. The list of podIPs is used to
// generate the hosts file.
//
// We default to the IPs in the passed-in pod status, and overwrite them if the
// sandbox needs to be (re)started.
var podIPs []string
if podStatus != nil {
podIPs = podStatus.IPs
}
// Step 4: Create a sandbox for the pod if necessary.
podSandboxID := podContainerChanges.SandboxID
if podContainerChanges.CreateSandbox {
var msg string
var err error
logger.V(4).Info("Creating PodSandbox for pod", "pod", klog.KObj(pod))
metrics.StartedPodsTotal.Inc()
if utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) && pod.Spec.HostUsers != nil && !*pod.Spec.HostUsers {
metrics.StartedUserNamespacedPodsTotal.Inc()
// Failures in user namespace creation could happen at any point in the pod lifecycle,
// but usually will be caught in container creation.
// To avoid specifically handling each error case, loop through the result after the sync finishes
defer func() {
// catch unhandled errors
for _, res := range result.SyncResults {
if res.Error != nil {
metrics.StartedUserNamespacedPodsErrorsTotal.Inc()
return
}
}
// catch handled error
if result.SyncError != nil {
metrics.StartedUserNamespacedPodsErrorsTotal.Inc()
}
}()
}
createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
result.AddSyncResult(createSandboxResult)
// ConvertPodSysctlsVariableToDotsSeparator converts the sysctl variables
// in the Pod.Spec.SecurityContext.Sysctls slice to use a dot as the separator.
// runc uses the dot as the separator when verifying whether a sysctl variable
// belongs to a separate kernel namespace, so when the slash is used as the
// separator, runc returns the error "sysctl is not in a separate kernel namespace"
// and the podSandBox cannot be successfully created. Therefore, before calling runc,
// we need to convert the sysctl variables so that the dot is used as the namespace separator.
// Once runc supports the slash as a sysctl separator, this function will no longer be needed.
sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext)
// Prepare resources allocated by the Dynamic Resource Allocation feature for the pod.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if err := m.runtimeHelper.PrepareDynamicResources(ctx, pod); err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
return
}
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedPrepareDynamicResources, "Failed to prepare dynamic resources: %v", err)
logger.Error(err, "Failed to prepare dynamic resources", "pod", klog.KObj(pod))
return
}
}
podSandboxID, msg, err = m.createPodSandbox(ctx, pod, podContainerChanges.Attempt)
if err != nil {
// createPodSandbox can return an error from CNI, CSI,
// or CRI if the pod has been deleted while the sandbox is
// being created. If the pod has been deleted, then it's
// not a real error.
//
// SyncPod can still be running when we get here, which
// means the PodWorker has not acked the deletion.
if m.podStateProvider.IsPodTerminationRequested(pod.UID) {
logger.V(4).Info("Pod was deleted and sandbox failed to be created", "pod", klog.KObj(pod), "podUID", pod.UID)
return
}
metrics.StartedPodsErrorsTotal.Inc()
createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
logger.Error(err, "CreatePodSandbox for pod failed", "pod", klog.KObj(pod))
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
}
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed to create pod sandbox: %v", err)
return
}
logger.V(4).Info("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
if err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
}
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err)
logger.Error(err, "Failed to get pod sandbox status; Skipping pod", "pod", klog.KObj(pod))
result.Fail(err)
return
}
if resp.GetStatus() == nil {
result.Fail(errors.New("pod sandbox status is nil"))
return
}
// If we ever allow updating a pod from non-host-network to
// host-network, we may use a stale IP.
if !kubecontainer.IsHostNetworkPod(pod) {
// Overwrite the podIPs passed in the pod status, since we just started the pod sandbox.
podIPs = m.determinePodSandboxIPs(ctx, pod.Namespace, pod.Name, resp.GetStatus())
logger.V(4).Info("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
}
}
// The container start routines depend on the pod IP (as in the primary pod IP).
// Instead of checking whether 0 < len(podIPs) every time, we short-circuit it here.
podIP := ""
if len(podIPs) != 0 {
podIP = podIPs[0]
}
// Get podSandboxConfig for containers to start.
configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
result.AddSyncResult(configPodSandboxResult)
podSandboxConfig, err := m.generatePodSandboxConfig(ctx, pod, podContainerChanges.Attempt)
if err != nil {
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
logger.Error(err, "GeneratePodSandboxConfig for pod failed", "pod", klog.KObj(pod))
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
return
}
imageVolumePullResults, err := m.getImageVolumes(ctx, pod, podSandboxConfig, pullSecrets)
if err != nil {
logger.Error(err, "Get image volumes for pod failed", "pod", klog.KObj(pod))
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, err.Error())
return
}
// Helper containing boilerplate common to starting all types of containers.
// typeName is a description used to describe this type of container in log messages,
// currently: "container", "init container" or "ephemeral container"
// metricLabel is the label used to describe this type of container in monitoring metrics.
// currently: "container", "init_container" or "ephemeral_container"
start := func(ctx context.Context, typeName, metricLabel string, spec *startSpec) error {
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
result.AddSyncResult(startContainerResult)
isInBackOff, msg, err := m.doBackOff(ctx, pod, spec.container, podStatus, backOff)
if isInBackOff {
startContainerResult.Fail(err, msg)
logger.V(4).Info("Backing Off restarting container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
return err
}
metrics.StartedContainersTotal.WithLabelValues(metricLabel).Inc()
if sc.HasWindowsHostProcessRequest(pod, spec.container) {
metrics.StartedHostProcessContainersTotal.WithLabelValues(metricLabel).Inc()
}
logger.V(4).Info("Creating container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
// We fail late here so that "ErrImagePull" and "ImagePullBackOff" are reported correctly to the end user.
imageVolumes, err := m.toKubeContainerImageVolumes(ctx, imageVolumePullResults, spec.container, pod, startContainerResult)
if err != nil {
return err
}
// NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs.
msg, err = m.startContainer(ctx, podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs, imageVolumes)
incrementImageVolumeMetrics(err, msg, spec.container, imageVolumes)
if err != nil {
// startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are
// useful to cluster administrators to distinguish "server errors" from "user errors".
metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
if sc.HasWindowsHostProcessRequest(pod, spec.container) {
metrics.StartedHostProcessContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
}
startContainerResult.Fail(err, msg)
// known errors that are logged in other places are logged at higher levels here to avoid
// repetitive log spam
switch {
case err == images.ErrImagePullBackOff:
logger.V(3).Info("Container start failed in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod), "containerMessage", msg, "err", err)
default:
utilruntime.HandleError(fmt.Errorf("%v %v start failed in pod %v: %w: %s", typeName, spec.container.Name, format.Pod(pod), err, msg))
}
return err
}
return nil
}
// Step 5: start ephemeral containers
// These are started "prior" to init containers to allow running ephemeral containers even when there
// are errors starting an init container. In practice init containers will start first since ephemeral
// containers cannot be specified on pod creation.
for _, idx := range podContainerChanges.EphemeralContainersToStart {
start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
}
// Step 6: start init containers.
for _, idx := range podContainerChanges.InitContainersToStart {
container := &pod.Spec.InitContainers[idx]
// Start the next init container.
if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
if podutil.IsRestartableInitContainer(container) {
logger.V(4).Info("Failed to start the restartable init container for the pod, skipping", "initContainerName", container.Name, "pod", klog.KObj(pod))
continue
}
logger.V(4).Info("Failed to initialize the pod, as the init container failed to start, aborting", "initContainerName", container.Name, "pod", klog.KObj(pod))
return
}
// Successfully started the container.
logger.V(4).Info("Completed init container for pod", "containerName", container.Name, "pod", klog.KObj(pod))
}
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); resizable {
if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources {
result.SyncResults = append(result.SyncResults, m.doPodResizeAction(ctx, pod, podStatus, podContainerChanges))
}
}
// Step 8: start containers in podContainerChanges.ContainersToStart.
for _, idx := range podContainerChanges.ContainersToStart {
start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
}
return
}
// incrementImageVolumeMetrics increments the image volume mount metrics
// depending on the provided error and the usage of the image volume mount
// within the container.
func incrementImageVolumeMetrics(err error, msg string, container *v1.Container, imageVolumes kubecontainer.ImageVolumes) {
if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
return
}
metrics.ImageVolumeRequestedTotal.Add(float64(len(imageVolumes)))
for _, m := range container.VolumeMounts {
if _, exists := imageVolumes[m.Name]; exists {
if errors.Is(err, ErrCreateContainer) && strings.HasPrefix(msg, crierror.ErrImageVolumeMountFailed.Error()) {
metrics.ImageVolumeMountedErrorsTotal.Inc()
} else {
metrics.ImageVolumeMountedSucceedTotal.Inc()
}
}
}
}
// imageVolumePulls are the pull results for each image volume name.
type imageVolumePulls = map[string]imageVolumePullResult
// imageVolumePullResult is a pull result for a single image volume.
// If spec is nil, then err and msg should be set.
// If err is nil, then spec should be set.
type imageVolumePullResult struct {
spec *runtimeapi.ImageSpec
err error
msg string
}
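// toKubeContainerImageVolumes maps the image volume pull results onto the
// container's volume mounts. Pull failures are recorded as container events,
// and the last error encountered (if any) fails the sync result and is returned.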
func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(ctx context.Context, imageVolumePullResults imageVolumePulls, container *v1.Container, pod *v1.Pod, syncResult *kubecontainer.SyncResult) (kubecontainer.ImageVolumes, error) {
if len(imageVolumePullResults) == 0 {
return nil, nil
}
imageVolumes := kubecontainer.ImageVolumes{}
var (
lastErr error
lastMsg string
)
for _, v := range container.VolumeMounts {
res, ok := imageVolumePullResults[v.Name]
if !ok {
continue
}
if res.err != nil {
s, _ := grpcstatus.FromError(res.err)
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
lastErr = res.err
lastMsg = res.msg
continue
}
imageVolumes[v.Name] = res.spec
}
if lastErr != nil {
syncResult.Fail(lastErr, lastMsg)
return nil, lastErr
}
return imageVolumes, nil
}
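// getImageVolumes ensures that the image of every image volume in the pod spec
// has been pulled, returning a per-volume map of image specs or pull errors.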
func (m *kubeGenericRuntimeManager) getImageVolumes(ctx context.Context, pod *v1.Pod, podSandboxConfig *runtimeapi.PodSandboxConfig, pullSecrets []v1.Secret) (imageVolumePulls, error) {
logger := klog.FromContext(ctx)
if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
return nil, nil
}
podRuntimeHandler, err := m.getPodRuntimeHandler(pod)
if err != nil {
logger.Error(err, "Failed to get pod runtime handler", "pod", klog.KObj(pod))
return nil, err
}
res := make(imageVolumePulls)
for _, volume := range pod.Spec.Volumes {
if volume.Image == nil {
continue
}
objectRef, _ := ref.GetReference(legacyscheme.Scheme, pod) // objectRef can be nil, no error check required
ref, msg, err := m.imagePuller.EnsureImageExists(
ctx, objectRef, pod, volume.Image.Reference, pullSecrets, podSandboxConfig, podRuntimeHandler, volume.Image.PullPolicy,
)
if err != nil {
logger.Error(err, "Failed to ensure image", "pod", klog.KObj(pod))
res[volume.Name] = imageVolumePullResult{err: err, msg: msg}
continue
}
logger.V(4).Info("Pulled image", "ref", ref, "pod", klog.KObj(pod))
res[volume.Name] = imageVolumePullResult{spec: &runtimeapi.ImageSpec{
Image: ref,
UserSpecifiedImage: volume.Image.Reference,
RuntimeHandler: podRuntimeHandler,
Annotations: pod.Annotations,
}}
}
return res, nil
}
// doBackOff reports whether the container is still in backoff. If it is, doBackOff
// also returns a detailed message and a brief backoff error.
func (m *kubeGenericRuntimeManager) doBackOff(ctx context.Context, pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
logger := klog.FromContext(ctx)
var cStatus *kubecontainer.Status
for _, c := range podStatus.ContainerStatuses {
if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
cStatus = c
break
}
}
if cStatus == nil {
return false, "", nil
}
logger.V(3).Info("Checking backoff for container in pod", "containerName", container.Name, "pod", klog.KObj(pod))
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
ts := cStatus.FinishedAt
// backOff requires a unique key to identify the container.
key := GetBackoffKey(pod, container)
if backOff.IsInBackOffSince(key, ts) {
if containerRef, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
m.recorder.Eventf(containerRef, v1.EventTypeWarning, events.BackOffStartContainer,
fmt.Sprintf("Back-off restarting failed container %s in pod %s", container.Name, format.Pod(pod)))
}
err := fmt.Errorf("back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
logger.V(3).Info("Back-off restarting failed container", "err", err.Error())
return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
}
backOff.Next(key, ts)
return false, "", nil
}
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
return err.Error()
}
// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
logger := klog.FromContext(ctx)
killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
for _, containerResult := range killContainerResults {
result.AddSyncResult(containerResult)
}
// stop sandbox, the sandbox will be removed in GarbageCollect
killSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, runningPod.ID)
result.AddSyncResult(killSandboxResult)
// Stop all sandboxes belonging to the same pod.
for _, podSandbox := range runningPod.Sandboxes {
if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
logger.Error(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
}
}
return
}
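// GeneratePodStatus builds a kubecontainer.PodStatus from a CRI container event
// response, with the container statuses sorted by creation time.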
func (m *kubeGenericRuntimeManager) GeneratePodStatus(event *runtimeapi.ContainerEventResponse) *kubecontainer.PodStatus {
ctx := context.TODO() // This context will be passed as parameter in the future
podIPs := m.determinePodSandboxIPs(ctx, event.PodSandboxStatus.Metadata.Namespace, event.PodSandboxStatus.Metadata.Name, event.PodSandboxStatus)
kubeContainerStatuses := []*kubecontainer.Status{}
for _, status := range event.ContainersStatuses {
kubeContainerStatuses = append(kubeContainerStatuses, m.convertToKubeContainerStatus(ctx, status))
}
sort.Sort(containerStatusByCreated(kubeContainerStatuses))
return &kubecontainer.PodStatus{
ID: kubetypes.UID(event.PodSandboxStatus.Metadata.Uid),
Name: event.PodSandboxStatus.Metadata.Name,
Namespace: event.PodSandboxStatus.Metadata.Namespace,
IPs: podIPs,
SandboxStatuses: []*runtimeapi.PodSandboxStatus{event.PodSandboxStatus},
ContainerStatuses: kubeContainerStatuses,
}
}
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
logger := klog.FromContext(ctx)
// Now we retain the restart count of a container as a container label. Each time a container
// restarts, we read the restart count from the registered dead container, increment
// it to get the new restart count, and then add a label with the new restart count to
// the newly started container.
// However, there are some limitations of this method:
// 1. When all dead containers have been garbage collected, the container status could
// not get the historical value and would be *inaccurate*. Fortunately, the chance
// is really slim.
// 2. When working with older containers which have no restart count label,
// we can only assume their restart count is 0.
// Anyhow, we only promised "best-effort" restart count reporting, so we can just ignore
// these limitations for now.
// TODO: move this comment to SyncPod.
podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil)
if err != nil {
return nil, err
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
UID: uid,
},
}
podFullName := format.Pod(pod)
logger.V(4).Info("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod))
sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
containerStatuses := []*kubecontainer.Status{}
activeContainerStatuses := []*kubecontainer.Status{}
timestamp := time.Now()
podIPs := []string{}
var activePodSandboxID string
for idx, podSandboxID := range podSandboxIDs {
resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
// Between the list (getSandboxIDByPodUID) and the check (PodSandboxStatus), another thread might remove a sandbox, and that is normal.
// The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing.
// Therefore, this method should not either, but instead act as if the previous call failed,
// which means the error should be ignored.
if crierror.IsNotFound(err) {
continue
}
if err != nil {
logger.Error(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
return nil, err
}
if resp.GetStatus() == nil {
return nil, errors.New("pod sandbox status is nil")
}
sandboxStatuses = append(sandboxStatuses, resp.Status)
// Only get pod IP from latest sandbox
if idx == 0 && resp.Status.State == runtimeapi.PodSandboxState_SANDBOX_READY {
podIPs = m.determinePodSandboxIPs(ctx, namespace, name, resp.Status)
activePodSandboxID = podSandboxID
}
if idx == 0 && utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
if resp.Timestamp == 0 {
// If the Evented PLEG is enabled in the kubelet but not in the runtime,
// then the pod status we get will not have the timestamp set.
// For example, the CI job 'pull-kubernetes-e2e-gce-alpha-features' runs with
// alpha feature gates enabled, which includes the Evented PLEG, but uses a
// runtime without Evented PLEG support.
logger.V(4).Info("Runtime does not set pod status timestamp", "pod", klog.KObj(pod))
containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
if err != nil {
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
}
return nil, err
}
} else {
// Get the statuses of all containers visible to the pod and
// timestamp from sandboxStatus.
timestamp = time.Unix(0, resp.Timestamp)
for _, cs := range resp.ContainersStatuses {
cStatus := m.convertToKubeContainerStatus(ctx, cs)
containerStatuses = append(containerStatuses, cStatus)
}
}
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
// Get statuses of all containers visible in the pod.
containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
if err != nil {
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
}
return nil, err
}
}
m.logReduction.ClearID(podFullName)
return &kubecontainer.PodStatus{
ID: uid,
Name: name,
Namespace: namespace,
IPs: podIPs,
SandboxStatuses: sandboxStatuses,
ContainerStatuses: containerStatuses,
ActiveContainerStatuses: activeContainerStatuses,
TimeStamp: timestamp,
}, nil
}
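// GetContainerStatus returns the converted status of the container identified
// by id, as reported by the runtime.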
func (m *kubeGenericRuntimeManager) GetContainerStatus(ctx context.Context, id kubecontainer.ContainerID) (*kubecontainer.Status, error) {
resp, err := m.runtimeService.ContainerStatus(ctx, id.ID, false)
if err != nil {
return nil, fmt.Errorf("runtime container status: %w", err)
}
return m.convertToKubeContainerStatus(ctx, resp.GetStatus()), nil
}
// GarbageCollect removes dead containers using the specified container gc policy.
func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
return m.containerGC.GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
}
// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
// with the podCIDR supplied by the kubelet.
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
logger := klog.FromContext(ctx)
// TODO(#35531): do we really want to write a method on this manager for each
// field of the config?
logger.Info("Updating runtime config through cri with podcidr", "CIDR", podCIDR)
return m.runtimeService.UpdateRuntimeConfig(ctx,
&runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: podCIDR,
},
})
}
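// CheckpointContainer forwards the checkpoint request to the CRI runtime service.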
func (m *kubeGenericRuntimeManager) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
return m.runtimeService.CheckpointContainer(ctx, options)
}
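// ListMetricDescriptors is a passthrough to the runtime service's metric descriptor listing.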
func (m *kubeGenericRuntimeManager) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
return m.runtimeService.ListMetricDescriptors(ctx)
}
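// ListPodSandboxMetrics is a passthrough to the runtime service's pod sandbox metrics listing.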
func (m *kubeGenericRuntimeManager) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
return m.runtimeService.ListPodSandboxMetrics(ctx)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"path/filepath"
"reflect"
goruntime "runtime"
"sort"
"strings"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
noopoteltrace "go.opentelemetry.io/otel/trace/noop"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/util/flowcontrol"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
apitest "k8s.io/cri-api/pkg/apis/testing"
crierror "k8s.io/cri-api/pkg/errors"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
cmtesting "k8s.io/kubernetes/pkg/kubelet/cm/testing"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
imagetypes "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/metrics"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)
var (
fakeCreatedAt int64 = 1
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
)
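// createTestRuntimeManager returns a fake runtime service, a fake image service,
// and a runtime manager wired to both, for use in tests.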
func createTestRuntimeManager(ctx context.Context) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
return createTestRuntimeManagerWithErrors(ctx, nil)
}
func createTestRuntimeManagerWithErrors(ctx context.Context, errors map[string][]error) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
fakeRuntimeService := apitest.NewFakeRuntimeService()
if errors != nil {
fakeRuntimeService.Errors = errors
}
fakeImageService := apitest.NewFakeImageService()
// Only an empty machineInfo is needed here, because in unit tests all containers are best-effort
// and the data in machineInfo is not used. If burstable containers are used in unit tests in the
// future, we may want to set the memory capacity.
memoryCapacityQuantity := resource.MustParse(fakeNodeAllocatableMemory)
machineInfo := &cadvisorapi.MachineInfo{
MemoryCapacity: uint64(memoryCapacityQuantity.Value()),
}
osInterface := &containertest.FakeOS{}
manager, err := newFakeKubeRuntimeManager(ctx, fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, noopoteltrace.NewTracerProvider().Tracer(""))
return fakeRuntimeService, fakeImageService, manager, err
}
// sandboxTemplate is a sandbox template used to create a fake sandbox.
type sandboxTemplate struct {
pod *v1.Pod
attempt uint32
createdAt int64
state runtimeapi.PodSandboxState
running bool
terminating bool
}
// containerTemplate is a container template used to create a fake container.
type containerTemplate struct {
pod *v1.Pod
container *v1.Container
sandboxAttempt uint32
attempt int
createdAt int64
state runtimeapi.ContainerState
}
// makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and
// one fake container for each of its containers.
func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService,
pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
pod: pod,
createdAt: fakeCreatedAt,
state: runtimeapi.PodSandboxState_SANDBOX_READY,
})
var containers []*apitest.FakeContainer
newTemplate := func(c *v1.Container) containerTemplate {
return containerTemplate{
pod: pod,
container: c,
createdAt: fakeCreatedAt,
state: runtimeapi.ContainerState_CONTAINER_RUNNING,
}
}
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
containers = append(containers, makeFakeContainer(t, m, newTemplate(c)))
return true
})
fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{sandbox})
fakeRuntime.SetFakeContainers(containers)
return sandbox, containers
}
// makeFakePodSandbox creates a fake pod sandbox based on a sandbox template.
func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox {
tCtx := ktesting.Init(t)
config, err := m.generatePodSandboxConfig(tCtx, template.pod, template.attempt)
assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template)
podSandboxID := apitest.BuildSandboxName(config.Metadata)
podSandBoxStatus := &apitest.FakePodSandbox{
PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: podSandboxID,
Metadata: config.Metadata,
State: template.state,
CreatedAt: template.createdAt,
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: apitest.FakePodSandboxIPs[0],
},
Labels: config.Labels,
},
}
// assign additional IPs
additionalIPs := apitest.FakePodSandboxIPs[1:]
additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs))
for _, ip := range additionalIPs {
additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{
Ip: ip,
})
}
if len(additionalPodIPs) > 0 {
podSandBoxStatus.Network.AdditionalIps = additionalPodIPs
}
return podSandBoxStatus
}
// makeFakePodSandboxes creates a group of fake pod sandboxes based on the sandbox templates.
// The function guarantees the order of the fake pod sandboxes is the same as that of the templates.
func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates []sandboxTemplate) []*apitest.FakePodSandbox {
var fakePodSandboxes []*apitest.FakePodSandbox
for _, template := range templates {
fakePodSandboxes = append(fakePodSandboxes, makeFakePodSandbox(t, m, template))
}
return fakePodSandboxes
}
// makeFakeContainer creates a fake container based on a container template.
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
tCtx := ktesting.Init(t)
sandboxConfig, err := m.generatePodSandboxConfig(tCtx, template.pod, template.sandboxAttempt)
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
containerConfig, _, err := m.generateContainerConfig(tCtx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil, nil)
assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
imageRef := containerConfig.Image.Image
return &apitest.FakeContainer{
ContainerStatus: runtimeapi.ContainerStatus{
Id: containerID,
Metadata: containerConfig.Metadata,
Image: containerConfig.Image,
ImageRef: imageRef,
CreatedAt: template.createdAt,
State: template.state,
Labels: containerConfig.Labels,
Annotations: containerConfig.Annotations,
LogPath: filepath.Join(sandboxConfig.GetLogDirectory(), containerConfig.GetLogPath()),
},
SandboxID: podSandboxID,
}
}
// makeFakeContainers creates a group of fake containers based on the container templates.
// The function guarantees the order of the fake containers is the same as that of the templates.
func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []containerTemplate) []*apitest.FakeContainer {
var fakeContainers []*apitest.FakeContainer
for _, template := range templates {
fakeContainers = append(fakeContainers, makeFakeContainer(t, m, template))
}
return fakeContainers
}
// makeTestContainer creates a test api container.
func makeTestContainer(name, image string) v1.Container {
return v1.Container{
Name: name,
Image: image,
}
}
// makeTestPod creates a test api pod.
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(podUID),
Name: podName,
Namespace: podNamespace,
},
Spec: v1.PodSpec{
Containers: containers,
},
}
}
// verifyPods returns true if the two pod slices are equal.
func verifyPods(a, b []*kubecontainer.Pod) bool {
if len(a) != len(b) {
return false
}
// Sort the containers within a pod.
for i := range a {
sort.Sort(containersByID(a[i].Containers))
}
for i := range b {
sort.Sort(containersByID(b[i].Containers))
}
// Sort the pods by UID.
sort.Sort(podsByID(a))
sort.Sort(podsByID(b))
return reflect.DeepEqual(a, b)
}
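// verifyFakeContainerList returns the set of container IDs present in the fake
// runtime and whether that set equals the expected set.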
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.Set[string]) (sets.Set[string], bool) {
actual := sets.New[string]()
for _, c := range fakeRuntime.Containers {
actual.Insert(c.Id)
}
return actual, actual.Equal(expected)
}
// cRecord captures only the container status fields of interest.
type cRecord struct {
name string
attempt uint32
state runtimeapi.ContainerState
}
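// cRecordList implements sort.Interface, ordering records by container name
// and then by attempt number.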
type cRecordList []*cRecord
func (b cRecordList) Len() int { return len(b) }
func (b cRecordList) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b cRecordList) Less(i, j int) bool {
if b[i].name != b[j].name {
return b[i].name < b[j].name
}
return b[i].attempt < b[j].attempt
}
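// verifyContainerStatuses asserts that the containers in the fake runtime
// match the expected records, ignoring ordering. A minimal usage sketch (the
// expectation below is hypothetical, not tied to any fixture in this file):
//
//	expected := []*cRecord{
//		{name: "foo1", attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
//	}
//	verifyContainerStatuses(t, fakeRuntime, expected, "single running container")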
func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService, expected []*cRecord, desc string) {
actual := []*cRecord{}
for _, cStatus := range runtime.Containers {
actual = append(actual, &cRecord{name: cStatus.Metadata.Name, attempt: cStatus.Metadata.Attempt, state: cStatus.State})
}
sort.Sort(cRecordList(expected))
sort.Sort(cRecordList(actual))
assert.Equal(t, expected, actual, desc)
}
func TestNewKubeRuntimeManager(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, _, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
}
func TestVersion(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
version, err := m.Version(tCtx)
assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, version.String())
}
func TestContainerRuntimeType(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
runtimeType := m.Type()
assert.Equal(t, apitest.FakeRuntimeName, runtimeType)
}
func TestGetPodStatus(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
},
}
// Set the fake sandbox and fake containers in fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
assert.Equal(t, pod.UID, podStatus.ID)
assert.Equal(t, pod.Name, podStatus.Name)
assert.Equal(t, pod.Namespace, podStatus.Namespace)
assert.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs)
}
func TestStopContainerWithNotFoundError(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
},
}
// Set the fake sandbox and fake containers in fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus)
gracePeriod := int64(1)
err = m.KillPod(tCtx, pod, p, &gracePeriod)
require.NoError(t, err)
}
func TestGetPodStatusWithNotFoundError(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
},
}
// Set the fake sandbox and fake containers in fakeRuntime.
makeAndSetFakePod(t, m, fakeRuntime, pod)
fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container"))
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
require.Equal(t, pod.UID, podStatus.ID)
require.Equal(t, pod.Name, podStatus.Name)
require.Equal(t, pod.Namespace, podStatus.Namespace)
require.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs)
}
func TestGetPods(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
},
{
Name: "foo2",
Image: "busybox",
},
},
},
}
// Set fake sandbox and fake containers to fakeRuntime.
fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
// Convert the fakeContainers to kubecontainer.Container
containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers {
fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(tCtx, &runtimeapi.Container{
Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata,
State: fakeContainer.State,
Image: fakeContainer.Image,
ImageRef: fakeContainer.ImageRef,
Labels: fakeContainer.Labels,
Annotations: fakeContainer.Annotations,
})
if err != nil {
t.Fatalf("unexpected error %v", err)
}
containers[i] = c
}
// Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
Id: fakeSandbox.Id,
Metadata: fakeSandbox.Metadata,
State: fakeSandbox.State,
CreatedAt: fakeSandbox.CreatedAt,
Labels: fakeSandbox.Labels,
Annotations: fakeSandbox.Annotations,
})
if err != nil {
t.Fatalf("unexpected error %v", err)
}
expected := []*kubecontainer.Pod{
{
ID: types.UID("12345678"),
Name: "foo",
Namespace: "new",
CreatedAt: uint64(fakeSandbox.CreatedAt),
Containers: []*kubecontainer.Container{containers[0], containers[1]},
Sandboxes: []*kubecontainer.Container{sandbox},
},
}
actual, err := m.GetPods(tCtx, false)
assert.NoError(t, err)
if !verifyPods(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}
func TestGetPodsSorted(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}}
createdTimestamps := []uint64{10, 5, 20}
fakeSandboxes := []*apitest.FakePodSandbox{}
for i, createdAt := range createdTimestamps {
pod.UID = types.UID(fmt.Sprint(i))
fakeSandboxes = append(fakeSandboxes, makeFakePodSandbox(t, m, sandboxTemplate{
pod: pod,
createdAt: int64(createdAt),
state: runtimeapi.PodSandboxState_SANDBOX_READY,
}))
}
fakeRuntime.SetFakeSandboxes(fakeSandboxes)
actual, err := m.GetPods(tCtx, false)
assert.NoError(t, err)
assert.Len(t, actual, 3)
// Verify that the pods are sorted by creation time, newest (largest timestamp) first.
assert.Equal(t, createdTimestamps[2], actual[0].CreatedAt)
assert.Equal(t, createdTimestamps[0], actual[1].CreatedAt)
assert.Equal(t, createdTimestamps[1], actual[2].CreatedAt)
}
func TestKillPod(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
},
{
Name: "foo2",
Image: "busybox",
},
},
EphemeralContainers: []v1.EphemeralContainer{
{
EphemeralContainerCommon: v1.EphemeralContainerCommon{
Name: "debug",
Image: "busybox",
},
},
},
},
}
// Set fake sandbox and fake containers to fakeRuntime.
fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
// Convert the fakeContainers to kubecontainer.Container
containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers {
fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(tCtx, &runtimeapi.Container{
Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata,
State: fakeContainer.State,
Image: fakeContainer.Image,
ImageRef: fakeContainer.ImageRef,
Labels: fakeContainer.Labels,
})
if err != nil {
t.Fatalf("unexpected error %v", err)
}
containers[i] = c
}
runningPod := kubecontainer.Pod{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
Containers: []*kubecontainer.Container{containers[0], containers[1], containers[2]},
Sandboxes: []*kubecontainer.Container{
{
ID: kubecontainer.ContainerID{
ID: fakeSandbox.Id,
Type: apitest.FakeRuntimeName,
},
},
},
}
err = m.KillPod(tCtx, pod, runningPod, nil)
assert.NoError(t, err)
assert.Len(t, fakeRuntime.Containers, 3)
assert.Len(t, fakeRuntime.Sandboxes, 1)
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
}
}
func TestSyncPod(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, fakeImage, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Len(t, fakeRuntime.Containers, 2)
assert.Len(t, fakeImage.Images, 2)
assert.Len(t, fakeRuntime.Sandboxes, 1)
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
}
}
func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
containers := []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
securityContext := &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
{
Name: "kernel/shm_rmid_forced",
Value: "1",
},
{
Name: "net/ipv4/ip_local_port_range",
Value: "1024 65535",
},
},
}
expectedSysctls := []v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
{
Name: "net.ipv4.ip_local_port_range",
Value: "1024 65535",
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
SecurityContext: securityContext,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, expectedSysctls, pod.Spec.SecurityContext.Sysctls)
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
}
}
func TestPruneInitContainers(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
init1 := makeTestContainer("init1", "busybox")
init2 := makeTestContainer("init2", "busybox")
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{init1, init2},
},
}
templates := []containerTemplate{
{pod: pod, container: &init1, attempt: 3, createdAt: 3, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
}
fakes := makeFakeContainers(t, m, templates)
fakeRuntime.SetFakeContainers(fakes)
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
m.pruneInitContainersBeforeStart(tCtx, pod, podStatus)
expectedContainers := sets.New[string](fakes[0].Id, fakes[2].Id)
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %v, got %v", expectedContainers, actual)
}
}
func TestSyncPodWithInitContainers(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
initContainers := []v1.Container{
{
Name: "init1",
Image: "init",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
InitContainers: initContainers,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
// 1. should only create the init container.
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
expected := []*cRecord{
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
}
verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")
// 2. should not create the app containers because the init container is still running.
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")
// 3. should create all app containers because the init container finished.
// Stop init container instance 0.
sandboxIDs, err := m.getSandboxIDByPodUID(tCtx, pod.UID, nil)
require.NoError(t, err)
sandboxID := sandboxIDs[0]
initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
require.NoError(t, err)
err = fakeRuntime.StopContainer(tCtx, initID0, 0)
require.NoError(t, err)
// Sync again.
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
expected = []*cRecord{
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
}
verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running")
// 4. should restart the init container when a new pod sandbox needs to be created.
// Stop the pod sandbox.
err = fakeRuntime.StopPodSandbox(tCtx, sandboxID)
require.NoError(t, err)
// Sync again.
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
expected = []*cRecord{
// The first init container instance is purged and no longer visible.
// The second (attempt == 1) instance has been started and is running.
{name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
// All containers are killed.
{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
}
verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one")
}
// makeBasePodAndStatus is a helper that returns a basic pod and its status,
// assuming the sandbox and all containers are running and ready.
func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "foo-ns",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
},
{
Name: "foo2",
Image: "busybox",
},
{
Name: "foo3",
Image: "busybox",
},
},
},
Status: v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
ContainerID: "://id1",
Name: "foo1",
Image: "busybox",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
{
ContainerID: "://id2",
Name: "foo2",
Image: "busybox",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
{
ContainerID: "://id3",
Name: "foo3",
Image: "busybox",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
},
},
}
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
SandboxStatuses: []*runtimeapi.PodSandboxStatus{
{
Id: "sandboxID",
State: runtimeapi.PodSandboxState_SANDBOX_READY,
Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)},
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
},
},
ContainerStatuses: []*kubecontainer.Status{
{
ID: kubecontainer.ContainerID{ID: "id1"},
Name: "foo1", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.Containers[0]),
},
{
ID: kubecontainer.ContainerID{ID: "id2"},
Name: "foo2", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.Containers[1]),
},
{
ID: kubecontainer.ContainerID{ID: "id3"},
Name: "foo3", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.Containers[2]),
},
},
}
return pod, status
}
func TestComputePodActions(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
// Create a reference pod and status for the test cases to refer to
// specific fields.
basePod, baseStatus := makeBasePodAndStatus()
noAction := podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
}
for desc, test := range map[string]struct {
mutatePodFn func(*v1.Pod)
mutateStatusFn func(*kubecontainer.PodStatus)
actions podActions
resetStatusFn func(*kubecontainer.PodStatus)
}{
"everything is good; do nothing": {
actions: noAction,
},
"start pod sandbox and all containers for a new pod": {
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// No container or sandbox exists.
status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
status.ContainerStatuses = []*kubecontainer.Status{}
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
Attempt: uint32(0),
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"restart exited containers if RestartPolicy == Always": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container exited with failure; restart it.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{0, 1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"restart failed containers if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; don't restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container exited with failure; restart it.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"restart created but not started containers if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; don't restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container was created, but never started.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateCreated
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"don't restart containers if RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// Don't restart any containers.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: noAction,
},
"Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 0
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{0, 2},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"Kill pod and recreate all containers if the PodSandbox does not have an IP": {
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].Network.Ip = ""
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"Kill and recreate the container if the container's spec changed": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[1].Hash = uint64(432423432)
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
ContainersToStart: []int{1},
},
},
"Kill and recreate the container if the liveness check has failed": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod)
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
ContainersToStart: []int{1},
},
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.livenessManager.Remove(status.ContainerStatuses[1].ID)
},
},
"Kill and recreate the container if the startup check has failed": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod)
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
ContainersToStart: []int{1},
},
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.startupManager.Remove(status.ContainerStatuses[1].ID)
},
},
"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// no ready sandbox
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
// all containers exited
for i := range status.ContainerStatuses {
status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[i].ExitCode = 0
}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(2),
CreateSandbox: false,
KillPod: true,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=OnFailure and all containers succeeded": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// no ready sandbox
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
// all containers succeeded
for i := range status.ContainerStatuses {
status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[i].ExitCode = 0
}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(2),
CreateSandbox: false,
KillPod: true,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
"Verify we create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and no containers have ever been created": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// no ready sandbox
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.SandboxStatuses[0].Metadata.Attempt = uint32(2)
// no visible containers
status.ContainerStatuses = []*kubecontainer.Status{}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(3),
CreateSandbox: true,
KillPod: true,
ContainersToStart: []int{0, 1, 2},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
"Kill and recreate the container if the container is in unknown state": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[1].State = kubecontainer.ContainerStateUnknown
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
ContainersToStart: []int{1},
},
},
"Restart the container if the container is in created state": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[1].State = kubecontainer.ContainerStateCreated
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
ContainersToStart: []int{1},
},
},
} {
pod, status := makeBasePodAndStatus()
if test.mutatePodFn != nil {
test.mutatePodFn(pod)
}
if test.mutateStatusFn != nil {
test.mutateStatusFn(status)
}
tCtx := ktesting.Init(t)
actions := m.computePodActions(tCtx, pod, status)
verifyActions(t, &test.actions, &actions, desc)
if test.resetStatusFn != nil {
test.resetStatusFn(status)
}
}
}
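// getKillMap builds the expected containersToKill map for the regular
// containers at the given indexes.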
func getKillMap(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
m := map[kubecontainer.ContainerID]containerToKillInfo{}
for _, i := range cIndexes {
m[status.ContainerStatuses[i].ID] = containerToKillInfo{
container: &pod.Spec.Containers[i],
name: pod.Spec.Containers[i].Name,
}
}
return m
}
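// getKillMapWithInitContainers builds the expected containersToKill map for
// the init containers at the given indexes.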
func getKillMapWithInitContainers(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
m := map[kubecontainer.ContainerID]containerToKillInfo{}
for _, i := range cIndexes {
m[status.ContainerStatuses[i].ID] = containerToKillInfo{
container: &pod.Spec.InitContainers[i],
name: pod.Spec.InitContainers[i].Name,
}
}
return m
}
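// modifyKillMapContainerImage replaces the container image of the kill-map
// entries at the given indexes with the corresponding image names, deep-copying
// each container so that the original pod spec is left untouched.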
func modifyKillMapContainerImage(containersToKill map[kubecontainer.ContainerID]containerToKillInfo, status *kubecontainer.PodStatus, cIndexes []int, imageNames []string) map[kubecontainer.ContainerID]containerToKillInfo {
for idx, i := range cIndexes {
containerKillInfo := containersToKill[status.ContainerStatuses[i].ID]
updatedContainer := containerKillInfo.container.DeepCopy()
updatedContainer.Image = imageNames[idx]
containersToKill[status.ContainerStatuses[i].ID] = containerToKillInfo{
container: updatedContainer,
name: containerKillInfo.name,
}
}
return containersToKill
}
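// verifyActions asserts that the computed pod actions match the expected
// actions; the kill message and reason fields are intentionally not verified.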
func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
if actual.ContainersToKill != nil {
// Clear the message and reason fields since we don't need to verify them.
for k, info := range actual.ContainersToKill {
info.message = ""
info.reason = ""
actual.ContainersToKill[k] = info
}
}
if expected.ContainersToUpdate == nil && actual.ContainersToUpdate != nil {
// No need to distinguish empty and nil maps for the test.
expected.ContainersToUpdate = map[v1.ResourceName][]containerToUpdateInfo{}
}
assert.Equal(t, expected, actual, desc)
}
func TestComputePodActionsWithInitContainers(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
// Create a reference pod and status for the test cases to refer to
// specific fields.
basePod, baseStatus := makeBasePodAndStatusWithInitContainers()
noAction := podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
}
for desc, test := range map[string]struct {
mutatePodFn func(*v1.Pod)
mutateStatusFn func(*kubecontainer.PodStatus)
actions podActions
}{
"initialization completed; start all containers": {
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"no init containers have been started; start the first one": {
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses = nil
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"initialization in progress; do nothing": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
},
actions: noAction,
},
"Kill pod and restart the first init container if the pod sandbox is dead": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"initialization failed; restart the last init container if RestartPolicy == Always": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"initialization failed; restart the last init container if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"initialization failed; kill pod if RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
KillPod: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"init container state unknown; kill and recreate the last init container if RestartPolicy == Always": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
},
},
"init container state unknown; kill and recreate the last init container if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
},
},
"init container state unknown; kill pod if RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
},
actions: podActions{
KillPod: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, init container failed, but RestartPolicy == Never; kill pod only": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
},
actions: podActions{
KillPod: true,
CreateSandbox: false,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, and RestartPolicy == Never, but no visible init containers; create a new pod sandbox": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses = []*kubecontainer.Status{}
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"some of the init container statuses are missing but the last init container is running, don't restart preceding ones": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
status.ContainerStatuses = status.ContainerStatuses[2:]
},
actions: podActions{
KillPod: false,
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"an init container is in the created state due to an unknown error when starting container; restart it": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateCreated
},
actions: podActions{
KillPod: false,
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
} {
t.Run(desc, func(t *testing.T) {
pod, status := makeBasePodAndStatusWithInitContainers()
if test.mutatePodFn != nil {
test.mutatePodFn(pod)
}
if test.mutateStatusFn != nil {
test.mutateStatusFn(status)
}
tCtx := ktesting.Init(t)
actions := m.computePodActions(tCtx, pod, status)
verifyActions(t, &test.actions, &actions, desc)
})
}
}
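// makeBasePodAndStatusWithInitContainers returns the base pod and status
// extended with three exited init containers.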
func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
pod, status := makeBasePodAndStatus()
pod.Spec.InitContainers = []v1.Container{
{
Name: "init1",
Image: "bar-image",
},
{
Name: "init2",
Image: "bar-image",
},
{
Name: "init3",
Image: "bar-image",
},
}
// Replace the original statuses of the containers with those for the init
// containers.
status.ContainerStatuses = []*kubecontainer.Status{
{
ID: kubecontainer.ContainerID{ID: "initid1"},
Name: "init1", State: kubecontainer.ContainerStateExited,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
},
{
ID: kubecontainer.ContainerID{ID: "initid2"},
Name: "init2", State: kubecontainer.ContainerStateExited,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
},
{
ID: kubecontainer.ContainerID{ID: "initid3"},
Name: "init3", State: kubecontainer.ContainerStateExited,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
},
}
return pod, status
}
func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
// Create a reference pod and status for the test cases to refer to
// specific fields.
basePod, baseStatus := makeBasePodAndStatusWithRestartableInitContainers()
noAction := podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
}
for desc, test := range map[string]struct {
mutatePodFn func(*v1.Pod)
mutateStatusFn func(*v1.Pod, *kubecontainer.PodStatus)
actions podActions
resetStatusFn func(*kubecontainer.PodStatus)
}{
"initialization completed; start all containers": {
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"no init containers have been started; start the first one": {
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses = nil
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"an init container is stuck in the created state; restart it": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateCreated
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"restartable init container has started; start the next": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses = status.ContainerStatuses[:1]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{1},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"livenessProbe has not been run; start the nothing": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.livenessManager.Remove(status.ContainerStatuses[1].ID)
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"livenessProbe in progress; start the next": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod)
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.livenessManager.Remove(status.ContainerStatuses[1].ID)
},
},
"livenessProbe has completed; start the next": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"kill and recreate the restartable init container if the liveness check has failed": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod)
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
ContainersToStart: []int{0, 1, 2},
},
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.livenessManager.Remove(status.ContainerStatuses[2].ID)
},
},
"startupProbe has not been run; do nothing": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.startupManager.Remove(status.ContainerStatuses[1].ID)
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: noAction,
},
"startupProbe in progress; do nothing": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod)
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: noAction,
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.startupManager.Remove(status.ContainerStatuses[1].ID)
},
},
"startupProbe has completed; start the next": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses = status.ContainerStatuses[:2]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"kill and recreate the restartable init container if the startup check has failed": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
pod.Spec.InitContainers[2].StartupProbe = &v1.Probe{}
},
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod)
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
ContainersToStart: []int{},
},
resetStatusFn: func(status *kubecontainer.PodStatus) {
m.startupManager.Remove(status.ContainerStatuses[2].ID)
},
},
"kill and recreate the restartable init container if the container definition changes": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
pod.Spec.InitContainers[2].Image = "foo-image"
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToKill: modifyKillMapContainerImage(getKillMapWithInitContainers(basePod, baseStatus, []int{2}), baseStatus, []int{2}, []string{"foo-image"}),
ContainersToStart: []int{0, 1, 2},
},
},
"restart terminated restartable init container and next init container": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{0, 2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"restart terminated restartable init container and regular containers": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{0},
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, restartable init container failed, but RestartPolicy == Never; kill pod only": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
KillPod: true,
CreateSandbox: false,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, and RestartPolicy == Never, but no visible restartable init containers; create a new pod sandbox": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses = []*kubecontainer.Status{}
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, restartable init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Pod sandbox not ready, restartable init container failed, and RestartPolicy == Always; create a new pod sandbox": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"initialization failed; restart the last restartable init container even if pod's RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].ExitCode = 137
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"restartable init container state unknown; kill and recreate the last restartable init container even if pod's RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
},
},
"restart restartable init container if regular containers are running even if pod's RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[2].ExitCode = 137
// all main containers are running
for i := 1; i <= 3; i++ {
status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)},
Name: fmt.Sprintf("foo%d", i),
State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]),
})
}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{2},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"kill the pod if all main containers succeeded if pod's RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
// all main containers succeeded
for i := 1; i <= 3; i++ {
status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)},
Name: fmt.Sprintf("foo%d", i),
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]),
})
}
},
actions: podActions{
KillPod: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"some of the init container statuses are missing but the last init container is running, restart restartable init and regular containers": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
status.ContainerStatuses = status.ContainerStatuses[2:]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
InitContainersToStart: []int{0, 1},
ContainersToStart: []int{0, 1, 2},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
} {
pod, status := makeBasePodAndStatusWithRestartableInitContainers()
m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
if test.mutatePodFn != nil {
test.mutatePodFn(pod)
}
if test.mutateStatusFn != nil {
test.mutateStatusFn(pod, status)
}
tCtx := ktesting.Init(t)
actions := m.computePodActions(tCtx, pod, status)
verifyActions(t, &test.actions, &actions, desc)
if test.resetStatusFn != nil {
test.resetStatusFn(status)
}
}
}
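// makeBasePodAndStatusWithRestartableInitContainers returns the base pod and
// status extended with three restartable (RestartPolicy: Always) init
// containers reported as running.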
func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
pod, status := makeBasePodAndStatus()
pod.Spec.InitContainers = []v1.Container{
{
Name: "restartable-init-1",
Image: "bar-image",
RestartPolicy: &containerRestartPolicyAlways,
},
{
Name: "restartable-init-2",
Image: "bar-image",
RestartPolicy: &containerRestartPolicyAlways,
LivenessProbe: &v1.Probe{},
StartupProbe: &v1.Probe{},
},
{
Name: "restartable-init-3",
Image: "bar-image",
RestartPolicy: &containerRestartPolicyAlways,
LivenessProbe: &v1.Probe{},
StartupProbe: &v1.Probe{},
},
}
// Replace the original statuses of the containers with those for the init
// containers.
status.ContainerStatuses = []*kubecontainer.Status{
{
ID: kubecontainer.ContainerID{ID: "initid1"},
Name: "restartable-init-1", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
},
{
ID: kubecontainer.ContainerID{ID: "initid2"},
Name: "restartable-init-2", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
},
{
ID: kubecontainer.ContainerID{ID: "initid3"},
Name: "restartable-init-3", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
},
}
return pod, status
}
func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
// Make sure the existing test cases pass with the feature enabled.
TestComputePodActions(t)
TestComputePodActionsWithInitContainers(t)
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
basePod, baseStatus := makeBasePodAndStatusWithInitAndEphemeralContainers()
noAction := podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
}
for desc, test := range map[string]struct {
mutatePodFn func(*v1.Pod)
mutateStatusFn func(*kubecontainer.PodStatus)
actions podActions
}{
"steady state; do nothing; ignore ephemeral container": {
actions: noAction,
},
"No ephemeral containers running; start one": {
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses = status.ContainerStatuses[:4]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
EphemeralContainersToStart: []int{0},
},
},
"Start second ephemeral container": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, v1.EphemeralContainer{
EphemeralContainerCommon: v1.EphemeralContainerCommon{
Name: "debug2",
Image: "busybox",
},
})
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
EphemeralContainersToStart: []int{1},
},
},
"Ephemeral container exited; do not restart": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[4].State = kubecontainer.ContainerStateExited
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
"initialization in progress; start ephemeral container": {
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[3].State = kubecontainer.ContainerStateRunning
status.ContainerStatuses = status.ContainerStatuses[:4]
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
EphemeralContainersToStart: []int{0},
},
},
"Create a new pod sandbox if the pod sandbox is dead, init container failed and RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses = status.ContainerStatuses[3:]
status.ContainerStatuses[0].ExitCode = 137
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Kill pod and do not restart ephemeral container if the pod sandbox is dead": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
InitContainersToStart: []int{0},
ContainersToStart: []int{},
ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
},
},
"Kill pod if all containers exited except ephemeral container": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// all regular containers exited
for i := 0; i < 3; i++ {
status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[i].ExitCode = 0
}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
CreateSandbox: false,
KillPod: true,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
"Ephemeral container is in unknown state; leave it alone": {
mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.ContainerStatuses[4].State = kubecontainer.ContainerStateUnknown
},
actions: noAction,
},
} {
t.Run(desc, func(t *testing.T) {
pod, status := makeBasePodAndStatusWithInitAndEphemeralContainers()
if test.mutatePodFn != nil {
test.mutatePodFn(pod)
}
if test.mutateStatusFn != nil {
test.mutateStatusFn(status)
}
tCtx := ktesting.Init(t)
actions := m.computePodActions(tCtx, pod, status)
verifyActions(t, &test.actions, &actions, desc)
})
}
}
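// TestComputePodActionsWithContainerRestartRules verifies computePodActions
// when per-container restart policies (ContainerRestartRules feature) override
// the pod-level restart policy.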
func TestComputePodActionsWithContainerRestartRules(t *testing.T) {
// Make sure existing test cases pass with feature enabled
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerRestartRules, true)
TestComputePodActions(t)
TestComputePodActionsWithInitContainers(t)
var (
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
containerRestartPolicyOnFailure = v1.ContainerRestartPolicyOnFailure
containerRestartPolicyNever = v1.ContainerRestartPolicyNever
)
ctx := context.Background()
_, _, m, err := createTestRuntimeManager(ctx)
require.NoError(t, err)
// Create a reference pod and status that the test cases below use when
// referring to specific fields.
basePod, baseStatus := makeBasePodAndStatus()
noAction := podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
}
for desc, test := range map[string]struct {
mutatePodFn func(*v1.Pod)
mutateStatusFn func(*kubecontainer.PodStatus)
actions podActions
resetStatusFn func(*kubecontainer.PodStatus)
}{
"restart exited containers if RestartPolicy == Always": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.Containers[0].RestartPolicy = &containerRestartPolicyAlways
pod.Spec.Containers[1].RestartPolicy = &containerRestartPolicyAlways
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container exited with failure; restart it.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{0, 1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"restart failed containers if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.Containers[0].RestartPolicy = &containerRestartPolicyOnFailure
pod.Spec.Containers[1].RestartPolicy = &containerRestartPolicyOnFailure
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; don't restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container exited with failure; restart it.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"restart created but not started containers if RestartPolicy == OnFailure": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.Containers[0].RestartPolicy = &containerRestartPolicyOnFailure
pod.Spec.Containers[1].RestartPolicy = &containerRestartPolicyOnFailure
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// The first container completed; don't restart it.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
// The second container was created, but never started.
status.ContainerStatuses[1].State = kubecontainer.ContainerStateCreated
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{1},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
"don't restart containers if RestartPolicy == Never": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.Containers[0].RestartPolicy = &containerRestartPolicyNever
pod.Spec.Containers[1].RestartPolicy = &containerRestartPolicyNever
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// Don't restart any containers.
status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[0].ExitCode = 0
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 111
},
actions: noAction,
},
"Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.Containers[1].RestartPolicy = &containerRestartPolicyOnFailure
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[1].ExitCode = 0
},
actions: podActions{
KillPod: true,
CreateSandbox: true,
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(1),
ContainersToStart: []int{0, 2},
ContainersToKill: getKillMap(basePod, baseStatus, []int{}),
},
},
} {
pod, status := makeBasePodAndStatus()
if test.mutatePodFn != nil {
test.mutatePodFn(pod)
}
if test.mutateStatusFn != nil {
test.mutateStatusFn(status)
}
ctx := context.Background()
actions := m.computePodActions(ctx, pod, status)
verifyActions(t, &test.actions, &actions, desc)
if test.resetStatusFn != nil {
test.resetStatusFn(status)
}
}
}
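// TestSyncPodWithSandboxAndDeletedPod verifies that SyncPod does not surface
// an error when the pod has already been deleted from the pod state provider,
// even though sandbox creation is forced to fail.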
func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
fakeRuntime.ErrorOnSandboxCreate = true
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: containers,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
m.podStateProvider.(*fakePodStateProvider).removed = map[types.UID]struct{}{pod.UID: {}}
// GetPodStatus and the following SyncPod will not return errors in the
// case where the pod has been deleted. We are not adding any pods into
// the fakePodStateProvider, so they are considered 'deleted'.
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
// This will return an error if the pod has _not_ been deleted.
assert.NoError(t, result.Error())
}
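// makeBasePodAndStatusWithInitAndEphemeralContainers returns the base pod and
// status extended with one exited init container and one running ephemeral
// debug container.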
func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontainer.PodStatus) {
pod, status := makeBasePodAndStatus()
pod.Spec.InitContainers = []v1.Container{
{
Name: "init1",
Image: "bar-image",
},
}
pod.Spec.EphemeralContainers = []v1.EphemeralContainer{
{
EphemeralContainerCommon: v1.EphemeralContainerCommon{
Name: "debug",
Image: "busybox",
},
},
}
status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
ID: kubecontainer.ContainerID{ID: "initid1"},
Name: "init1", State: kubecontainer.ContainerStateExited,
Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
}, &kubecontainer.Status{
ID: kubecontainer.ContainerID{ID: "debug1"},
Name: "debug", State: kubecontainer.ContainerStateRunning,
Hash: kubecontainer.HashContainer((*v1.Container)(&pod.Spec.EphemeralContainers[0].EphemeralContainerCommon)),
})
return pod, status
}
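// TestComputePodActionsForPodResize verifies that computePodActions produces
// the expected in-place resize (or restart) actions when the desired container
// resources differ from the actuated ones.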
func TestComputePodActionsForPodResize(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
m.machineInfo.MemoryCapacity = 17179860387 // ~16GiB
assert.NoError(t, err)
cpu100m := resource.MustParse("100m")
cpu200m := resource.MustParse("200m")
mem100M := resource.MustParse("100Mi")
mem200M := resource.MustParse("200Mi")
cpuPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired}
memPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired}
cpuPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}
memPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer}
setupActuatedResources := func(pod *v1.Pod, container *v1.Container, actuatedResources v1.ResourceRequirements) {
actuatedContainer := container.DeepCopy()
actuatedContainer.Resources = actuatedResources
require.NoError(t, m.allocationManager.SetActuatedResources(pod, actuatedContainer))
}
for desc, test := range map[string]struct {
setupFn func(*v1.Pod)
getExpectedPodActionsFn func(*v1.Pod, *kubecontainer.PodStatus) *podActions
}{
"Update container CPU and memory resources": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[1]
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {
{
container: &pod.Spec.Containers[1],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem200M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
},
},
v1.ResourceCPU: {
{
container: &pod.Spec.Containers[1],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem200M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
},
},
},
}
return &pa
},
},
"Update container CPU resources": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[1]
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceCPU: {
{
container: &pod.Spec.Containers[1],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
},
},
},
}
return &pa
},
},
"Update container memory resources": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {
{
container: &pod.Spec.Containers[2],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem200M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
},
},
},
}
return &pa
},
},
"Nothing when spec.Resources and status.Resources are equal": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[1]
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToStart: []int{},
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
}
return &pa
},
},
"Nothing when spec.Resources and status.Resources are equal (besteffort)": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[1]
c.Resources = v1.ResourceRequirements{}
setupActuatedResources(pod, c, v1.ResourceRequirements{})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToStart: []int{},
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
}
return &pa
},
},
"Update container CPU and memory resources with Restart policy for CPU": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[0]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[0].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[0],
name: pod.Spec.Containers[0].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{0},
ContainersToKill: killMap,
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
UpdatePodResources: true,
}
return &pa
},
},
"Update container CPU and memory resources with Restart policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[2],
name: pod.Spec.Containers[2].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{2},
ContainersToKill: killMap,
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
UpdatePodResources: true,
}
return &pa
},
},
"Update container memory resources with Restart policy for CPU": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[1]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem200M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {
{
container: &pod.Spec.Containers[1],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem200M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
},
},
},
}
return &pa
},
},
"Update container CPU resources with Restart policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem100M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceCPU: {
{
container: &pod.Spec.Containers[2],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu200m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem100M.Value(),
cpuLimit: cpu100m.MilliValue(),
},
},
},
},
}
return &pa
},
},
"Update container memory (requests only) with RestartContainer policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
Requests: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[2],
name: pod.Spec.Containers[2].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{2},
ContainersToKill: killMap,
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
UpdatePodResources: true,
}
return &pa
},
},
"Update container memory (requests only) with RestartNotRequired policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
Requests: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[2],
name: pod.Spec.Containers[2].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {
{
container: &pod.Spec.Containers[2],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem200M.Value(),
memoryRequest: mem100M.Value(),
cpuLimit: cpu200m.MilliValue(),
cpuRequest: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem200M.Value(),
memoryRequest: mem200M.Value(),
cpuLimit: cpu200m.MilliValue(),
cpuRequest: cpu100m.MilliValue(),
},
},
},
},
}
return &pa
},
},
} {
t.Run(desc, func(t *testing.T) {
pod, status := makeBasePodAndStatus()
for idx := range pod.Spec.Containers {
// default resize policy when pod resize feature is enabled
pod.Spec.Containers[idx].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
}
if test.setupFn != nil {
test.setupFn(pod)
}
t.Cleanup(func() { m.allocationManager.RemovePod(pod.UID) })
for idx := range pod.Spec.Containers {
// Recompute container hashes so the statuses match the mutated pod spec.
if kcs := status.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil {
kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx])
}
}
tCtx := ktesting.Init(t)
expectedActions := test.getExpectedPodActionsFn(pod, status)
actions := m.computePodActions(tCtx, pod, status)
verifyActions(t, expectedActions, &actions, desc)
})
}
}
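// TestUpdatePodContainerResources verifies that updatePodContainerResources
// invokes UpdateContainerResources on the runtime and records the updated
// values for the resized resource, for both regular and sidecar containers.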
func TestUpdatePodContainerResources(t *testing.T) {
tCtx := ktesting.Init(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
m.machineInfo.MemoryCapacity = 17179860387 // ~16GiB
assert.NoError(t, err)
cpu100m := resource.MustParse("100m")
cpu150m := resource.MustParse("150m")
cpu200m := resource.MustParse("200m")
cpu250m := resource.MustParse("250m")
cpu300m := resource.MustParse("300m")
cpu350m := resource.MustParse("350m")
mem100M := resource.MustParse("100Mi")
mem150M := resource.MustParse("150Mi")
mem200M := resource.MustParse("200Mi")
mem250M := resource.MustParse("250Mi")
mem300M := resource.MustParse("300Mi")
mem350M := resource.MustParse("350Mi")
res100m100Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}
res150m100Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem100M}
res100m150Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem150M}
res150m150Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem150M}
res200m200Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}
res250m200Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem200M}
res200m250Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem250M}
res250m250Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem250M}
res300m300Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem300M}
res350m300Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem300M}
res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M}
res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M}
pod, _ := makeBasePodAndStatusWithRestartableInitContainers()
makeAndSetFakePod(t, m, fakeRuntime, pod)
for dsc, tc := range map[string]struct {
resourceName v1.ResourceName
apiSpecResources []v1.ResourceRequirements
apiStatusResources []v1.ResourceRequirements
requiresRestart []bool
invokeUpdateResources bool
expectedCurrentLimits []v1.ResourceList
expectedCurrentRequests []v1.ResourceList
}{
"Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
resourceName: v1.ResourceCPU,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
},
"Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
resourceName: v1.ResourceMemory,
apiSpecResources: []v1.ResourceRequirements{
{Limits: res150m150Mi, Requests: res150m150Mi},
{Limits: res250m250Mi, Requests: res250m250Mi},
{Limits: res350m350Mi, Requests: res350m350Mi},
},
apiStatusResources: []v1.ResourceRequirements{
{Limits: res100m100Mi, Requests: res100m100Mi},
{Limits: res200m200Mi, Requests: res200m200Mi},
{Limits: res300m300Mi, Requests: res300m300Mi},
},
requiresRestart: []bool{false, false, false},
invokeUpdateResources: true,
expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
},
} {
for _, allSideCarCtrs := range []bool{false, true} {
var containersToUpdate []containerToUpdateInfo
containerToUpdateInfo := func(container *v1.Container, idx int) containerToUpdateInfo {
return containerToUpdateInfo{
container: container,
kubeContainerID: kubecontainer.ContainerID{},
desiredContainerResources: containerResources{
memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
},
}
}
if allSideCarCtrs {
for idx := range pod.Spec.InitContainers {
// Apply the test case's spec and status resources to each init container.
pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cinfo := containerToUpdateInfo(&pod.Spec.InitContainers[idx], idx)
containersToUpdate = append(containersToUpdate, cinfo)
}
} else {
for idx := range pod.Spec.Containers {
// Apply the test case's spec and status resources to each regular container.
pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
cinfo := containerToUpdateInfo(&pod.Spec.Containers[idx], idx)
containersToUpdate = append(containersToUpdate, cinfo)
}
}
fakeRuntime.Called = []string{}
err := m.updatePodContainerResources(tCtx, pod, tc.resourceName, containersToUpdate)
require.NoError(t, err, dsc)
if tc.invokeUpdateResources {
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
}
for idx := range len(containersToUpdate) {
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
}
}
}
}
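// TestToKubeContainerImageVolumes verifies the translation of image volume
// pull results into the container's kubecontainer.ImageVolumes map, including
// error propagation for failed pulls.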
func TestToKubeContainerImageVolumes(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, manager, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
const (
volume1 = "volume-1"
volume2 = "volume-2"
)
imageSpec1 := runtimeapi.ImageSpec{Image: "image-1"}
imageSpec2 := runtimeapi.ImageSpec{Image: "image-2"}
errTest := errors.New("pull failed")
syncResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, "test")
for desc, tc := range map[string]struct {
pullResults imageVolumePulls
container *v1.Container
expectedError error
expectedImageVolumes kubecontainer.ImageVolumes
}{
"empty volumes": {},
"multiple volumes": {
pullResults: imageVolumePulls{
volume1: imageVolumePullResult{spec: &imageSpec1},
volume2: imageVolumePullResult{spec: &imageSpec2},
},
container: &v1.Container{
VolumeMounts: []v1.VolumeMount{
{Name: volume1},
{Name: volume2},
},
},
expectedImageVolumes: kubecontainer.ImageVolumes{
volume1: &imageSpec1,
volume2: &imageSpec2,
},
},
"not matching volume": {
pullResults: imageVolumePulls{
"different": imageVolumePullResult{spec: &imageSpec1},
},
container: &v1.Container{
VolumeMounts: []v1.VolumeMount{{Name: volume1}},
},
expectedImageVolumes: kubecontainer.ImageVolumes{},
},
"error in pull result": {
pullResults: imageVolumePulls{
volume1: imageVolumePullResult{err: errTest},
},
container: &v1.Container{
VolumeMounts: []v1.VolumeMount{
{Name: volume1},
},
},
expectedError: errTest,
},
} {
imageVolumes, err := manager.toKubeContainerImageVolumes(tCtx, tc.pullResults, tc.container, &v1.Pod{}, syncResult)
if tc.expectedError != nil {
require.EqualError(t, err, tc.expectedError.Error())
} else {
require.NoError(t, err, desc)
}
assert.Equal(t, tc.expectedImageVolumes, imageVolumes)
}
}
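// TestGetImageVolumes verifies that getImageVolumes resolves the image volume
// sources of a pod into image specs, recording a per-volume error when a pull
// cannot be satisfied (e.g. PullNever with an absent image).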
func TestGetImageVolumes(t *testing.T) {
tCtx := ktesting.Init(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ImageVolume, true)
_, _, manager, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
const (
volume1 = "volume-1"
volume2 = "volume-2"
image1 = "image-1:latest"
image2 = "image-2:latest"
)
imageSpec1 := runtimeapi.ImageSpec{Image: image1, UserSpecifiedImage: image1}
imageSpec2 := runtimeapi.ImageSpec{Image: image2, UserSpecifiedImage: image2}
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: "test-pod",
Namespace: "test-namespace",
Uid: "test-uid",
},
}
for desc, tc := range map[string]struct {
pod *v1.Pod
expectedImageVolumePulls imageVolumePulls
expectedError error
}{
"empty volumes": {
pod: &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{}}},
expectedImageVolumePulls: imageVolumePulls{},
},
"multiple volumes": {
pod: &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{
{Name: volume1, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: image1, PullPolicy: v1.PullAlways}}},
{Name: volume2, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: image2, PullPolicy: v1.PullAlways}}},
}}},
expectedImageVolumePulls: imageVolumePulls{
volume1: imageVolumePullResult{spec: &imageSpec1},
volume2: imageVolumePullResult{spec: &imageSpec2},
},
},
"different than image volumes": {
pod: &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{
{Name: volume1, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{}}},
}}},
expectedImageVolumePulls: imageVolumePulls{},
},
"multiple volumes but one failed to pull": {
pod: &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{
{Name: volume1, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: image1, PullPolicy: v1.PullAlways}}},
{Name: volume2, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: "image", PullPolicy: v1.PullNever}}}, // fails
}}},
expectedImageVolumePulls: imageVolumePulls{
volume1: imageVolumePullResult{spec: &imageSpec1},
volume2: imageVolumePullResult{err: imagetypes.ErrImageNeverPull, msg: `Container image "image" is not present with pull policy of Never`},
},
},
} {
imageVolumePulls, err := manager.getImageVolumes(tCtx, tc.pod, podSandboxConfig, nil)
if tc.expectedError != nil {
require.EqualError(t, err, tc.expectedError.Error())
} else {
require.NoError(t, err, desc)
}
assert.Equal(t, tc.expectedImageVolumePulls, imageVolumePulls)
}
}
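// TestDoPodResizeAction verifies doPodResizeAction end to end: the expected
// number of pod-level cgroup updates, propagation of runtime errors, and the
// pod resize duration metric.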
func TestDoPodResizeAction(t *testing.T) {
if goruntime.GOOS != "linux" {
t.Skip("unsupported OS")
}
tCtx := ktesting.Init(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
metrics.Register()
metrics.PodResizeDurationMilliseconds.Reset()
for i, tc := range []struct {
testName string
currentResources containerResources
desiredResources containerResources
updatedResources []v1.ResourceName
otherContainersHaveLimits bool
runtimeErrors map[string][]error
expectedError string
expectedErrorMessage string
expectPodCgroupUpdates int
}{
{
testName: "Increase cpu and memory requests and limits, with computed pod limits",
currentResources: containerResources{
cpuRequest: 100, cpuLimit: 100,
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
cpuRequest: 200, cpuLimit: 200,
memoryRequest: 200, memoryLimit: 200,
},
otherContainersHaveLimits: true,
updatedResources: []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
expectPodCgroupUpdates: 3, // cpu req, cpu lim, mem lim
},
{
testName: "Increase cpu and memory requests and limits, with computed pod limits and set a runtime error",
currentResources: containerResources{
cpuRequest: 100, cpuLimit: 100,
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
cpuRequest: 200, cpuLimit: 200,
memoryRequest: 200, memoryLimit: 200,
},
otherContainersHaveLimits: true,
updatedResources: []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
expectPodCgroupUpdates: 1,
runtimeErrors: map[string][]error{"UpdateContainerResources": {fmt.Errorf("error updating container resources")}},
expectedError: "ResizePodInPlaceError",
expectedErrorMessage: "error updating container resources",
},
{
testName: "Increase cpu and memory requests and limits, without computed pod limits",
currentResources: containerResources{
cpuRequest: 100, cpuLimit: 100,
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
cpuRequest: 200, cpuLimit: 200,
memoryRequest: 200, memoryLimit: 200,
},
// If some containers don't have limits, pod-level limits are not applied.
otherContainersHaveLimits: false,
updatedResources: []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
expectPodCgroupUpdates: 1, // cpu req only; pod-level limits are not set
},
{
testName: "Increase cpu and memory requests only",
currentResources: containerResources{
cpuRequest: 100, cpuLimit: 200,
memoryRequest: 100, memoryLimit: 200,
},
desiredResources: containerResources{
cpuRequest: 150, cpuLimit: 200,
memoryRequest: 150, memoryLimit: 200,
},
updatedResources: []v1.ResourceName{v1.ResourceCPU},
expectPodCgroupUpdates: 1, // cpu req
},
{
testName: "Resize memory request no limits",
currentResources: containerResources{
cpuRequest: 100,
memoryRequest: 100,
},
desiredResources: containerResources{
cpuRequest: 100,
memoryRequest: 200,
},
// Memory request resize doesn't generate an update action.
updatedResources: []v1.ResourceName{},
},
{
testName: "Resize cpu request no limits",
currentResources: containerResources{
cpuRequest: 100,
memoryRequest: 100,
},
desiredResources: containerResources{
cpuRequest: 200,
memoryRequest: 100,
},
updatedResources: []v1.ResourceName{v1.ResourceCPU},
expectPodCgroupUpdates: 1, // cpu req
},
{
testName: "Add limits",
currentResources: containerResources{
cpuRequest: 100,
memoryRequest: 100,
},
desiredResources: containerResources{
cpuRequest: 100, cpuLimit: 100,
memoryRequest: 100, memoryLimit: 100,
},
updatedResources: []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
expectPodCgroupUpdates: 0,
},
{
testName: "Add limits and pod limits",
currentResources: containerResources{
cpuRequest: 100,
memoryRequest: 100,
},
desiredResources: containerResources{
cpuRequest: 100, cpuLimit: 100,
memoryRequest: 100, memoryLimit: 100,
},
otherContainersHaveLimits: true,
updatedResources: []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory},
expectPodCgroupUpdates: 2, // cpu lim, memory lim
},
} {
t.Run(tc.testName, func(t *testing.T) {
_, _, m, err := createTestRuntimeManagerWithErrors(tCtx, tc.runtimeErrors)
require.NoError(t, err)
m.cpuCFSQuota = true // Enforce CPU Limits
mockCM := cmtesting.NewMockContainerManager(t)
mockCM.EXPECT().PodHasExclusiveCPUs(mock.Anything).Return(false).Maybe()
mockCM.EXPECT().ContainerHasExclusiveCPUs(mock.Anything, mock.Anything).Return(false).Maybe()
m.containerManager = mockCM
mockPCM := cmtesting.NewMockPodContainerManager(t)
mockCM.EXPECT().NewPodContainerManager().Return(mockPCM)
mockPCM.EXPECT().GetPodCgroupConfig(mock.Anything, v1.ResourceMemory).Return(&cm.ResourceConfig{
Memory: ptr.To(tc.currentResources.memoryLimit),
}, nil).Maybe()
mockPCM.EXPECT().GetPodCgroupMemoryUsage(mock.Anything).Return(0, nil).Maybe()
// Set up mock pod cgroup config
podCPURequest := tc.currentResources.cpuRequest
podCPULimit := tc.currentResources.cpuLimit
if tc.otherContainersHaveLimits {
podCPURequest += 200
podCPULimit += 200
}
mockPCM.EXPECT().GetPodCgroupConfig(mock.Anything, v1.ResourceCPU).Return(&cm.ResourceConfig{
CPUShares: ptr.To(cm.MilliCPUToShares(podCPURequest)),
CPUQuota: ptr.To(cm.MilliCPUToQuota(podCPULimit, cm.QuotaPeriod)),
}, nil).Maybe()
if tc.expectPodCgroupUpdates > 0 {
mockPCM.EXPECT().SetPodCgroupConfig(mock.Anything, mock.Anything).Return(nil).Times(tc.expectPodCgroupUpdates)
}
pod, kps := makeBasePodAndStatus()
// pod spec and allocated resources are already updated as desired when doPodResizeAction() is called.
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.desiredResources.cpuRequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryRequest, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.desiredResources.cpuLimit, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryLimit, resource.DecimalSI),
},
}
if tc.otherContainersHaveLimits {
resourceList := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
}
resources := v1.ResourceRequirements{
Requests: resourceList,
Limits: resourceList,
}
pod.Spec.Containers[1].Resources = resources
pod.Spec.Containers[2].Resources = resources
}
m.runtimeHelper = &containertest.FakeRuntimeHelper{
PodStats: map[types.UID]*statsapi.PodStats{
pod.UID: {
PodRef: statsapi.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
},
Memory: &statsapi.MemoryStats{
UsageBytes: ptr.To[uint64](20),
},
Containers: []statsapi.ContainerStats{{
Name: pod.Spec.Containers[0].Name,
Memory: &statsapi.MemoryStats{
UsageBytes: ptr.To[uint64](10),
},
}},
}},
}
updateInfo := containerToUpdateInfo{
container: &pod.Spec.Containers[0],
kubeContainerID: kps.ContainerStatuses[0].ID,
desiredContainerResources: tc.desiredResources,
currentContainerResources: &tc.currentResources,
}
containersToUpdate := make(map[v1.ResourceName][]containerToUpdateInfo)
for _, r := range tc.updatedResources {
containersToUpdate[r] = []containerToUpdateInfo{updateInfo}
}
actions := podActions{
ContainersToUpdate: containersToUpdate,
}
resizeResult := m.doPodResizeAction(tCtx, pod, kps, actions)
if tc.expectedError != "" {
require.Error(t, resizeResult.Error)
require.EqualError(t, resizeResult.Error, tc.expectedError)
require.Equal(t, tc.expectedErrorMessage, resizeResult.Message)
} else {
require.NoError(t, resizeResult.Error, resizeResult.Message)
}
mock.AssertExpectationsForObjects(t, mockPCM)
testutil.AssertHistogramTotalCount(t, "kubelet_pod_resize_duration_milliseconds", map[string]string{}, i+1)
})
}
metrics.PodResizeDurationMilliseconds.Reset()
}
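// TestCheckPodResize verifies validatePodResizeAction, which rejects a memory
// limit decrease (at container or pod level) when current usage would exceed
// the new limit, or when the usage needed for that check is unavailable.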
func TestCheckPodResize(t *testing.T) {
if goruntime.GOOS != "linux" {
t.Skip("unsupported OS")
}
for _, tc := range []struct {
testName string
currentResources, desiredResources containerResources
currentPodMemLimit, desiredPodMemLimit *int64
containerMemoryUsage, podMemoryUsage *uint64
expectedError bool
}{
{
testName: "Resize memory request no limits",
currentResources: containerResources{
cpuRequest: 100,
memoryRequest: 100,
},
desiredResources: containerResources{
cpuRequest: 100,
memoryRequest: 200,
},
expectedError: false,
},
{
testName: "Add container limits, low usage",
currentResources: containerResources{
memoryRequest: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
containerMemoryUsage: ptr.To[uint64](10),
podMemoryUsage: ptr.To[uint64](10),
expectedError: false,
},
{
testName: "Add container limits, high usage",
currentResources: containerResources{
memoryRequest: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
containerMemoryUsage: ptr.To[uint64](200),
podMemoryUsage: ptr.To[uint64](200),
expectedError: true,
},
{
testName: "Add container limits, missing container usage",
currentResources: containerResources{
memoryRequest: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
podMemoryUsage: ptr.To[uint64](10),
expectedError: true,
},
{
testName: "Add container limits, missing pod usage",
currentResources: containerResources{
memoryRequest: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
containerMemoryUsage: ptr.To[uint64](10),
expectedError: false,
},
{
testName: "Increase container limits",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 200,
},
expectedError: false,
},
{
testName: "Decrease container limits, low usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 200,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
containerMemoryUsage: ptr.To[uint64](20),
podMemoryUsage: ptr.To[uint64](20),
expectedError: false,
},
{
testName: "Decrease container limits, high usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 200,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
containerMemoryUsage: ptr.To[uint64](150),
podMemoryUsage: ptr.To[uint64](150),
expectedError: true,
},
{
testName: "Add pod limit, low usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredPodMemLimit: ptr.To[int64](100),
containerMemoryUsage: ptr.To[uint64](20),
podMemoryUsage: ptr.To[uint64](20),
expectedError: false,
},
{
testName: "Add pod limit, high usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredPodMemLimit: ptr.To[int64](100),
containerMemoryUsage: ptr.To[uint64](20),
podMemoryUsage: ptr.To[uint64](200),
expectedError: true,
},
{
testName: "Increase pod limits",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
currentPodMemLimit: ptr.To[int64](100),
desiredPodMemLimit: ptr.To[int64](200),
containerMemoryUsage: ptr.To[uint64](20),
podMemoryUsage: ptr.To[uint64](20),
expectedError: false,
},
{
testName: "Decrease pod limits, low usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
currentPodMemLimit: ptr.To[int64](200),
desiredPodMemLimit: ptr.To[int64](100),
containerMemoryUsage: ptr.To[uint64](20),
podMemoryUsage: ptr.To[uint64](20),
expectedError: false,
},
{
testName: "Decrease pod limits, high usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
currentPodMemLimit: ptr.To[int64](200),
desiredPodMemLimit: ptr.To[int64](100),
containerMemoryUsage: ptr.To[uint64](50),
podMemoryUsage: ptr.To[uint64](150),
expectedError: true,
},
{
testName: "Decrease pod limits, missing usage",
currentResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
desiredResources: containerResources{
memoryRequest: 100, memoryLimit: 100,
},
currentPodMemLimit: ptr.To[int64](200),
desiredPodMemLimit: ptr.To[int64](100),
containerMemoryUsage: ptr.To[uint64](50),
podMemoryUsage: nil,
expectedError: true,
},
} {
t.Run(tc.testName, func(t *testing.T) {
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
pod, kps := makeBasePodAndStatus()
// pod spec and allocated resources are already updated as desired when validatePodResizeAction() is called.
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.desiredResources.cpuRequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryRequest, resource.DecimalSI),
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.desiredResources.cpuLimit, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(tc.desiredResources.memoryLimit, resource.DecimalSI),
},
}
updateInfo := containerToUpdateInfo{
container: &pod.Spec.Containers[0],
kubeContainerID: kps.ContainerStatuses[0].ID,
desiredContainerResources: tc.desiredResources,
currentContainerResources: &tc.currentResources,
}
containersToUpdate := map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {updateInfo},
}
actions := podActions{
ContainersToUpdate: containersToUpdate,
}
podStats := &statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
},
Memory: &statsapi.MemoryStats{
UsageBytes: tc.podMemoryUsage,
},
}
for _, container := range pod.Spec.Containers {
podStats.Containers = append(podStats.Containers, statsapi.ContainerStats{
Name: container.Name,
Memory: &statsapi.MemoryStats{
UsageBytes: tc.containerMemoryUsage,
},
})
}
m.runtimeHelper = &containertest.FakeRuntimeHelper{
PodStats: map[types.UID]*statsapi.PodStats{pod.UID: podStats},
}
currentPodResources := &cm.ResourceConfig{Memory: tc.currentPodMemLimit}
desiredPodResources := &cm.ResourceConfig{Memory: tc.desiredPodMemLimit}
err = m.validatePodResizeAction(tCtx, pod, kps, currentPodResources, desiredPodResources, actions)
if tc.expectedError {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}
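// TestIncrementImageVolumeMetrics verifies the image volume metrics: requested
// counts come from the provided image volumes, while mounted success/error
// counts depend on the container's volume mounts and the error classification.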
func TestIncrementImageVolumeMetrics(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ImageVolume, true)
legacyregistry.MustRegister(metrics.ImageVolumeRequestedTotal)
legacyregistry.MustRegister(metrics.ImageVolumeMountedSucceedTotal)
legacyregistry.MustRegister(metrics.ImageVolumeMountedErrorsTotal)
testCases := map[string]struct {
err error
msg string
volumeMounts []v1.VolumeMount
imageVolumes kubecontainer.ImageVolumes
wants string
}{
"without error": {
volumeMounts: []v1.VolumeMount{
{Name: "mount1"},
{Name: "mount2"},
},
imageVolumes: kubecontainer.ImageVolumes{
"mount1": {Image: "image1"},
"mount2": {Image: "image2"},
"mount3": {Image: "image3"},
},
wants: `
# HELP kubelet_image_volume_mounted_errors_total [ALPHA] Number of failed image volume mounts.
# TYPE kubelet_image_volume_mounted_errors_total counter
kubelet_image_volume_mounted_errors_total 0
# HELP kubelet_image_volume_mounted_succeed_total [ALPHA] Number of successful image volume mounts.
# TYPE kubelet_image_volume_mounted_succeed_total counter
kubelet_image_volume_mounted_succeed_total 2
# HELP kubelet_image_volume_requested_total [ALPHA] Number of requested image volumes.
# TYPE kubelet_image_volume_requested_total counter
kubelet_image_volume_requested_total 3
`,
},
"with error": {
err: ErrCreateContainer,
msg: crierror.ErrImageVolumeMountFailed.Error(),
volumeMounts: []v1.VolumeMount{
{Name: "mount1"},
{Name: "mount2"},
},
imageVolumes: kubecontainer.ImageVolumes{
"mount1": {Image: "image1"},
"mount2": {Image: "image2"},
"mount3": {Image: "image3"},
},
wants: `
# HELP kubelet_image_volume_mounted_errors_total [ALPHA] Number of failed image volume mounts.
# TYPE kubelet_image_volume_mounted_errors_total counter
kubelet_image_volume_mounted_errors_total 2
# HELP kubelet_image_volume_mounted_succeed_total [ALPHA] Number of successful image volume mounts.
# TYPE kubelet_image_volume_mounted_succeed_total counter
kubelet_image_volume_mounted_succeed_total 0
# HELP kubelet_image_volume_requested_total [ALPHA] Number of requested image volumes.
# TYPE kubelet_image_volume_requested_total counter
kubelet_image_volume_requested_total 3
`,
},
"with unknown CRI error from message": {
err: ErrCreateContainer,
msg: "",
volumeMounts: []v1.VolumeMount{
{Name: "mount1"},
{Name: "mount2"},
},
imageVolumes: kubecontainer.ImageVolumes{
"mount1": {Image: "image1"},
"mount2": {Image: "image2"},
"mount3": {Image: "image3"},
},
wants: `
# HELP kubelet_image_volume_mounted_errors_total [ALPHA] Number of failed image volume mounts.
# TYPE kubelet_image_volume_mounted_errors_total counter
kubelet_image_volume_mounted_errors_total 0
# HELP kubelet_image_volume_mounted_succeed_total [ALPHA] Number of successful image volume mounts.
# TYPE kubelet_image_volume_mounted_succeed_total counter
kubelet_image_volume_mounted_succeed_total 2
# HELP kubelet_image_volume_requested_total [ALPHA] Number of requested image volumes.
# TYPE kubelet_image_volume_requested_total counter
kubelet_image_volume_requested_total 3
`,
},
"without used volume": {
volumeMounts: []v1.VolumeMount{
{Name: "mount1"},
},
imageVolumes: kubecontainer.ImageVolumes{},
wants: `
# HELP kubelet_image_volume_mounted_errors_total [ALPHA] Number of failed image volume mounts.
# TYPE kubelet_image_volume_mounted_errors_total counter
kubelet_image_volume_mounted_errors_total 0
# HELP kubelet_image_volume_mounted_succeed_total [ALPHA] Number of successful image volume mounts.
# TYPE kubelet_image_volume_mounted_succeed_total counter
kubelet_image_volume_mounted_succeed_total 0
# HELP kubelet_image_volume_requested_total [ALPHA] Number of requested image volumes.
# TYPE kubelet_image_volume_requested_total counter
kubelet_image_volume_requested_total 0
`,
},
}
// Run tests.
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
metrics.ImageVolumeRequestedTotal.Reset()
metrics.ImageVolumeMountedErrorsTotal.Reset()
metrics.ImageVolumeMountedSucceedTotal.Reset()
// Call the function.
incrementImageVolumeMetrics(tc.err, tc.msg, &v1.Container{VolumeMounts: tc.volumeMounts}, tc.imageVolumes)
if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(tc.wants),
"kubelet_"+metrics.ImageVolumeRequestedTotalKey,
"kubelet_"+metrics.ImageVolumeMountedSucceedTotalKey,
"kubelet_"+metrics.ImageVolumeMountedErrorsTotalKey,
); err != nil {
t.Error(err)
}
})
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubelet/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
logger := klog.FromContext(ctx)
podSandboxConfig, err := m.generatePodSandboxConfig(ctx, pod, attempt)
if err != nil {
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
logger.Error(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
return "", message, err
}
// Create pod logs directory
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
if err != nil {
message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
logger.Error(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
logger.V(2).Info("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
logger.Error(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(ctx context.Context, pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecate podsandbox resource requirements in favor of the pod-level cgroup.
// See https://github.com/kubernetes/kubernetes/issues/29871
logger := klog.FromContext(ctx)
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(m.podLogsDirectory, pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(logger, &c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(logger, port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update the config to include overhead and sandbox-level resources.
if err := m.applySandboxResources(ctx, pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We have to generate a LinuxPodSandboxConfig regardless of the underlying OS, because securityContext is not part
// of podSandboxConfig itself; it currently lives in LinuxPodSandboxConfig. If securityContext is ever pulled up
// into podSandboxConfig, we should use it from there instead.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing the sandbox to run as `runtime/default` allows users to
// use least-privileged seccomp profiles at the pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper, m.runtimeClassManager)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SupplementalGroupsPolicy != nil {
policyValue, ok := runtimeapi.SupplementalGroupsPolicy_value[string(*sc.SupplementalGroupsPolicy)]
if !ok {
return nil, fmt.Errorf("unsupported supplementalGroupsPolicy: %s", string(*sc.SupplementalGroupsPolicy))
}
lc.SecurityContext.SupplementalGroupsPolicy = runtimeapi.SupplementalGroupsPolicy(policyValue)
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present, all containers in a Windows pod must be HostProcess containers,
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec != nil {
wc.SecurityContext.CredentialSpec = *wo.GMSACredentialSpec
}
if wo.RunAsUserName != nil {
wc.SecurityContext.RunAsUsername = *wo.RunAsUserName
}
if kubecontainer.HasWindowsHostProcessContainer(pod) {
if wo.HostProcess != nil && !*wo.HostProcess {
return nil, fmt.Errorf("pod must not contain any HostProcess containers if Pod's WindowsOptions.HostProcess is set to false")
}
}
return wc, nil
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
logger := klog.FromContext(ctx)
var filter *runtimeapi.PodSandboxFilter
if !all {
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
filter = &runtimeapi.PodSandboxFilter{
State: &runtimeapi.PodSandboxStateValue{
State: readyState,
},
}
}
resp, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
logger.Error(err, "Failed to list pod sandboxes")
return nil, err
}
return resp, nil
}
// determinePodSandboxIPs determines the IP addresses of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(ctx context.Context, podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
logger := klog.FromContext(ctx)
podIPs := make([]string, 0)
if podSandbox.Network == nil {
logger.Info("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
return podIPs
}
// The IP could be an empty string if the runtime is not responsible for the
// IP (e.g., host networking).
// Pick the primary IP.
if len(podSandbox.Network.Ip) != 0 {
if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
logger.Info("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
return nil
}
podIPs = append(podIPs, podSandbox.Network.Ip)
}
// Pick additional IPs, if the CRI reported them.
for _, podIP := range podSandbox.Network.AdditionalIps {
if nil == netutils.ParseIPSloppy(podIP.Ip) {
logger.Info("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
return nil
}
podIPs = append(podIPs, podIP.Ip)
}
return podIPs
}
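// Illustrative sketch (editor's example; m and ctx are assumed to be a
// *kubeGenericRuntimeManager and a context.Context already in scope): given a
// sandbox status reporting a primary IP and one additional IP, the function
// above returns both in order, and returns nil as soon as any reported IP
// fails to parse:
//
//	status := &runtimeapi.PodSandboxStatus{
//		Network: &runtimeapi.PodSandboxNetworkStatus{
//			Ip:            "10.0.0.5",
//			AdditionalIps: []*runtimeapi.PodIP{{Ip: "fd00::5"}},
//		},
//	}
//	ips := m.determinePodSandboxIPs(ctx, "default", "nginx", status)
//	// ips == []string{"10.0.0.5", "fd00::5"}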
// getSandboxIDByPodUID gets the sandbox IDs by podUID and returns ([]sandboxID, error).
// The state param may be nil in order to get all sandboxes belonging to the same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
logger := klog.FromContext(ctx)
filter := &runtimeapi.PodSandboxFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
}
if state != nil {
filter.State = &runtimeapi.PodSandboxStateValue{
State: *state,
}
}
sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
logger.Error(err, "Failed to list sandboxes for pod", "podUID", podUID)
return nil, err
}
if len(sandboxes) == 0 {
return nil, nil
}
// Sort with newest first.
sandboxIDs := make([]string, len(sandboxes))
sort.Sort(podSandboxByCreated(sandboxes))
for i, s := range sandboxes {
sandboxIDs[i] = s.Id
}
return sandboxIDs, nil
}
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
}
if len(sandboxIDs) == 0 {
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
}
req := &runtimeapi.PortForwardRequest{
PodSandboxId: sandboxIDs[0],
Port: ports,
}
resp, err := m.runtimeService.PortForward(ctx, req)
if err != nil {
return nil, err
}
return url.Parse(resp.Url)
}
//go:build linux
// +build linux
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
resourcehelper "k8s.io/component-helpers/resource"
)
func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
resources := &runtimeapi.LinuxContainerResources{}
if pod.Spec.Overhead != nil {
cpu := pod.Spec.Overhead.Cpu()
memory := pod.Spec.Overhead.Memory()
// For overhead, we do not differentiate between requests and limits. Treat this overhead
// as "guaranteed", with requests == limits
resources = m.calculateLinuxResources(cpu, cpu, memory, false)
}
return resources
}
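// Illustrative sketch (editor's example; m is assumed in scope): a
// RuntimeClass overhead of 250m CPU and 120Mi memory is converted with
// requests == limits, so the resulting LinuxContainerResources derives its
// CPU and memory settings from those same two quantities:
//
//	pod := &v1.Pod{Spec: v1.PodSpec{Overhead: v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("250m"),
//		v1.ResourceMemory: resource.MustParse("120Mi"),
//	}}}
//	overhead := m.convertOverheadToLinuxResources(pod)
//	// overhead now carries the "guaranteed" translation of 250m/120Mi.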
func (m *kubeGenericRuntimeManager) calculateSandboxResources(ctx context.Context, pod *v1.Pod) *runtimeapi.LinuxContainerResources {
logger := klog.FromContext(ctx)
opts := resourcehelper.PodResourcesOptions{
ExcludeOverhead: true,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
}
req := resourcehelper.PodRequests(pod, opts)
lim := resourcehelper.PodLimits(pod, opts)
var cpuRequest *resource.Quantity
if _, cpuRequestExists := req[v1.ResourceCPU]; cpuRequestExists {
cpuRequest = req.Cpu()
}
// If the pod has exclusive CPUs, the sandbox will not have its CFS quota enforced.
disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod)
logger.V(5).Info("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory(), disableCPUQuota)
}
func (m *kubeGenericRuntimeManager) applySandboxResources(ctx context.Context, pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
if config.Linux == nil {
return nil
}
config.Linux.Resources = m.calculateSandboxResources(ctx, pod)
config.Linux.Overhead = m.convertOverheadToLinuxResources(pod)
return nil
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"sync"
"time"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// terminationOrdering is used to enforce a termination ordering for sidecar containers. It sets up
// dependencies between sidecars and allows the pod termination process to wait until the grace period
// expires, or all dependent containers have finished terminating.
type terminationOrdering struct {
// terminated is a map from container name to a channel that, when closed,
// indicates that the container with that name has terminated
terminated map[string]chan struct{}
// prereqs is a map from container name to a list of channels that the container
// must wait on to ensure termination ordering
prereqs map[string][]chan struct{}
lock sync.Mutex
}
// newTerminationOrdering constructs a terminationOrdering based on the pod spec and the currently running containers.
func newTerminationOrdering(pod *v1.Pod, runningContainerNames []string) *terminationOrdering {
to := &terminationOrdering{
prereqs: map[string][]chan struct{}{},
terminated: map[string]chan struct{}{},
}
runningContainers := map[string]struct{}{}
for _, name := range runningContainerNames {
runningContainers[name] = struct{}{}
}
var mainContainerChannels []chan struct{}
// sidecar containers need to wait on main containers, so we create a channel per main container
// for them to wait on
for _, c := range pod.Spec.Containers {
channel := make(chan struct{})
to.terminated[c.Name] = channel
mainContainerChannels = append(mainContainerChannels, channel)
// if it's not a running container, pre-close the channel so nothing
// waits on it
if _, isRunning := runningContainers[c.Name]; !isRunning {
close(channel)
}
}
var previousSidecarName string
for i := range pod.Spec.InitContainers {
// get the init containers in reverse order
ic := pod.Spec.InitContainers[len(pod.Spec.InitContainers)-i-1]
channel := make(chan struct{})
to.terminated[ic.Name] = channel
// if it's not a running container, pre-close the channel so nothing
// waits on it
if _, isRunning := runningContainers[ic.Name]; !isRunning {
close(channel)
}
if podutil.IsRestartableInitContainer(&ic) {
// sidecars need to wait for all main containers to exit
to.prereqs[ic.Name] = append(to.prereqs[ic.Name], mainContainerChannels...)
// if there is a later sidecar, this container needs to wait for it to finish
if previousSidecarName != "" {
to.prereqs[ic.Name] = append(to.prereqs[ic.Name], to.terminated[previousSidecarName])
}
previousSidecarName = ic.Name
}
}
return to
}
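// Illustrative sketch (editor's example): for a pod with one regular
// container "app" and one restartable init (sidecar) container "proxy", the
// ordering built above makes the sidecar wait for the main container:
//
//	always := v1.ContainerRestartPolicyAlways
//	pod := &v1.Pod{Spec: v1.PodSpec{
//		Containers:     []v1.Container{{Name: "app"}},
//		InitContainers: []v1.Container{{Name: "proxy", RestartPolicy: &always}},
//	}}
//	to := newTerminationOrdering(pod, []string{"app", "proxy"})
//	// to.prereqs["proxy"] holds the channel that containerTerminated("app")
//	// closes, so waitForTurn("proxy", 30) blocks until "app" exits or the
//	// 30s grace period runs out.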
// waitForTurn waits until it is time for the container with the specified name to begin terminating, up until
// the specified grace period. If gracePeriod <= 0, there is no wait.
func (o *terminationOrdering) waitForTurn(name string, gracePeriod int64) float64 {
// if there is no grace period, we don't wait
if gracePeriod <= 0 {
return 0
}
start := time.Now()
remainingGrace := time.NewTimer(time.Duration(gracePeriod) * time.Second)
for _, c := range o.prereqs[name] {
select {
case <-c:
case <-remainingGrace.C:
// grace period expired, so immediately exit
return time.Since(start).Seconds()
}
}
return time.Since(start).Seconds()
}
// containerTerminated should be called once the container with the specified name has exited.
func (o *terminationOrdering) containerTerminated(name string) {
o.lock.Lock()
defer o.lock.Unlock()
if ch, ok := o.terminated[name]; ok {
close(ch)
delete(o.terminated, name)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"encoding/json"
"strconv"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/kubelet/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
const (
// TODO: change these label names to follow the Kubernetes format
podDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
containerHashLabel = "io.kubernetes.container.hash"
containerRestartCountLabel = "io.kubernetes.container.restartCount"
containerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
containerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
containerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
containerPortsLabel = "io.kubernetes.container.ports"
)
type labeledPodSandboxInfo struct {
// Labels from v1.Pod
Labels map[string]string
PodName string
PodNamespace string
PodUID kubetypes.UID
}
type annotatedPodSandboxInfo struct {
// Annotations from v1.Pod
Annotations map[string]string
}
type labeledContainerInfo struct {
ContainerName string
PodName string
PodNamespace string
PodUID kubetypes.UID
}
type annotatedContainerInfo struct {
Hash uint64
RestartCount int
PodDeletionGracePeriod *int64
PodTerminationGracePeriod *int64
TerminationMessagePath string
TerminationMessagePolicy v1.TerminationMessagePolicy
PreStopHandler *v1.LifecycleHandler
ContainerPorts []v1.ContainerPort
}
// newPodLabels creates pod labels from v1.Pod.
func newPodLabels(pod *v1.Pod) map[string]string {
labels := map[string]string{}
// Get labels from v1.Pod
for k, v := range pod.Labels {
labels[k] = v
}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
return labels
}
// newPodAnnotations creates pod annotations from v1.Pod.
func newPodAnnotations(pod *v1.Pod) map[string]string {
return pod.Annotations
}
// newContainerLabels creates container labels from v1.Container and v1.Pod.
func newContainerLabels(container *v1.Container, pod *v1.Pod) map[string]string {
labels := map[string]string{}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
labels[types.KubernetesContainerNameLabel] = container.Name
return labels
}
// newContainerAnnotations creates container annotations from v1.Container and v1.Pod.
func newContainerAnnotations(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, opts *kubecontainer.RunContainerOptions) map[string]string {
logger := klog.FromContext(ctx)
annotations := map[string]string{}
// Kubelet always overrides device plugin annotations if they are conflicting
for _, a := range opts.Annotations {
annotations[a.Name] = a.Value
}
annotations[containerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
annotations[containerRestartCountLabel] = strconv.Itoa(restartCount)
annotations[containerTerminationMessagePathLabel] = container.TerminationMessagePath
annotations[containerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)
if pod.DeletionGracePeriodSeconds != nil {
annotations[podDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
}
if pod.Spec.TerminationGracePeriodSeconds != nil {
annotations[podTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
}
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
// Use JSON encoding so that the PreStop handler object is readable after being written as an annotation
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
if err != nil {
logger.Error(err, "Unable to marshal lifecycle PreStop handler for container", "containerName", container.Name, "pod", klog.KObj(pod))
} else {
annotations[containerPreStopHandlerLabel] = string(rawPreStop)
}
}
if len(container.Ports) > 0 {
rawContainerPorts, err := json.Marshal(container.Ports)
if err != nil {
logger.Error(err, "Unable to marshal container ports for container", "containerName", container.Name, "pod", klog.KObj(pod))
} else {
annotations[containerPortsLabel] = string(rawContainerPorts)
}
}
return annotations
}
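// Illustrative sketch of the annotation round-trip (editor's example; ctx,
// container, pod, and opts are assumed in scope): the hash is written in
// base 16 here and parsed back with ParseUint(..., 16, 64) in
// getContainerInfoFromAnnotations below, so the two sides must stay in sync:
//
//	annotations := newContainerAnnotations(ctx, container, pod, 3, opts)
//	info := getContainerInfoFromAnnotations(ctx, annotations)
//	// info.RestartCount == 3
//	// info.Hash == kubecontainer.HashContainer(container)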
// getPodSandboxInfoFromLabels gets labeledPodSandboxInfo from labels.
func getPodSandboxInfoFromLabels(ctx context.Context, labels map[string]string) *labeledPodSandboxInfo {
logger := klog.FromContext(ctx)
podSandboxInfo := &labeledPodSandboxInfo{
Labels: make(map[string]string),
PodName: getStringValueFromLabel(logger, labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(logger, labels, types.KubernetesPodNamespaceLabel),
PodUID: kubetypes.UID(getStringValueFromLabel(logger, labels, types.KubernetesPodUIDLabel)),
}
// Keep only the labels that came from the v1.Pod.
for k, v := range labels {
if k != types.KubernetesPodNameLabel && k != types.KubernetesPodNamespaceLabel && k != types.KubernetesPodUIDLabel {
podSandboxInfo.Labels[k] = v
}
}
return podSandboxInfo
}
// getPodSandboxInfoFromAnnotations gets annotatedPodSandboxInfo from annotations.
func getPodSandboxInfoFromAnnotations(annotations map[string]string) *annotatedPodSandboxInfo {
return &annotatedPodSandboxInfo{
Annotations: annotations,
}
}
// getContainerInfoFromLabels gets labeledContainerInfo from labels.
func getContainerInfoFromLabels(ctx context.Context, labels map[string]string) *labeledContainerInfo {
logger := klog.FromContext(ctx)
return &labeledContainerInfo{
PodName: getStringValueFromLabel(logger, labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(logger, labels, types.KubernetesPodNamespaceLabel),
PodUID: kubetypes.UID(getStringValueFromLabel(logger, labels, types.KubernetesPodUIDLabel)),
ContainerName: getStringValueFromLabel(logger, labels, types.KubernetesContainerNameLabel),
}
}
// getContainerInfoFromAnnotations gets annotatedContainerInfo from annotations.
func getContainerInfoFromAnnotations(ctx context.Context, annotations map[string]string) *annotatedContainerInfo {
logger := klog.FromContext(ctx)
var err error
containerInfo := &annotatedContainerInfo{
TerminationMessagePath: getStringValueFromLabel(logger, annotations, containerTerminationMessagePathLabel),
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(logger, annotations, containerTerminationMessagePolicyLabel)),
}
if containerInfo.Hash, err = getUint64ValueFromLabel(ctx, annotations, containerHashLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerHashLabel, "annotations", annotations)
}
if containerInfo.RestartCount, err = getIntValueFromLabel(logger, annotations, containerRestartCountLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerRestartCountLabel, "annotations", annotations)
}
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(logger, annotations, podDeletionGracePeriodLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", podDeletionGracePeriodLabel, "annotations", annotations)
}
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(logger, annotations, podTerminationGracePeriodLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", podTerminationGracePeriodLabel, "annotations", annotations)
}
preStopHandler := &v1.LifecycleHandler{}
if found, err := getJSONObjectFromLabel(logger, annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerPreStopHandlerLabel, "annotations", annotations)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}
containerPorts := []v1.ContainerPort{}
if found, err := getJSONObjectFromLabel(logger, annotations, containerPortsLabel, &containerPorts); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerPortsLabel, "annotations", annotations)
} else if found {
containerInfo.ContainerPorts = containerPorts
}
return containerInfo
}
func getStringValueFromLabel(logger klog.Logger, labels map[string]string, label string) string {
if value, found := labels[label]; found {
return value
}
// Do not report an error, because many old containers may not have this label.
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Return the empty string "" for these containers; the caller will obtain the value some other way.
return ""
}
func getIntValueFromLabel(logger klog.Logger, labels map[string]string, label string) (int, error) {
if strValue, found := labels[label]; found {
intValue, err := strconv.Atoi(strValue)
if err != nil {
// This really should not happen. Just return 0 to handle this abnormal case.
return 0, err
}
return intValue, nil
}
// Do not report an error, because many old containers may not have this label.
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Just default the value to 0.
return 0, nil
}
func getUint64ValueFromLabel(ctx context.Context, labels map[string]string, label string) (uint64, error) {
logger := klog.FromContext(ctx)
if strValue, found := labels[label]; found {
intValue, err := strconv.ParseUint(strValue, 16, 64)
if err != nil {
// This really should not happen. Just return 0 to handle this abnormal case.
return 0, err
}
return intValue, nil
}
// Do not report an error, because many old containers may not have this label.
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Just default the value to 0.
return 0, nil
}
func getInt64PointerFromLabel(logger klog.Logger, labels map[string]string, label string) (*int64, error) {
if strValue, found := labels[label]; found {
int64Value, err := strconv.ParseInt(strValue, 10, 64)
if err != nil {
return nil, err
}
return &int64Value, nil
}
// If the label is not found, return a nil pointer.
logger.V(4).Info("Label not found", "label", label)
return nil, nil
}
// getJSONObjectFromLabel returns a bool value indicating whether an object is found.
func getJSONObjectFromLabel(logger klog.Logger, labels map[string]string, label string, value interface{}) (bool, error) {
if strValue, found := labels[label]; found {
err := json.Unmarshal([]byte(strValue), value)
return found, err
}
// If the label is not found, report it as not found.
logger.V(4).Info("Label not found", "label", label)
return false, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
"path/filepath"
"strings"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// This file implements the functions that are needed for backward
// compatibility. Therefore, it imports various kubernetes packages
// directly.
const (
// legacyContainerLogsDir is the legacy location of container logs. It is the same with
// kubelet.containerLogsDir.
legacyContainerLogsDir = "/var/log/containers"
// legacyLogSuffix is the legacy log suffix.
legacyLogSuffix = "log"
ext4MaxFileNameLen = 255
)
// legacyLogSymlink composes the legacy container log path. It is only used for legacy cluster
// logging support.
func legacyLogSymlink(containerID string, containerName, podName, podNamespace string) string {
return logSymlink(legacyContainerLogsDir, kubecontainer.BuildPodFullName(podName, podNamespace),
containerName, containerID)
}
// getContainerIDFromLegacyLogSymlink returns an error if the container ID cannot be parsed.
func getContainerIDFromLegacyLogSymlink(logSymlink string) (string, error) {
parts := strings.Split(logSymlink, "-")
// strings.Split always returns at least one element, so len(parts) == 0 can
// never happen; require at least two parts to ensure a separator was present.
if len(parts) < 2 {
return "", fmt.Errorf("unable to find separator in %q", logSymlink)
}
containerIDWithSuffix := parts[len(parts)-1]
suffix := fmt.Sprintf(".%s", legacyLogSuffix)
if !strings.HasSuffix(containerIDWithSuffix, suffix) {
return "", fmt.Errorf("%q doesn't end with %q", logSymlink, suffix)
}
containerIDWithoutSuffix := strings.TrimSuffix(containerIDWithSuffix, suffix)
// A container can be retrieved with a container ID as short as 6 characters.
if len(containerIDWithoutSuffix) < 6 {
return "", fmt.Errorf("container Id %q is too short", containerIDWithoutSuffix)
}
return containerIDWithoutSuffix, nil
}
func logSymlink(containerLogsDir, podFullName, containerName, containerID string) string {
suffix := fmt.Sprintf(".%s", legacyLogSuffix)
logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, containerID)
// Length of a filename cannot exceed 255 characters in ext4 on Linux.
if len(logPath) > ext4MaxFileNameLen-len(suffix) {
logPath = logPath[:ext4MaxFileNameLen-len(suffix)]
}
return filepath.Join(containerLogsDir, logPath+suffix)
}
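// Illustrative sketch (editor's example): for pod "nginx" in namespace
// "default", container "web", and container ID "abcdef123456":
//
//	legacyLogSymlink("abcdef123456", "web", "nginx", "default")
//	// => "/var/log/containers/nginx_default_web-abcdef123456.log"
//
// getContainerIDFromLegacyLogSymlink reverses this by taking the substring
// after the last '-' and trimming the ".log" suffix, recovering
// "abcdef123456".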
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"fmt"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/securitycontext"
)
// determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container.
func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) (*runtimeapi.LinuxContainerSecurityContext, error) {
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
synthesized := convertToRuntimeSecurityContext(effectiveSc)
if synthesized == nil {
synthesized = &runtimeapi.LinuxContainerSecurityContext{
MaskedPaths: securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount),
ReadonlyPaths: securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount),
}
}
var err error
synthesized.Seccomp, err = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault)
if err != nil {
return nil, err
}
// set ApparmorProfile.
synthesized.Apparmor, synthesized.ApparmorProfile, err = getAppArmorProfile(pod, container)
if err != nil {
return nil, err
}
// set RunAsUser.
if synthesized.RunAsUser == nil {
if uid != nil {
synthesized.RunAsUser = &runtimeapi.Int64Value{Value: *uid}
}
synthesized.RunAsUsername = username
}
// set namespace options and supplemental groups.
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper, m.runtimeClassManager)
if err != nil {
return nil, err
}
synthesized.NamespaceOptions = namespaceOptions
podSc := pod.Spec.SecurityContext
if podSc != nil {
if podSc.FSGroup != nil {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, int64(*podSc.FSGroup))
}
if podSc.SupplementalGroups != nil {
for _, sg := range podSc.SupplementalGroups {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, int64(sg))
}
}
if podSc.SupplementalGroupsPolicy != nil {
policyValue, ok := runtimeapi.SupplementalGroupsPolicy_value[string(*podSc.SupplementalGroupsPolicy)]
if !ok {
return nil, fmt.Errorf("unsupported supplementalGroupsPolicy: %s", string(*podSc.SupplementalGroupsPolicy))
}
synthesized.SupplementalGroupsPolicy = runtimeapi.SupplementalGroupsPolicy(policyValue)
}
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, groups...)
}
synthesized.NoNewPrivs = securitycontext.AddNoNewPrivileges(effectiveSc)
synthesized.MaskedPaths = securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount)
synthesized.ReadonlyPaths = securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount)
return synthesized, nil
}
// convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext.
func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runtimeapi.LinuxContainerSecurityContext {
if securityContext == nil {
return nil
}
sc := &runtimeapi.LinuxContainerSecurityContext{
Capabilities: convertToRuntimeCapabilities(securityContext.Capabilities),
SelinuxOptions: convertToRuntimeSELinuxOption(securityContext.SELinuxOptions),
}
if securityContext.RunAsUser != nil {
sc.RunAsUser = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsUser)}
}
if securityContext.RunAsGroup != nil {
sc.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsGroup)}
}
if securityContext.Privileged != nil {
sc.Privileged = *securityContext.Privileged
}
if securityContext.ReadOnlyRootFilesystem != nil {
sc.ReadonlyRootfs = *securityContext.ReadOnlyRootFilesystem
}
return sc
}
// convertToRuntimeSELinuxOption converts v1.SELinuxOptions to runtimeapi.SELinuxOption.
func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxOption {
if opts == nil {
return nil
}
return &runtimeapi.SELinuxOption{
User: opts.User,
Role: opts.Role,
Type: opts.Type,
Level: opts.Level,
}
}
// convertToRuntimeCapabilities converts v1.Capabilities to runtimeapi.Capability.
func convertToRuntimeCapabilities(opts *v1.Capabilities) *runtimeapi.Capability {
if opts == nil {
return nil
}
capabilities := &runtimeapi.Capability{
AddCapabilities: make([]string, len(opts.Add)),
DropCapabilities: make([]string, len(opts.Drop)),
}
for index, value := range opts.Add {
capabilities.AddCapabilities[index] = string(value)
}
for index, value := range opts.Drop {
capabilities.DropCapabilities[index] = string(value)
}
return capabilities
}
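// Illustrative sketch (editor's example): adding NET_ADMIN and dropping ALL
// maps one-to-one onto the CRI Capability message:
//
//	caps := convertToRuntimeCapabilities(&v1.Capabilities{
//		Add:  []v1.Capability{"NET_ADMIN"},
//		Drop: []v1.Capability{"ALL"},
//	})
//	// caps.AddCapabilities  == []string{"NET_ADMIN"}
//	// caps.DropCapabilities == []string{"ALL"}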
//go:build !windows
// +build !windows
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"math"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/securitycontext"
)
// verifyRunAsNonRoot verifies that the container will not run as root when RunAsNonRoot is required.
func verifyRunAsNonRoot(ctx context.Context, pod *v1.Pod, container *v1.Container, uid *int64, username string) error {
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
// If the option is not set, or if running as root is allowed, return nil.
if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil || !*effectiveSc.RunAsNonRoot {
return nil
}
if effectiveSc.RunAsUser != nil {
if *effectiveSc.RunAsUser == 0 {
return fmt.Errorf("container's runAsUser breaks non-root policy (pod: %q, container: %s)", format.Pod(pod), container.Name)
}
return nil
}
switch {
case uid == nil && len(username) > 0:
return fmt.Errorf("container has runAsNonRoot and image has non-numeric user (%s), cannot verify user is non-root (pod: %q, container: %s)", username, format.Pod(pod), container.Name)
case uid != nil:
if *uid == 0 {
return fmt.Errorf("container has runAsNonRoot and image will run as root (pod: %q, container: %s)", format.Pod(pod), container.Name)
}
if errs := validation.IsValidUserID(*uid); len(errs) > 0 {
return fmt.Errorf(
"container has runAsNonRoot and image has an invalid user id (%d). (Must be 1-(%d)): %s (pod: %q, container: %s)",
*uid,
math.MaxInt32,
strings.Join(errs, "; "),
format.Pod(pod),
container.Name,
)
}
return nil
default:
return nil
}
}
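// Summary of the decision above (editor's note): an explicit runAsUser wins
// over the image user, and a non-numeric image user cannot be verified:
//
//	runAsNonRoot unset or false                  -> nil
//	runAsNonRoot=true, runAsUser=0               -> error (explicit root)
//	runAsNonRoot=true, uid=nil, username="root"  -> error (cannot verify)
//	runAsNonRoot=true, uid=1000                  -> nil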
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// PodSandboxChanged checks whether the spec of the pod has changed and returns
// (changed, new attempt, original sandboxID if it exists).
func PodSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
ctx := context.TODO() // This context will be passed as parameter in the future
logger := klog.FromContext(ctx)
if len(podStatus.SandboxStatuses) == 0 {
logger.V(2).Info("No sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
return true, 0, ""
}
readySandboxCount := 0
for _, s := range podStatus.SandboxStatuses {
if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
readySandboxCount++
}
}
// A new sandbox is needed when readySandboxCount > 1 or when the ready sandbox is not the latest one.
sandboxStatus := podStatus.SandboxStatuses[0]
if readySandboxCount > 1 {
logger.V(2).Info("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
logger.V(2).Info("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
// A new sandbox is needed when the network namespace has changed.
if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != NetworkNamespaceForPod(pod) {
logger.V(2).Info("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, ""
}
// A new sandbox is needed when the sandbox does not have an IP address.
if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network != nil && sandboxStatus.Network.Ip == "" {
logger.V(2).Info("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
}
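// Illustrative sketch (editor's example; pod is assumed in scope): a pod
// whose only sandbox is NOTREADY triggers a restart with an incremented
// attempt number:
//
//	podStatus := &kubecontainer.PodStatus{SandboxStatuses: []*runtimeapi.PodSandboxStatus{{
//		Id:       "sandbox-1",
//		State:    runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
//		Metadata: &runtimeapi.PodSandboxMetadata{Attempt: 0},
//	}}}
//	changed, attempt, id := PodSandboxChanged(pod, podStatus)
//	// changed == true, attempt == 1, id == "sandbox-1"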
// IpcNamespaceForPod returns the runtimeapi.NamespaceMode
// for the IPC namespace of a pod
func IpcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
if pod != nil && pod.Spec.HostIPC {
return runtimeapi.NamespaceMode_NODE
}
return runtimeapi.NamespaceMode_POD
}
// NetworkNamespaceForPod returns the runtimeapi.NamespaceMode
// for the network namespace of a pod
func NetworkNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
if pod != nil && pod.Spec.HostNetwork {
return runtimeapi.NamespaceMode_NODE
}
return runtimeapi.NamespaceMode_POD
}
// PidNamespaceForPod returns the runtimeapi.NamespaceMode
// for the PID namespace of a pod
func PidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
if pod != nil {
if pod.Spec.HostPID {
return runtimeapi.NamespaceMode_NODE
}
if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace {
return runtimeapi.NamespaceMode_POD
}
}
// Note that PID does not default to the zero value for v1.Pod
return runtimeapi.NamespaceMode_CONTAINER
}
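// Summary of the defaults above for a nil pod (editor's note):
//
//	IpcNamespaceForPod(nil)     == runtimeapi.NamespaceMode_POD
//	NetworkNamespaceForPod(nil) == runtimeapi.NamespaceMode_POD
//	PidNamespaceForPod(nil)     == runtimeapi.NamespaceMode_CONTAINER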
// RuntimeHandlerResolver is implemented by *runtimeclass.Manager.
type RuntimeHandlerResolver interface {
LookupRuntimeHandler(runtimeClassName *string) (string, error)
}
// NamespacesForPod returns the runtimeapi.NamespaceOption for a given pod.
// An empty or nil pod can be used to get the namespace defaults for v1.Pod.
func NamespacesForPod(pod *v1.Pod, runtimeHelper kubecontainer.RuntimeHelper, rcManager RuntimeHandlerResolver) (*runtimeapi.NamespaceOption, error) {
runtimeHandler := ""
if pod != nil && rcManager != nil {
var err error
runtimeHandler, err = rcManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
return nil, err
}
}
userNs, err := runtimeHelper.GetOrCreateUserNamespaceMappings(pod, runtimeHandler)
if err != nil {
return nil, err
}
return &runtimeapi.NamespaceOption{
Ipc: IpcNamespaceForPod(pod),
Network: NetworkNamespaceForPod(pod),
Pid: PidNamespaceForPod(pod),
UsernsOptions: userNs,
}, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
v1 "k8s.io/api/core/v1"
)
// AdmissionFailureHandlerStub is an AdmissionFailureHandler that does not perform any handling of admission failure.
// It simply passes the failure on.
type AdmissionFailureHandlerStub struct{}
var _ AdmissionFailureHandler = &AdmissionFailureHandlerStub{}
// NewAdmissionFailureHandlerStub returns an instance of AdmissionFailureHandlerStub.
func NewAdmissionFailureHandlerStub() *AdmissionFailureHandlerStub {
return &AdmissionFailureHandlerStub{}
}
// HandleAdmissionFailure simply passes admission rejection on, with no special handling.
func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(ctx context.Context, admitPod *v1.Pod, failureReasons []PredicateFailureReason) ([]PredicateFailureReason, error) {
return failureReasons, nil
}
//go:build linux
// +build linux
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/features"
)
func isPodLevelResourcesSupported(pod *v1.Pod) PodAdmitResult {
podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
if resource.IsPodLevelResourcesSet(pod) && !podLevelResourcesEnabled {
return PodAdmitResult{
Admit: false,
Reason: PodLevelResourcesNotAdmittedReason,
Message: "PodLevelResources feature gate is disabled",
}
}
return PodAdmitResult{Admit: true}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
httpprobe "k8s.io/kubernetes/pkg/probe/http"
"k8s.io/kubernetes/pkg/security/apparmor"
)
const (
maxRespBodyLength = 10 * 1 << 10 // 10KB
AppArmorNotAdmittedReason = "AppArmor"
PodLevelResourcesNotAdmittedReason = "PodLevelResourcesNotSupported"
)
type handlerRunner struct {
httpDoer kubetypes.HTTPDoer
commandRunner kubecontainer.CommandRunner
containerManager podStatusProvider
eventRecorder record.EventRecorder
}
type podStatusProvider interface {
GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
}
// NewHandlerRunner returns a configured lifecycle handler for a container.
func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.CommandRunner, containerManager podStatusProvider, eventRecorder record.EventRecorder) kubecontainer.HandlerRunner {
return &handlerRunner{
httpDoer: httpDoer,
commandRunner: commandRunner,
containerManager: containerManager,
eventRecorder: eventRecorder,
}
}
func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) {
logger := klog.FromContext(ctx)
switch {
case handler.Exec != nil:
var msg string
// TODO(tallclair): Pass a proper timeout value.
output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0)
if err != nil {
msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output))
logger.V(1).Error(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output))
}
return msg, err
case handler.HTTPGet != nil:
err := hr.runHTTPHandler(ctx, pod, container, handler, hr.eventRecorder)
var msg string
if err != nil {
msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err)
logger.V(1).Error(err, "HTTP lifecycle hook for Container in Pod failed", "path", handler.HTTPGet.Path, "containerName", container.Name, "pod", klog.KObj(pod))
}
return msg, err
case handler.Sleep != nil:
err := hr.runSleepHandler(ctx, handler.Sleep.Seconds)
var msg string
if err != nil {
msg = fmt.Sprintf("Sleep lifecycle hook (%d) for Container %q in Pod %q failed - error: %v", handler.Sleep.Seconds, container.Name, format.Pod(pod), err)
logger.V(1).Error(err, "Sleep lifecycle hook for Container in Pod failed", "sleepSeconds", handler.Sleep.Seconds, "containerName", container.Name, "pod", klog.KObj(pod))
}
return msg, err
default:
err := fmt.Errorf("invalid handler: %v", handler)
msg := fmt.Sprintf("Cannot run handler: %v", err)
logger.Error(err, "Cannot run handler")
return msg, err
}
}
// resolvePort attempts to turn an IntOrString port reference into a concrete port number.
// If portReference has an int value, it is treated as a literal, and simply returns that value.
// If portReference is a string, an attempt is first made to parse it as an integer. If that fails,
// an attempt is made to find a port with the same name in the container spec.
// If a port with the same name is found, its ContainerPort value is returned. If no matching
// port is found, an error is returned.
func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int, error) {
if portReference.Type == intstr.Int {
return portReference.IntValue(), nil
}
portName := portReference.StrVal
port, err := strconv.Atoi(portName)
if err == nil {
return port, nil
}
for _, portSpec := range container.Ports {
if portSpec.Name == portName {
return int(portSpec.ContainerPort), nil
}
}
return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container)
}
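// Illustrative sketch (editor's example): a literal port is returned as-is, a
// numeric string is parsed, and any other string falls back to the
// container's named ports:
//
//	c := &v1.Container{Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}}}
//	resolvePort(intstr.FromInt32(80), c)       // 80, nil
//	resolvePort(intstr.FromString("8081"), c)  // 8081, nil
//	resolvePort(intstr.FromString("http"), c)  // 8080, nil
//	resolvePort(intstr.FromString("https"), c) // -1, error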
func (hr *handlerRunner) runSleepHandler(ctx context.Context, seconds int64) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) {
return nil
}
c := time.After(time.Duration(seconds) * time.Second)
select {
case <-ctx.Done():
// unexpected termination
metrics.LifecycleHandlerSleepTerminated.Inc()
return fmt.Errorf("container terminated before sleep hook finished")
case <-c:
return nil
}
}
func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error {
logger := klog.FromContext(ctx)
host := handler.HTTPGet.Host
podIP := host
if len(host) == 0 {
status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil {
logger.Error(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod))
return err
}
if len(status.IPs) == 0 {
return fmt.Errorf("failed to find networking container: %v", status)
}
host = status.IPs[0]
podIP = host
}
req, err := httpprobe.NewRequestForHTTPGetAction(handler.HTTPGet, container, podIP, "lifecycle")
if err != nil {
return err
}
resp, err := hr.httpDoer.Do(req)
discardHTTPRespBody(resp)
if isHTTPResponseError(err) {
logger.V(1).Error(err, "HTTPS request to lifecycle hook got HTTP response, retrying with HTTP.", "pod", klog.KObj(pod), "host", req.URL.Host)
req := req.Clone(ctx)
req.URL.Scheme = "http"
req.Header.Del("Authorization")
resp, httpErr := hr.httpDoer.Do(req)
// If the HTTP fallback succeeded, clear the original error.
if httpErr == nil {
metrics.LifecycleHandlerHTTPFallbacks.Inc()
if eventRecorder != nil {
// report the fallback with an event
eventRecorder.Event(pod, v1.EventTypeWarning, "LifecycleHTTPFallback", fmt.Sprintf("request to HTTPS lifecycle hook %s got HTTP response, retry with HTTP succeeded", req.URL.Host))
}
err = nil
}
discardHTTPRespBody(resp)
}
return err
}
func discardHTTPRespBody(resp *http.Response) {
if resp == nil {
return
}
// Ensure the response body is fully read and closed
// before we reconnect, so that we reuse the same TCP
// connection.
defer resp.Body.Close()
if resp.ContentLength <= maxRespBodyLength {
io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxRespBodyLength})
}
}
// NewAppArmorAdmitHandler returns a PodAdmitHandler which is used to evaluate
// if a pod can be admitted from the perspective of AppArmor.
func NewAppArmorAdmitHandler(validator apparmor.Validator) PodAdmitHandler {
return &appArmorAdmitHandler{
Validator: validator,
}
}
type appArmorAdmitHandler struct {
apparmor.Validator
}
func (a *appArmorAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
// If the pod is already running or terminated, no need to recheck AppArmor.
if attrs.Pod.Status.Phase != v1.PodPending {
return PodAdmitResult{Admit: true}
}
err := a.Validate(attrs.Pod)
if err == nil {
return PodAdmitResult{Admit: true}
}
return PodAdmitResult{
Admit: false,
Reason: AppArmorNotAdmittedReason,
Message: fmt.Sprintf("Cannot enforce AppArmor: %v", err),
}
}
func isHTTPResponseError(err error) bool {
if err == nil {
return false
}
urlErr := &url.Error{}
if !errors.As(err, &urlErr) {
return false
}
return strings.Contains(urlErr.Err.Error(), "server gave HTTP response to HTTPS client")
}
// NewPodFeaturesAdmitHandler returns a PodAdmitHandler which is used to evaluate
// if a pod can be admitted from the perspective of pod features compatibility.
func NewPodFeaturesAdmitHandler() PodAdmitHandler {
return &podFeaturesAdmitHandler{}
}
type podFeaturesAdmitHandler struct{}
func (h *podFeaturesAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
return isPodLevelResourcesSupported(attrs.Pod)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import "k8s.io/api/core/v1"
// PodAdmitAttributes is the context for a pod admission decision.
// The member fields of this struct should never be mutated.
type PodAdmitAttributes struct {
// the pod to evaluate for admission
Pod *v1.Pod
// all pods bound to the kubelet excluding the pod being evaluated
OtherPods []*v1.Pod
}
// PodAdmitResult provides the result of a pod admission decision.
type PodAdmitResult struct {
// if true, the pod should be admitted.
Admit bool
// a brief single-word reason why the pod could not be admitted.
Reason string
// a brief message explaining why the pod could not be admitted.
Message string
}
// PodAdmitHandler is notified during pod admission.
type PodAdmitHandler interface {
// Admit evaluates if a pod can be admitted.
Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
// PodAdmitTarget maintains a list of handlers to invoke.
type PodAdmitTarget interface {
// AddPodAdmitHandler adds the specified handler.
AddPodAdmitHandler(a PodAdmitHandler)
}
// PodSyncLoopHandler is invoked during each sync loop iteration.
type PodSyncLoopHandler interface {
// ShouldSync returns true if the pod needs to be synced.
// This operation must return immediately as it is called for each pod.
// The provided pod should never be modified.
ShouldSync(pod *v1.Pod) bool
}
// PodSyncLoopTarget maintains a list of handlers to invoke during the pod sync loop.
type PodSyncLoopTarget interface {
// AddPodSyncLoopHandler adds the specified handler.
AddPodSyncLoopHandler(a PodSyncLoopHandler)
}
// ShouldEvictResponse provides the result of a should evict request.
type ShouldEvictResponse struct {
// if true, the pod should be evicted.
Evict bool
// a brief CamelCase reason why the pod should be evicted.
Reason string
// a brief message why the pod should be evicted.
Message string
}
// PodSyncHandler is invoked during each sync pod operation.
type PodSyncHandler interface {
// ShouldEvict is invoked during each sync pod operation to determine
// if the pod should be evicted from the kubelet. If so, the pod status
// is updated to mark its phase as failed with the provided reason and message,
// and the pod is immediately killed.
// This operation must return immediately as it is called for each sync pod.
// The provided pod should never be modified.
ShouldEvict(pod *v1.Pod) ShouldEvictResponse
}
// PodSyncTarget maintains a list of handlers to invoke during pod sync.
type PodSyncTarget interface {
// AddPodSyncHandler adds the specified handler.
AddPodSyncHandler(a PodSyncHandler)
}
// PodLifecycleTarget groups a set of lifecycle interfaces for convenience.
type PodLifecycleTarget interface {
PodAdmitTarget
PodSyncLoopTarget
PodSyncTarget
}
// PodAdmitHandlers maintains a list of handlers to invoke during pod admission.
type PodAdmitHandlers []PodAdmitHandler
// AddPodAdmitHandler adds the specified handler.
func (handlers *PodAdmitHandlers) AddPodAdmitHandler(a PodAdmitHandler) {
*handlers = append(*handlers, a)
}
// PodSyncLoopHandlers maintains a list of handlers to invoke during the pod sync loop.
type PodSyncLoopHandlers []PodSyncLoopHandler
// AddPodSyncLoopHandler adds the specified handler.
func (handlers *PodSyncLoopHandlers) AddPodSyncLoopHandler(a PodSyncLoopHandler) {
*handlers = append(*handlers, a)
}
// PodSyncHandlers maintains a list of handlers to invoke during pod sync.
type PodSyncHandlers []PodSyncHandler
// AddPodSyncHandler adds the specified handler.
func (handlers *PodSyncHandlers) AddPodSyncHandler(a PodSyncHandler) {
*handlers = append(*handlers, a)
}
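// A minimal sketch (not part of this package) of implementing and registering
// a PodAdmitHandler; the handler name and its always-admit behavior are
// illustrative assumptions, not a real kubelet handler:
//
//	type alwaysAdmitHandler struct{}
//
//	func (alwaysAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
//		// A real handler would inspect attrs.Pod and attrs.OtherPods here.
//		return PodAdmitResult{Admit: true}
//	}
//
//	handlers := PodAdmitHandlers{}
//	handlers.AddPodAdmitHandler(alwaysAdmitHandler{})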
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"fmt"
"runtime"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
"k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/scheduler"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/utils/ptr"
)
const (
// PodOSSelectorNodeLabelDoesNotMatch is used to denote that the pod was
// rejected admission to the node because the pod's node selector
// corresponding to kubernetes.io/os label didn't match the node label.
PodOSSelectorNodeLabelDoesNotMatch = "PodOSSelectorNodeLabelDoesNotMatch"
// PodOSNotSupported is used to denote that the pod was rejected admission
// to the node because the pod's OS field didn't match the node OS.
PodOSNotSupported = "PodOSNotSupported"
// InvalidNodeInfo is used to denote that the pod was rejected admission
// to the node because the kubelet was unable to retrieve the node info.
InvalidNodeInfo = "InvalidNodeInfo"
// InitContainerRestartPolicyForbidden is used to denote that the pod was
// rejected admission to the node because it uses a restart policy other
// than Always for some of its init containers.
InitContainerRestartPolicyForbidden = "InitContainerRestartPolicyForbidden"
// SupplementalGroupsPolicyNotSupported is used to denote that the pod was
// rejected admission to the node because the node does not support
// the pod's SupplementalGroupsPolicy.
SupplementalGroupsPolicyNotSupported = "SupplementalGroupsPolicyNotSupported"
// UnexpectedAdmissionError is used to denote that the pod was rejected
// admission to the node because of an error during admission that could not
// be categorized.
UnexpectedAdmissionError = "UnexpectedAdmissionError"
// UnknownReason is used to denote that the pod was rejected admission to
// the node because a predicate failed for a reason that could not be
// determined.
UnknownReason = "UnknownReason"
// UnexpectedPredicateFailureType is used to denote that the pod was
// rejected admission to the node because a predicate returned a reason
// object that was not an InsufficientResourceError or a PredicateFailureError.
UnexpectedPredicateFailureType = "UnexpectedPredicateFailureType"
// InsufficientResourcePrefix is the prefix for the admission reason when the
// kubelet rejects a pod due to insufficient available resources.
InsufficientResourcePrefix = "OutOf"
// These reasons are used to denote that the pod was rejected admission
// to the node because there are not enough resources to run it.
OutOfCPU = "OutOfcpu"
OutOfMemory = "OutOfmemory"
OutOfEphemeralStorage = "OutOfephemeral-storage"
OutOfPods = "OutOfpods"
)
type getNodeAnyWayFuncType func() (*v1.Node, error)
type pluginResourceUpdateFuncType func(*schedulerframework.NodeInfo, *PodAdmitAttributes) error
// AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
// This allows for the graceful handling of pod admission failure.
type AdmissionFailureHandler interface {
HandleAdmissionFailure(ctx context.Context, admitPod *v1.Pod, failureReasons []PredicateFailureReason) ([]PredicateFailureReason, error)
}
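// A minimal sketch of an AdmissionFailureHandler, assuming a pass-through
// policy that keeps every failure reason unchanged (real handlers may resolve
// some failures, e.g. by evicting lower-priority pods, and return fewer
// reasons); the type name is an illustrative assumption:
//
//	type passthroughFailureHandler struct{}
//
//	func (passthroughFailureHandler) HandleAdmissionFailure(ctx context.Context, admitPod *v1.Pod, failureReasons []PredicateFailureReason) ([]PredicateFailureReason, error) {
//		return failureReasons, nil
//	}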
type predicateAdmitHandler struct {
getNodeAnyWayFunc getNodeAnyWayFuncType
pluginResourceUpdateFunc pluginResourceUpdateFuncType
admissionFailureHandler AdmissionFailureHandler
}
var _ PodAdmitHandler = &predicateAdmitHandler{}
// NewPredicateAdmitHandler returns a PodAdmitHandler which is used to evaluate
// if a pod can be admitted from the perspective of predicates.
func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admissionFailureHandler AdmissionFailureHandler, pluginResourceUpdateFunc pluginResourceUpdateFuncType) PodAdmitHandler {
return &predicateAdmitHandler{
getNodeAnyWayFunc,
pluginResourceUpdateFunc,
admissionFailureHandler,
}
}
func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
// TODO: pass context to Admit when migrating this component to
// contextual logging
ctx := context.TODO()
logger := klog.FromContext(ctx)
node, err := w.getNodeAnyWayFunc()
if err != nil {
logger.Error(err, "Cannot get Node info")
return PodAdmitResult{
Admit: false,
Reason: InvalidNodeInfo,
Message: "Kubelet cannot get node info.",
}
}
admitPod := attrs.Pod
// Perform the checks that preemption will not help with first, to avoid meaningless pod eviction.
if rejectPodAdmissionBasedOnOSSelector(admitPod, node) {
return PodAdmitResult{
Admit: false,
Reason: PodOSSelectorNodeLabelDoesNotMatch,
Message: "Failed to admit pod as the `kubernetes.io/os` label doesn't match node label",
}
}
if rejectPodAdmissionBasedOnOSField(admitPod) {
return PodAdmitResult{
Admit: false,
Reason: PodOSNotSupported,
Message: "Failed to admit pod as the OS field doesn't match node OS",
}
}
if rejectPodAdmissionBasedOnSupplementalGroupsPolicy(admitPod, node) {
message := fmt.Sprintf("SupplementalGroupsPolicy=%s is not supported in this node", v1.SupplementalGroupsPolicyStrict)
logger.Info("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
return PodAdmitResult{
Admit: false,
Reason: SupplementalGroupsPolicyNotSupported,
Message: message,
}
}
pods := attrs.OtherPods
nodeInfo := schedulerframework.NewNodeInfo(pods...)
nodeInfo.SetNode(node)
// Ensure the node has enough plugin resources to satisfy those required by the pod.
if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err)
logger.Info("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
return PodAdmitResult{
Admit: false,
Reason: UnexpectedAdmissionError,
Message: message,
}
}
// Remove the requests of the extended resources that are missing in the
// node info. This is required to support cluster-level resources, which
// are extended resources unknown to nodes, and also extended resources
// backed by DRA.
//
// Caveat: If a pod was manually bound to a node (e.g., static pod) where a
// node-level extended resource it requires is not found, then kubelet will
// not fail admission while it should. This issue will be addressed with
// the Resource Class API in the future.
podWithoutMissingExtendedResources := removeMissingExtendedResources(admitPod, nodeInfo)
reasons := generalFilter(podWithoutMissingExtendedResources, nodeInfo)
fit := len(reasons) == 0
if !fit {
reasons, err = w.admissionFailureHandler.HandleAdmissionFailure(ctx, admitPod, reasons)
fit = len(reasons) == 0 && err == nil
if err != nil {
message := fmt.Sprintf("Unexpected error while attempting to recover from admission failure: %v", err)
logger.Info("Failed to admit pod, unexpected error while attempting to recover from admission failure", "pod", klog.KObj(admitPod), "err", err)
return PodAdmitResult{
Admit: fit,
Reason: UnexpectedAdmissionError,
Message: message,
}
}
}
if !fit {
var reason string
var message string
if len(reasons) == 0 {
message = "GeneralPredicates failed due to unknown reason, which is unexpected."
logger.Info("Failed to admit pod: GeneralPredicates failed due to unknown reason, which is unexpected", "pod", klog.KObj(admitPod))
return PodAdmitResult{
Admit: fit,
Reason: UnknownReason,
Message: message,
}
}
// If there are failed predicates, we only return the first one as a reason.
r := reasons[0]
switch re := r.(type) {
case *PredicateFailureError:
reason = re.PredicateName
message = re.Error()
logger.V(2).Info("Predicate failed on Pod", "pod", klog.KObj(admitPod), "err", message)
case *InsufficientResourceError:
switch re.ResourceName {
case v1.ResourceCPU:
reason = OutOfCPU
case v1.ResourceMemory:
reason = OutOfMemory
case v1.ResourceEphemeralStorage:
reason = OutOfEphemeralStorage
case v1.ResourcePods:
reason = OutOfPods
default:
reason = fmt.Sprintf("%s%s", InsufficientResourcePrefix, re.ResourceName)
}
message = re.Error()
logger.V(2).Info("Predicate failed on Pod", "pod", klog.KObj(admitPod), "err", message)
default:
reason = UnexpectedPredicateFailureType
message = fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", r)
logger.Info("Failed to admit pod", "pod", klog.KObj(admitPod), "err", message)
}
return PodAdmitResult{
Admit: fit,
Reason: reason,
Message: message,
}
}
return PodAdmitResult{
Admit: true,
}
}
// rejectPodAdmissionBasedOnOSSelector rejects the pod if its node selector for the OS label doesn't match the node's OS label.
// We expect the kubelet status reconcile, which happens every 10s, to update the node labels if there is a mismatch.
func rejectPodAdmissionBasedOnOSSelector(pod *v1.Pod, node *v1.Node) bool {
labels := node.Labels
osName, osLabelExists := labels[v1.LabelOSStable]
if !osLabelExists || osName != runtime.GOOS {
if len(labels) == 0 {
labels = make(map[string]string)
}
labels[v1.LabelOSStable] = runtime.GOOS
}
podLabelSelector, podOSLabelExists := pod.Labels[v1.LabelOSStable]
if !podOSLabelExists {
// If the label selector didn't exist, keep the current behavior as is
return false
}
return podLabelSelector != labels[v1.LabelOSStable]
}
// rejectPodAdmissionBasedOnOSField rejects pods if their OS field doesn't match runtime.GOOS.
// TODO: Relax this restriction when we start supporting LCOW in kubernetes where podOS may not match
// node's OS.
func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool {
if pod.Spec.OS == nil {
return false
}
// Reject (return true) if the pod OS doesn't match runtime.GOOS.
return string(pod.Spec.OS.Name) != runtime.GOOS
}
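// For example, on a Linux node (runtime.GOOS == "linux") a pod that sets
// Spec.OS to windows is rejected, while a pod that leaves Spec.OS nil passes
// this check:
//
//	pod := &v1.Pod{Spec: v1.PodSpec{OS: &v1.PodOS{Name: v1.Windows}}}
//	rejected := rejectPodAdmissionBasedOnOSField(pod) // true on a linux kubelet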
// rejectPodAdmissionBasedOnSupplementalGroupsPolicy rejects the pod only if
// - the feature is beta or above, and SupplementalGroupsPolicy=Strict is set in the pod,
// - but the node does not support the feature.
//
// Note: While the feature is alpha or earlier (i.e. not yet released) in the emulated version,
// the pod should be admitted for backward compatibility.
func rejectPodAdmissionBasedOnSupplementalGroupsPolicy(pod *v1.Pod, node *v1.Node) bool {
admit, reject := false, true // just for readability
inUse := (pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroupsPolicy != nil)
if !inUse {
return admit
}
isBetaOrAbove := false
if featureSpec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[features.SupplementalGroupsPolicy]; ok {
isBetaOrAbove = (featureSpec.PreRelease == featuregate.Beta) || (featureSpec.PreRelease == featuregate.GA)
}
if !isBetaOrAbove {
return admit
}
featureSupportedOnNode := ptr.Deref(
ptr.Deref(node.Status.Features, v1.NodeFeatures{SupplementalGroupsPolicy: ptr.To(false)}).SupplementalGroupsPolicy,
false,
)
effectivePolicy := ptr.Deref(
pod.Spec.SecurityContext.SupplementalGroupsPolicy,
v1.SupplementalGroupsPolicyMerge,
)
if effectivePolicy == v1.SupplementalGroupsPolicyStrict && !featureSupportedOnNode {
return reject
}
return admit
}
func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
filterExtendedResources := func(containers []v1.Container) {
for i, c := range containers {
// We only handle requests in Requests but not Limits because the
// PodFitsResources predicate, to which the resulting pod will be passed,
// does not use Limits.
filteredResources := make(v1.ResourceList)
for rName, rQuant := range c.Resources.Requests {
if v1helper.IsExtendedResourceName(rName) {
if _, found := nodeInfo.Allocatable.ScalarResources[rName]; !found {
continue
}
}
filteredResources[rName] = rQuant
}
containers[i].Resources.Requests = filteredResources
}
}
podCopy := pod.DeepCopy()
filterExtendedResources(podCopy.Spec.Containers)
filterExtendedResources(podCopy.Spec.InitContainers)
return podCopy
}
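// Worked example: if a container requests an extended resource, say
// "example.com/foo" (a hypothetical name), that is absent from
// nodeInfo.Allocatable.ScalarResources, the returned deep copy drops that
// request while keeping standard resources intact:
//
//	// before: Requests = {cpu: 100m, example.com/foo: 1}
//	// after:  Requests = {cpu: 100m}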
// InsufficientResourceError is an error type that indicates which resource limit was
// hit, causing the fit failure.
type InsufficientResourceError struct {
ResourceName v1.ResourceName
Requested int64
Used int64
Capacity int64
}
func (e *InsufficientResourceError) Error() string {
return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
e.ResourceName, e.Requested, e.Used, e.Capacity)
}
// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
GetReason() string
}
// GetReason returns the reason of the InsufficientResourceError.
func (e *InsufficientResourceError) GetReason() string {
return fmt.Sprintf("Insufficient %v", e.ResourceName)
}
// GetInsufficientAmount returns the amount of the insufficient resource of the error.
func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
return e.Requested - (e.Capacity - e.Used)
}
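// Worked example: with Requested=500, Capacity=2000 and Used=1800, the node
// has 2000-1800=200 free, so GetInsufficientAmount returns 500-200=300, the
// amount by which the request exceeds what the node can still offer.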
// PredicateFailureError describes a predicate failure.
type PredicateFailureError struct {
PredicateName string
PredicateDesc string
}
func (e *PredicateFailureError) Error() string {
return fmt.Sprintf("Predicate %s failed: %s", e.PredicateName, e.PredicateDesc)
}
// GetReason returns the reason of the PredicateFailureError.
func (e *PredicateFailureError) GetReason() string {
return e.PredicateDesc
}
// generalFilter checks the group of filters that the kubelet cares about.
func generalFilter(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) []PredicateFailureReason {
admissionResults := scheduler.AdmissionCheck(pod, nodeInfo, true)
var reasons []PredicateFailureReason
for _, r := range admissionResults {
if r.InsufficientResource != nil {
reasons = append(reasons, &InsufficientResourceError{
ResourceName: r.InsufficientResource.ResourceName,
Requested: r.InsufficientResource.Requested,
Used: r.InsufficientResource.Used,
Capacity: r.InsufficientResource.Capacity,
})
} else {
reasons = append(reasons, &PredicateFailureError{r.Name, r.Reason})
}
}
// Check taint/toleration except for static pods
if !types.IsStaticPod(pod) {
_, isUntolerated := corev1.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
// Kubelet is only interested in the NoExecute taint.
return t.Effect == v1.TaintEffectNoExecute
})
if isUntolerated {
reasons = append(reasons, &PredicateFailureError{tainttoleration.Name, tainttoleration.ErrReasonNotMatch})
}
}
return reasons
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import (
"compress/gzip"
"context"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/utils/clock"
)
const (
// timestampFormat is the format of the timestamp suffix for rotated logs.
// See https://golang.org/pkg/time/#Time.Format.
timestampFormat = "20060102-150405"
// compressSuffix is the suffix for compressed logs.
compressSuffix = ".gz"
// tmpSuffix is the suffix for temporary files.
tmpSuffix = ".tmp"
)
// ContainerLogManager manages lifecycle of all container logs.
//
// Implementation is thread-safe.
type ContainerLogManager interface {
// Start container log manager.
Start(ctx context.Context)
// Clean removes all logs of specified container.
Clean(ctx context.Context, containerID string) error
}
// LogRotatePolicy is a policy for container log rotation. The policy applies to all
// containers managed by the kubelet.
type LogRotatePolicy struct {
// MaxSize is the maximum size in bytes of the container log file before it is rotated.
// A negative number disables container log rotation.
MaxSize int64
// MaxFiles is the maximum number of log files that can be present.
// If rotating the logs creates excess files, the oldest file is removed.
MaxFiles int
}
// GetAllLogs gets all in-use (rotated/compressed) logs for a specific container log.
// Returned logs are sorted in oldest to newest order.
func GetAllLogs(log string) ([]string, error) {
// pattern is used to match all rotated files.
pattern := fmt.Sprintf("%s.*", log)
logs, err := filepath.Glob(pattern)
if err != nil {
return nil, fmt.Errorf("failed to list all log files with pattern %q: %w", pattern, err)
}
inuse, _ := filterUnusedLogs(logs)
sort.Strings(inuse)
return append(inuse, log), nil
}
// parseMaxSize parses quantity string to int64 max size in bytes.
func parseMaxSize(size string) (int64, error) {
quantity, err := resource.ParseQuantity(size)
if err != nil {
return 0, err
}
maxSize, ok := quantity.AsInt64()
if !ok {
return 0, fmt.Errorf("invalid max log size")
}
return maxSize, nil
}
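// Worked example: parseMaxSize accepts Kubernetes quantity strings, so
// parseMaxSize("10Mi") yields 10*1024*1024 = 10485760 bytes, while
// parseMaxSize("-1") yields a negative value, which callers treat as
// "rotation disabled".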
type containerLogManager struct {
runtimeService internalapi.RuntimeService
osInterface kubecontainer.OSInterface
policy LogRotatePolicy
clock clock.Clock
mutex sync.Mutex
queue workqueue.TypedRateLimitingInterface[string]
maxWorkers int
monitoringPeriod metav1.Duration
}
// NewContainerLogManager creates a new container log manager.
func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterface kubecontainer.OSInterface, maxSize string, maxFiles int, maxWorkers int, monitorInterval metav1.Duration) (ContainerLogManager, error) {
if maxFiles <= 1 {
return nil, fmt.Errorf("invalid MaxFiles %d, must be > 1", maxFiles)
}
parsedMaxSize, err := parseMaxSize(maxSize)
if err != nil {
return nil, fmt.Errorf("failed to parse container log max size %q: %w", maxSize, err)
}
// A negative number disables container log rotation.
if parsedMaxSize < 0 {
return NewStubContainerLogManager(), nil
}
return &containerLogManager{
osInterface: osInterface,
runtimeService: runtimeService,
policy: LogRotatePolicy{
MaxSize: parsedMaxSize,
MaxFiles: maxFiles,
},
clock: clock.RealClock{},
mutex: sync.Mutex{},
maxWorkers: maxWorkers,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "kubelet_log_rotate_manager"},
),
monitoringPeriod: monitorInterval,
}, nil
}
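// A minimal construction sketch, assuming a CRI runtime service and a context
// ctx are already available (the values shown are illustrative, not defaults):
//
//	m, err := NewContainerLogManager(
//		runtimeService,          // internalapi.RuntimeService
//		&kubecontainer.RealOS{}, // the real OS interface
//		"10Mi",                  // rotate once a log exceeds 10Mi
//		5,                       // keep at most 5 files per container
//		2,                       // 2 rotation workers
//		metav1.Duration{Duration: 10 * time.Second},
//	)
//	if err == nil {
//		m.Start(ctx)
//	}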
// Start the container log manager.
func (c *containerLogManager) Start(ctx context.Context) {
logger := klog.FromContext(ctx)
logger.Info("Initializing container log rotate workers", "workers", c.maxWorkers, "monitorPeriod", c.monitoringPeriod)
for i := 0; i < c.maxWorkers; i++ {
worker := i + 1
go c.processQueueItems(ctx, worker)
}
// Start a goroutine that periodically performs container log rotation.
go wait.Forever(func() {
if err := c.rotateLogs(ctx); err != nil {
logger.Error(err, "Failed to rotate container logs")
}
}, c.monitoringPeriod.Duration)
}
// Clean removes all logs of the specified container (including rotated ones).
func (c *containerLogManager) Clean(ctx context.Context, containerID string) error {
c.mutex.Lock()
defer c.mutex.Unlock()
resp, err := c.runtimeService.ContainerStatus(ctx, containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %w", containerID, err)
}
if resp.GetStatus() == nil {
return fmt.Errorf("container status is nil for %q", containerID)
}
pattern := fmt.Sprintf("%s*", resp.GetStatus().GetLogPath())
logs, err := c.osInterface.Glob(pattern)
if err != nil {
return fmt.Errorf("failed to list all log files with pattern %q: %w", pattern, err)
}
for _, l := range logs {
if err := c.osInterface.Remove(l); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove container %q log %q: %w", containerID, l, err)
}
}
return nil
}
func (c *containerLogManager) processQueueItems(ctx context.Context, worker int) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Starting container log rotation worker", "workerID", worker)
for c.processContainer(ctx, worker) {
}
logger.V(4).Info("Terminating container log rotation worker", "workerID", worker)
}
func (c *containerLogManager) rotateLogs(ctx context.Context) error {
logger := klog.FromContext(ctx)
c.mutex.Lock()
defer c.mutex.Unlock()
logger.V(4).Info("Starting container log rotation sequence")
// TODO(#59998): Use kubelet pod cache.
containers, err := c.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
for _, container := range containers {
// Only rotate logs for running containers. Non-running containers won't
// generate new output, so it doesn't make sense to keep an empty latest log.
if container.GetState() != runtimeapi.ContainerState_CONTAINER_RUNNING {
continue
}
// Guard with V(4) to avoid the overhead of formatting label-like arguments when verbose logging is disabled.
if v := logger.V(4); v.Enabled() {
v.Info("Adding new entry to the queue for processing", "id", container.GetId(), "name", container.Metadata.GetName(), "labels", container.GetLabels())
}
c.queue.Add(container.GetId())
}
return nil
}
func (c *containerLogManager) processContainer(ctx context.Context, worker int) (ok bool) {
key, quit := c.queue.Get()
if quit {
return false
}
defer func() {
c.queue.Done(key)
c.queue.Forget(key)
}()
// Always default the return value to true to keep processing the queue.
ok = true
id := key
logger := klog.FromContext(ctx)
resp, err := c.runtimeService.ContainerStatus(ctx, id, false)
if err != nil {
logger.Error(err, "Failed to get container status", "worker", worker, "containerID", id)
return
}
if resp.GetStatus() == nil {
logger.Error(err, "Container status is nil", "worker", worker, "containerID", id)
return
}
path := resp.GetStatus().GetLogPath()
info, err := c.osInterface.Stat(path)
if err != nil {
if !os.IsNotExist(err) {
logger.Error(err, "Failed to stat container log", "worker", worker, "containerID", id, "path", path)
return
}
if err = c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
logger.Error(err, "Container log doesn't exist, reopen container log failed", "worker", worker, "containerID", id, "path", path)
return
}
info, err = c.osInterface.Stat(path)
if err != nil {
logger.Error(err, "Failed to stat container log after reopen", "worker", worker, "containerID", id, "path", path)
return
}
}
if info.Size() < c.policy.MaxSize {
logger.V(7).Info("log file doesn't need to be rotated", "worker", worker, "containerID", id, "path", path, "currentSize", info.Size(), "maxSize", c.policy.MaxSize)
return
}
if err := c.rotateLog(ctx, id, path); err != nil {
logger.Error(err, "Failed to rotate log for container", "worker", worker, "containerID", id, "path", path, "currentSize", info.Size(), "maxSize", c.policy.MaxSize)
return
}
return
}
func (c *containerLogManager) rotateLog(ctx context.Context, id, log string) error {
// pattern is used to match all rotated files.
pattern := fmt.Sprintf("%s.*", log)
logs, err := filepath.Glob(pattern)
if err != nil {
return fmt.Errorf("failed to list all log files with pattern %q: %w", pattern, err)
}
logs, err = c.cleanupUnusedLogs(logs)
if err != nil {
return fmt.Errorf("failed to cleanup logs: %w", err)
}
logs, err = c.removeExcessLogs(logs)
if err != nil {
return fmt.Errorf("failed to remove excess logs: %w", err)
}
// Compress uncompressed log files.
for _, l := range logs {
if strings.HasSuffix(l, compressSuffix) {
continue
}
if err := c.compressLog(l); err != nil {
return fmt.Errorf("failed to compress log %q: %w", l, err)
}
}
if err := c.rotateLatestLog(ctx, id, log); err != nil {
return fmt.Errorf("failed to rotate log %q: %w", log, err)
}
return nil
}
// cleanupUnusedLogs cleans up temporary or unused log files left behind by a
// previous log rotation failure.
func (c *containerLogManager) cleanupUnusedLogs(logs []string) ([]string, error) {
inuse, unused := filterUnusedLogs(logs)
for _, l := range unused {
if err := c.osInterface.Remove(l); err != nil {
return nil, fmt.Errorf("failed to remove unused log %q: %w", l, err)
}
}
return inuse, nil
}
// filterUnusedLogs splits logs into two groups: the first group contains
// in-use logs, the second group contains unused logs.
func filterUnusedLogs(logs []string) (inuse []string, unused []string) {
for _, l := range logs {
if isInUse(l, logs) {
inuse = append(inuse, l)
} else {
unused = append(unused, l)
}
}
return inuse, unused
}
// isInUse checks whether a container log file is still in use.
func isInUse(l string, logs []string) bool {
// Temporary files are never in use.
if strings.HasSuffix(l, tmpSuffix) {
return false
}
// All compressed logs are in use.
if strings.HasSuffix(l, compressSuffix) {
return true
}
// Files that have already been compressed are not in use.
for _, another := range logs {
if l+compressSuffix == another {
return false
}
}
return true
}
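// Worked example: given logs = ["c.log.20240101-000000",
// "c.log.20240101-000000.gz", "c.log.20240102-000000", "c.log.tmp"], the
// ".tmp" file is unused, "c.log.20240101-000000" is unused because its
// compressed twin exists, and the ".gz" file plus the uncompressed
// "20240102" rotation are in use.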
// removeExcessLogs removes old logs to make sure there are at most MaxFiles log files.
func (c *containerLogManager) removeExcessLogs(logs []string) ([]string, error) {
// Sort log files in oldest to newest order.
sort.Strings(logs)
// The container runtime will create a new log file, and we'll rotate the latest log file.
// Other than those 2 files, we can have at most MaxFiles-2 rotated log files.
// Keep MaxFiles-2 files by removing old files.
// We should remove from oldest to newest, so as not to break ongoing `kubectl logs`.
maxRotatedFiles := c.policy.MaxFiles - 2
if maxRotatedFiles < 0 {
maxRotatedFiles = 0
}
i := 0
for ; i < len(logs)-maxRotatedFiles; i++ {
if err := c.osInterface.Remove(logs[i]); err != nil {
return nil, fmt.Errorf("failed to remove old log %q: %w", logs[i], err)
}
}
logs = logs[i:]
return logs, nil
}
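// Worked example: with MaxFiles=5 there is room for the live log, the file
// about to be rotated, and 5-2=3 rotated files; given 5 rotated logs sorted
// oldest-first, the 5-3=2 oldest are removed and the newest 3 are returned.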
// compressLog compresses a log to log.gz with gzip.
func (c *containerLogManager) compressLog(log string) error {
logInfo, err := os.Stat(log)
if err != nil {
return fmt.Errorf("failed to stat log file: %w", err)
}
r, err := c.osInterface.Open(log)
if err != nil {
return fmt.Errorf("failed to open log %q: %w", log, err)
}
defer r.Close()
tmpLog := log + tmpSuffix
f, err := c.osInterface.OpenFile(tmpLog, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, logInfo.Mode())
if err != nil {
return fmt.Errorf("failed to create temporary log %q: %w", tmpLog, err)
}
defer func() {
// Best effort cleanup of tmpLog.
c.osInterface.Remove(tmpLog)
}()
defer f.Close()
w := gzip.NewWriter(f)
defer w.Close()
if _, err := io.Copy(w, r); err != nil {
return fmt.Errorf("failed to compress %q to %q: %w", log, tmpLog, err)
}
// The archive needs to be closed before renaming, otherwise an error will occur on Windows.
w.Close()
f.Close()
compressedLog := log + compressSuffix
if err := c.osInterface.Rename(tmpLog, compressedLog); err != nil {
return fmt.Errorf("failed to rename %q to %q: %w", tmpLog, compressedLog, err)
}
// Remove old log file.
r.Close()
if err := c.osInterface.Remove(log); err != nil {
return fmt.Errorf("failed to remove log %q after compress: %w", log, err)
}
return nil
}
// rotateLatestLog rotates the latest log without compression, so that the container can
// still write to it and fluentd can finish reading it.
func (c *containerLogManager) rotateLatestLog(ctx context.Context, id, log string) error {
logger := klog.FromContext(ctx)
timestamp := c.clock.Now().Format(timestampFormat)
rotated := fmt.Sprintf("%s.%s", log, timestamp)
if err := c.osInterface.Rename(log, rotated); err != nil {
return fmt.Errorf("failed to rotate log %q to %q: %w", log, rotated, err)
}
if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil {
// Rename the rotated log back, so that we can try rotating it again
// next round.
// If the kubelet gets restarted at this point, we'll lose the original log.
if renameErr := c.osInterface.Rename(rotated, log); renameErr != nil {
// This shouldn't happen.
// Report an error if it does, because we will lose the original log.
logger.Error(renameErr, "Failed to rename rotated log", "rotatedLog", rotated, "newLog", log, "containerID", id)
}
return fmt.Errorf("failed to reopen container log %q: %w", id, err)
}
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import "context"
type containerLogManagerStub struct{}
func (*containerLogManagerStub) Start(ctx context.Context) {}
func (*containerLogManagerStub) Clean(ctx context.Context, containerID string) error {
return nil
}
// NewStubContainerLogManager returns an empty ContainerLogManager which does nothing.
func NewStubContainerLogManager() ContainerLogManager {
return &containerLogManagerStub{}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collectors
import (
"context"
"fmt"
"time"
"k8s.io/component-base/metrics"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
)
type criMetricsCollector struct {
metrics.BaseStableCollector
// The descriptors structure will be populated by one call to ListMetricDescriptors from the runtime.
// They will be saved in this map, where the key is the Name and the value is the Desc.
descriptors map[string]*metrics.Desc
listPodSandboxMetricsFn func(context.Context) ([]*runtimeapi.PodSandboxMetrics, error)
}
// Check if criMetricsCollector implements necessary interface
var _ metrics.StableCollector = &criMetricsCollector{}
// NewCRIMetricsCollector returns a metrics.StableCollector which collects CRI pod sandbox metrics.
func NewCRIMetricsCollector(ctx context.Context, listPodSandboxMetricsFn func(context.Context) ([]*runtimeapi.PodSandboxMetrics, error), listMetricDescriptorsFn func(context.Context) ([]*runtimeapi.MetricDescriptor, error)) metrics.StableCollector {
descs, err := listMetricDescriptorsFn(ctx)
if err != nil {
logger := klog.FromContext(ctx)
logger.Error(err, "Error reading MetricDescriptors")
return &criMetricsCollector{
listPodSandboxMetricsFn: listPodSandboxMetricsFn,
}
}
c := &criMetricsCollector{
listPodSandboxMetricsFn: listPodSandboxMetricsFn,
descriptors: make(map[string]*metrics.Desc, len(descs)),
}
for _, desc := range descs {
c.descriptors[desc.Name] = criDescToProm(desc)
}
return c
}
// Describe implements the metrics.DescribeWithStability interface.
func (c *criMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
for _, desc := range c.descriptors {
ch <- desc
}
}
// Collect implements the metrics.CollectWithStability interface.
// TODO(haircommander): would it be better if these were processed async?
func (c *criMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
logger := klog.FromContext(ctx)
podMetrics, err := c.listPodSandboxMetricsFn(ctx)
if err != nil {
logger.Error(err, "Failed to get pod metrics")
return
}
for _, podMetric := range podMetrics {
for _, metric := range podMetric.GetMetrics() {
promMetric, err := c.criMetricToProm(logger, metric)
if err == nil {
ch <- promMetric
}
}
for _, ctrMetric := range podMetric.GetContainerMetrics() {
for _, metric := range ctrMetric.GetMetrics() {
promMetric, err := c.criMetricToProm(logger, metric)
if err == nil {
ch <- promMetric
}
}
}
}
}
func criDescToProm(m *runtimeapi.MetricDescriptor) *metrics.Desc {
// Labels in the translation are variableLabels, as opposed to constant labels.
// This is because the values of the labels will be different for each container.
return metrics.NewDesc(m.Name, m.Help, m.LabelKeys, nil, metrics.INTERNAL, "")
}
func (c *criMetricsCollector) criMetricToProm(logger klog.Logger, m *runtimeapi.Metric) (metrics.Metric, error) {
desc, ok := c.descriptors[m.Name]
if !ok {
err := fmt.Errorf("error converting CRI Metric to prometheus format")
logger.V(5).Error(err, "Descriptor not present in pre-populated list of descriptors", "name", m.Name)
return nil, err
}
typ := criTypeToProm[m.MetricType]
pm, err := metrics.NewConstMetric(desc, typ, float64(m.GetValue().Value), m.LabelValues...)
if err != nil {
logger.Error(err, "Error getting CRI prometheus metric", "descriptor", desc.String())
return nil, err
}
// If Timestamp is 0, the runtime collected the metric on demand rather than
// serving a cached result (one collected ahead of time), so there is no
// meaningful collection timestamp to attach.
if m.Timestamp == 0 {
return pm, nil
}
return metrics.NewLazyMetricWithTimestamp(time.Unix(0, m.Timestamp), pm), nil
}
var criTypeToProm = map[runtimeapi.MetricType]metrics.ValueType{
runtimeapi.MetricType_COUNTER: metrics.CounterValue,
runtimeapi.MetricType_GAUGE: metrics.GaugeValue,
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collectors
import (
"context"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
var (
descLogSize = metrics.NewDesc(
"kubelet_container_log_filesystem_used_bytes",
"Bytes used by the container's logs on the filesystem.",
[]string{
"uid",
"namespace",
"pod",
"container",
}, nil,
metrics.ALPHA,
"",
)
)
type logMetricsCollector struct {
metrics.BaseStableCollector
podStats func(ctx context.Context) ([]statsapi.PodStats, error)
}
// Check if logMetricsCollector implements necessary interface
var _ metrics.StableCollector = &logMetricsCollector{}
// NewLogMetricsCollector returns a metrics.StableCollector which
// exposes metrics about containers' log volume size.
func NewLogMetricsCollector(podStats func(ctx context.Context) ([]statsapi.PodStats, error)) metrics.StableCollector {
return &logMetricsCollector{
podStats: podStats,
}
}
// DescribeWithStability implements the metrics.StableCollector interface.
func (c *logMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- descLogSize
}
// CollectWithStability implements the metrics.StableCollector interface.
func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
logger := klog.FromContext(ctx)
podStats, err := c.podStats(ctx)
if err != nil {
logger.Error(err, "Failed to get pod stats")
return
}
for _, ps := range podStats {
for _, c := range ps.Containers {
if c.Logs != nil && c.Logs.UsedBytes != nil {
ch <- metrics.NewLazyConstMetric(
descLogSize,
metrics.GaugeValue,
float64(*c.Logs.UsedBytes),
ps.PodRef.UID,
ps.PodRef.Namespace,
ps.PodRef.Name,
c.Name,
)
}
}
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collectors
import (
"context"
"time"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
summary "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
)
var (
nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the node in core-seconds",
nil,
nil,
metrics.STABLE,
"")
nodeMemoryUsageDesc = metrics.NewDesc("node_memory_working_set_bytes",
"Current working set of the node in bytes",
nil,
nil,
metrics.STABLE,
"")
nodeSwapUsageDesc = metrics.NewDesc("node_swap_usage_bytes",
"Current swap usage of the node in bytes. Reported only on non-windows systems",
nil,
nil,
metrics.ALPHA,
"")
containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the container in core-seconds",
[]string{"container", "pod", "namespace"},
nil,
metrics.STABLE,
"")
containerMemoryUsageDesc = metrics.NewDesc("container_memory_working_set_bytes",
"Current working set of the container in bytes",
[]string{"container", "pod", "namespace"},
nil,
metrics.STABLE,
"")
containerSwapUsageDesc = metrics.NewDesc("container_swap_usage_bytes",
"Current amount of the container swap usage in bytes. Reported only on non-windows systems",
[]string{"container", "pod", "namespace"},
nil,
metrics.ALPHA,
"")
containerSwapLimitDesc = metrics.NewDesc("container_swap_limit_bytes",
"Current amount of the container swap limit in bytes. Reported only on non-windows systems",
[]string{"container", "pod", "namespace"},
nil,
metrics.ALPHA,
"")
podCPUUsageDesc = metrics.NewDesc("pod_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the pod in core-seconds",
[]string{"pod", "namespace"},
nil,
metrics.STABLE,
"")
podMemoryUsageDesc = metrics.NewDesc("pod_memory_working_set_bytes",
"Current working set of the pod in bytes",
[]string{"pod", "namespace"},
nil,
metrics.STABLE,
"")
podSwapUsageDesc = metrics.NewDesc("pod_swap_usage_bytes",
"Current amount of the pod swap usage in bytes. Reported only on non-windows systems",
[]string{"pod", "namespace"},
nil,
metrics.ALPHA,
"")
resourceScrapeResultDesc = metrics.NewDesc("scrape_error",
"1 if there was an error while getting container metrics, 0 otherwise",
nil,
nil,
metrics.ALPHA,
"1.29.0")
resourceScrapeErrorResultDesc = metrics.NewDesc("resource_scrape_error",
"1 if there was an error while getting container metrics, 0 otherwise",
nil,
nil,
metrics.STABLE,
"")
containerStartTimeDesc = metrics.NewDesc("container_start_time_seconds",
"Start time of the container since unix epoch in seconds",
[]string{"container", "pod", "namespace"},
nil,
metrics.STABLE,
"")
)
// NewResourceMetricsCollector returns a metrics.StableCollector which exports resource metrics
func NewResourceMetricsCollector(provider stats.SummaryProvider) metrics.StableCollector {
return &resourceMetricsCollector{
provider: provider,
}
}
type resourceMetricsCollector struct {
metrics.BaseStableCollector
provider stats.SummaryProvider
}
// Check if resourceMetricsCollector implements necessary interface
var _ metrics.StableCollector = &resourceMetricsCollector{}
// DescribeWithStability implements metrics.StableCollector
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- nodeCPUUsageDesc
ch <- nodeMemoryUsageDesc
ch <- nodeSwapUsageDesc
ch <- containerStartTimeDesc
ch <- containerCPUUsageDesc
ch <- containerMemoryUsageDesc
ch <- containerSwapUsageDesc
ch <- containerSwapLimitDesc
ch <- podCPUUsageDesc
ch <- podMemoryUsageDesc
ch <- podSwapUsageDesc
ch <- resourceScrapeResultDesc
ch <- resourceScrapeErrorResultDesc
}
// CollectWithStability implements metrics.StableCollector
// Since new containers are frequently created and removed, using a Gauge would
// leak metric collectors for containers or pods that no longer exist. Instead, implement
// a custom collector that only collects metrics for active containers.
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
var errorCount float64
defer func() {
ch <- metrics.NewLazyConstMetric(resourceScrapeResultDesc, metrics.GaugeValue, errorCount)
ch <- metrics.NewLazyConstMetric(resourceScrapeErrorResultDesc, metrics.GaugeValue, errorCount)
}()
statsSummary, err := rc.provider.GetCPUAndMemoryStats(ctx)
if err != nil {
logger := klog.FromContext(ctx)
errorCount = 1
logger.Error(err, "Error getting summary for resourceMetric prometheus endpoint")
return
}
rc.collectNodeCPUMetrics(ch, statsSummary.Node)
rc.collectNodeMemoryMetrics(ch, statsSummary.Node)
rc.collectNodeSwapMetrics(ch, statsSummary.Node)
for _, pod := range statsSummary.Pods {
for _, container := range pod.Containers {
rc.collectContainerStartTime(ch, pod, container)
rc.collectContainerCPUMetrics(ch, pod, container)
rc.collectContainerMemoryMetrics(ch, pod, container)
rc.collectContainerSwapMetrics(ch, pod, container)
}
rc.collectPodCPUMetrics(ch, pod)
rc.collectPodMemoryMetrics(ch, pod)
rc.collectPodSwapMetrics(ch, pod)
}
}
func (rc *resourceMetricsCollector) collectNodeCPUMetrics(ch chan<- metrics.Metric, s summary.NodeStats) {
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
metrics.NewLazyConstMetric(nodeCPUUsageDesc, metrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second)))
}
func (rc *resourceMetricsCollector) collectNodeMemoryMetrics(ch chan<- metrics.Metric, s summary.NodeStats) {
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
metrics.NewLazyConstMetric(nodeMemoryUsageDesc, metrics.GaugeValue, float64(*s.Memory.WorkingSetBytes)))
}
func (rc *resourceMetricsCollector) collectNodeSwapMetrics(ch chan<- metrics.Metric, s summary.NodeStats) {
if s.Swap == nil || s.Swap.SwapUsageBytes == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
metrics.NewLazyConstMetric(nodeSwapUsageDesc, metrics.GaugeValue, float64(*s.Swap.SwapUsageBytes)))
}
func (rc *resourceMetricsCollector) collectContainerStartTime(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) {
if s.StartTime.Unix() <= 0 {
return
}
ch <- metrics.NewLazyConstMetric(containerStartTimeDesc, metrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace)
}
func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) {
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
metrics.NewLazyConstMetric(containerCPUUsageDesc, metrics.CounterValue,
float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectContainerMemoryMetrics(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) {
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
metrics.NewLazyConstMetric(containerMemoryUsageDesc, metrics.GaugeValue,
float64(*s.Memory.WorkingSetBytes), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectContainerSwapMetrics(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) {
if s.Swap == nil {
return
}
swapUsageBytes := float64(0)
if s.Swap.SwapUsageBytes != nil {
swapUsageBytes = float64(*s.Swap.SwapUsageBytes)
ch <- metrics.NewLazyMetricWithTimestamp(s.Swap.Time.Time,
metrics.NewLazyConstMetric(containerSwapUsageDesc, metrics.GaugeValue,
swapUsageBytes, s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
if s.Swap.SwapAvailableBytes != nil {
ch <- metrics.NewLazyMetricWithTimestamp(s.Swap.Time.Time,
metrics.NewLazyConstMetric(containerSwapLimitDesc, metrics.GaugeValue,
float64(*s.Swap.SwapAvailableBytes)+swapUsageBytes, s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
}
}
func (rc *resourceMetricsCollector) collectPodCPUMetrics(ch chan<- metrics.Metric, pod summary.PodStats) {
if pod.CPU == nil || pod.CPU.UsageCoreNanoSeconds == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(pod.CPU.Time.Time,
metrics.NewLazyConstMetric(podCPUUsageDesc, metrics.CounterValue,
float64(*pod.CPU.UsageCoreNanoSeconds)/float64(time.Second), pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectPodMemoryMetrics(ch chan<- metrics.Metric, pod summary.PodStats) {
if pod.Memory == nil || pod.Memory.WorkingSetBytes == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(pod.Memory.Time.Time,
metrics.NewLazyConstMetric(podMemoryUsageDesc, metrics.GaugeValue,
float64(*pod.Memory.WorkingSetBytes), pod.PodRef.Name, pod.PodRef.Namespace))
}
func (rc *resourceMetricsCollector) collectPodSwapMetrics(ch chan<- metrics.Metric, pod summary.PodStats) {
if pod.Swap == nil || pod.Swap.SwapUsageBytes == nil {
return
}
ch <- metrics.NewLazyMetricWithTimestamp(pod.Swap.Time.Time,
metrics.NewLazyConstMetric(podSwapUsageDesc, metrics.GaugeValue,
float64(*pod.Swap.SwapUsageBytes), pod.PodRef.Name, pod.PodRef.Namespace))
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package collectors
import (
"context"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
)
var (
volumeStatsCapacityBytesDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsCapacityBytesKey),
"Capacity in bytes of the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsAvailableBytesDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsAvailableBytesKey),
"Number of available bytes in the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsUsedBytesDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsUsedBytesKey),
"Number of used bytes in the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsInodesDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesKey),
"Maximum number of inodes in the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsInodesFreeDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesFreeKey),
"Number of free inodes in the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsInodesUsedDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsInodesUsedKey),
"Number of used inodes in the volume",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "",
)
volumeStatsHealthAbnormalDesc = metrics.NewDesc(
metrics.BuildFQName("", kubeletmetrics.KubeletSubsystem, kubeletmetrics.VolumeStatsHealthStatusAbnormalKey),
"Abnormal volume health status. The count is either 1 or 0. 1 indicates the volume is unhealthy, 0 indicates volume is healthy",
[]string{"namespace", "persistentvolumeclaim"}, nil,
metrics.ALPHA, "")
)
type volumeStatsCollector struct {
metrics.BaseStableCollector
statsProvider serverstats.Provider
}
// Check if volumeStatsCollector implements necessary interface
var _ metrics.StableCollector = &volumeStatsCollector{}
// NewVolumeStatsCollector creates a volume stats metrics.StableCollector.
func NewVolumeStatsCollector(statsProvider serverstats.Provider) metrics.StableCollector {
return &volumeStatsCollector{statsProvider: statsProvider}
}
// DescribeWithStability implements the metrics.StableCollector interface.
func (collector *volumeStatsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- volumeStatsCapacityBytesDesc
ch <- volumeStatsAvailableBytesDesc
ch <- volumeStatsUsedBytesDesc
ch <- volumeStatsInodesDesc
ch <- volumeStatsInodesFreeDesc
ch <- volumeStatsInodesUsedDesc
ch <- volumeStatsHealthAbnormalDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
podStats, err := collector.statsProvider.ListPodStats(ctx)
if err != nil {
return
}
addGauge := func(desc *metrics.Desc, pvcRef *stats.PVCReference, v float64, lv ...string) {
lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...)
ch <- metrics.NewLazyConstMetric(desc, metrics.GaugeValue, v, lv...)
}
allPVCs := sets.Set[stats.PVCReference]{}
for _, podStat := range podStats {
if podStat.VolumeStats == nil {
continue
}
for _, volumeStat := range podStat.VolumeStats {
pvcRef := volumeStat.PVCRef
if pvcRef == nil {
// ignore if no PVC reference
continue
}
if allPVCs.Has(*pvcRef) {
// ignore if already collected
continue
}
addGauge(volumeStatsCapacityBytesDesc, pvcRef, float64(*volumeStat.CapacityBytes))
addGauge(volumeStatsAvailableBytesDesc, pvcRef, float64(*volumeStat.AvailableBytes))
addGauge(volumeStatsUsedBytesDesc, pvcRef, float64(*volumeStat.UsedBytes))
addGauge(volumeStatsInodesDesc, pvcRef, float64(*volumeStat.Inodes))
addGauge(volumeStatsInodesFreeDesc, pvcRef, float64(*volumeStat.InodesFree))
addGauge(volumeStatsInodesUsedDesc, pvcRef, float64(*volumeStat.InodesUsed))
if volumeStat.VolumeHealthStats != nil {
addGauge(volumeStatsHealthAbnormalDesc, pvcRef, convertBoolToFloat64(volumeStat.VolumeHealthStats.Abnormal))
}
allPVCs.Insert(*pvcRef)
}
}
}
func convertBoolToFloat64(boolVal bool) float64 {
if boolVal {
return 1
}
return 0
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
// This const block defines the metric names for the kubelet metrics.
const (
FirstNetworkPodStartSLIDurationKey = "first_network_pod_start_sli_duration_seconds"
KubeletSubsystem = "kubelet"
DRASubsystem = "dra"
NodeNameKey = "node_name"
NodeLabelKey = "node"
NodeStartupPreKubeletKey = "node_startup_pre_kubelet_duration_seconds"
NodeStartupPreRegistrationKey = "node_startup_pre_registration_duration_seconds"
NodeStartupRegistrationKey = "node_startup_registration_duration_seconds"
NodeStartupPostRegistrationKey = "node_startup_post_registration_duration_seconds"
NodeStartupKey = "node_startup_duration_seconds"
PodWorkerDurationKey = "pod_worker_duration_seconds"
PodStartDurationKey = "pod_start_duration_seconds"
PodStartSLIDurationKey = "pod_start_sli_duration_seconds"
PodStartTotalDurationKey = "pod_start_total_duration_seconds"
CgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
PodWorkerStartDurationKey = "pod_worker_start_duration_seconds"
PodStatusSyncDurationKey = "pod_status_sync_duration_seconds"
PLEGRelistDurationKey = "pleg_relist_duration_seconds"
PLEGDiscardEventsKey = "pleg_discard_events"
PLEGRelistIntervalKey = "pleg_relist_interval_seconds"
PLEGLastSeenKey = "pleg_last_seen_seconds"
EventedPLEGConnErrKey = "evented_pleg_connection_error_count"
EventedPLEGConnKey = "evented_pleg_connection_success_count"
EventedPLEGConnLatencyKey = "evented_pleg_connection_latency_seconds"
EvictionsKey = "evictions"
EvictionStatsAgeKey = "eviction_stats_age_seconds"
PreemptionsKey = "preemptions"
VolumeStatsCapacityBytesKey = "volume_stats_capacity_bytes"
VolumeStatsAvailableBytesKey = "volume_stats_available_bytes"
VolumeStatsUsedBytesKey = "volume_stats_used_bytes"
VolumeStatsInodesKey = "volume_stats_inodes"
VolumeStatsInodesFreeKey = "volume_stats_inodes_free"
VolumeStatsInodesUsedKey = "volume_stats_inodes_used"
VolumeStatsHealthStatusAbnormalKey = "volume_stats_health_status_abnormal"
RunningPodsKey = "running_pods"
RunningContainersKey = "running_containers"
DesiredPodCountKey = "desired_pods"
ActivePodCountKey = "active_pods"
MirrorPodCountKey = "mirror_pods"
WorkingPodCountKey = "working_pods"
OrphanedRuntimePodTotalKey = "orphaned_runtime_pods_total"
RestartedPodTotalKey = "restarted_pods_total"
ImagePullDurationKey = "image_pull_duration_seconds"
CgroupVersionKey = "cgroup_version"
CRILosingSupportKey = "cri_losing_support"
// Metrics keys of remote runtime operations
RuntimeOperationsKey = "runtime_operations_total"
RuntimeOperationsDurationKey = "runtime_operations_duration_seconds"
RuntimeOperationsErrorsKey = "runtime_operations_errors_total"
// Metrics keys of device plugin operations
DevicePluginRegistrationCountKey = "device_plugin_registration_total"
DevicePluginAllocationDurationKey = "device_plugin_alloc_duration_seconds"
// Metrics keys of pod resources operations
PodResourcesEndpointRequestsTotalKey = "pod_resources_endpoint_requests_total"
PodResourcesEndpointRequestsListKey = "pod_resources_endpoint_requests_list"
PodResourcesEndpointRequestsGetAllocatableKey = "pod_resources_endpoint_requests_get_allocatable"
PodResourcesEndpointErrorsListKey = "pod_resources_endpoint_errors_list"
PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable"
PodResourcesEndpointRequestsGetKey = "pod_resources_endpoint_requests_get"
PodResourcesEndpointErrorsGetKey = "pod_resources_endpoint_errors_get"
// Metrics keys for RuntimeClass
RunPodSandboxDurationKey = "run_podsandbox_duration_seconds"
RunPodSandboxErrorsKey = "run_podsandbox_errors_total"
// Metrics to keep track of total number of Pods and Containers started
StartedPodsTotalKey = "started_pods_total"
StartedPodsErrorsTotalKey = "started_pods_errors_total"
StartedContainersTotalKey = "started_containers_total"
StartedContainersErrorsTotalKey = "started_containers_errors_total"
// Metrics to track HostProcess container usage by this kubelet
StartedHostProcessContainersTotalKey = "started_host_process_containers_total"
StartedHostProcessContainersErrorsTotalKey = "started_host_process_containers_errors_total"
// Metrics to track UserNamespaced (hostUsers = false) pods.
StartedUserNamespacedPodsTotalKey = "started_user_namespaced_pods_total"
StartedUserNamespacedPodsErrorsTotalKey = "started_user_namespaced_pods_errors_total"
// Metrics to track ephemeral container usage by this kubelet
ManagedEphemeralContainersKey = "managed_ephemeral_containers"
// Metrics to track the CPU manager behavior
CPUManagerPinningRequestsTotalKey = "cpu_manager_pinning_requests_total"
CPUManagerPinningErrorsTotalKey = "cpu_manager_pinning_errors_total"
CPUManagerSharedPoolSizeMilliCoresKey = "cpu_manager_shared_pool_size_millicores"
CPUManagerExclusiveCPUsAllocationCountKey = "cpu_manager_exclusive_cpu_allocation_count"
CPUManagerAllocationPerNUMAKey = "cpu_manager_allocation_per_numa"
// Metrics to track the Memory manager behavior
MemoryManagerPinningRequestsTotalKey = "memory_manager_pinning_requests_total"
MemoryManagerPinningErrorsTotalKey = "memory_manager_pinning_errors_total"
// Metrics to track the Topology manager behavior
TopologyManagerAdmissionRequestsTotalKey = "topology_manager_admission_requests_total"
TopologyManagerAdmissionErrorsTotalKey = "topology_manager_admission_errors_total"
TopologyManagerAdmissionDurationKey = "topology_manager_admission_duration_ms"
// Metrics to track orphan pod cleanup
orphanPodCleanedVolumesKey = "orphan_pod_cleaned_volumes"
orphanPodCleanedVolumesErrorsKey = "orphan_pod_cleaned_volumes_errors"
// Metric for tracking garbage collected images
ImageGarbageCollectedTotalKey = "image_garbage_collected_total"
// Metrics for tracking alignment of compute resources
ContainerAlignedComputeResourcesNameKey = "container_aligned_compute_resources_count"
ContainerAlignedComputeResourcesFailureNameKey = "container_aligned_compute_resources_failure_count"
ContainerAlignedComputeResourcesScopeLabelKey = "scope"
ContainerAlignedComputeResourcesBoundaryLabelKey = "boundary"
// Metric keys for DRA operations
DRAOperationsDurationKey = "operations_duration_seconds"
DRAGRPCOperationsDurationKey = "grpc_operations_duration_seconds"
// Values used in metric labels
Container = "container"
InitContainer = "init_container"
EphemeralContainer = "ephemeral_container"
AlignScopePod = "pod"
AlignScopeContainer = "container"
AlignedPhysicalCPU = "physical_cpu"
AlignedNUMANode = "numa_node"
AlignedUncoreCache = "uncore_cache"
// Metrics to track kubelet admission rejections.
AdmissionRejectionsTotalKey = "admission_rejections_total"
// Image Volume metrics
ImageVolumeRequestedTotalKey = "image_volume_requested_total"
ImageVolumeMountedSucceedTotalKey = "image_volume_mounted_succeed_total"
ImageVolumeMountedErrorsTotalKey = "image_volume_mounted_errors_total"
// Special label for [DRAResourceClaimsInUseDesc] which counts ResourceClaims regardless of the driver.
DRAResourceClaimsInUseAnyDriver = "<any>"
// Metric keys for in-place pod resize operations.
ContainerRequestedResizesKey = "container_requested_resizes_total"
PodResizeDurationMillisecondsKey = "pod_resize_duration_milliseconds"
PodPendingResizesKey = "pod_pending_resizes"
PodInfeasibleResizesKey = "pod_infeasible_resizes_total"
PodInProgressResizesKey = "pod_in_progress_resizes"
PodDeferredAcceptedResizesKey = "pod_deferred_accepted_resizes_total"
)
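// Editor's note (illustrative, not original source): component-base/metrics
// joins Subsystem and Name with an underscore when building the
// fully-qualified metric name, so e.g. PodStartDurationKey above is exposed
// as "kubelet_pod_start_duration_seconds" and DRAOperationsDurationKey as
// "dra_operations_duration_seconds".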
type imageSizeBucket struct {
lowerBoundInBytes uint64
label string
}
var (
podStartupDurationBuckets = []float64{0.5, 1, 2, 3, 4, 5, 6, 8, 10, 20, 30, 45, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600}
imagePullDurationBuckets = []float64{1, 5, 10, 20, 30, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600}
// imageSizeBuckets has the labels to be associated with the image_pull_duration_seconds metric. For example, if the size of
// a pulled image is between 1GB and 5GB, the label will be "1GB-5GB".
imageSizeBuckets = []imageSizeBucket{
{0, "0-10MB"},
{10 * 1024 * 1024, "10MB-100MB"},
{100 * 1024 * 1024, "100MB-500MB"},
{500 * 1024 * 1024, "500MB-1GB"},
{1 * 1024 * 1024 * 1024, "1GB-5GB"},
{5 * 1024 * 1024 * 1024, "5GB-10GB"},
{10 * 1024 * 1024 * 1024, "10GB-20GB"},
{20 * 1024 * 1024 * 1024, "20GB-30GB"},
{30 * 1024 * 1024 * 1024, "30GB-40GB"},
{40 * 1024 * 1024 * 1024, "40GB-60GB"},
{60 * 1024 * 1024 * 1024, "60GB-100GB"},
{100 * 1024 * 1024 * 1024, "GT100GB"},
}
// DRADurationBuckets are the bucket boundaries for the DRA operation duration metrics
// DRAOperationsDuration and DRAGRPCOperationsDuration defined below in this file.
// The maximum bucket value of 40 is based on the 45-second max gRPC timeout defined
// for the DRA gRPC calls in pkg/kubelet/cm/dra/plugin/registration.go.
DRADurationBuckets = metrics.ExponentialBucketsRange(.1, 40, 15)
// podResizeDurationBuckets is the bucket boundaries for pod_resize_duration_milliseconds metrics.
podResizeDurationBuckets = []float64{10, 50, 100, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000, 120000, 300000, 600000}
)
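// Editor's sketch: ExponentialBucketsRange(min, max, count) returns count
// geometrically spaced bucket boundaries whose first value is min and last
// is max, so DRADurationBuckets spans 0.1s..40s in 15 steps with a common
// ratio of roughly (40/0.1)^(1/14) ≈ 1.53.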
var (
// NodeName is a Gauge that tracks the node's name. The count is always 1.
NodeName = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeNameKey,
Help: "The node's name. The count is always 1.",
StabilityLevel: metrics.ALPHA,
},
[]string{NodeLabelKey},
)
// ContainersPerPodCount is a Histogram that tracks the number of containers per pod.
ContainersPerPodCount = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: "containers_per_pod_count",
Help: "The number of containers per pod.",
Buckets: metrics.ExponentialBuckets(1, 2, 5),
StabilityLevel: metrics.ALPHA,
},
)
// PodWorkerDuration is a Histogram that tracks the duration (in seconds) it takes to sync a single pod.
// Broken down by the operation type.
PodWorkerDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodWorkerDurationKey,
Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_type"},
)
// PodStartDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run,
// measured from the time the pod is first seen by the kubelet.
PodStartDuration = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodStartDurationKey,
Help: "Duration in seconds from kubelet seeing a pod for the first time to the pod starting to run",
Buckets: podStartupDurationBuckets,
StabilityLevel: metrics.ALPHA,
},
)
// PodStartSLIDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run,
// excluding the time for image pulling. This metric should reflect the "Pod startup latency SLI" definition
// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md
//
// The histogram bucket boundaries for pod startup latency metrics, measured in seconds. These are hand-picked
// so as to be roughly exponential but still round numbers in everyday units. This is to minimise the number
// of buckets while allowing accurate measurement of thresholds which might be used in SLOs
// e.g. x% of pods start up within 30 seconds, or 15 minutes, etc.
PodStartSLIDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodStartSLIDurationKey,
Help: "Duration in seconds to start a pod, excluding time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
Buckets: podStartupDurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{},
)
// PodStartTotalDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run
// since creation, including the time for image pulling.
//
// The histogram bucket boundaries for pod startup latency metrics, measured in seconds. These are hand-picked
// so as to be roughly exponential but still round numbers in everyday units. This is to minimise the number
// of buckets while allowing accurate measurement of thresholds which might be used in SLOs
// e.g. x% of pods start up within 30 seconds, or 15 minutes, etc.
PodStartTotalDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodStartTotalDurationKey,
Help: "Duration in seconds to start a pod since creation, including time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
Buckets: podStartupDurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{},
)
// FirstNetworkPodStartSLIDuration is a gauge that tracks the duration (in seconds) it takes for the first network pod to run,
// excluding the time for image pulling. This is an internal and temporary metric required because of limitations of the
// networking subsystem and CRI/CNI implementations that will be solved by https://github.com/containernetworking/cni/issues/859
// The metric represents the latency observed by a user to run workloads on a new node.
// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md
FirstNetworkPodStartSLIDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: FirstNetworkPodStartSLIDurationKey,
Help: "Duration in seconds to start the first network pod, excluding time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch",
StabilityLevel: metrics.INTERNAL,
},
)
// CgroupManagerDuration is a Histogram that tracks the duration (in seconds) it takes for cgroup manager operations to complete.
// Broken down by method.
CgroupManagerDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: CgroupManagerOperationsKey,
Help: "Duration in seconds for cgroup manager operations. Broken down by method.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_type"},
)
// PodWorkerStartDuration is a Histogram that tracks the duration (in seconds) it takes from kubelet seeing a pod to starting a worker.
PodWorkerStartDuration = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodWorkerStartDurationKey,
Help: "Duration in seconds from kubelet seeing a pod to starting a worker.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
)
// PodStatusSyncDuration is a Histogram that tracks the duration (in seconds) it takes from the time a pod
// status is generated to the time it is synced with the apiserver. If multiple status changes are generated
// on a pod before it is written to the API, the latency is from the first update to the last event.
PodStatusSyncDuration = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodStatusSyncDurationKey,
Help: "Duration in seconds to sync a pod status update. Measures time from detection of a change to pod status until the API is successfully updated for that pod, even if multiple intevening changes to pod status occur.",
Buckets: []float64{0.010, 0.050, 0.100, 0.500, 1, 5, 10, 20, 30, 45, 60},
StabilityLevel: metrics.ALPHA,
},
)
// PLEGRelistDuration is a Histogram that tracks the duration (in seconds) it takes for relisting pods in the Kubelet's
// Pod Lifecycle Event Generator (PLEG).
PLEGRelistDuration = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PLEGRelistDurationKey,
Help: "Duration in seconds for relisting pods in PLEG.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
)
// PLEGDiscardEvents is a Counter that tracks the number of discarded events in the Kubelet's Pod Lifecycle Event Generator (PLEG).
PLEGDiscardEvents = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PLEGDiscardEventsKey,
Help: "The number of discard events in PLEG.",
StabilityLevel: metrics.ALPHA,
},
)
// PLEGRelistInterval is a Histogram that tracks the intervals (in seconds) between relisting in the Kubelet's
// Pod Lifecycle Event Generator (PLEG).
PLEGRelistInterval = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PLEGRelistIntervalKey,
Help: "Interval in seconds between relisting in PLEG.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
)
// PLEGLastSeen is a Gauge giving the Unix timestamp when the Kubelet's
// Pod Lifecycle Event Generator (PLEG) was last seen active.
PLEGLastSeen = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: PLEGLastSeenKey,
Help: "Timestamp in seconds when PLEG was last seen active.",
StabilityLevel: metrics.ALPHA,
},
)
// EventedPLEGConnErr is a Counter that tracks the number of errors encountered during
// the establishment of streaming connection with the CRI runtime.
EventedPLEGConnErr = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: EventedPLEGConnErrKey,
Help: "The number of errors encountered during the establishment of streaming connection with the CRI runtime.",
StabilityLevel: metrics.ALPHA,
},
)
// EventedPLEGConn is a Counter that tracks the number of times a streaming client
// was obtained to receive CRI Events.
EventedPLEGConn = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: EventedPLEGConnKey,
Help: "The number of times a streaming client was obtained to receive CRI Events.",
StabilityLevel: metrics.ALPHA,
},
)
// EventedPLEGConnLatency is a Histogram that tracks the latency of streaming connection
// with the CRI runtime, measured in seconds.
EventedPLEGConnLatency = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: EventedPLEGConnLatencyKey,
Help: "The latency of streaming connection with the CRI runtime, measured in seconds.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
)
// RuntimeOperations is a Counter that tracks the cumulative number of remote runtime operations.
// Broken down by operation type.
RuntimeOperations = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: RuntimeOperationsKey,
Help: "Cumulative number of runtime operations by operation type.",
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_type"},
)
// RuntimeOperationsDuration is a Histogram that tracks the duration (in seconds) for remote runtime operations to complete.
// Broken down by operation type.
RuntimeOperationsDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: RuntimeOperationsDurationKey,
Help: "Duration in seconds of runtime operations. Broken down by operation type.",
Buckets: metrics.ExponentialBuckets(.005, 2.5, 14),
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_type"},
)
// RuntimeOperationsErrors is a Counter that tracks the cumulative number of remote runtime operations errors.
// Broken down by operation type.
RuntimeOperationsErrors = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: RuntimeOperationsErrorsKey,
Help: "Cumulative number of runtime operation errors by operation type.",
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_type"},
)
// Evictions is a Counter that tracks the cumulative number of pod evictions initiated by the kubelet.
// Broken down by eviction signal.
Evictions = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: EvictionsKey,
Help: "Cumulative number of pod evictions by eviction signal",
StabilityLevel: metrics.ALPHA,
},
[]string{"eviction_signal"},
)
// EvictionStatsAge is a Histogram that tracks the time (in seconds) between when stats are collected and when a pod is evicted
// based on those stats. Broken down by eviction signal.
EvictionStatsAge = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: EvictionStatsAgeKey,
Help: "Time between when stats are collected, and when pod is evicted based on those stats by eviction signal",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"eviction_signal"},
)
// Preemptions is a Counter that tracks the cumulative number of pod preemptions initiated by the kubelet.
// Broken down by preemption signal. A preemption is only recorded for one resource; the sum of all signals
// is the number of preemptions on the given node.
Preemptions = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PreemptionsKey,
Help: "Cumulative number of pod preemptions by preemption resource",
StabilityLevel: metrics.ALPHA,
},
[]string{"preemption_signal"},
)
// DevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations.
// Broken down by resource name.
DevicePluginRegistrationCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: DevicePluginRegistrationCountKey,
Help: "Cumulative number of device plugin registrations. Broken down by resource name.",
StabilityLevel: metrics.ALPHA,
},
[]string{"resource_name"},
)
// DevicePluginAllocationDuration is a Histogram that tracks the duration (in seconds) to serve a device plugin allocation request.
// Broken down by resource name.
DevicePluginAllocationDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: DevicePluginAllocationDurationKey,
Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"resource_name"},
)
// PodResourcesEndpointRequestsTotalCount is a Counter that tracks the cumulative number of requests to the PodResource endpoints.
// Broken down by server API version.
PodResourcesEndpointRequestsTotalCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointRequestsTotalKey,
Help: "Cumulative number of requests to the PodResource endpoint. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointRequestsListCount is a Counter that tracks the number of requests to the PodResource List() endpoint.
// Broken down by server API version.
PodResourcesEndpointRequestsListCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointRequestsListKey,
Help: "Number of requests to the PodResource List endpoint. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointRequestsGetAllocatableCount is a Counter that tracks the number of requests to the PodResource GetAllocatableResources() endpoint.
// Broken down by server API version.
PodResourcesEndpointRequestsGetAllocatableCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointRequestsGetAllocatableKey,
Help: "Number of requests to the PodResource GetAllocatableResources endpoint. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointErrorsListCount is a Counter that tracks the number of errors returned by the PodResource List() endpoint.
// Broken down by server API version.
PodResourcesEndpointErrorsListCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointErrorsListKey,
Help: "Number of requests to the PodResource List endpoint which returned error. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointErrorsGetAllocatableCount is a Counter that tracks the number of errors returned by the PodResource GetAllocatableResources() endpoint.
// Broken down by server API version.
PodResourcesEndpointErrorsGetAllocatableCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointErrorsGetAllocatableKey,
Help: "Number of requests to the PodResource GetAllocatableResources endpoint which returned error. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointRequestsGetCount is a Counter that tracks the number of requests to the PodResource Get() endpoint.
// Broken down by server API version.
PodResourcesEndpointRequestsGetCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointRequestsGetKey,
Help: "Number of requests to the PodResource Get endpoint. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// PodResourcesEndpointErrorsGetCount is a Counter that tracks the number of errors returned by the PodResource Get() endpoint.
// Broken down by server API version.
PodResourcesEndpointErrorsGetCount = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodResourcesEndpointErrorsGetKey,
Help: "Number of requests to the PodResource Get endpoint which returned error. Broken down by server api version.",
StabilityLevel: metrics.ALPHA,
},
[]string{"server_api_version"},
)
// RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations.
// Broken down by RuntimeClass.Handler.
RunPodSandboxDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: RunPodSandboxDurationKey,
Help: "Duration in seconds of the run_podsandbox operations. Broken down by RuntimeClass.Handler.",
// Use DefBuckets for now, will customize the buckets if necessary.
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"runtime_handler"},
)
// RunPodSandboxErrors is a Counter that tracks the cumulative number of Pod Sandbox operations errors.
// Broken down by RuntimeClass.Handler.
RunPodSandboxErrors = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: RunPodSandboxErrorsKey,
Help: "Cumulative number of the run_podsandbox operation errors by RuntimeClass.Handler.",
StabilityLevel: metrics.ALPHA,
},
[]string{"runtime_handler"},
)
// RunningPodCount is a gauge that tracks the number of Pods that currently have a running sandbox.
// It exposes the kubelet's internal state (how many pods have running containers in the container runtime), mainly for debugging purposes.
RunningPodCount = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: RunningPodsKey,
Help: "Number of pods that have a running pod sandbox",
StabilityLevel: metrics.ALPHA,
},
)
// RunningContainerCount is a gauge that tracks the number of containers currently running
RunningContainerCount = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: RunningContainersKey,
Help: "Number of containers currently running",
StabilityLevel: metrics.ALPHA,
},
[]string{"container_state"},
)
// DesiredPodCount tracks the count of pods the Kubelet thinks it should be running
DesiredPodCount = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: DesiredPodCountKey,
Help: "The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.",
StabilityLevel: metrics.ALPHA,
},
[]string{"static"},
)
// ActivePodCount tracks the count of pods the Kubelet considers as active when deciding to admit a new pod
ActivePodCount = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: ActivePodCountKey,
Help: "The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.",
StabilityLevel: metrics.ALPHA,
},
[]string{"static"},
)
// MirrorPodCount tracks the number of mirror pods the Kubelet should have created for static pods
MirrorPodCount = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: MirrorPodCountKey,
Help: "The number of mirror pods the kubelet will try to create (one per admitted static pod)",
StabilityLevel: metrics.ALPHA,
},
)
// WorkingPodCount tracks the count of pods in each lifecycle phase, whether they are static pods, and whether they are desired, orphaned, or runtime_only
WorkingPodCount = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: WorkingPodCountKey,
Help: "Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.",
StabilityLevel: metrics.ALPHA,
},
[]string{"lifecycle", "config", "static"},
)
// OrphanedRuntimePodTotal is incremented every time a pod is detected in the runtime without being known to the pod worker first
OrphanedRuntimePodTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: OrphanedRuntimePodTotalKey,
Help: "Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.",
StabilityLevel: metrics.ALPHA,
},
)
// RestartedPodTotal is incremented every time a pod with the same UID is deleted and recreated
RestartedPodTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: RestartedPodTotalKey,
Help: "Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)",
StabilityLevel: metrics.ALPHA,
},
[]string{"static"},
)
// StartedPodsTotal is a counter that tracks pod sandbox creation operations
StartedPodsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedPodsTotalKey,
Help: "Cumulative number of pods started",
StabilityLevel: metrics.ALPHA,
},
)
// StartedPodsErrorsTotal is a counter that tracks the number of errors creating pod sandboxes
StartedPodsErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedPodsErrorsTotalKey,
Help: "Cumulative number of errors when starting pods",
StabilityLevel: metrics.ALPHA,
},
)
// StartedContainersTotal is a counter that tracks the number of container creation operations
StartedContainersTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedContainersTotalKey,
Help: "Cumulative number of containers started",
StabilityLevel: metrics.ALPHA,
},
[]string{"container_type"},
)
// StartedContainersErrorsTotal is a counter that tracks the number of errors creating containers
StartedContainersErrorsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedContainersErrorsTotalKey,
Help: "Cumulative number of errors when starting containers",
StabilityLevel: metrics.ALPHA,
},
[]string{"container_type", "code"},
)
// StartedHostProcessContainersTotal is a counter that tracks the number of hostprocess container creation operations
StartedHostProcessContainersTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedHostProcessContainersTotalKey,
Help: "Cumulative number of hostprocess containers started. This metric will only be collected on Windows.",
StabilityLevel: metrics.ALPHA,
},
[]string{"container_type"},
)
// StartedHostProcessContainersErrorsTotal is a counter that tracks the number of errors creating hostprocess containers
StartedHostProcessContainersErrorsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedHostProcessContainersErrorsTotalKey,
Help: "Cumulative number of errors when starting hostprocess containers. This metric will only be collected on Windows.",
StabilityLevel: metrics.ALPHA,
},
[]string{"container_type", "code"},
)
// StartedUserNamespacedPodsTotal is a counter that tracks the number of user-namespaced pods the kubelet has attempted to create.
StartedUserNamespacedPodsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedUserNamespacedPodsTotalKey,
Help: "Cumulative number of pods with user namespaces started. This metric will only be collected on Linux.",
StabilityLevel: metrics.ALPHA,
},
)
// StartedUserNamespacedPodsErrorsTotal is a counter that tracks the number of errors creating user namespaced pods
StartedUserNamespacedPodsErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: StartedUserNamespacedPodsErrorsTotalKey,
Help: "Cumulative number of errors when starting pods with user namespaces. This metric will only be collected on Linux.",
StabilityLevel: metrics.ALPHA,
},
)
// ManagedEphemeralContainers is a gauge that indicates how many ephemeral containers are managed by this kubelet.
ManagedEphemeralContainers = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: ManagedEphemeralContainersKey,
Help: "Current number of ephemeral containers in pods managed by this kubelet.",
StabilityLevel: metrics.ALPHA,
},
)
// GracefulShutdownStartTime is a gauge that records the time at which the kubelet started graceful shutdown.
GracefulShutdownStartTime = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: "graceful_shutdown_start_time_seconds",
Help: "Last graceful shutdown start time since unix epoch in seconds",
StabilityLevel: metrics.ALPHA,
},
)
// GracefulShutdownEndTime is a gauge that records the time at which the kubelet completed graceful shutdown.
GracefulShutdownEndTime = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: "graceful_shutdown_end_time_seconds",
Help: "Last graceful shutdown end time since unix epoch in seconds",
StabilityLevel: metrics.ALPHA,
},
)
LifecycleHandlerHTTPFallbacks = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: "lifecycle_handler_http_fallbacks_total",
Help: "The number of times lifecycle handlers successfully fell back to http from https.",
StabilityLevel: metrics.ALPHA,
},
)
// CPUManagerPinningRequestsTotal tracks the number of times the pod spec will cause the cpu manager to pin cores
CPUManagerPinningRequestsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: CPUManagerPinningRequestsTotalKey,
Help: "The number of cpu core allocations which required pinning.",
StabilityLevel: metrics.ALPHA,
},
)
// CPUManagerPinningErrorsTotal tracks the number of times the pod spec required the cpu manager to pin cores, but the allocation failed
CPUManagerPinningErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: CPUManagerPinningErrorsTotalKey,
Help: "The number of cpu core allocations which required pinning failed.",
StabilityLevel: metrics.ALPHA,
},
)
// CPUManagerSharedPoolSizeMilliCores reports the current size of the shared CPU pool for non-guaranteed pods
CPUManagerSharedPoolSizeMilliCores = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: CPUManagerSharedPoolSizeMilliCoresKey,
Help: "The size of the shared CPU pool for non-guaranteed QoS pods, in millicores.",
StabilityLevel: metrics.ALPHA,
},
)
// CPUManagerExclusiveCPUsAllocationCount reports the total number of CPUs exclusively allocated to containers running on this node
CPUManagerExclusiveCPUsAllocationCount = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: CPUManagerExclusiveCPUsAllocationCountKey,
Help: "The total number of CPUs exclusively allocated to containers running on this node",
StabilityLevel: metrics.ALPHA,
},
)
// CPUManagerAllocationPerNUMA tracks the count of CPUs allocated per NUMA node
CPUManagerAllocationPerNUMA = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: CPUManagerAllocationPerNUMAKey,
Help: "Number of CPUs allocated per NUMA node",
StabilityLevel: metrics.ALPHA,
},
[]string{AlignedNUMANode},
)
// ContainerAlignedComputeResources reports the count of resource allocations which granted aligned resources, per alignment boundary
ContainerAlignedComputeResources = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ContainerAlignedComputeResourcesNameKey,
Help: "Cumulative number of aligned compute resources allocated to containers by alignment type.",
StabilityLevel: metrics.ALPHA,
},
[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
)
// ContainerAlignedComputeResourcesFailure reports the count of resource allocation attempts which failed to align resources, per alignment boundary
ContainerAlignedComputeResourcesFailure = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ContainerAlignedComputeResourcesFailureNameKey,
Help: "Cumulative number of failures to allocate aligned compute resources to containers by alignment type.",
StabilityLevel: metrics.ALPHA,
},
[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
)
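// MemoryManagerPinningRequestTotal tracks the number of times the pod spec required the memory manager to pin memory pages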
MemoryManagerPinningRequestTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: MemoryManagerPinningRequestsTotalKey,
Help: "The number of memory pages allocations which required pinning.",
StabilityLevel: metrics.ALPHA,
})
// MemoryManagerPinningErrorsTotal tracks the number of times the pod spec required the memory manager to pin memory pages, but the allocation failed
MemoryManagerPinningErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: MemoryManagerPinningErrorsTotalKey,
Help: "The number of memory pages allocations which required pinning that failed.",
StabilityLevel: metrics.ALPHA,
},
)
// TopologyManagerAdmissionRequestsTotal tracks the number of times the pod spec will cause the topology manager to admit a pod
TopologyManagerAdmissionRequestsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: TopologyManagerAdmissionRequestsTotalKey,
Help: "The number of admission requests where resources have to be aligned.",
StabilityLevel: metrics.ALPHA,
},
)
// TopologyManagerAdmissionErrorsTotal tracks the number of times the pod spec required the topology manager to admit a pod, but the admission failed
TopologyManagerAdmissionErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: TopologyManagerAdmissionErrorsTotalKey,
Help: "The number of admission request failures where resources could not be aligned.",
StabilityLevel: metrics.ALPHA,
},
)
// TopologyManagerAdmissionDuration is a Histogram that tracks the duration (in milliseconds) to serve a pod admission request.
TopologyManagerAdmissionDuration = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: TopologyManagerAdmissionDurationKey,
Help: "Duration in milliseconds to serve a pod admission request.",
Buckets: metrics.ExponentialBuckets(.05, 2, 15),
StabilityLevel: metrics.ALPHA,
},
)
// OrphanPodCleanedVolumes is the number of orphaned Pods whose volumes were cleaned by removeOrphanedPodVolumeDirs during the last sweep.
OrphanPodCleanedVolumes = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: orphanPodCleanedVolumesKey,
Help: "The total number of orphaned Pods whose volumes were cleaned in the last periodic sweep.",
StabilityLevel: metrics.ALPHA,
},
)
// OrphanPodCleanedVolumesErrors is the number of orphaned Pods whose volumes removeOrphanedPodVolumeDirs failed to clean during the last sweep.
OrphanPodCleanedVolumesErrors = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: orphanPodCleanedVolumesErrorsKey,
Help: "The number of orphaned Pods whose volumes failed to be cleaned in the last periodic sweep.",
StabilityLevel: metrics.ALPHA,
},
)
NodeStartupPreKubeletDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeStartupPreKubeletKey,
Help: "Duration in seconds of node startup before kubelet starts.",
StabilityLevel: metrics.ALPHA,
},
)
NodeStartupPreRegistrationDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeStartupPreRegistrationKey,
Help: "Duration in seconds of node startup before registration.",
StabilityLevel: metrics.ALPHA,
},
)
NodeStartupRegistrationDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeStartupRegistrationKey,
Help: "Duration in seconds of node startup during registration.",
StabilityLevel: metrics.ALPHA,
},
)
NodeStartupPostRegistrationDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeStartupPostRegistrationKey,
Help: "Duration in seconds of node startup after registration.",
StabilityLevel: metrics.ALPHA,
},
)
NodeStartupDuration = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: NodeStartupKey,
Help: "Duration in seconds of node startup in total.",
StabilityLevel: metrics.ALPHA,
},
)
ImageGarbageCollectedTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ImageGarbageCollectedTotalKey,
Help: "Total number of images garbage collected by the kubelet, whether through disk usage or image age.",
StabilityLevel: metrics.ALPHA,
},
[]string{"reason"},
)
// ImagePullDuration is a Histogram that tracks the duration (in seconds) it takes for an image to be pulled,
// including the time spent in the waiting queue of the image puller.
// The metric is broken down by bucketed image size.
ImagePullDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: ImagePullDurationKey,
Help: "Duration in seconds to pull an image.",
Buckets: imagePullDurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"image_size_in_bytes"},
)
LifecycleHandlerSleepTerminated = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: "sleep_action_terminated_early_total",
Help: "The number of times lifecycle sleep handler got terminated before it finishes",
StabilityLevel: metrics.ALPHA,
},
)
CgroupVersion = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: CgroupVersionKey,
Help: "cgroup version on the hosts.",
StabilityLevel: metrics.ALPHA,
},
)
CRILosingSupport = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: CRILosingSupportKey,
Help: "the Kubernetes version that the currently running CRI implementation will lose support on if not upgraded.",
StabilityLevel: metrics.ALPHA,
},
[]string{"version"},
)
// DRAOperationsDuration tracks the duration of the DRA PrepareResources and UnprepareResources requests.
DRAOperationsDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DRASubsystem,
Name: DRAOperationsDurationKey,
Help: "Latency histogram in seconds for the duration of handling all ResourceClaims referenced by a pod when the pod starts or stops. Identified by the name of the operation (PrepareResources or UnprepareResources) and separated by the success of the operation. The number of failed operations is provided through the histogram's overall count.",
Buckets: DRADurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"operation_name", "is_error"},
)
// DRAGRPCOperationsDuration tracks the duration of the DRA GRPC operations.
DRAGRPCOperationsDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DRASubsystem,
Name: DRAGRPCOperationsDurationKey,
Help: "Duration in seconds of the DRA gRPC operations",
Buckets: DRADurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"driver_name", "method_name", "grpc_status_code"},
)
DRAResourceClaimsInUseDesc = metrics.NewDesc(DRASubsystem+"_resource_claims_in_use",
"The number of ResourceClaims that are currently in use on the node, by driver name (driver_name label value) and across all drivers (special value <any> for driver_name). Note that the sum of all by-driver counts is not the total number of in-use ResourceClaims because the same ResourceClaim might use devices from different drivers. Instead, use the count for the <any> driver_name.",
[]string{"driver_name"},
nil,
metrics.ALPHA,
"",
)
// AdmissionRejectionsTotal tracks the number of failed pod admissions; currently it is only recorded for pod additions.
AdmissionRejectionsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: AdmissionRejectionsTotalKey,
Help: "Cumulative number pod admission rejections by the Kubelet.",
StabilityLevel: metrics.ALPHA,
},
[]string{"reason"},
)
// ImageVolumeRequestedTotal tracks the number of requested image volumes.
ImageVolumeRequestedTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ImageVolumeRequestedTotalKey,
Help: "Number of requested image volumes.",
StabilityLevel: metrics.ALPHA,
},
)
// ImageVolumeMountedSucceedTotal tracks the number of successful image volume mounts.
ImageVolumeMountedSucceedTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ImageVolumeMountedSucceedTotalKey,
Help: "Number of successful image volume mounts.",
StabilityLevel: metrics.ALPHA,
},
)
// ImageVolumeMountedErrorsTotal tracks the number of failed image volume mounts.
ImageVolumeMountedErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ImageVolumeMountedErrorsTotalKey,
Help: "Number of failed image volume mounts.",
StabilityLevel: metrics.ALPHA,
},
)
// ContainerRequestedResizes tracks the cumulative number of requested resizes at the container level.
ContainerRequestedResizes = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: ContainerRequestedResizesKey,
Help: "Number of requested resizes, counted at the container level. Different resources on the same container are counted separately. The 'requirement' label refers to 'memory' or 'limits'; the 'operation' label can be one of 'add', 'remove', 'increase' or 'decrease'.",
StabilityLevel: metrics.ALPHA,
},
[]string{"resource", "requirement", "operation"},
)
// PodResizeDurationMilliseconds tracks the duration (in milliseconds) it takes to resize a pod.
PodResizeDurationMilliseconds = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: KubeletSubsystem,
Name: PodResizeDurationMillisecondsKey,
Help: "Duration in milliseconds to actuate a pod resize",
Buckets: podResizeDurationBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"success"},
)
// PodPendingResizes tracks the number of pending resizes for pods.
PodPendingResizes = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: PodPendingResizesKey,
Help: "Number of pending resizes for pods.",
StabilityLevel: metrics.ALPHA,
},
[]string{"reason"},
)
// PodInfeasibleResizes tracks the number of infeasible resizes for pods.
PodInfeasibleResizes = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodInfeasibleResizesKey,
Help: "Number of infeasible resizes for pods.",
StabilityLevel: metrics.ALPHA,
},
[]string{"reason_detail"},
)
// PodInProgressResizes tracks the number of in-progress resizes for pods.
PodInProgressResizes = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: KubeletSubsystem,
Name: PodInProgressResizesKey,
Help: "Number of in-progress resizes for pods.",
StabilityLevel: metrics.ALPHA,
},
)
// PodDeferredAcceptedResizes tracks the cumulative number of deferred accepted resizes for pods.
PodDeferredAcceptedResizes = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: KubeletSubsystem,
Name: PodDeferredAcceptedResizesKey,
Help: "Cumulative number of resizes that were accepted after being deferred.",
StabilityLevel: metrics.ALPHA,
},
[]string{"retry_trigger"},
)
)
var registerMetrics sync.Once
// Register registers all metrics.
func Register() {
// Register the metrics.
registerMetrics.Do(func() {
legacyregistry.MustRegister(FirstNetworkPodStartSLIDuration)
legacyregistry.MustRegister(NodeName)
legacyregistry.MustRegister(PodWorkerDuration)
legacyregistry.MustRegister(PodStartDuration)
legacyregistry.MustRegister(PodStartSLIDuration)
legacyregistry.MustRegister(PodStartTotalDuration)
legacyregistry.MustRegister(ImagePullDuration)
legacyregistry.MustRegister(ImageGarbageCollectedTotal)
legacyregistry.MustRegister(NodeStartupPreKubeletDuration)
legacyregistry.MustRegister(NodeStartupPreRegistrationDuration)
legacyregistry.MustRegister(NodeStartupRegistrationDuration)
legacyregistry.MustRegister(NodeStartupPostRegistrationDuration)
legacyregistry.MustRegister(NodeStartupDuration)
legacyregistry.MustRegister(CgroupManagerDuration)
legacyregistry.MustRegister(PodWorkerStartDuration)
legacyregistry.MustRegister(PodStatusSyncDuration)
legacyregistry.MustRegister(ContainersPerPodCount)
legacyregistry.MustRegister(PLEGRelistDuration)
legacyregistry.MustRegister(PLEGDiscardEvents)
legacyregistry.MustRegister(PLEGRelistInterval)
legacyregistry.MustRegister(PLEGLastSeen)
legacyregistry.MustRegister(EventedPLEGConnErr)
legacyregistry.MustRegister(EventedPLEGConn)
legacyregistry.MustRegister(EventedPLEGConnLatency)
legacyregistry.MustRegister(RuntimeOperations)
legacyregistry.MustRegister(RuntimeOperationsDuration)
legacyregistry.MustRegister(RuntimeOperationsErrors)
legacyregistry.MustRegister(Evictions)
legacyregistry.MustRegister(EvictionStatsAge)
legacyregistry.MustRegister(Preemptions)
legacyregistry.MustRegister(DevicePluginRegistrationCount)
legacyregistry.MustRegister(DevicePluginAllocationDuration)
legacyregistry.MustRegister(RunningContainerCount)
legacyregistry.MustRegister(RunningPodCount)
legacyregistry.MustRegister(DesiredPodCount)
legacyregistry.MustRegister(ActivePodCount)
legacyregistry.MustRegister(MirrorPodCount)
legacyregistry.MustRegister(WorkingPodCount)
legacyregistry.MustRegister(OrphanedRuntimePodTotal)
legacyregistry.MustRegister(RestartedPodTotal)
legacyregistry.MustRegister(ManagedEphemeralContainers)
legacyregistry.MustRegister(PodResourcesEndpointRequestsTotalCount)
legacyregistry.MustRegister(PodResourcesEndpointRequestsListCount)
legacyregistry.MustRegister(PodResourcesEndpointRequestsGetAllocatableCount)
legacyregistry.MustRegister(PodResourcesEndpointErrorsListCount)
legacyregistry.MustRegister(PodResourcesEndpointErrorsGetAllocatableCount)
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResourcesGet) {
legacyregistry.MustRegister(PodResourcesEndpointRequestsGetCount)
legacyregistry.MustRegister(PodResourcesEndpointErrorsGetCount)
}
if utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
legacyregistry.MustRegister(StartedUserNamespacedPodsTotal)
legacyregistry.MustRegister(StartedUserNamespacedPodsErrorsTotal)
}
legacyregistry.MustRegister(StartedPodsTotal)
legacyregistry.MustRegister(StartedPodsErrorsTotal)
legacyregistry.MustRegister(StartedContainersTotal)
legacyregistry.MustRegister(StartedContainersErrorsTotal)
legacyregistry.MustRegister(StartedHostProcessContainersTotal)
legacyregistry.MustRegister(StartedHostProcessContainersErrorsTotal)
legacyregistry.MustRegister(RunPodSandboxDuration)
legacyregistry.MustRegister(RunPodSandboxErrors)
legacyregistry.MustRegister(CPUManagerPinningRequestsTotal)
legacyregistry.MustRegister(CPUManagerPinningErrorsTotal)
legacyregistry.MustRegister(CPUManagerSharedPoolSizeMilliCores)
legacyregistry.MustRegister(CPUManagerExclusiveCPUsAllocationCount)
legacyregistry.MustRegister(CPUManagerAllocationPerNUMA)
legacyregistry.MustRegister(ContainerAlignedComputeResources)
legacyregistry.MustRegister(ContainerAlignedComputeResourcesFailure)
legacyregistry.MustRegister(MemoryManagerPinningRequestTotal)
legacyregistry.MustRegister(MemoryManagerPinningErrorsTotal)
legacyregistry.MustRegister(TopologyManagerAdmissionRequestsTotal)
legacyregistry.MustRegister(TopologyManagerAdmissionErrorsTotal)
legacyregistry.MustRegister(TopologyManagerAdmissionDuration)
legacyregistry.MustRegister(OrphanPodCleanedVolumes)
legacyregistry.MustRegister(OrphanPodCleanedVolumesErrors)
if utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdown) &&
utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority) {
legacyregistry.MustRegister(GracefulShutdownStartTime)
legacyregistry.MustRegister(GracefulShutdownEndTime)
}
legacyregistry.MustRegister(LifecycleHandlerHTTPFallbacks)
legacyregistry.MustRegister(LifecycleHandlerSleepTerminated)
legacyregistry.MustRegister(CgroupVersion)
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
legacyregistry.MustRegister(
DRAOperationsDuration,
DRAGRPCOperationsDuration,
)
}
legacyregistry.MustRegister(AdmissionRejectionsTotal)
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
legacyregistry.MustRegister(ImageVolumeRequestedTotal)
legacyregistry.MustRegister(ImageVolumeMountedSucceedTotal)
legacyregistry.MustRegister(ImageVolumeMountedErrorsTotal)
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
legacyregistry.MustRegister(ContainerRequestedResizes)
legacyregistry.MustRegister(PodResizeDurationMilliseconds)
legacyregistry.MustRegister(PodPendingResizes)
legacyregistry.MustRegister(PodInfeasibleResizes)
legacyregistry.MustRegister(PodInProgressResizes)
legacyregistry.MustRegister(PodDeferredAcceptedResizes)
}
})
}
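// Illustrative usage sketch (editor's addition; the wiring below is an
// assumption, not part of this package): a binary typically calls Register
// once and then serves the legacy registry over HTTP.
//
//	import (
//		"net/http"
//		"k8s.io/component-base/metrics/legacyregistry"
//	)
//
//	func serveKubeletMetrics(addr string) error {
//		Register() // guarded by sync.Once, safe to call from multiple paths
//		mux := http.NewServeMux()
//		mux.Handle("/metrics", legacyregistry.Handler())
//		return http.ListenAndServe(addr, mux)
//	}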
func RegisterCollectors(collectors ...metrics.StableCollector) {
legacyregistry.CustomMustRegister(collectors...)
}
// GetGather returns the gatherer. It is used by test cases outside the current package.
func GetGather() metrics.Gatherer {
return legacyregistry.DefaultGatherer
}
// SinceInSeconds gets the time since the specified start in seconds.
func SinceInSeconds(start time.Time) float64 {
return time.Since(start).Seconds()
}
// SetNodeName sets the NodeName Gauge to 1.
func SetNodeName(name types.NodeName) {
NodeName.WithLabelValues(string(name)).Set(1)
}
func GetImageSizeBucket(sizeInBytes uint64) string {
if sizeInBytes == 0 {
return "N/A"
}
for i := len(imageSizeBuckets) - 1; i >= 0; i-- {
if sizeInBytes > imageSizeBuckets[i].lowerBoundInBytes {
return imageSizeBuckets[i].label
}
}
// Unreachable in practice: sizeInBytes == 0 is handled above, and any positive size falls into the first bucket.
return ""
}
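// Worked examples for GetImageSizeBucket, derived from imageSizeBuckets above
// (editor's addition):
//
//	GetImageSizeBucket(0)               // "N/A"    (size unknown)
//	GetImageSizeBucket(5 * 1024 * 1024) // "0-10MB"
//	GetImageSizeBucket(3 << 30)         // "1GB-5GB"
//	GetImageSizeBucket(200 << 30)       // "GT100GB"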
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dns
import (
"context"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/tools/record"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/apis/core/validation"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/klog/v2"
utilio "k8s.io/utils/io"
utilnet "k8s.io/utils/net"
)
var (
// The default dns opt strings.
defaultDNSOptions = []string{"ndots:5"}
)
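// Editor's note: with "ndots:5", a name containing fewer than five dots is
// first tried against each entry of the search list before being queried as
// an absolute name, which is what lets short in-cluster names such as
// "myservice.mynamespace" resolve through the generated search domains.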
type podDNSType int
const (
podDNSCluster podDNSType = iota
podDNSHost
podDNSNone
)
const (
maxResolvConfLength = 10 * 1 << 20 // 10MB
)
// Configurer is used for setting up DNS resolver configuration when launching pods.
type Configurer struct {
recorder record.EventRecorder
getHostDNSConfig func(klog.Logger, string) (*runtimeapi.DNSConfig, error)
nodeRef *v1.ObjectReference
nodeIPs []net.IP
// If non-nil, use this for container DNS server.
clusterDNS []net.IP
// If non-empty, use this for container DNS search.
ClusterDomain string
// The path to the DNS resolver configuration file used as the base to generate
// the container's DNS resolver configuration file. This can be used in
// conjunction with clusterDomain and clusterDNS.
ResolverConfig string
}
// NewConfigurer returns a DNS configurer for launching pods.
func NewConfigurer(recorder record.EventRecorder, nodeRef *v1.ObjectReference, nodeIPs []net.IP, clusterDNS []net.IP, clusterDomain, resolverConfig string) *Configurer {
return &Configurer{
recorder: recorder,
getHostDNSConfig: getHostDNSConfig,
nodeRef: nodeRef,
nodeIPs: nodeIPs,
clusterDNS: clusterDNS,
ClusterDomain: clusterDomain,
ResolverConfig: resolverConfig,
}
}
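// Illustrative construction sketch (editor's addition; the recorder, nodeRef
// and nodeIPs values are assumptions, not from this file):
//
//	c := NewConfigurer(recorder, nodeRef, nodeIPs,
//		[]net.IP{net.ParseIP("10.96.0.10")}, // cluster DNS service IP
//		"cluster.local",                     // cluster domain
//		"/etc/resolv.conf")                  // host resolver config used as base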
func omitDuplicates(strs []string) []string {
uniqueStrs := make(map[string]bool)
var ret []string
for _, str := range strs {
if !uniqueStrs[str] {
ret = append(ret, str)
uniqueStrs[str] = true
}
}
return ret
}
func (c *Configurer) formDNSSearchFitsLimits(logger klog.Logger, composedSearch []string, pod *v1.Pod) []string {
limitsExceeded := false
maxDNSSearchPaths, maxDNSSearchListChars := validation.MaxDNSSearchPaths, validation.MaxDNSSearchListChars
if len(composedSearch) > maxDNSSearchPaths {
composedSearch = composedSearch[:maxDNSSearchPaths]
limitsExceeded = true
}
// In some DNS resolvers (e.g. glibc 2.28), DNS resolution causes abort() if there is a
// search path exceeding 255 characters. We have to filter those out.
l := 0
for _, search := range composedSearch {
if len(search) > utilvalidation.DNS1123SubdomainMaxLength {
limitsExceeded = true
continue
}
composedSearch[l] = search
l++
}
composedSearch = composedSearch[:l]
if resolvSearchLineStrLen := len(strings.Join(composedSearch, " ")); resolvSearchLineStrLen > maxDNSSearchListChars {
cutDomainsNum := 0
cutDomainsLen := 0
for i := len(composedSearch) - 1; i >= 0; i-- {
cutDomainsLen += len(composedSearch[i]) + 1
cutDomainsNum++
if (resolvSearchLineStrLen - cutDomainsLen) <= maxDNSSearchListChars {
break
}
}
composedSearch = composedSearch[:(len(composedSearch) - cutDomainsNum)]
limitsExceeded = true
}
if limitsExceeded {
err := fmt.Errorf("Search Line limits were exceeded, some search paths have been omitted, the applied search line is: %s", strings.Join(composedSearch, " "))
c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", err.Error())
logger.Error(err, "Search Line limits exceeded")
}
return composedSearch
}
func (c *Configurer) formDNSNameserversFitsLimits(logger klog.Logger, nameservers []string, pod *v1.Pod) []string {
if len(nameservers) > validation.MaxDNSNameservers {
nameservers = nameservers[0:validation.MaxDNSNameservers]
err := fmt.Errorf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, " "))
c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", err.Error())
logger.Error(err, "Nameserver limits exceeded")
}
return nameservers
}
func (c *Configurer) formDNSConfigFitsLimits(logger klog.Logger, dnsConfig *runtimeapi.DNSConfig, pod *v1.Pod) *runtimeapi.DNSConfig {
dnsConfig.Servers = c.formDNSNameserversFitsLimits(logger, dnsConfig.Servers, pod)
dnsConfig.Searches = c.formDNSSearchFitsLimits(logger, dnsConfig.Searches, pod)
return dnsConfig
}
func (c *Configurer) generateSearchesForDNSClusterFirst(hostSearch []string, pod *v1.Pod) []string {
if c.ClusterDomain == "" {
return hostSearch
}
nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, c.ClusterDomain)
svcDomain := fmt.Sprintf("svc.%s", c.ClusterDomain)
clusterSearch := []string{nsSvcDomain, svcDomain, c.ClusterDomain}
return omitDuplicates(append(clusterSearch, hostSearch...))
}
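// As an illustration (editorial sketch; values hypothetical): for a pod in
// namespace "default" with ClusterDomain "cluster.local" and a host search
// list of []string{"example.com", "cluster.local"}, the generated list is
//
//	[]string{"default.svc.cluster.local", "svc.cluster.local", "cluster.local", "example.com"}
//
// because the cluster-domain entries come first and duplicates from the host
// list are dropped.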
// CheckLimitsForResolvConf checks limits in resolv.conf.
func (c *Configurer) CheckLimitsForResolvConf(logger klog.Logger) {
f, err := os.Open(c.ResolverConfig)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
logger.V(4).Info("Check limits for resolv.conf failed at file open", "err", err)
return
}
defer f.Close()
_, hostSearch, _, err := parseResolvConf(f)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
logger.V(4).Info("Check limits for resolv.conf failed at parse resolv.conf", "err", err)
return
}
domainCountLimit, maxDNSSearchListChars := validation.MaxDNSSearchPaths, validation.MaxDNSSearchListChars
if c.ClusterDomain != "" {
domainCountLimit -= 3
}
if len(hostSearch) > domainCountLimit {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
logger.V(4).Info("Check limits for resolv.conf failed", "eventlog", log)
return
}
for _, search := range hostSearch {
if len(search) > utilvalidation.DNS1123SubdomainMaxLength {
log := fmt.Sprintf("Resolv.conf file %q contains a search path which length is more than allowed %d chars!", c.ResolverConfig, utilvalidation.DNS1123SubdomainMaxLength)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
logger.V(4).Info("Check limits for resolv.conf failed", "eventlog", log)
return
}
}
if len(strings.Join(hostSearch, " ")) > maxDNSSearchListChars {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, maxDNSSearchListChars)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
logger.V(4).Info("Check limits for resolv.conf failed", "eventlog", log)
return
}
}
// parseResolvConf reads a resolv.conf file from the given reader, and parses
// it into nameservers, searches and options, possibly returning an error.
func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, options []string, err error) {
file, err := utilio.ReadAtMost(reader, maxResolvConfLength)
if err != nil {
return nil, nil, nil, err
}
// Lines of the form "nameserver 1.2.3.4" accumulate.
nameservers = []string{}
// Lines of the form "search example.com" overrule - last one wins.
searches = []string{}
// Lines of the form "option ndots:5 attempts:2" overrule - last one wins.
// Each option is recorded as an element in the array.
options = []string{}
var allErrors []error
lines := strings.Split(string(file), "\n")
for l := range lines {
trimmed := strings.TrimSpace(lines[l])
if strings.HasPrefix(trimmed, "#") {
continue
}
fields := strings.Fields(trimmed)
if len(fields) == 0 {
continue
}
if fields[0] == "nameserver" {
if len(fields) >= 2 {
nameservers = append(nameservers, fields[1])
} else {
allErrors = append(allErrors, fmt.Errorf("nameserver list is empty"))
}
}
if fields[0] == "search" {
// Normalise search fields so the same domain with and without trailing dot will only count once, to avoid hitting search validation limits.
searches = []string{}
for _, s := range fields[1:] {
if s != "." {
searches = append(searches, strings.TrimSuffix(s, "."))
}
}
}
if fields[0] == "options" {
options = appendOptions(options, fields[1:]...)
}
}
return nameservers, searches, options, utilerrors.NewAggregate(allErrors)
}
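// A minimal sketch (not part of the upstream source) of parseResolvConf on a
// hypothetical resolv.conf:
//
//	r := strings.NewReader("# comment\nnameserver 10.0.0.10\nsearch cluster.local. example.com\noptions ndots:5 attempts:2\n")
//	nameservers, searches, options, err := parseResolvConf(r)
//	// nameservers: []string{"10.0.0.10"}
//	// searches:    []string{"cluster.local", "example.com"} (trailing dots trimmed)
//	// options:     contains "ndots:5" and "attempts:2" (map iteration order varies)
//	// err:         nil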
// getDNSConfig reads a resolv.conf-like file and returns the DNS config options from it.
// It returns an empty DNSConfig if the given resolverConfigFile is an empty string.
func getDNSConfig(logger klog.Logger, resolverConfigFile string) (*runtimeapi.DNSConfig, error) {
var hostDNS, hostSearch, hostOptions []string
// Get host DNS settings
if resolverConfigFile != "" {
f, err := os.Open(resolverConfigFile)
if err != nil {
logger.Error(err, "Could not open resolv conf file.")
return nil, err
}
defer f.Close()
hostDNS, hostSearch, hostOptions, err = parseResolvConf(f)
if err != nil {
err := fmt.Errorf("Encountered error while parsing resolv conf file. Error: %w", err)
logger.Error(err, "Could not parse resolv conf file.")
return nil, err
}
}
return &runtimeapi.DNSConfig{
Servers: hostDNS,
Searches: hostSearch,
Options: hostOptions,
}, nil
}
func getPodDNSType(pod *v1.Pod) (podDNSType, error) {
dnsPolicy := pod.Spec.DNSPolicy
switch dnsPolicy {
case v1.DNSNone:
return podDNSNone, nil
case v1.DNSClusterFirstWithHostNet:
return podDNSCluster, nil
case v1.DNSClusterFirst:
if !kubecontainer.IsHostNetworkPod(pod) {
return podDNSCluster, nil
}
// Fall back to DNSDefault for pods on host network.
fallthrough
case v1.DNSDefault:
return podDNSHost, nil
}
// This should not happen as kube-apiserver should have rejected
// invalid dnsPolicy.
return podDNSCluster, fmt.Errorf("invalid DNSPolicy=%v", dnsPolicy)
}
// mergeDNSOptions merges DNS options. If duplicated, entries given by PodDNSConfigOption will
// overwrite the existing ones.
func mergeDNSOptions(existingDNSConfigOptions []string, dnsConfigOptions []v1.PodDNSConfigOption) []string {
optionsMap := make(map[string]string)
for _, op := range existingDNSConfigOptions {
if index := strings.Index(op, ":"); index != -1 {
optionsMap[op[:index]] = op[index+1:]
} else {
optionsMap[op] = ""
}
}
for _, op := range dnsConfigOptions {
if op.Value != nil {
optionsMap[op.Name] = *op.Value
} else {
optionsMap[op.Name] = ""
}
}
// Reconvert DNS options into a string array.
options := []string{}
for opName, opValue := range optionsMap {
op := opName
if opValue != "" {
op = op + ":" + opValue
}
options = append(options, op)
}
return options
}
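// A short sketch (not part of the upstream source; values hypothetical) of
// the override behavior:
//
//	ndots := "3"
//	merged := mergeDNSOptions(
//		[]string{"ndots:5", "attempts:2"},
//		[]v1.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
//	)
//	// merged contains "ndots:3" and "attempts:2" (map iteration order varies)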
// appendOptions appends options to the given list, but does not add duplicates.
// A later option overwrites an earlier one with the same name, whether it
// appears on a new line or on the same line.
func appendOptions(options []string, newOption ...string) []string {
var optionMap = make(map[string]string)
for _, option := range options {
optName := strings.Split(option, ":")[0]
optionMap[optName] = option
}
for _, option := range newOption {
optName := strings.Split(option, ":")[0]
optionMap[optName] = option
}
options = []string{}
for _, v := range optionMap {
options = append(options, v)
}
return options
}
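// For example (editorial sketch): a later occurrence wins regardless of
// whether it came from the same "options" line or a subsequent one:
//
//	appendOptions([]string{"ndots:5"}, "ndots:3", "attempts:2")
//	// returns "ndots:3" and "attempts:2" (order may vary)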
// appendDNSConfig appends DNS servers, search paths and options given by
// PodDNSConfig to the existing DNS config. Duplicated entries will be merged.
// This assumes existingDNSConfig and dnsConfig are not nil.
func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodDNSConfig) *runtimeapi.DNSConfig {
existingDNSConfig.Servers = omitDuplicates(append(existingDNSConfig.Servers, dnsConfig.Nameservers...))
existingDNSConfig.Searches = omitDuplicates(append(existingDNSConfig.Searches, dnsConfig.Searches...))
existingDNSConfig.Options = mergeDNSOptions(existingDNSConfig.Options, dnsConfig.Options)
return existingDNSConfig
}
// GetPodDNS returns DNS settings for the pod.
func (c *Configurer) GetPodDNS(ctx context.Context, pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
logger := klog.FromContext(ctx)
dnsConfig, err := c.getHostDNSConfig(logger, c.ResolverConfig)
if err != nil {
return nil, err
}
dnsType, err := getPodDNSType(pod)
if err != nil {
logger.Error(err, "Failed to get DNS type for pod. Falling back to DNSClusterFirst policy.", "pod", klog.KObj(pod))
dnsType = podDNSCluster
}
switch dnsType {
case podDNSNone:
// DNSNone should use empty DNS settings as the base.
dnsConfig = &runtimeapi.DNSConfig{}
case podDNSCluster:
if len(c.clusterDNS) != 0 {
// For a pod with DNSClusterFirst policy, the cluster DNS server is
// the only nameserver configured for the pod. The cluster DNS server
// itself will forward queries to other nameservers that it is configured
// to use, in case the cluster DNS server cannot resolve the DNS query
// itself.
dnsConfig.Servers = []string{}
for _, ip := range c.clusterDNS {
dnsConfig.Servers = append(dnsConfig.Servers, ip.String())
}
dnsConfig.Searches = c.generateSearchesForDNSClusterFirst(dnsConfig.Searches, pod)
dnsConfig.Options = defaultDNSOptions
break
}
// clusterDNS is not known. A pod with the DNSClusterFirst policy cannot be created.
nodeErrorMsg := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to %q policy.", v1.DNSClusterFirst, v1.DNSDefault)
c.recorder.Eventf(c.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", nodeErrorMsg)
c.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "pod: %q. %s", format.Pod(pod), nodeErrorMsg)
// Fallback to DNSDefault.
fallthrough
case podDNSHost:
// When the kubelet --resolv-conf flag is set to the empty string, use
// DNS settings that override the docker default (which is to use
// /etc/resolv.conf) and effectively disable DNS lookups. According to
// the bind documentation, the behavior of the DNS client library when
// "nameservers" are not specified is to "use the nameserver on the
// local machine". A nameserver setting of localhost is equivalent to
// this documented behavior.
if c.ResolverConfig == "" {
for _, nodeIP := range c.nodeIPs {
if utilnet.IsIPv6(nodeIP) {
dnsConfig.Servers = append(dnsConfig.Servers, "::1")
} else {
dnsConfig.Servers = append(dnsConfig.Servers, "127.0.0.1")
}
}
if len(dnsConfig.Servers) == 0 {
dnsConfig.Servers = append(dnsConfig.Servers, "127.0.0.1")
}
dnsConfig.Searches = []string{"."}
}
}
if pod.Spec.DNSConfig != nil {
dnsConfig = appendDNSConfig(dnsConfig, pod.Spec.DNSConfig)
}
return c.formDNSConfigFitsLimits(logger, dnsConfig, pod), nil
}
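// A usage sketch (not part of the upstream source; recorder, nodeRef, ctx and
// pod are assumed to exist):
//
//	c := NewConfigurer(recorder, nodeRef, nil,
//		[]net.IP{net.ParseIP("10.96.0.10")}, "cluster.local", "/etc/resolv.conf")
//	dnsConfig, err := c.GetPodDNS(ctx, pod)
//	// For a non-host-network pod with the DNSClusterFirst policy,
//	// dnsConfig.Servers is []string{"10.96.0.10"} and dnsConfig.Options
//	// includes "ndots:5".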
// SetupDNSinContainerizedMounter replaces the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS
func (c *Configurer) SetupDNSinContainerizedMounter(logger klog.Logger, mounterPath string) {
resolvePath := filepath.Join(strings.TrimSuffix(mounterPath, "/mounter"), "rootfs", "etc", "resolv.conf")
dnsString := ""
for _, dns := range c.clusterDNS {
dnsString = dnsString + fmt.Sprintf("nameserver %s\n", dns)
}
if c.ResolverConfig != "" {
f, err := os.Open(c.ResolverConfig)
if err != nil {
logger.Error(err, "Could not open resolverConf file")
} else {
defer f.Close()
_, hostSearch, _, err := parseResolvConf(f)
if err != nil {
logger.Error(err, "Error for parsing the resolv.conf file")
} else {
dnsString = dnsString + "search"
for _, search := range hostSearch {
dnsString = dnsString + fmt.Sprintf(" %s", search)
}
dnsString = dnsString + "\n"
}
}
}
if err := os.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil {
logger.Error(err, "Could not write dns nameserver in the file", "path", resolvePath)
}
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeshutdown
import (
"context"
"fmt"
"sort"
"sync"
"time"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/volumemanager"
"k8s.io/utils/clock"
)
// Manager interface provides methods for Kubelet to manage node shutdown.
type Manager interface {
lifecycle.PodAdmitHandler
Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult
Start() error
ShutdownStatus() error
}
// Config represents Manager configuration
type Config struct {
Logger klog.Logger
VolumeManager volumemanager.VolumeManager
Recorder record.EventRecorder
NodeRef *v1.ObjectReference
GetPodsFunc eviction.ActivePodsFunc
KillPodFunc eviction.KillPodFunc
SyncNodeStatusFunc func()
ShutdownGracePeriodRequested time.Duration
ShutdownGracePeriodCriticalPods time.Duration
ShutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
StateDirectory string
Clock clock.Clock
}
// managerStub is a fake node shutdown manager (a no-op stand-in for managerImpl).
type managerStub struct{}
// Admit returns a fake Pod admission which always returns true
func (managerStub) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
return lifecycle.PodAdmitResult{Admit: true}
}
// Start is a no-op always returning nil for non-Linux platforms.
func (managerStub) Start() error {
return nil
}
// ShutdownStatus is a no-op always returning nil for non-Linux platforms.
func (managerStub) ShutdownStatus() error {
return nil
}
const (
NodeShutdownNotAdmittedReason = "NodeShutdown"
nodeShutdownNotAdmittedMessage = "Pod was rejected as the node is shutting down."
localStorageStateFile = "graceful_node_shutdown_state"
nodeShutdownReason = "Terminated"
nodeShutdownMessage = "Pod was terminated in response to imminent node shutdown."
)
// podManager is responsible for killing active pods by priority.
type podManager struct {
logger klog.Logger
shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority
clock clock.Clock
killPodFunc eviction.KillPodFunc
volumeManager volumemanager.VolumeManager
}
func newPodManager(conf *Config) *podManager {
shutdownGracePeriodByPodPriority := conf.ShutdownGracePeriodByPodPriority
// Migration from the original configuration
if !utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority) ||
len(shutdownGracePeriodByPodPriority) == 0 {
shutdownGracePeriodByPodPriority = migrateConfig(conf.ShutdownGracePeriodRequested, conf.ShutdownGracePeriodCriticalPods)
}
// Sort by priority from low to high
sort.Slice(shutdownGracePeriodByPodPriority, func(i, j int) bool {
return shutdownGracePeriodByPodPriority[i].Priority < shutdownGracePeriodByPodPriority[j].Priority
})
if conf.Clock == nil {
conf.Clock = clock.RealClock{}
}
return &podManager{
logger: conf.Logger,
shutdownGracePeriodByPodPriority: shutdownGracePeriodByPodPriority,
clock: conf.Clock,
killPodFunc: conf.KillPodFunc,
volumeManager: conf.VolumeManager,
}
}
// killPods terminates pods by priority.
func (m *podManager) killPods(activePods []*v1.Pod) error {
groups := groupByPriority(m.shutdownGracePeriodByPodPriority, activePods)
for _, group := range groups {
// If there are no pods in a particular range,
// then do not wait for pods in that priority range.
if len(group.Pods) == 0 {
continue
}
var wg sync.WaitGroup
wg.Add(len(group.Pods))
for _, pod := range group.Pods {
go func(pod *v1.Pod, group podShutdownGroup) {
defer wg.Done()
gracePeriodOverride := group.ShutdownGracePeriodSeconds
// If the pod's spec specifies a termination gracePeriod that is less than or equal to the calculated gracePeriodOverride, use the pod spec's termination gracePeriod.
if pod.Spec.TerminationGracePeriodSeconds != nil && *pod.Spec.TerminationGracePeriodSeconds <= gracePeriodOverride {
gracePeriodOverride = *pod.Spec.TerminationGracePeriodSeconds
}
m.logger.V(1).Info("Shutdown manager killing pod with gracePeriod", "pod", klog.KObj(pod), "gracePeriod", gracePeriodOverride)
if err := m.killPodFunc(pod, false, &gracePeriodOverride, func(status *v1.PodStatus) {
// set the pod status to failed (unless it was already in a successful terminal phase)
if status.Phase != v1.PodSucceeded {
status.Phase = v1.PodFailed
}
status.Message = nodeShutdownMessage
status.Reason = nodeShutdownReason
podutil.UpdatePodCondition(status, &v1.PodCondition{
Type: v1.DisruptionTarget,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(status, pod.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: v1.PodReasonTerminationByKubelet,
Message: nodeShutdownMessage,
})
}); err != nil {
m.logger.V(1).Info("Shutdown manager failed killing pod", "pod", klog.KObj(pod), "err", err)
} else {
m.logger.V(1).Info("Shutdown manager finished killing pod", "pod", klog.KObj(pod))
}
}(pod, group)
}
// This duration determines how long the shutdown manager will wait for the pods in this group
// to terminate before proceeding to the next group.
var groupTerminationWaitDuration = time.Duration(group.ShutdownGracePeriodSeconds) * time.Second
var (
doneCh = make(chan struct{})
timer = m.clock.NewTimer(groupTerminationWaitDuration)
ctx, ctxCancel = context.WithTimeout(context.Background(), groupTerminationWaitDuration)
)
go func() {
defer close(doneCh)
defer ctxCancel()
wg.Wait()
// The signal to kill a Pod was sent successfully to all the pods,
// let's wait until all the volumes are unmounted from all the pods before
// continuing to the next group. This is done so that the CSI Driver (assuming
// that it's part of the highest group) has a chance to perform unmounts.
if err := m.volumeManager.WaitForAllPodsUnmount(ctx, group.Pods); err != nil {
var podIdentifiers []string
for _, pod := range group.Pods {
podIdentifiers = append(podIdentifiers, fmt.Sprintf("%s/%s", pod.Namespace, pod.Name))
}
// Waiting for volume teardown is done on a best-effort basis;
// report an error and continue.
//
// Depending on the user provided kubelet configuration value
// either the `timer` will tick and we'll continue to shutdown the next group, or,
// WaitForAllPodsUnmount will timeout, therefore this goroutine
// will close doneCh and we'll continue to shutdown the next group.
m.logger.Error(err, "Failed while waiting for all the volumes belonging to Pods in this group to unmount", "pods", podIdentifiers)
}
}()
select {
case <-doneCh:
timer.Stop()
m.logger.V(1).Info("Done waiting for all pods in group to terminate", "gracePeriod", group.ShutdownGracePeriodSeconds, "priority", group.Priority)
case <-timer.C():
ctxCancel()
m.logger.V(1).Info("Shutdown manager pod killing time out", "gracePeriod", group.ShutdownGracePeriodSeconds, "priority", group.Priority)
}
}
return nil
}
func (m *podManager) periodRequested() time.Duration {
var sum int64
for _, period := range m.shutdownGracePeriodByPodPriority {
sum += period.ShutdownGracePeriodSeconds
}
return time.Duration(sum) * time.Second
}
func migrateConfig(shutdownGracePeriodRequested, shutdownGracePeriodCriticalPods time.Duration) []kubeletconfig.ShutdownGracePeriodByPodPriority {
if shutdownGracePeriodRequested == 0 {
return nil
}
defaultPriority := shutdownGracePeriodRequested - shutdownGracePeriodCriticalPods
if defaultPriority < 0 {
return nil
}
criticalPriority := shutdownGracePeriodRequested - defaultPriority
if criticalPriority < 0 {
return nil
}
return []kubeletconfig.ShutdownGracePeriodByPodPriority{
{
Priority: scheduling.DefaultPriorityWhenNoDefaultClassExists,
ShutdownGracePeriodSeconds: int64(defaultPriority / time.Second),
},
{
Priority: scheduling.SystemCriticalPriority,
ShutdownGracePeriodSeconds: int64(criticalPriority / time.Second),
},
}
}
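// A worked example (editorial sketch): with shutdownGracePeriodRequested=30s
// and shutdownGracePeriodCriticalPods=10s, non-critical pods get the first
// 20s and critical pods get the remaining 10s:
//
//	migrateConfig(30*time.Second, 10*time.Second)
//	// returns:
//	//  {Priority: scheduling.DefaultPriorityWhenNoDefaultClassExists, ShutdownGracePeriodSeconds: 20}
//	//  {Priority: scheduling.SystemCriticalPriority, ShutdownGracePeriodSeconds: 10}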
type podShutdownGroup struct {
kubeletconfig.ShutdownGracePeriodByPodPriority
Pods []*v1.Pod
}
func groupByPriority(shutdownGracePeriodByPodPriority []kubeletconfig.ShutdownGracePeriodByPodPriority, pods []*v1.Pod) []podShutdownGroup {
groups := make([]podShutdownGroup, 0, len(shutdownGracePeriodByPodPriority))
for _, period := range shutdownGracePeriodByPodPriority {
groups = append(groups, podShutdownGroup{
ShutdownGracePeriodByPodPriority: period,
})
}
for _, pod := range pods {
var priority int32
if pod.Spec.Priority != nil {
priority = *pod.Spec.Priority
}
// Find the group index according to the priority.
index := sort.Search(len(groups), func(i int) bool {
return groups[i].Priority >= priority
})
// 1. Pods with priority higher than the highest configured priority go into the highest group.
// 2. Pods with priority lower than the lowest configured priority go into the lowest group.
// 3. Pods whose priority falls between two configured priorities go into the lower group:
//    if groups[index-1].Priority <= pod priority < groups[index].Priority,
//    we pick the lower group (i.e. index-1).
if index == len(groups) {
index = len(groups) - 1
} else if index < 0 {
index = 0
} else if index > 0 && groups[index].Priority > priority {
index--
}
groups[index].Pods = append(groups[index].Pods, pod)
}
return groups
}
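// An illustrative sketch (not part of the upstream source): with configured
// priorities 0 and 100000 (sorted low to high), a pod with priority 50 lands
// in the lower group and a pod with priority 100000 in the higher one:
//
//	groups := groupByPriority(
//		[]kubeletconfig.ShutdownGracePeriodByPodPriority{
//			{Priority: 0, ShutdownGracePeriodSeconds: 20},
//			{Priority: 100000, ShutdownGracePeriodSeconds: 10},
//		},
//		pods, // hypothetical []*v1.Pod with priorities 50 and 100000
//	)
//	// groups[0].Pods holds the priority-50 pod; groups[1].Pods the other.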
//go:build linux
// +build linux
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nodeshutdown can watch for node level shutdown events and trigger graceful termination of pods running on the node prior to a system shutdown.
package nodeshutdown
import (
"fmt"
"path/filepath"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubeletevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
)
const (
dbusReconnectPeriod = 1 * time.Second
)
var systemDbus = func() (dbusInhibiter, error) {
return systemd.NewDBusCon()
}
type dbusInhibiter interface {
CurrentInhibitDelay() (time.Duration, error)
InhibitShutdown() (systemd.InhibitLock, error)
ReleaseInhibitLock(lock systemd.InhibitLock) error
ReloadLogindConf() error
MonitorShutdown(klog.Logger) (<-chan bool, error)
OverrideInhibitDelay(inhibitDelayMax time.Duration) error
}
// managerImpl has functions that can be used to interact with the Node Shutdown Manager.
type managerImpl struct {
logger klog.Logger
recorder record.EventRecorder
nodeRef *v1.ObjectReference
getPods eviction.ActivePodsFunc
syncNodeStatus func()
dbusCon dbusInhibiter
inhibitLock systemd.InhibitLock
nodeShuttingDownMutex sync.Mutex
nodeShuttingDownNow bool
podManager *podManager
enableMetrics bool
storage storage
}
// NewManager returns a new node shutdown manager.
func NewManager(conf *Config) Manager {
if !utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdown) {
m := managerStub{}
return m
}
podManager := newPodManager(conf)
// Disable if the configuration is empty
if len(podManager.shutdownGracePeriodByPodPriority) == 0 {
m := managerStub{}
return m
}
manager := &managerImpl{
logger: conf.Logger,
recorder: conf.Recorder,
nodeRef: conf.NodeRef,
getPods: conf.GetPodsFunc,
syncNodeStatus: conf.SyncNodeStatusFunc,
podManager: podManager,
enableMetrics: utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority),
storage: localStorage{
Path: filepath.Join(conf.StateDirectory, localStorageStateFile),
},
}
manager.logger.Info("Creating node shutdown manager",
"shutdownGracePeriodRequested", conf.ShutdownGracePeriodRequested,
"shutdownGracePeriodCriticalPods", conf.ShutdownGracePeriodCriticalPods,
"shutdownGracePeriodByPodPriority", podManager.shutdownGracePeriodByPodPriority,
)
return manager
}
// Admit rejects all pods if the node is shutting down.
func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
nodeShuttingDown := m.ShutdownStatus() != nil
if nodeShuttingDown {
return lifecycle.PodAdmitResult{
Admit: false,
Reason: NodeShutdownNotAdmittedReason,
Message: nodeShutdownNotAdmittedMessage,
}
}
return lifecycle.PodAdmitResult{Admit: true}
}
// setMetrics sets the metrics for the node shutdown manager.
func (m *managerImpl) setMetrics() {
if m.enableMetrics && m.storage != nil {
sta := state{}
err := m.storage.Load(&sta)
if err != nil {
m.logger.Error(err, "Failed to load graceful shutdown state")
} else {
if !sta.StartTime.IsZero() {
metrics.GracefulShutdownStartTime.Set(timestamp(sta.StartTime))
}
if !sta.EndTime.IsZero() {
metrics.GracefulShutdownEndTime.Set(timestamp(sta.EndTime))
}
}
}
}
// Start starts the node shutdown manager and will start watching the node for shutdown events.
func (m *managerImpl) Start() error {
stop, err := m.start()
if err != nil {
return err
}
go func() {
for {
if stop != nil {
<-stop
}
time.Sleep(dbusReconnectPeriod)
m.logger.V(1).Info("Restarting watch for node shutdown events")
stop, err = m.start()
if err != nil {
m.logger.Error(err, "Unable to watch the node for shutdown events")
}
}
}()
m.setMetrics()
return nil
}
func (m *managerImpl) start() (chan struct{}, error) {
systemBus, err := systemDbus()
if err != nil {
return nil, err
}
m.dbusCon = systemBus
currentInhibitDelay, err := m.dbusCon.CurrentInhibitDelay()
if err != nil {
return nil, err
}
// If logind's InhibitDelayMaxUSec (as configured in logind.conf) is less than periodRequested, attempt to update the value to periodRequested.
if periodRequested := m.podManager.periodRequested(); periodRequested > currentInhibitDelay {
err := m.dbusCon.OverrideInhibitDelay(periodRequested)
if err != nil {
return nil, fmt.Errorf("unable to override inhibit delay by shutdown manager: %v", err)
}
err = m.dbusCon.ReloadLogindConf()
if err != nil {
return nil, err
}
// The ReloadLogindConf call is asynchronous. Poll with exponential backoff until the configuration is updated.
backoff := wait.Backoff{
Duration: 100 * time.Millisecond,
Factor: 2.0,
Steps: 5,
}
var updatedInhibitDelay time.Duration
attempt := 0
err = wait.ExponentialBackoff(backoff, func() (bool, error) {
attempt += 1
// Read the current inhibit delay again; if the override was successful, updatedInhibitDelay will be at least periodRequested.
updatedInhibitDelay, err = m.dbusCon.CurrentInhibitDelay()
if err != nil {
return false, err
}
if periodRequested <= updatedInhibitDelay {
return true, nil
}
if attempt < backoff.Steps {
m.logger.V(3).Info("InhibitDelayMaxSec still less than requested, retrying", "attempt", attempt, "current", updatedInhibitDelay, "requested", periodRequested)
}
return false, nil
})
if err != nil {
if !wait.Interrupted(err) {
return nil, err
}
if periodRequested > updatedInhibitDelay {
return nil, fmt.Errorf("node shutdown manager was timed out after %d attempts waiting for logind InhibitDelayMaxSec to update to %v (ShutdownGracePeriod), current value is %v", attempt, periodRequested, updatedInhibitDelay)
}
}
}
err = m.acquireInhibitLock()
if err != nil {
return nil, err
}
events, err := m.dbusCon.MonitorShutdown(m.logger)
if err != nil {
releaseErr := m.dbusCon.ReleaseInhibitLock(m.inhibitLock)
if releaseErr != nil {
return nil, fmt.Errorf("failed releasing inhibitLock: %v and failed monitoring shutdown: %v", releaseErr, err)
}
return nil, fmt.Errorf("failed to monitor shutdown: %v", err)
}
stop := make(chan struct{})
go func() {
// Monitor for shutdown events. This follows the logind Inhibit Delay pattern described on https://www.freedesktop.org/wiki/Software/systemd/inhibit/
// 1. When shutdown manager starts, an inhibit lock is taken.
// 2. When shutdown(true) event is received, process the shutdown and release the inhibit lock.
// 3. When shutdown(false) event is received, this indicates a previous shutdown was cancelled. In this case, acquire the inhibit lock again.
for {
select {
case isShuttingDown, ok := <-events:
if !ok {
m.logger.Error(err, "Ended to watching the node for shutdown events")
close(stop)
return
}
m.logger.V(1).Info("Shutdown manager detected new shutdown event, isNodeShuttingDownNow", "event", isShuttingDown)
var shutdownType string
if isShuttingDown {
shutdownType = "shutdown"
} else {
shutdownType = "cancelled"
}
m.logger.V(1).Info("Shutdown manager detected new shutdown event", "event", shutdownType)
if isShuttingDown {
m.recorder.Event(m.nodeRef, v1.EventTypeNormal, kubeletevents.NodeShutdown, "Shutdown manager detected shutdown event")
} else {
m.recorder.Event(m.nodeRef, v1.EventTypeNormal, kubeletevents.NodeShutdown, "Shutdown manager detected shutdown cancellation")
}
m.nodeShuttingDownMutex.Lock()
m.nodeShuttingDownNow = isShuttingDown
m.nodeShuttingDownMutex.Unlock()
if isShuttingDown {
// Update node status and ready condition
go m.syncNodeStatus()
m.processShutdownEvent()
} else {
_ = m.acquireInhibitLock()
}
}
}
}()
return stop, nil
}
func (m *managerImpl) acquireInhibitLock() error {
lock, err := m.dbusCon.InhibitShutdown()
if err != nil {
return err
}
if m.inhibitLock != 0 {
m.dbusCon.ReleaseInhibitLock(m.inhibitLock)
}
m.inhibitLock = lock
return nil
}
// ShutdownStatus will return an error if the node is currently shutting down.
func (m *managerImpl) ShutdownStatus() error {
m.nodeShuttingDownMutex.Lock()
defer m.nodeShuttingDownMutex.Unlock()
if m.nodeShuttingDownNow {
return fmt.Errorf("node is shutting down")
}
return nil
}
func (m *managerImpl) processShutdownEvent() error {
m.logger.V(1).Info("Shutdown manager processing shutdown event")
activePods := m.getPods()
defer func() {
m.dbusCon.ReleaseInhibitLock(m.inhibitLock)
m.logger.V(1).Info("Shutdown manager completed processing shutdown event, node will shutdown shortly")
}()
if m.enableMetrics && m.storage != nil {
startTime := time.Now()
err := m.storage.Store(state{
StartTime: startTime,
})
if err != nil {
m.logger.Error(err, "Failed to store graceful shutdown state")
}
metrics.GracefulShutdownStartTime.Set(timestamp(startTime))
metrics.GracefulShutdownEndTime.Set(0)
defer func() {
endTime := time.Now()
err := m.storage.Store(state{
StartTime: startTime,
EndTime: endTime,
})
if err != nil {
m.logger.Error(err, "Failed to store graceful shutdown state")
}
metrics.GracefulShutdownEndTime.Set(timestamp(endTime))
}()
}
return m.podManager.killPods(activePods)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeshutdown
import (
"encoding/json"
"io"
"os"
"path/filepath"
"time"
)
type storage interface {
Store(data interface{}) (err error)
Load(data interface{}) (err error)
}
type localStorage struct {
Path string
}
func (l localStorage) Store(data interface{}) (err error) {
b, err := json.Marshal(data)
if err != nil {
return err
}
return atomicWrite(l.Path, b, 0644)
}
func (l localStorage) Load(data interface{}) (err error) {
b, err := os.ReadFile(l.Path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return json.Unmarshal(b, data)
}
func timestamp(t time.Time) float64 {
if t.IsZero() {
return 0
}
return float64(t.Unix())
}
type state struct {
StartTime time.Time `json:"startTime"`
EndTime time.Time `json:"endTime"`
}
// atomicWrite atomically writes data to a file specified by filename.
func atomicWrite(filename string, data []byte, perm os.FileMode) error {
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return err
}
err = os.Chmod(f.Name(), perm)
if err != nil {
f.Close()
return err
}
n, err := f.Write(data)
if err != nil {
f.Close()
return err
}
if n < len(data) {
f.Close()
return io.ErrShortWrite
}
if err := f.Sync(); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
return os.Rename(f.Name(), filename)
}
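// A round-trip sketch (not part of the upstream source; the path is
// hypothetical):
//
//	s := localStorage{Path: "/var/lib/kubelet/graceful_node_shutdown_state"}
//	_ = s.Store(state{StartTime: time.Now()})
//	var st state
//	_ = s.Load(&st) // st.StartTime is restored; a missing file leaves st zero-valued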
//go:build linux
// +build linux
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package systemd
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"github.com/godbus/dbus/v5"
"k8s.io/klog/v2"
)
const (
logindService = "org.freedesktop.login1"
logindObject = dbus.ObjectPath("/org/freedesktop/login1")
logindInterface = "org.freedesktop.login1.Manager"
)
type dBusConnector interface {
Object(dest string, path dbus.ObjectPath) dbus.BusObject
AddMatchSignal(options ...dbus.MatchOption) error
Signal(ch chan<- *dbus.Signal)
}
// DBusCon has functions that can be used to interact with systemd and logind over dbus.
type DBusCon struct {
SystemBus dBusConnector
}
func NewDBusCon() (*DBusCon, error) {
conn, err := dbus.SystemBus()
if err != nil {
return nil, err
}
return &DBusCon{
SystemBus: conn,
}, nil
}
// InhibitLock is a lock obtained after creating a systemd inhibitor by calling InhibitShutdown().
type InhibitLock uint32
// CurrentInhibitDelay returns the current delay inhibitor timeout value as configured in logind.conf(5).
// see https://www.freedesktop.org/software/systemd/man/logind.conf.html for more details.
func (bus *DBusCon) CurrentInhibitDelay() (time.Duration, error) {
obj := bus.SystemBus.Object(logindService, logindObject)
res, err := obj.GetProperty(logindInterface + ".InhibitDelayMaxUSec")
if err != nil {
return 0, fmt.Errorf("failed reading InhibitDelayMaxUSec property from logind: %w", err)
}
delay, ok := res.Value().(uint64)
if !ok {
return 0, fmt.Errorf("InhibitDelayMaxUSec from logind is not a uint64 as expected")
}
// InhibitDelayMaxUSec is in microseconds
duration := time.Duration(delay) * time.Microsecond
return duration, nil
}
// InhibitShutdown creates a systemd inhibitor by calling logind's Inhibit() and returns the inhibitor lock
// see https://www.freedesktop.org/wiki/Software/systemd/inhibit/ for more details.
func (bus *DBusCon) InhibitShutdown() (InhibitLock, error) {
obj := bus.SystemBus.Object(logindService, logindObject)
what := "shutdown"
who := "kubelet"
why := "Kubelet needs time to handle node shutdown"
mode := "delay"
call := obj.Call("org.freedesktop.login1.Manager.Inhibit", 0, what, who, why, mode)
if call.Err != nil {
return InhibitLock(0), fmt.Errorf("failed creating systemd inhibitor: %w", call.Err)
}
var fd uint32
err := call.Store(&fd)
if err != nil {
return InhibitLock(0), fmt.Errorf("failed storing inhibit lock file descriptor: %w", err)
}
return InhibitLock(fd), nil
}
// ReleaseInhibitLock will release the underlying inhibit lock which will cause the shutdown to start.
func (bus *DBusCon) ReleaseInhibitLock(lock InhibitLock) error {
err := syscall.Close(int(lock))
if err != nil {
return fmt.Errorf("unable to close systemd inhibitor lock: %w", err)
}
return nil
}
// ReloadLogindConf uses dbus to send a SIGHUP to the systemd-logind service, causing logind to reload its configuration.
func (bus *DBusCon) ReloadLogindConf() error {
systemdService := "org.freedesktop.systemd1"
systemdObject := "/org/freedesktop/systemd1"
systemdInterface := "org.freedesktop.systemd1.Manager"
obj := bus.SystemBus.Object(systemdService, dbus.ObjectPath(systemdObject))
unit := "systemd-logind.service"
who := "all"
var signal int32 = 1 // SIGHUP
call := obj.Call(systemdInterface+".KillUnit", 0, unit, who, signal)
if call.Err != nil {
return fmt.Errorf("unable to reload logind conf: %w", call.Err)
}
return nil
}
// MonitorShutdown detects the node shutdown by watching for "PrepareForShutdown" logind events.
// see https://www.freedesktop.org/wiki/Software/systemd/inhibit/ for more details.
func (bus *DBusCon) MonitorShutdown(logger klog.Logger) (<-chan bool, error) {
err := bus.SystemBus.AddMatchSignal(dbus.WithMatchInterface(logindInterface), dbus.WithMatchMember("PrepareForShutdown"), dbus.WithMatchObjectPath("/org/freedesktop/login1"))
if err != nil {
return nil, err
}
busChan := make(chan *dbus.Signal, 1)
bus.SystemBus.Signal(busChan)
shutdownChan := make(chan bool, 1)
go func() {
for {
event, ok := <-busChan
if !ok {
close(shutdownChan)
return
}
if event == nil || len(event.Body) == 0 {
logger.Error(nil, "Failed obtaining shutdown event, PrepareForShutdown event was empty")
continue
}
shutdownActive, ok := event.Body[0].(bool)
if !ok {
logger.Error(nil, "Failed obtaining shutdown event, PrepareForShutdown event was not bool type as expected")
continue
}
shutdownChan <- shutdownActive
}
}()
return shutdownChan, nil
}
const (
logindConfigDirectory = "/etc/systemd/logind.conf.d/"
kubeletLogindConf = "99-kubelet.conf"
)
// OverrideInhibitDelay writes a config file to logind overriding InhibitDelayMaxSec to the value desired.
func (bus *DBusCon) OverrideInhibitDelay(inhibitDelayMax time.Duration) error {
err := os.MkdirAll(logindConfigDirectory, 0755)
if err != nil {
return fmt.Errorf("failed creating %v directory: %w", logindConfigDirectory, err)
}
// This attempts to set the `InhibitDelayMaxUSec` dbus property of logind, which is the max inhibit delay measured in microseconds.
// The corresponding logind config file property is named `InhibitDelayMaxSec`, is measured in seconds, and is set via logind.conf.
// Refer to https://www.freedesktop.org/software/systemd/man/logind.conf.html for more details.
inhibitOverride := fmt.Sprintf(`# Kubelet logind override
[Login]
InhibitDelayMaxSec=%.0f
`, inhibitDelayMax.Seconds())
logindOverridePath := filepath.Join(logindConfigDirectory, kubeletLogindConf)
if err := os.WriteFile(logindOverridePath, []byte(inhibitOverride), 0644); err != nil {
return fmt.Errorf("failed writing logind shutdown inhibit override file %v: %w", logindOverridePath, err)
}
return nil
}
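// For example (editorial sketch): OverrideInhibitDelay(30*time.Second) writes
// /etc/systemd/logind.conf.d/99-kubelet.conf containing:
//
//	# Kubelet logind override
//	[Login]
//	InhibitDelayMaxSec=30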
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodestatus
import (
"context"
"fmt"
"math"
"net"
goruntime "runtime"
"strings"
"sync"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cloudproviderapi "k8s.io/cloud-provider/api"
"k8s.io/component-base/version"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
"k8s.io/klog/v2"
)
const (
// MaxNamesPerImageInNodeStatus is the max number of names
// per image stored in the node status.
MaxNamesPerImageInNodeStatus = 5
)
// Setter modifies the node in-place, and returns an error if the modification failed.
// Setters may partially mutate the node before returning an error.
type Setter func(ctx context.Context, node *v1.Node) error
// Only emit one reboot event
var rebootEvent sync.Once
// NodeAddress returns a Setter that updates address-related information on the node.
func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
validateNodeIPFunc func(net.IP) error, // typically Kubelet.nodeIPValidator
hostname string, // typically Kubelet.hostname
externalCloudProvider bool, // typically Kubelet.externalCloudProvider
resolveAddressFunc func(net.IP) (net.IP, error), // typically k8s.io/apimachinery/pkg/util/net.ResolveBindAddress
) Setter {
var nodeIP, secondaryNodeIP net.IP
if len(nodeIPs) > 0 {
nodeIP = nodeIPs[0]
}
preferIPv4 := nodeIP == nil || nodeIP.To4() != nil
isPreferredIPFamily := func(ip net.IP) bool { return (ip.To4() != nil) == preferIPv4 }
nodeIPSpecified := nodeIP != nil && !nodeIP.IsUnspecified()
if len(nodeIPs) > 1 {
secondaryNodeIP = nodeIPs[1]
}
secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified()
return func(ctx context.Context, node *v1.Node) error {
logger := klog.FromContext(ctx)
if nodeIPSpecified {
if err := validateNodeIPFunc(nodeIP); err != nil {
return fmt.Errorf("failed to validate nodeIP: %v", err)
}
logger.V(4).Info("Using node IP", "IP", nodeIP.String())
}
if secondaryNodeIPSpecified {
if err := validateNodeIPFunc(secondaryNodeIP); err != nil {
return fmt.Errorf("failed to validate secondaryNodeIP: %v", err)
}
logger.V(4).Info("Using secondary node IP", "IP", secondaryNodeIP.String())
}
if externalCloudProvider && nodeIPSpecified {
// Annotate the Node object with nodeIP for external cloud provider.
//
// We do not add the annotation in the case where there is no cloud
// controller at all, as we don't expect to migrate these clusters to use an
// external CCM.
if node.ObjectMeta.Annotations == nil {
node.ObjectMeta.Annotations = make(map[string]string)
}
annotation := nodeIP.String()
if secondaryNodeIPSpecified {
annotation += "," + secondaryNodeIP.String()
}
node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr] = annotation
} else if node.ObjectMeta.Annotations != nil {
// Clean up stale annotations if no longer using a cloud provider or
// no longer overriding node IP.
delete(node.ObjectMeta.Annotations, cloudproviderapi.AnnotationAlphaProvidedIPAddr)
}
if externalCloudProvider {
// If --cloud-provider=external and node address is already set,
// then we return early because provider set addresses should take precedence.
// Otherwise, we try to use the node IP defined via flags and let the cloud provider override it later
// This should alleviate a lot of the bootstrapping issues with out-of-tree providers
if len(node.Status.Addresses) > 0 {
return nil
}
// If nodeIPs are not set wait for the external cloud-provider to set the node addresses.
// If the nodeIP is the unspecified address 0.0.0.0 or ::, then use the IP of the default gateway of
// the corresponding IP family to bootstrap the node until the out-of-tree provider overrides it later.
// xref: https://github.com/kubernetes/kubernetes/issues/125348
// Otherwise use them, on the assumption that the installer/administrator has the prior knowledge
// required to ensure the external cloud provider will use the same addresses, avoiding the issues explained
// in https://github.com/kubernetes/kubernetes/issues/120720.
// We are already hinting the external cloud provider via the annotation AnnotationAlphaProvidedIPAddr.
if nodeIP == nil {
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeHostName, Address: hostname},
}
return nil
}
}
if nodeIPSpecified && secondaryNodeIPSpecified {
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: nodeIP.String()},
{Type: v1.NodeInternalIP, Address: secondaryNodeIP.String()},
{Type: v1.NodeHostName, Address: hostname},
}
} else {
var ipAddr net.IP
var err error
// 1) Use nodeIP if set (and not "0.0.0.0"/"::")
// 2) If the user has specified an IP to HostnameOverride, use it
// 3) Lookup the IP from node name by DNS
// 4) Try to get the IP from the network interface used as default gateway
//
// For steps 3 and 4, IPv4 addresses are preferred to IPv6 addresses
// unless nodeIP is "::", in which case it is reversed.
if nodeIPSpecified {
ipAddr = nodeIP
} else if addr := netutils.ParseIPSloppy(hostname); addr != nil {
ipAddr = addr
} else {
var addrs []net.IP
addrs, _ = net.LookupIP(node.Name)
for _, addr := range addrs {
if err = validateNodeIPFunc(addr); err == nil {
if isPreferredIPFamily(addr) {
ipAddr = addr
break
} else if ipAddr == nil {
ipAddr = addr
}
}
}
if ipAddr == nil {
ipAddr, err = resolveAddressFunc(nodeIP)
}
}
if ipAddr == nil {
// We tried everything we could, but the IP address wasn't fetchable; error out
return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
}
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: ipAddr.String()},
{Type: v1.NodeHostName, Address: hostname},
}
}
return nil
}
}
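// A usage sketch (not part of the upstream source; ctx is assumed to exist
// and the validator accepts any IP):
//
//	setter := NodeAddress(
//		[]net.IP{netutils.ParseIPSloppy("192.168.1.10")},
//		func(net.IP) error { return nil },
//		"node-1", false, nil)
//	node := &v1.Node{}
//	_ = setter(ctx, node)
//	// node.Status.Addresses: InternalIP 192.168.1.10, Hostname node-1.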
// MachineInfo returns a Setter that updates machine-related information on the node.
func MachineInfo(nodeName string,
maxPods int,
podsPerCore int,
machineInfoFunc func() (*cadvisorapiv1.MachineInfo, error), // typically Kubelet.GetCachedMachineInfo
capacityFunc func(localStorageCapacityIsolation bool) v1.ResourceList, // typically Kubelet.containerManager.GetCapacity
devicePluginResourceCapacityFunc func() (v1.ResourceList, v1.ResourceList, []string), // typically Kubelet.containerManager.GetDevicePluginResourceCapacity
nodeAllocatableReservationFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetNodeAllocatableReservation
recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent
localStorageCapacityIsolation bool,
) Setter {
return func(ctx context.Context, node *v1.Node) error {
logger := klog.FromContext(ctx)
// Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised.
if node.Status.Capacity == nil {
node.Status.Capacity = v1.ResourceList{}
}
var devicePluginAllocatable v1.ResourceList
var devicePluginCapacity v1.ResourceList
var removedDevicePlugins []string
// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
info, err := machineInfoFunc()
if err != nil {
// TODO(roberthbailey): This is required for test-cmd.sh to pass.
// See if the test should be updated instead.
node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(maxPods), resource.DecimalSI)
logger.Error(err, "Error getting machine info")
} else {
node.Status.NodeInfo.MachineID = info.MachineID
node.Status.NodeInfo.SystemUUID = info.SystemUUID
for rName, rCap := range cadvisor.CapacityFromMachineInfo(info) {
node.Status.Capacity[rName] = rCap
}
if podsPerCore > 0 {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(math.Min(float64(info.NumCores*podsPerCore), float64(maxPods))), resource.DecimalSI)
} else {
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(maxPods), resource.DecimalSI)
}
if node.Status.NodeInfo.BootID != "" &&
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
//
// Only emit one reboot event. recordEventFunc queues events and can emit many superfluous reboot events
rebootEvent.Do(func() {
recordEventFunc(v1.EventTypeWarning, events.NodeRebooted,
fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, info.BootID))
})
}
node.Status.NodeInfo.BootID = info.BootID
// TODO: all the node resources should use ContainerManager.GetCapacity instead of deriving the
// capacity for every node status request
initialCapacity := capacityFunc(localStorageCapacityIsolation)
if initialCapacity != nil {
if v, exists := initialCapacity[v1.ResourceEphemeralStorage]; exists {
node.Status.Capacity[v1.ResourceEphemeralStorage] = v
}
}
devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = devicePluginResourceCapacityFunc()
for k, v := range devicePluginCapacity {
if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() {
logger.V(2).Info("Updated capacity for device plugin", "plugin", k, "capacity", v.Value())
}
node.Status.Capacity[k] = v
}
for _, removedResource := range removedDevicePlugins {
logger.V(2).Info("Set capacity for removed resource to 0 on device removal", "device", removedResource)
// Set the capacity of the removed resource to 0 instead of
// removing the resource from the node status. This is to indicate
// that the resource is managed by device plugin and had been
// registered before.
//
// This is required to differentiate the device plugin managed
// resources and the cluster-level resources, which are absent in
// node status.
node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
if utilfeature.DefaultFeatureGate.Enabled(features.NodeSwap) && info.SwapCapacity != 0 {
node.Status.NodeInfo.Swap = &v1.NodeSwapStatus{
Capacity: ptr.To(int64(info.SwapCapacity)),
}
}
}
// Set Allocatable.
if node.Status.Allocatable == nil {
node.Status.Allocatable = make(v1.ResourceList)
}
// Remove extended resources from allocatable that are no longer
// present in capacity.
for k := range node.Status.Allocatable {
_, found := node.Status.Capacity[k]
if !found && v1helper.IsExtendedResourceName(k) {
delete(node.Status.Allocatable, k)
}
}
allocatableReservation := nodeAllocatableReservationFunc()
for k, v := range node.Status.Capacity {
value := v.DeepCopy()
if res, exists := allocatableReservation[k]; exists {
value.Sub(res)
}
if value.Sign() < 0 {
// Negative Allocatable resources don't make sense.
value.Set(0)
}
node.Status.Allocatable[k] = value
}
for k, v := range devicePluginAllocatable {
if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
logger.V(2).Info("Updated allocatable", "device", k, "allocatable", v.Value())
}
node.Status.Allocatable[k] = v
}
// for every huge page reservation, we need to remove it from allocatable memory
for k, v := range node.Status.Capacity {
if v1helper.IsHugePageResourceName(k) {
allocatableMemory := node.Status.Allocatable[v1.ResourceMemory]
value := v.DeepCopy()
allocatableMemory.Sub(value)
if allocatableMemory.Sign() < 0 {
// Negative Allocatable resources don't make sense.
allocatableMemory.Set(0)
}
node.Status.Allocatable[v1.ResourceMemory] = allocatableMemory
}
}
return nil
}
}
// VersionInfo returns a Setter that updates version-related information on the node.
func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo
runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type
runtimeVersionFunc func(ctx context.Context) (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version
) Setter {
return func(ctx context.Context, node *v1.Node) error {
verinfo, err := versionInfoFunc()
if err != nil {
return fmt.Errorf("error getting version info: %v", err)
}
node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion
runtimeVersion := "Unknown"
if runtimeVer, err := runtimeVersionFunc(ctx); err == nil {
runtimeVersion = runtimeVer.String()
}
node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion)
node.Status.NodeInfo.KubeletVersion = version.Get().String()
if utilfeature.DefaultFeatureGate.Enabled(features.DisableNodeKubeProxyVersion) {
// This field is deprecated and should be cleared if it was previously set.
node.Status.NodeInfo.KubeProxyVersion = ""
} else {
node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
}
return nil
}
}
// DaemonEndpoints returns a Setter that updates the daemon endpoints on the node.
func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
return func(ctx context.Context, node *v1.Node) error {
node.Status.DaemonEndpoints = *daemonEndpoints
return nil
}
}
// Images returns a Setter that updates the images on the node.
// imageListFunc is expected to return a list of images sorted in descending order by image size.
// nodeStatusMaxImages is ignored if set to -1.
func Images(nodeStatusMaxImages int32,
imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList
) Setter {
return func(ctx context.Context, node *v1.Node) error {
// Update image list of this node
var imagesOnNode []v1.ContainerImage
containerImages, err := imageListFunc()
if err != nil {
node.Status.Images = imagesOnNode
return fmt.Errorf("error getting image list: %v", err)
}
// we expect imageListFunc to return a sorted list, so we just need to truncate
if int(nodeStatusMaxImages) > -1 &&
int(nodeStatusMaxImages) < len(containerImages) {
containerImages = containerImages[0:nodeStatusMaxImages]
}
for _, image := range containerImages {
// make a copy to avoid modifying slice members of the image items in the list
names := append([]string{}, image.RepoDigests...)
names = append(names, image.RepoTags...)
// Report up to MaxNamesPerImageInNodeStatus names per image.
if len(names) > MaxNamesPerImageInNodeStatus {
names = names[0:MaxNamesPerImageInNodeStatus]
}
imagesOnNode = append(imagesOnNode, v1.ContainerImage{
Names: names,
SizeBytes: image.Size,
})
}
node.Status.Images = imagesOnNode
return nil
}
}
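// A truncation sketch (editorial; imageListFunc, ctx and node are assumed to
// exist): with nodeStatusMaxImages=2 and three images returned, only the two
// largest are reported, and each image's name list (digests first, then tags)
// is capped at MaxNamesPerImageInNodeStatus entries:
//
//	setter := Images(2, imageListFunc)
//	_ = setter(ctx, node) // node.Status.Images has at most 2 entries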
// GoRuntime returns a Setter that sets GOOS and GOARCH on the node.
func GoRuntime() Setter {
return func(ctx context.Context, node *v1.Node) error {
node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
node.Status.NodeInfo.Architecture = goruntime.GOARCH
return nil
}
}
// NodeFeatures returns a Setter that sets NodeFeatures on the node.
func NodeFeatures(featuresGetter func() *kubecontainer.RuntimeFeatures) Setter {
return func(ctx context.Context, node *v1.Node) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
return nil
}
features := featuresGetter()
if features == nil {
return nil
}
node.Status.Features = &v1.NodeFeatures{
SupplementalGroupsPolicy: &features.SupplementalGroupsPolicy,
}
return nil
}
}
// RuntimeHandlers returns a Setter that sets RuntimeHandlers on the node.
func RuntimeHandlers(fn func() []kubecontainer.RuntimeHandler) Setter {
return func(ctx context.Context, node *v1.Node) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) && !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return nil
}
handlers := fn()
node.Status.RuntimeHandlers = make([]v1.NodeRuntimeHandler, len(handlers))
for i, h := range handlers {
node.Status.RuntimeHandlers[i] = v1.NodeRuntimeHandler{
Name: h.Name,
Features: &v1.NodeRuntimeHandlerFeatures{
RecursiveReadOnlyMounts: &h.SupportsRecursiveReadOnlyMounts,
UserNamespaces: &h.SupportsUserNamespaces,
},
}
}
return nil
}
}
// ReadyCondition returns a Setter that updates the v1.NodeReady condition on the node.
func ReadyCondition(
nowFunc func() time.Time, // typically Kubelet.clock.Now
runtimeErrorsFunc func() error, // typically Kubelet.runtimeState.runtimeErrors
networkErrorsFunc func() error, // typically Kubelet.runtimeState.networkErrors
storageErrorsFunc func() error, // typically Kubelet.runtimeState.storageErrors
cmStatusFunc func() cm.Status, // typically Kubelet.containerManager.Status
nodeShutdownManagerErrorsFunc func() error, // typically kubelet.shutdownManager.errors.
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
localStorageCapacityIsolation bool,
) Setter {
return func(ctx context.Context, node *v1.Node) error {
// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
// This is due to an issue with version skewed kubelet and master components.
// ref: https://github.com/kubernetes/kubernetes/issues/16961
currentTime := metav1.NewTime(nowFunc())
newNodeReadyCondition := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: currentTime,
}
errs := []error{runtimeErrorsFunc(), networkErrorsFunc(), storageErrorsFunc(), nodeShutdownManagerErrorsFunc()}
requiredCapacities := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods}
if localStorageCapacityIsolation {
requiredCapacities = append(requiredCapacities, v1.ResourceEphemeralStorage)
}
missingCapacities := []string{}
for _, resource := range requiredCapacities {
if _, found := node.Status.Capacity[resource]; !found {
missingCapacities = append(missingCapacities, string(resource))
}
}
if len(missingCapacities) > 0 {
errs = append(errs, fmt.Errorf("missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
}
if aggregatedErr := errors.NewAggregate(errs); aggregatedErr != nil {
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "KubeletNotReady",
Message: aggregatedErr.Error(),
LastHeartbeatTime: currentTime,
}
}
// Record any soft requirements that were not met in the container manager.
status := cmStatusFunc()
if status.SoftRequirements != nil {
newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error())
}
readyConditionUpdated := false
needToRecordEvent := false
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeReady {
if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
} else {
newNodeReadyCondition.LastTransitionTime = currentTime
needToRecordEvent = true
}
node.Status.Conditions[i] = newNodeReadyCondition
readyConditionUpdated = true
break
}
}
if !readyConditionUpdated {
newNodeReadyCondition.LastTransitionTime = currentTime
node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
}
if needToRecordEvent {
if newNodeReadyCondition.Status == v1.ConditionTrue {
recordEventFunc(v1.EventTypeNormal, events.NodeReady)
} else {
recordEventFunc(v1.EventTypeNormal, events.NodeNotReady)
logger := klog.FromContext(ctx)
logger.Info("Node became not ready", "node", klog.KObj(node), "condition", newNodeReadyCondition)
}
}
return nil
}
}
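// Illustrative composition sketch (assumption, not from the original source):
// status setters are typically gathered into a slice and applied in order,
// keeping ReadyCondition last per the version-skew note above.
//
//	setters := []Setter{
//		Images(nodeStatusMaxImages, imageListFunc),
//		GoRuntime(),
//		ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc,
//			cmStatusFunc, nodeShutdownManagerErrorsFunc, recordEventFunc, true),
//	}
//	for _, setter := range setters {
//		if err := setter(ctx, node); err != nil {
//			// aggregate or log the error; remaining setters still run
//		}
//	}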
// MemoryPressureCondition returns a Setter that updates the v1.NodeMemoryPressure condition on the node.
func MemoryPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition
// Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeMemoryPressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodeMemoryPressure condition doesn't exist, create one
if condition == nil {
condition = &v1.NodeCondition{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionUnknown,
}
// The condition cannot be appended to node.Status.Conditions here because
// appending copies the value into the slice, so none of the updates we
// make below would be reflected in the slice.
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodeMemoryPressure condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under memory pressure or not.
if pressureFunc() {
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasInsufficientMemory"
condition.Message = "kubelet has insufficient memory available"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasInsufficientMemory")
}
} else if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasSufficientMemory"
condition.Message = "kubelet has sufficient memory available"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasSufficientMemory")
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
return nil
}
}
// PIDPressureCondition returns a Setter that updates the v1.NodePIDPressure condition on the node.
func PIDPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition
// Check if NodePIDPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodePIDPressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodePIDPressure condition doesn't exist, create one
if condition == nil {
condition = &v1.NodeCondition{
Type: v1.NodePIDPressure,
Status: v1.ConditionUnknown,
}
// The condition cannot be appended to node.Status.Conditions here because
// appending copies the value into the slice, so none of the updates we
// make below would be reflected in the slice.
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodePIDPressure condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under PID pressure or not.
if pressureFunc() {
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasInsufficientPID"
condition.Message = "kubelet has insufficient PID available"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasInsufficientPID")
}
} else if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasSufficientPID"
condition.Message = "kubelet has sufficient PID available"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasSufficientPID")
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
return nil
}
}
// DiskPressureCondition returns a Setter that updates the v1.NodeDiskPressure condition on the node.
func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure
recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
) Setter {
return func(ctx context.Context, node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var condition *v1.NodeCondition
// Check if NodeDiskPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeDiskPressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodeDiskPressure condition doesn't exist, create one
if condition == nil {
condition = &v1.NodeCondition{
Type: v1.NodeDiskPressure,
Status: v1.ConditionUnknown,
}
// The condition cannot be appended to node.Status.Conditions here because
// appending copies the value into the slice, so none of the updates we
// make below would be reflected in the slice.
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodeDiskPressure condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under disk pressure or not.
if pressureFunc() {
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasDiskPressure"
condition.Message = "kubelet has disk pressure"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasDiskPressure")
}
} else if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasNoDiskPressure"
condition.Message = "kubelet has no disk pressure"
condition.LastTransitionTime = currentTime
recordEventFunc(v1.EventTypeNormal, "NodeHasNoDiskPressure")
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
return nil
}
}
// VolumesInUse returns a Setter that updates the volumes in use on the node.
func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
) Setter {
return func(ctx context.Context, node *v1.Node) error {
// Make sure to only update node status after reconciler starts syncing up states
if syncedFunc() {
node.Status.VolumesInUse = volumesInUseFunc()
}
return nil
}
}
//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oom
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"github.com/google/cadvisor/utils/oomparser"
)
type streamer interface {
StreamOoms(chan<- *oomparser.OomInstance)
}
var _ streamer = &oomparser.OomParser{}
type realWatcher struct {
recorder record.EventRecorder
oomStreamer streamer
}
var _ Watcher = &realWatcher{}
// NewWatcher creates and initializes an OOMWatcher backed by Cadvisor as
// the OOM streamer.
func NewWatcher(recorder record.EventRecorder) (Watcher, error) {
// for test purposes
_, ok := recorder.(*record.FakeRecorder)
if ok {
return nil, nil
}
oomStreamer, err := oomparser.New()
if err != nil {
return nil, err
}
watcher := &realWatcher{
recorder: recorder,
oomStreamer: oomStreamer,
}
return watcher, nil
}
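// Illustrative usage sketch (assumed wiring, not from the original source);
// nodeRef is a hypothetical *v1.ObjectReference for this node.
//
//	watcher, err := NewWatcher(recorder)
//	if err == nil && watcher != nil { // watcher is nil for a FakeRecorder
//		_ = watcher.Start(ctx, nodeRef)
//	}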
const (
systemOOMEvent = "SystemOOM"
recordEventContainerName = "/"
)
// Start watches for system OOMs and records an event for every system OOM encountered.
func (ow *realWatcher) Start(ctx context.Context, ref *v1.ObjectReference) error {
outStream := make(chan *oomparser.OomInstance, 10)
go ow.oomStreamer.StreamOoms(outStream)
go func() {
logger := klog.FromContext(ctx)
defer runtime.HandleCrash()
for event := range outStream {
if event.VictimContainerName == recordEventContainerName {
logger.V(1).Info("Got sys oom event", "event", event)
eventMsg := "System OOM encountered"
if event.ProcessName != "" && event.Pid != 0 {
eventMsg = fmt.Sprintf("%s, victim process: %s, pid: %d", eventMsg, event.ProcessName, event.Pid)
}
ow.recorder.Eventf(ref, v1.EventTypeWarning, systemOOMEvent, eventMsg)
}
}
logger.Error(nil, "Unexpectedly stopped receiving OOM notifications")
}()
return nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pleg
import (
"context"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
)
// globalCacheUpdatePeriod is the period at which the global timestamp of the
// cache is updated. If pod workers get stuck at a cache.GetNewerThan call,
// they will be unblocked after this period.
const globalCacheUpdatePeriod = 5 * time.Second
var (
eventedPLEGUsage = false
eventedPLEGUsageMu = sync.RWMutex{}
)
// isEventedPLEGInUse indicates whether the Evented PLEG is in use. Even after
// enabling the Evented PLEG feature gate, there are several reasons it may not
// be in use, e.g. streaming data issues from the runtime, or the runtime not
// implementing the container events stream.
func isEventedPLEGInUse() bool {
eventedPLEGUsageMu.RLock()
defer eventedPLEGUsageMu.RUnlock()
return eventedPLEGUsage
}
// setEventedPLEGUsage should only be called from
// Start/Stop of the Evented PLEG.
func setEventedPLEGUsage(enable bool) {
eventedPLEGUsageMu.Lock()
defer eventedPLEGUsageMu.Unlock()
eventedPLEGUsage = enable
}
type EventedPLEG struct {
// The container runtime.
runtime kubecontainer.Runtime
// The runtime service.
runtimeService internalapi.RuntimeService
// The channel on which the subscriber listens for events.
eventChannel chan *PodLifecycleEvent
// Cache for storing the runtime states required for syncing pods.
cache kubecontainer.Cache
// For testability.
clock clock.Clock
// GenericPLEG is used to force relist when required.
genericPleg podLifecycleEventGeneratorHandler
// The maximum number of retries when getting container events from the runtime.
eventedPlegMaxStreamRetries int
// Indicates relisting related parameters
relistDuration *RelistDuration
// Stop the Evented PLEG by closing the channel.
stopCh chan struct{}
// Stops the periodic update of the cache global timestamp.
stopCacheUpdateCh chan struct{}
// Locks the start/stop operation of the Evented PLEG.
runningMu sync.Mutex
// logger is used for contextual logging
logger klog.Logger
}
// NewEventedPLEG instantiates a new EventedPLEG object and returns it.
func NewEventedPLEG(logger klog.Logger, runtime kubecontainer.Runtime, runtimeService internalapi.RuntimeService, eventChannel chan *PodLifecycleEvent,
cache kubecontainer.Cache, genericPleg PodLifecycleEventGenerator, eventedPlegMaxStreamRetries int,
relistDuration *RelistDuration, clock clock.Clock) (PodLifecycleEventGenerator, error) {
handler, ok := genericPleg.(podLifecycleEventGeneratorHandler)
if !ok {
return nil, fmt.Errorf("%v doesn't implement podLifecycleEventGeneratorHandler interface", genericPleg)
}
return &EventedPLEG{
runtime: runtime,
runtimeService: runtimeService,
eventChannel: eventChannel,
cache: cache,
genericPleg: handler,
eventedPlegMaxStreamRetries: eventedPlegMaxStreamRetries,
relistDuration: relistDuration,
clock: clock,
logger: logger,
}, nil
}
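// Illustrative construction sketch (assumed values, not from the original
// source). The genericPleg argument must implement
// podLifecycleEventGeneratorHandler, otherwise NewEventedPLEG returns an error.
//
//	eventCh := make(chan *PodLifecycleEvent, 1000) // capacity is an assumption
//	evented, err := NewEventedPLEG(logger, runtime, runtimeService, eventCh,
//		cache, genericPleg, 5 /* assumed max stream retries */, relistDuration, clock.RealClock{})
//	if err == nil {
//		evented.Start()
//	}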
// Watch returns a channel from which the subscriber can receive PodLifecycleEvent events.
func (e *EventedPLEG) Watch() chan *PodLifecycleEvent {
return e.eventChannel
}
// Relist relists all containers using the GenericPLEG.
func (e *EventedPLEG) Relist() {
e.genericPleg.Relist()
}
// Start starts the Evented PLEG
func (e *EventedPLEG) Start() {
e.runningMu.Lock()
defer e.runningMu.Unlock()
if isEventedPLEGInUse() {
return
}
setEventedPLEGUsage(true)
e.stopCh = make(chan struct{})
e.stopCacheUpdateCh = make(chan struct{})
go wait.Until(e.watchEventsChannel, 0, e.stopCh)
go wait.Until(e.updateGlobalCache, globalCacheUpdatePeriod, e.stopCacheUpdateCh)
}
// Stop stops the Evented PLEG
func (e *EventedPLEG) Stop() {
e.runningMu.Lock()
defer e.runningMu.Unlock()
if !isEventedPLEGInUse() {
return
}
setEventedPLEGUsage(false)
close(e.stopCh)
close(e.stopCacheUpdateCh)
}
// In case the Evented PLEG experiences undetectable issues in the underlying
// GRPC connection there is a remote chance the pod might get stuck in a
// given state while it has progressed in its life cycle. This function will be
// called periodically to update the global timestamp of the cache so that those
// pods stuck at GetNewerThan in pod workers will get unstuck.
func (e *EventedPLEG) updateGlobalCache() {
e.cache.UpdateTime(time.Now())
}
// Update the relisting period and threshold
func (e *EventedPLEG) Update(relistDuration *RelistDuration) {
e.genericPleg.Update(relistDuration)
}
// Healthy checks if the PLEG works properly.
func (e *EventedPLEG) Healthy() (bool, error) {
// GenericPLEG is declared unhealthy when relisting time is more
// than the relistThreshold. In case EventedPLEG is turned on,
// relistingPeriod and relistingThreshold are adjusted to higher
// values. So the health check of Generic PLEG should check
// the adjusted values of relistingPeriod and relistingThreshold.
// EventedPLEG is declared unhealthy only if eventChannel is out of capacity.
if len(e.eventChannel) == cap(e.eventChannel) {
return false, fmt.Errorf("EventedPLEG: pleg event channel capacity is full with %v events", len(e.eventChannel))
}
timestamp := e.clock.Now()
metrics.PLEGLastSeen.Set(float64(timestamp.Unix()))
return true, nil
}
func (e *EventedPLEG) watchEventsChannel() {
containerEventsResponseCh := make(chan *runtimeapi.ContainerEventResponse, cap(e.eventChannel))
defer close(containerEventsResponseCh)
// Get the container events from the runtime.
go func() {
numAttempts := 0
for {
if numAttempts >= e.eventedPlegMaxStreamRetries {
if isEventedPLEGInUse() {
// Fall back to Generic PLEG relisting since Evented PLEG is not working.
e.logger.V(4).Info("Fall back to Generic PLEG relisting since Evented PLEG is not working")
e.Stop()
e.genericPleg.Stop() // Stop the existing Generic PLEG which runs with longer relisting period when Evented PLEG is in use.
e.Update(e.relistDuration) // Update the relisting period to the default value for the Generic PLEG.
e.genericPleg.Start()
break
}
}
err := e.runtimeService.GetContainerEvents(context.Background(), containerEventsResponseCh, func(runtimeapi.RuntimeService_GetContainerEventsClient) {
metrics.EventedPLEGConn.Inc()
})
if err != nil {
metrics.EventedPLEGConnErr.Inc()
numAttempts++
e.Relist() // Force a relist to get the latest container and pods running metric.
e.logger.V(4).Info("Evented PLEG: Failed to get container events, retrying: ", "err", err)
}
}
}()
if isEventedPLEGInUse() {
e.processCRIEvents(containerEventsResponseCh)
}
}
func (e *EventedPLEG) processCRIEvents(containerEventsResponseCh chan *runtimeapi.ContainerEventResponse) {
for event := range containerEventsResponseCh {
// Ignore the event if PodSandboxStatus is nil.
// This might happen under some race condition where the podSandbox has
// been deleted, and therefore container runtime couldn't find the
// podSandbox for the container when generating the event.
// It is safe to ignore because
// a) an event would have been received for the sandbox deletion,
// b) in the worst case, a relist will eventually sync the pod status.
// TODO(#114371): Figure out a way to handle this case instead of ignoring.
if event.PodSandboxStatus == nil || event.PodSandboxStatus.Metadata == nil {
e.logger.Error(nil, "Evented PLEG: received ContainerEventResponse with nil PodSandboxStatus or PodSandboxStatus.Metadata", "containerEventResponse", event)
continue
}
podID := types.UID(event.PodSandboxStatus.Metadata.Uid)
shouldSendPLEGEvent := false
status := e.runtime.GeneratePodStatus(event)
if klogV := e.logger.V(6); klogV.Enabled() {
e.logger.Info("Evented PLEG: Generated pod status from the received event", "podUID", podID, "podStatus", status)
} else {
e.logger.V(4).Info("Evented PLEG: Generated pod status from the received event", "podUID", podID)
}
// Preserve the pod IP across cache updates if the new IP is empty.
// When a pod is torn down, kubelet may race with PLEG and retrieve
// a pod status after network teardown, but the kubernetes API expects
// the completed pod's IP to be available after the pod is dead.
status.IPs = e.getPodIPs(podID, status)
e.updateRunningPodMetric(status)
e.updateRunningContainerMetric(status)
e.updateLatencyMetric(event)
if event.ContainerEventType == runtimeapi.ContainerEventType_CONTAINER_DELETED_EVENT {
for _, sandbox := range status.SandboxStatuses {
if sandbox.Id == event.ContainerId {
// When the CONTAINER_DELETED_EVENT is received by the kubelet,
// the runtime has indicated that the container has been removed
// by the runtime and hence, it must be removed from the cache
// of kubelet too.
e.cache.Delete(podID)
}
}
shouldSendPLEGEvent = true
} else {
if e.cache.Set(podID, status, nil, time.Unix(0, event.GetCreatedAt())) {
shouldSendPLEGEvent = true
}
}
if shouldSendPLEGEvent {
e.processCRIEvent(event)
}
}
}
func (e *EventedPLEG) processCRIEvent(event *runtimeapi.ContainerEventResponse) {
switch event.ContainerEventType {
case runtimeapi.ContainerEventType_CONTAINER_STOPPED_EVENT:
e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerDied, Data: event.ContainerId})
e.logger.V(4).Info("Received Container Stopped Event", "event", event.String())
case runtimeapi.ContainerEventType_CONTAINER_CREATED_EVENT:
// We only need to update the pod status on container create.
// But we don't have to generate any PodLifeCycleEvent. Container creation related
// PodLifeCycleEvent is ignored by the existing Generic PLEG as well.
// https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L88 and
// https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L273
e.logger.V(4).Info("Received Container Created Event", "event", event.String())
case runtimeapi.ContainerEventType_CONTAINER_STARTED_EVENT:
e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerStarted, Data: event.ContainerId})
e.logger.V(4).Info("Received Container Started Event", "event", event.String())
case runtimeapi.ContainerEventType_CONTAINER_DELETED_EVENT:
// In case the pod is deleted it is safe to generate both ContainerDied and ContainerRemoved events, just like in the case of
// Generic PLEG. https://github.com/kubernetes/kubernetes/blob/24753aa8a4df8d10bfd6330e0f29186000c018be/pkg/kubelet/pleg/generic.go#L169
e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerDied, Data: event.ContainerId})
e.sendPodLifecycleEvent(&PodLifecycleEvent{ID: types.UID(event.PodSandboxStatus.Metadata.Uid), Type: ContainerRemoved, Data: event.ContainerId})
e.logger.V(4).Info("Received Container Deleted Event", "event", event)
}
}
func (e *EventedPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus) []string {
if len(status.IPs) != 0 {
return status.IPs
}
oldStatus, err := e.cache.Get(pid)
if err != nil || len(oldStatus.IPs) == 0 {
return nil
}
for _, sandboxStatus := range status.SandboxStatuses {
// If at least one sandbox is ready, then use this status update's pod IP
if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
return status.IPs
}
}
// For pods with no ready containers or sandboxes (like exited pods)
// use the old status' pod IP
return oldStatus.IPs
}
func (e *EventedPLEG) sendPodLifecycleEvent(event *PodLifecycleEvent) {
select {
case e.eventChannel <- event:
default:
// record how many events were discarded due to channel out of capacity
metrics.PLEGDiscardEvents.Inc()
e.logger.Error(nil, "Evented PLEG: Event channel is full, discarded pod lifecycle event")
}
}
func getPodSandboxState(podStatus *kubecontainer.PodStatus) kubecontainer.State {
// derive the pod sandbox state from the pod's sandbox and container statuses
var sandboxId string
for _, sandbox := range podStatus.SandboxStatuses {
sandboxId = sandbox.Id
// pod must contain only one sandbox
break
}
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.ID.ID == sandboxId {
if containerStatus.State == kubecontainer.ContainerStateRunning {
return containerStatus.State
}
}
}
return kubecontainer.ContainerStateExited
}
func (e *EventedPLEG) updateRunningPodMetric(podStatus *kubecontainer.PodStatus) {
cachedPodStatus, err := e.cache.Get(podStatus.ID)
if err != nil {
e.logger.Error(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
}
// cache miss condition: the pod status object will have an empty state if it was missed in the cache
if len(cachedPodStatus.SandboxStatuses) < 1 {
sandboxState := getPodSandboxState(podStatus)
if sandboxState == kubecontainer.ContainerStateRunning {
metrics.RunningPodCount.Inc()
}
} else {
oldSandboxState := getPodSandboxState(cachedPodStatus)
currentSandboxState := getPodSandboxState(podStatus)
if oldSandboxState == kubecontainer.ContainerStateRunning && currentSandboxState != kubecontainer.ContainerStateRunning {
metrics.RunningPodCount.Dec()
} else if oldSandboxState != kubecontainer.ContainerStateRunning && currentSandboxState == kubecontainer.ContainerStateRunning {
metrics.RunningPodCount.Inc()
}
}
}
func getContainerStateCount(podStatus *kubecontainer.PodStatus) map[kubecontainer.State]int {
containerStateCount := make(map[kubecontainer.State]int)
for _, container := range podStatus.ContainerStatuses {
containerStateCount[container.State]++
}
return containerStateCount
}
func (e *EventedPLEG) updateRunningContainerMetric(podStatus *kubecontainer.PodStatus) {
cachedPodStatus, err := e.cache.Get(podStatus.ID)
if err != nil {
e.logger.Error(err, "Evented PLEG: Get cache", "podID", podStatus.ID)
}
// cache miss condition: the pod status object will have an empty state if it was missed in the cache
if len(cachedPodStatus.SandboxStatuses) < 1 {
containerStateCount := getContainerStateCount(podStatus)
for state, count := range containerStateCount {
// add currently obtained count
metrics.RunningContainerCount.WithLabelValues(string(state)).Add(float64(count))
}
} else {
oldContainerStateCount := getContainerStateCount(cachedPodStatus)
currentContainerStateCount := getContainerStateCount(podStatus)
// old and new set of container states may vary;
// get a unique set of container states combining both
containerStates := make(map[kubecontainer.State]bool)
for state := range oldContainerStateCount {
containerStates[state] = true
}
for state := range currentContainerStateCount {
containerStates[state] = true
}
// update the metric via difference of old and current counts
for state := range containerStates {
diff := currentContainerStateCount[state] - oldContainerStateCount[state]
metrics.RunningContainerCount.WithLabelValues(string(state)).Add(float64(diff))
}
}
}
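// Worked example (illustrative): if the cached status counted
// {running: 2, exited: 1} and the new status counts {running: 1, exited: 2},
// the diffs are running: -1 and exited: +1, so the gauges are adjusted by
// those deltas rather than being recomputed from scratch.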
func (e *EventedPLEG) updateLatencyMetric(event *runtimeapi.ContainerEventResponse) {
duration := time.Duration(time.Now().UnixNano()-event.CreatedAt) * time.Nanosecond
metrics.EventedPLEGConnLatency.Observe(duration.Seconds())
}
func (e *EventedPLEG) SetPodWatchCondition(podUID types.UID, conditionKey string, condition WatchCondition) {
e.genericPleg.SetPodWatchCondition(podUID, conditionKey, condition)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pleg
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
)
// GenericPLEG is an extremely simple generic PLEG that relies solely on
// periodic listing to discover container changes. It should be used
// as a temporary replacement for container runtimes that do not support a
// proper event generator yet.
//
// Note that GenericPLEG assumes that a container would not be created,
// terminated, and garbage collected within one relist period. If such an
// incident happens, GenericPLEG would miss all events regarding this
// container. In the case of relisting failure, the window may become longer.
// Note that this assumption is not unique -- many kubelet internal components
// rely on terminated containers as tombstones for bookkeeping purposes. The
// garbage collector is implemented to work with such situations. However, to
// guarantee that kubelet can handle missing container events, it is
// recommended to set the relist period short and have an auxiliary, longer
// periodic sync in kubelet as the safety net.
type GenericPLEG struct {
// The container runtime.
runtime kubecontainer.Runtime
// The channel on which the subscriber listens for events.
eventChannel chan *PodLifecycleEvent
// The internal cache for pod/container information.
podRecords podRecords
// Time of the last relisting.
relistTime atomic.Value
// Cache for storing the runtime states required for syncing pods.
cache kubecontainer.Cache
// For testability.
clock clock.Clock
// Pods that failed to have their status retrieved during a relist. These pods will be
// retried during the next relisting.
podsToReinspect map[types.UID]*kubecontainer.Pod
// Stop the Generic PLEG by closing the channel.
stopCh chan struct{}
// Locks the relisting of the Generic PLEG
relistLock sync.Mutex
// Indicates if the Generic PLEG is running or not
isRunning bool
// Locks the start/stop operation of Generic PLEG
runningMu sync.Mutex
// Indicates relisting related parameters
relistDuration *RelistDuration
// logger is used for contextual logging
logger klog.Logger
// watchConditions tracks pod watch conditions, guarded by watchConditionsLock
// watchConditions is a map of pod UID -> condition key -> condition
watchConditions map[types.UID]map[string]versionedWatchCondition
watchConditionsLock sync.Mutex
}
type versionedWatchCondition struct {
key string
condition WatchCondition
version uint32
}
// plegContainerState has a one-to-one mapping to the
// kubecontainer.State except for the non-existent state. This state
// is introduced here to complete the state transition scenarios.
type plegContainerState string
const (
plegContainerRunning plegContainerState = "running"
plegContainerExited plegContainerState = "exited"
plegContainerUnknown plegContainerState = "unknown"
plegContainerNonExistent plegContainerState = "non-existent"
)
func convertState(state kubecontainer.State) plegContainerState {
switch state {
case kubecontainer.ContainerStateCreated:
// kubelet doesn't use the "created" state yet, hence convert it to "unknown".
return plegContainerUnknown
case kubecontainer.ContainerStateRunning:
return plegContainerRunning
case kubecontainer.ContainerStateExited:
return plegContainerExited
case kubecontainer.ContainerStateUnknown:
return plegContainerUnknown
default:
panic(fmt.Sprintf("unrecognized container state: %v", state))
}
}
type podRecord struct {
old *kubecontainer.Pod
current *kubecontainer.Pod
}
type podRecords map[types.UID]*podRecord
// NewGenericPLEG instantiates a new GenericPLEG object and returns it.
func NewGenericPLEG(logger klog.Logger, runtime kubecontainer.Runtime, eventChannel chan *PodLifecycleEvent,
relistDuration *RelistDuration, cache kubecontainer.Cache,
clock clock.Clock) PodLifecycleEventGenerator {
if cache == nil {
panic("cache cannot be nil")
}
return &GenericPLEG{
logger: logger,
relistDuration: relistDuration,
runtime: runtime,
eventChannel: eventChannel,
podRecords: make(podRecords),
cache: cache,
clock: clock,
watchConditions: make(map[types.UID]map[string]versionedWatchCondition),
}
}
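// Illustrative usage sketch (assumed values, not from the original source):
//
//	eventCh := make(chan *PodLifecycleEvent, 1000) // capacity is an assumption
//	pleg := NewGenericPLEG(logger, runtime, eventCh,
//		&RelistDuration{RelistPeriod: time.Second, RelistThreshold: 3 * time.Minute},
//		cache, clock.RealClock{})
//	pleg.Start()
//	for event := range pleg.Watch() {
//		_ = event // consume PodLifecycleEvents
//	}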
// Watch returns a channel from which the subscriber can receive PodLifecycleEvent
// events.
// TODO: support multiple subscribers.
func (g *GenericPLEG) Watch() chan *PodLifecycleEvent {
return g.eventChannel
}
// Start spawns a goroutine to relist periodically.
func (g *GenericPLEG) Start() {
g.runningMu.Lock()
defer g.runningMu.Unlock()
if !g.isRunning {
g.isRunning = true
g.stopCh = make(chan struct{})
go wait.Until(g.Relist, g.relistDuration.RelistPeriod, g.stopCh)
}
}
func (g *GenericPLEG) Stop() {
g.runningMu.Lock()
defer g.runningMu.Unlock()
if g.isRunning {
close(g.stopCh)
g.isRunning = false
}
}
func (g *GenericPLEG) Update(relistDuration *RelistDuration) {
g.relistDuration = relistDuration
}
// Healthy checks if the PLEG works properly.
// relistThreshold is the maximum interval between two relists.
func (g *GenericPLEG) Healthy() (bool, error) {
relistTime := g.getRelistTime()
if relistTime.IsZero() {
return false, fmt.Errorf("pleg has yet to be successful")
}
// Expose as metric so you can alert on `time()-pleg_last_seen_seconds > nn`
metrics.PLEGLastSeen.Set(float64(relistTime.Unix()))
elapsed := g.clock.Since(relistTime)
if elapsed > g.relistDuration.RelistThreshold {
return false, fmt.Errorf("pleg was last seen active %v ago; threshold is %v", elapsed, g.relistDuration.RelistThreshold)
}
return true, nil
}
func generateEvents(logger klog.Logger, podID types.UID, cid string, oldState, newState plegContainerState) []*PodLifecycleEvent {
if newState == oldState {
return nil
}
logger.V(4).Info("GenericPLEG", "podUID", podID, "containerID", cid, "oldState", oldState, "newState", newState)
switch newState {
case plegContainerRunning:
return []*PodLifecycleEvent{{ID: podID, Type: ContainerStarted, Data: cid}}
case plegContainerExited:
return []*PodLifecycleEvent{{ID: podID, Type: ContainerDied, Data: cid}}
case plegContainerUnknown:
return []*PodLifecycleEvent{{ID: podID, Type: ContainerChanged, Data: cid}}
case plegContainerNonExistent:
switch oldState {
case plegContainerExited:
// We already reported that the container died before.
return []*PodLifecycleEvent{{ID: podID, Type: ContainerRemoved, Data: cid}}
default:
return []*PodLifecycleEvent{{ID: podID, Type: ContainerDied, Data: cid}, {ID: podID, Type: ContainerRemoved, Data: cid}}
}
default:
panic(fmt.Sprintf("unrecognized container state: %v", newState))
}
}
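// Example transitions (illustrative): running -> exited yields ContainerDied;
// exited -> non-existent yields ContainerRemoved; running -> non-existent
// (the container died and was garbage collected within one relist period)
// yields both ContainerDied and ContainerRemoved.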
func (g *GenericPLEG) getRelistTime() time.Time {
val := g.relistTime.Load()
if val == nil {
return time.Time{}
}
return val.(time.Time)
}
func (g *GenericPLEG) updateRelistTime(timestamp time.Time) {
g.relistTime.Store(timestamp)
}
// Relist queries the container runtime for the list of pods/containers, compares
// it with the internal pods/containers, and generates events accordingly.
func (g *GenericPLEG) Relist() {
g.relistLock.Lock()
defer g.relistLock.Unlock()
ctx := context.Background()
g.logger.V(5).Info("GenericPLEG: Relisting")
if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
metrics.PLEGRelistInterval.Observe(metrics.SinceInSeconds(lastRelistTime))
}
timestamp := g.clock.Now()
defer func() {
metrics.PLEGRelistDuration.Observe(metrics.SinceInSeconds(timestamp))
}()
// Get all the pods.
podList, err := g.runtime.GetPods(ctx, true)
if err != nil {
g.logger.Error(err, "GenericPLEG: Unable to retrieve pods")
return
}
g.updateRelistTime(timestamp)
pods := kubecontainer.Pods(podList)
// update running pod and container count
updateRunningPodAndContainerMetrics(pods)
g.podRecords.setCurrent(pods)
g.cleanupOrphanedWatchConditions()
needsReinspection := make(map[types.UID]*kubecontainer.Pod)
for pid := range g.podRecords {
// Compare the old and the current pods, and generate events.
oldPod := g.podRecords.getOld(pid)
pod := g.podRecords.getCurrent(pid)
// Get all containers in the old and the new pod.
allContainers := getContainersFromPods(oldPod, pod)
var events []*PodLifecycleEvent
for _, container := range allContainers {
containerEvents := computeEvents(g.logger, oldPod, pod, &container.ID)
events = append(events, containerEvents...)
}
watchConditions := g.getPodWatchConditions(pid)
_, reinspect := g.podsToReinspect[pid]
if len(events) == 0 && len(watchConditions) == 0 && !reinspect {
// Nothing else needed for this pod.
continue
}
// updateCache() will inspect the pod and update the cache. If an
// error occurs during the inspection, we want PLEG to retry again
// in the next relist. To achieve this, we do not update the
// associated podRecord of the pod, so that the change will be
// detected again in the next relist.
// TODO: If many pods changed during the same relist period,
// inspecting the pod and getting the PodStatus to update the cache
// serially may take a while. We should be aware of this and
// parallelize if needed.
status, updated, err := g.updateCache(ctx, pod, pid)
if err != nil {
// Rely on updateCache calling GetPodStatus to log the actual error.
g.logger.V(4).Error(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name))
// make sure we try to reinspect the pod during the next relisting
needsReinspection[pid] = pod
continue
} else if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
if !updated {
continue
}
}
var completedConditions []versionedWatchCondition
for _, condition := range watchConditions {
if condition.condition(status) {
// condition was met: add it to the list of completed conditions.
completedConditions = append(completedConditions, condition)
}
}
if len(completedConditions) > 0 {
g.completeWatchConditions(pid, completedConditions)
// If at least 1 condition completed, emit a ConditionMet event to trigger a pod sync.
// We only emit 1 event even if multiple conditions are met, since SyncPod reevaluates
// all containers in the pod with the latest status.
events = append(events, &PodLifecycleEvent{ID: pid, Type: ConditionMet})
}
// Update the internal storage and send out the events.
g.podRecords.update(pid)
// Map from containerId to exit code; used as a temporary cache for lookup
containerExitCode := make(map[string]int)
for i := range events {
// Filter out events that are not reliable and that no other component uses yet.
if events[i].Type == ContainerChanged {
continue
}
select {
case g.eventChannel <- events[i]:
default:
metrics.PLEGDiscardEvents.Inc()
g.logger.Error(nil, "Event channel is full, discard this relist() cycle event")
}
// Log exit code of containers when they finished in a particular event
if events[i].Type == ContainerDied {
// Fill up containerExitCode map for ContainerDied event when first time appeared
if len(containerExitCode) == 0 && pod != nil {
if err == nil {
for _, containerStatus := range status.ContainerStatuses {
containerExitCode[containerStatus.ID.ID] = containerStatus.ExitCode
}
}
}
if containerID, ok := events[i].Data.(string); ok {
if exitCode, ok := containerExitCode[containerID]; ok && pod != nil {
g.logger.V(2).Info("Generic (PLEG): container finished", "podID", pod.ID, "containerID", containerID, "exitCode", exitCode)
}
}
}
}
}
// Update the cache timestamp. This needs to happen *after*
// all pods have been properly updated in the cache.
g.cache.UpdateTime(timestamp)
// make sure we retain the list of pods that need reinspecting the next time relist is called
g.podsToReinspect = needsReinspection
}
func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Container {
cidSet := sets.New[string]()
var containers []*kubecontainer.Container
fillCidSet := func(cs []*kubecontainer.Container) {
for _, c := range cs {
cid := c.ID.ID
if cidSet.Has(cid) {
continue
}
cidSet.Insert(cid)
containers = append(containers, c)
}
}
for _, p := range pods {
if p == nil {
continue
}
fillCidSet(p.Containers)
// Update sandboxes as containers
// TODO: keep track of sandboxes explicitly.
fillCidSet(p.Sandboxes)
}
return containers
}
func computeEvents(logger klog.Logger, oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.ContainerID) []*PodLifecycleEvent {
var pid types.UID
if oldPod != nil {
pid = oldPod.ID
} else if newPod != nil {
pid = newPod.ID
}
oldState := getContainerState(oldPod, cid)
newState := getContainerState(newPod, cid)
return generateEvents(logger, pid, cid.ID, oldState, newState)
}
// getPodIPs preserves an older cached status' pod IP if the new status has no
// pod IPs and its sandboxes have exited.
func (g *GenericPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus) []string {
if len(status.IPs) != 0 {
return status.IPs
}
oldStatus, err := g.cache.Get(pid)
if err != nil || len(oldStatus.IPs) == 0 {
return nil
}
for _, sandboxStatus := range status.SandboxStatuses {
// If at least one sandbox is ready, then use this status update's pod IP
if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
return status.IPs
}
}
// For pods with no ready containers or sandboxes (like exited pods)
// use the old status' pod IP
return oldStatus.IPs
}
// updateCache tries to update the pod status in the kubelet cache and returns
// the status along with a boolean that is true if the pod status was actually
// updated in the cache, and false if the pod status was ignored by the cache.
func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, pid types.UID) (*kubecontainer.PodStatus, bool, error) {
if pod == nil {
// The pod is missing in the current relist. This means that
// the pod has no visible (active or inactive) containers.
g.logger.V(4).Info("PLEG: Delete status for pod", "podUID", string(pid))
g.cache.Delete(pid)
return nil, true, nil
}
timestamp := g.clock.Now()
status, err := g.runtime.GetPodStatus(ctx, pod.ID, pod.Name, pod.Namespace)
if err != nil {
// nolint:logcheck // Not using the result of klog.V inside the
// if branch is okay, we just use it to determine whether the
// additional "podStatus" key and its value should be added.
if klog.V(6).Enabled() {
g.logger.Error(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
} else {
g.logger.Error(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
}
} else {
if klogV := g.logger.V(6); klogV.Enabled() {
g.logger.Info("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
} else {
g.logger.V(4).Info("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
}
// Preserve the pod IP across cache updates if the new IP is empty.
// When a pod is torn down, kubelet may race with PLEG and retrieve
// a pod status after network teardown, but the kubernetes API expects
// the completed pod's IP to be available after the pod is dead.
status.IPs = g.getPodIPs(pid, status)
}
// When we use Generic PLEG only, the PodStatus is saved in the cache without
// any validation of the existing status against the current timestamp.
// This works well when only the Generic PLEG sets the PodStatus in the cache.
// However, if multiple entities, such as the Evented PLEG, also try to set the
// PodStatus in the cache, we may run into racy timestamps, since each of them
// calculates its timestamp in its own execution flow. While the Generic PLEG
// calculates this timestamp before getting the PodStatus, the Evented PLEG can
// only calculate the corresponding timestamp after the event has been received
// by the Kubelet.
// For more details refer to:
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/3386-kubelet-evented-pleg#timestamp-of-the-pod-status
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) && isEventedPLEGInUse() && status != nil {
timestamp = status.TimeStamp
}
return status, g.cache.Set(pod.ID, status, err, timestamp), err
}
// SetPodWatchCondition flags the pod for reinspection on every Relist iteration until the watch
// condition is met. The condition is keyed so it can be updated before the condition
// is met.
func (g *GenericPLEG) SetPodWatchCondition(podUID types.UID, conditionKey string, condition WatchCondition) {
g.watchConditionsLock.Lock()
defer g.watchConditionsLock.Unlock()
conditions, ok := g.watchConditions[podUID]
if !ok {
conditions = make(map[string]versionedWatchCondition)
}
versioned, found := conditions[conditionKey]
if found {
// Watch condition was already set. Increment its version & update the condition function.
versioned.version++
versioned.condition = condition
conditions[conditionKey] = versioned
} else {
conditions[conditionKey] = versionedWatchCondition{
key: conditionKey,
condition: condition,
}
}
g.watchConditions[podUID] = conditions
}
// getPodWatchConditions returns a list of the active watch conditions for the pod.
func (g *GenericPLEG) getPodWatchConditions(podUID types.UID) []versionedWatchCondition {
g.watchConditionsLock.Lock()
defer g.watchConditionsLock.Unlock()
podConditions, ok := g.watchConditions[podUID]
if !ok {
return nil
}
// Flatten the map into a list of conditions. This also serves to create a copy, so the lock can
// be released.
conditions := make([]versionedWatchCondition, 0, len(podConditions))
for _, condition := range podConditions {
conditions = append(conditions, condition)
}
return conditions
}
// completeWatchConditions removes the completed watch conditions, unless they have been updated
// since the condition was checked.
func (g *GenericPLEG) completeWatchConditions(podUID types.UID, completedConditions []versionedWatchCondition) {
g.watchConditionsLock.Lock()
defer g.watchConditionsLock.Unlock()
conditions, ok := g.watchConditions[podUID]
if !ok {
// Pod was deleted, nothing to do.
return
}
for _, completed := range completedConditions {
condition := conditions[completed.key]
// Only clear the condition if it has not been updated.
if condition.version == completed.version {
delete(conditions, completed.key)
}
}
g.watchConditions[podUID] = conditions
}
// cleanupOrphanedWatchConditions purges the watchConditions map of any pods that were removed from
// the pod records. Events are not emitted for removed pods.
func (g *GenericPLEG) cleanupOrphanedWatchConditions() {
g.watchConditionsLock.Lock()
defer g.watchConditionsLock.Unlock()
for podUID := range g.watchConditions {
if g.podRecords.getCurrent(podUID) == nil {
// Pod was deleted, remove it from the watch conditions.
delete(g.watchConditions, podUID)
}
}
}
func getContainerState(pod *kubecontainer.Pod, cid *kubecontainer.ContainerID) plegContainerState {
// Default to the non-existent state.
state := plegContainerNonExistent
if pod == nil {
return state
}
c := pod.FindContainerByID(*cid)
if c != nil {
return convertState(c.State)
}
// Search through sandboxes too.
c = pod.FindSandboxByID(*cid)
if c != nil {
return convertState(c.State)
}
return state
}
func updateRunningPodAndContainerMetrics(pods []*kubecontainer.Pod) {
runningSandboxNum := 0
// intermediate map to store the count of each "container_state"
containerStateCount := make(map[string]int)
for _, pod := range pods {
containers := pod.Containers
for _, container := range containers {
// update the corresponding "container_state" in map to set value for the gaugeVec metrics
containerStateCount[string(container.State)]++
}
sandboxes := pod.Sandboxes
for _, sandbox := range sandboxes {
if sandbox.State == kubecontainer.ContainerStateRunning {
runningSandboxNum++
// every pod should only have one running sandbox
break
}
}
}
for key, value := range containerStateCount {
metrics.RunningContainerCount.WithLabelValues(key).Set(float64(value))
}
// Set the running pod count metric to the computed value
metrics.RunningPodCount.Set(float64(runningSandboxNum))
}
func (pr podRecords) getOld(id types.UID) *kubecontainer.Pod {
r, ok := pr[id]
if !ok {
return nil
}
return r.old
}
func (pr podRecords) getCurrent(id types.UID) *kubecontainer.Pod {
r, ok := pr[id]
if !ok {
return nil
}
return r.current
}
func (pr podRecords) setCurrent(pods []*kubecontainer.Pod) {
for i := range pr {
pr[i].current = nil
}
for _, pod := range pods {
if r, ok := pr[pod.ID]; ok {
r.current = pod
} else {
pr[pod.ID] = &podRecord{current: pod}
}
}
}
func (pr podRecords) update(id types.UID) {
r, ok := pr[id]
if !ok {
return
}
pr.updateInternal(id, r)
}
func (pr podRecords) updateInternal(id types.UID, r *podRecord) {
if r.current == nil {
// Pod no longer exists; delete the entry.
delete(pr, id)
return
}
r.old = r.current
r.current = nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pleg
import (
"time"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// PodLifeCycleEventType define the event type of pod life cycle events.
type PodLifeCycleEventType string
type RelistDuration struct {
// The period for relisting.
RelistPeriod time.Duration
// The relisting threshold needs to be greater than the relisting period +
// the relisting time, which can vary significantly. Set a conservative
// threshold to avoid flipping between healthy and unhealthy.
RelistThreshold time.Duration
}
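// Illustrative values (assumption, not from the original source): with a
// one-second relist period and relists that can take a few seconds under
// load, a threshold of several minutes comfortably satisfies
// threshold > period + relist time.
//
//	d := RelistDuration{RelistPeriod: time.Second, RelistThreshold: 3 * time.Minute}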
const (
// ContainerStarted - event type when the new state of container is running.
ContainerStarted PodLifeCycleEventType = "ContainerStarted"
// ContainerDied - event type when the new state of container is exited.
ContainerDied PodLifeCycleEventType = "ContainerDied"
// ContainerRemoved - event type when the old state of container is exited.
ContainerRemoved PodLifeCycleEventType = "ContainerRemoved"
// PodSync is used to trigger syncing of a pod when the observed change of
// the state of the pod cannot be captured by any single event above.
PodSync PodLifeCycleEventType = "PodSync"
// ContainerChanged - event type when the new state of container is unknown.
ContainerChanged PodLifeCycleEventType = "ContainerChanged"
// ConditionMet - event type triggered when any number of watch conditions are met.
ConditionMet PodLifeCycleEventType = "ConditionMet"
)
// PodLifecycleEvent is an event that reflects the change of the pod state.
type PodLifecycleEvent struct {
// The pod ID.
ID types.UID
// The type of the event.
Type PodLifeCycleEventType
// The accompanied data which varies based on the event type.
// - ContainerStarted/ContainerStopped: the container name (string).
// - All other event types: unused.
Data interface{}
}
// PodLifecycleEventGenerator contains functions for generating pod life cycle events.
type PodLifecycleEventGenerator interface {
Start()
Watch() chan *PodLifecycleEvent
Healthy() (bool, error)
// SetPodWatchCondition flags the pod for reinspection on every Relist iteration until the watch
// condition is met. The condition is keyed so it can be updated before the condition
// is met.
SetPodWatchCondition(podUID types.UID, conditionKey string, condition WatchCondition)
}
// podLifecycleEventGeneratorHandler contains functions that are useful for different PLEGs
// and need not be exposed to rest of the kubelet
type podLifecycleEventGeneratorHandler interface {
PodLifecycleEventGenerator
Stop()
Update(relistDuration *RelistDuration)
Relist()
}
// WatchCondition takes the latest PodStatus, and returns whether the condition is met.
type WatchCondition = func(*kubecontainer.PodStatus) bool
// RunningContainerWatchCondition wraps a condition on the container status to make a pod
// WatchCondition. If the container is no longer running, the condition is implicitly cleared.
func RunningContainerWatchCondition(containerName string, condition func(*kubecontainer.Status) bool) WatchCondition {
return func(podStatus *kubecontainer.PodStatus) bool {
status := podStatus.FindContainerStatusByName(containerName)
if status == nil || status.State != kubecontainer.ContainerStateRunning {
// Container isn't running. Consider the condition "completed" so it is cleared.
return true
}
return condition(status)
}
}
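// Illustrative usage sketch (hypothetical names, not from the original
// source): flag a pod for reinspection until the "app" container has a
// recorded start time.
//
//	cond := RunningContainerWatchCondition("app", func(s *kubecontainer.Status) bool {
//		return !s.StartedAt.IsZero() // hypothetical predicate
//	})
//	pleg.SetPodWatchCondition(podUID, "app-started", cond)
//	// When cond returns true during a relist, a ConditionMet event is emitted
//	// and the condition is cleared.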
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet plugin manager to
keep track of registered plugins.
*/
package cache
import (
"context"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
)
// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
// plugin manager's actual state of the world cache.
// This cache contains a map of socket file path to plugin information of
// all plugins attached to this node.
type ActualStateOfWorld interface {
// GetRegisteredPlugins generates and returns a list of plugins
// that are successfully registered in the current actual state of world.
GetRegisteredPlugins() []PluginInfo
// AddPlugin adds the given plugin to the cache.
// An error will be returned if socketPath of the PluginInfo object is empty.
// Note that this is different from the desired state of world cache's AddOrUpdatePlugin
// because for the actual state of world cache, there won't be a scenario where
// we need to update an existing plugin if the timestamps don't match. This is
// because the plugin should have been unregistered in the reconciler and therefore
// removed from the actual state of world cache first before adding it back into
// the actual state of world cache again with the new timestamp.
AddPlugin(ctx context.Context, pluginInfo PluginInfo) error
// RemovePlugin deletes the plugin with the given socket path from the actual
// state of world.
// If a plugin does not exist with the given socket path, this is a no-op.
RemovePlugin(socketPath string)
// PluginExistsWithCorrectTimestamp checks if the given plugin exists in the current actual
// state of world cache with the correct timestamp.
// Deprecated: please use `PluginExistsWithCorrectUUID` instead, as it provides
// better cross-platform support.
PluginExistsWithCorrectTimestamp(pluginInfo PluginInfo) bool
// PluginExistsWithCorrectUUID checks if the given plugin exists in the current actual
// state of world cache with the correct UUID
PluginExistsWithCorrectUUID(pluginInfo PluginInfo) bool
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld
func NewActualStateOfWorld() ActualStateOfWorld {
return &actualStateOfWorld{
socketFileToInfo: make(map[string]PluginInfo),
}
}
type actualStateOfWorld struct {
// socketFileToInfo is a map containing the set of successfully registered plugins
// The keys are plugin socket file paths. The values are PluginInfo objects
socketFileToInfo map[string]PluginInfo
sync.RWMutex
}
var _ ActualStateOfWorld = &actualStateOfWorld{}
// PluginInfo holds information of a plugin
type PluginInfo struct {
SocketPath string
Timestamp time.Time
UUID types.UID
Handler PluginHandler
Name string
Endpoint string
}
func (asw *actualStateOfWorld) AddPlugin(ctx context.Context, pluginInfo PluginInfo) error {
asw.Lock()
defer asw.Unlock()
logger := klog.FromContext(ctx)
if pluginInfo.SocketPath == "" {
return fmt.Errorf("socket path is empty")
}
if _, ok := asw.socketFileToInfo[pluginInfo.SocketPath]; ok {
logger.V(2).Info("Plugin exists in actual state cache", "path", pluginInfo.SocketPath)
}
asw.socketFileToInfo[pluginInfo.SocketPath] = pluginInfo
return nil
}
func (asw *actualStateOfWorld) RemovePlugin(socketPath string) {
asw.Lock()
defer asw.Unlock()
delete(asw.socketFileToInfo, socketPath)
}
func (asw *actualStateOfWorld) GetRegisteredPlugins() []PluginInfo {
asw.RLock()
defer asw.RUnlock()
currentPlugins := []PluginInfo{}
for _, pluginInfo := range asw.socketFileToInfo {
currentPlugins = append(currentPlugins, pluginInfo)
}
return currentPlugins
}
func (asw *actualStateOfWorld) PluginExistsWithCorrectTimestamp(pluginInfo PluginInfo) bool {
asw.RLock()
defer asw.RUnlock()
// We need to check both if the socket file path exists, and the timestamp
// matches the given plugin (from the desired state cache) timestamp
actualStatePlugin, exists := asw.socketFileToInfo[pluginInfo.SocketPath]
return exists && (actualStatePlugin.Timestamp == pluginInfo.Timestamp)
}
func (asw *actualStateOfWorld) PluginExistsWithCorrectUUID(pluginInfo PluginInfo) bool {
asw.RLock()
defer asw.RUnlock()
// We need to check both if the socket file path exists, and the UUID
// matches the given plugin (from the desired state cache) UUID
actualStatePlugin, exists := asw.socketFileToInfo[pluginInfo.SocketPath]
return exists && (actualStatePlugin.UUID == pluginInfo.UUID)
}
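// Illustrative sketch (not part of the Kubernetes source): how the actual
// state of world cache above is typically exercised. The socket path and
// UUID values are hypothetical, chosen only for demonstration.
package main
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
func main() {
asw := cache.NewActualStateOfWorld()
info := cache.PluginInfo{
SocketPath: "/var/lib/kubelet/plugins_registry/example.sock", // hypothetical path
UUID: types.UID("demo-uuid"), // hypothetical UUID
}
if err := asw.AddPlugin(context.Background(), info); err != nil {
fmt.Println("add failed:", err)
return
}
// The reconciler uses the UUID comparison to decide whether the registered
// plugin is the same instance the desired state of world expects.
fmt.Println("registered with matching UUID:", asw.PluginExistsWithCorrectUUID(info))
asw.RemovePlugin(info.SocketPath)
}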
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet plugin manager to
keep track of registered plugins.
*/
package cache
import (
"context"
"errors"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/klog/v2"
)
// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
// plugin manager's desired state of the world cache.
// This cache contains a map of socket file path to plugin information of
// all plugins attached to this node.
type DesiredStateOfWorld interface {
// AddOrUpdatePlugin adds the given plugin to the cache if it doesn't already exist.
// If it does exist in the cache, then the timestamp and UUID of the PluginInfo object in the cache will be updated.
// An error will be returned if socketPath is empty.
AddOrUpdatePlugin(ctx context.Context, socketPath string) error
// RemovePlugin deletes the plugin with the given socket path from the desired
// state of world.
// If a plugin does not exist with the given socket path, this is a no-op.
RemovePlugin(socketPath string)
// GetPluginsToRegister generates and returns a list of plugins
// in the current desired state of world.
GetPluginsToRegister() []PluginInfo
// PluginExists checks if the given socket path exists in the current desired
// state of world cache
PluginExists(socketPath string) bool
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld() DesiredStateOfWorld {
return &desiredStateOfWorld{
socketFileToInfo: make(map[string]PluginInfo),
}
}
type desiredStateOfWorld struct {
// socketFileToInfo is a map containing the set of plugins that should be registered
// The keys are plugin socket file paths. The values are PluginInfo objects
socketFileToInfo map[string]PluginInfo
sync.RWMutex
}
var _ DesiredStateOfWorld = &desiredStateOfWorld{}
// Generate a detailed error msg for logs
func generatePluginMsgDetailed(prefixMsg, suffixMsg, socketPath, details string) (detailedMsg string) {
return fmt.Sprintf("%v for plugin at %q %v %v", prefixMsg, socketPath, details, suffixMsg)
}
// Generate a simplified error msg for events and a detailed error msg for logs
func generatePluginMsg(prefixMsg, suffixMsg, socketPath, details string) (simpleMsg, detailedMsg string) {
simpleMsg = fmt.Sprintf("%v for plugin at %q %v", prefixMsg, socketPath, suffixMsg)
return simpleMsg, generatePluginMsgDetailed(prefixMsg, suffixMsg, socketPath, details)
}
// GenerateMsgDetailed returns a detailed msg for plugins to register
// that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <plugin details> <suffixMsg>"
func (plugin *PluginInfo) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(plugin details: %v)", plugin)
return generatePluginMsgDetailed(prefixMsg, suffixMsg, plugin.SocketPath, detailedStr)
}
// GenerateMsg returns a simple msg for plugins to register that is user
// friendly and a detailed msg that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <plugin details> <suffixMsg>".
func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(plugin details: %v)", plugin)
return generatePluginMsg(prefixMsg, suffixMsg, plugin.SocketPath, detailedStr)
}
// GenerateErrorDetailed returns a detailed error for plugins to register
// that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <plugin details>: <err>".
func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns a simple error for plugins to register that is user
// friendly and a detailed error that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <plugin details>: <err>".
func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
return errors.New(simpleMsg), errors.New(detailedMsg)
}
// Generates an error string with the format ": <err>" if err exists
func errSuffix(err error) string {
errStr := ""
if err != nil {
errStr = fmt.Sprintf(": %v", err)
}
return errStr
}
func (dsw *desiredStateOfWorld) AddOrUpdatePlugin(ctx context.Context, socketPath string) error {
dsw.Lock()
defer dsw.Unlock()
logger := klog.FromContext(ctx)
if socketPath == "" {
return fmt.Errorf("socket path is empty")
}
if _, ok := dsw.socketFileToInfo[socketPath]; ok {
logger.V(2).Info("Plugin exists in desired state cache, timestamp will be updated", "path", socketPath)
}
// Update the PluginInfo object.
// Note that we only update the timestamp and UUID in the desired state of world, not the actual state of world
// because in the reconciler, we need to check if the plugin in the actual state of world is the same
// version as the plugin in the desired state of world
dsw.socketFileToInfo[socketPath] = PluginInfo{
SocketPath: socketPath,
Timestamp: time.Now(),
UUID: uuid.NewUUID(),
}
return nil
}
func (dsw *desiredStateOfWorld) RemovePlugin(socketPath string) {
dsw.Lock()
defer dsw.Unlock()
delete(dsw.socketFileToInfo, socketPath)
}
func (dsw *desiredStateOfWorld) GetPluginsToRegister() []PluginInfo {
dsw.RLock()
defer dsw.RUnlock()
pluginsToRegister := []PluginInfo{}
for _, pluginInfo := range dsw.socketFileToInfo {
pluginsToRegister = append(pluginsToRegister, pluginInfo)
}
return pluginsToRegister
}
func (dsw *desiredStateOfWorld) PluginExists(socketPath string) bool {
dsw.RLock()
defer dsw.RUnlock()
_, exists := dsw.socketFileToInfo[socketPath]
return exists
}
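// Illustrative sketch (not part of the Kubernetes source): AddOrUpdatePlugin
// assigns a fresh timestamp and UUID on every call, which is what lets the
// reconciler detect that the plugin behind an existing socket path has been
// replaced. The socket path below is hypothetical.
package main
import (
"context"
"fmt"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
func main() {
dsw := cache.NewDesiredStateOfWorld()
path := "/var/lib/kubelet/plugins_registry/example.sock"
_ = dsw.AddOrUpdatePlugin(context.Background(), path)
first := dsw.GetPluginsToRegister()[0]
// A second call for the same path keeps a single cache entry but refreshes
// its UUID and timestamp, so the entry no longer matches the actual state.
_ = dsw.AddOrUpdatePlugin(context.Background(), path)
second := dsw.GetPluginsToRegister()[0]
fmt.Println("UUID refreshed:", first.UUID != second.UUID) // expected: true
fmt.Println("still present:", dsw.PluginExists(path)) // expected: true
}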
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
const (
// Metric keys for Plugin Manager.
pluginManagerTotalPlugins = "plugin_manager_total_plugins"
)
var (
registerMetrics sync.Once
totalPluginsDesc = metrics.NewDesc(
pluginManagerTotalPlugins,
"Number of plugins in Plugin Manager",
[]string{"socket_path", "state"},
nil,
metrics.ALPHA,
"",
)
)
// pluginCount is a nested map used as a counter: state -> socket path -> count.
type pluginCount map[string]map[string]int64
func (pc pluginCount) add(state, socketPath string) {
count, ok := pc[state]
if !ok {
count = map[string]int64{}
}
count[socketPath]++
pc[state] = count
}
// Register registers Plugin Manager metrics.
func Register(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld) {
registerMetrics.Do(func() {
legacyregistry.CustomMustRegister(&totalPluginsCollector{asw: asw, dsw: dsw})
})
}
type totalPluginsCollector struct {
metrics.BaseStableCollector
asw cache.ActualStateOfWorld
dsw cache.DesiredStateOfWorld
}
var _ metrics.StableCollector = &totalPluginsCollector{}
// DescribeWithStability implements the metrics.StableCollector interface.
func (c *totalPluginsCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- totalPluginsDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (c *totalPluginsCollector) CollectWithStability(ch chan<- metrics.Metric) {
for stateName, pluginCount := range c.getPluginCount() {
for socketPath, count := range pluginCount {
ch <- metrics.NewLazyConstMetric(totalPluginsDesc,
metrics.GaugeValue,
float64(count),
socketPath,
stateName)
}
}
}
func (c *totalPluginsCollector) getPluginCount() pluginCount {
counter := make(pluginCount)
for _, registeredPlugin := range c.asw.GetRegisteredPlugins() {
socketPath := registeredPlugin.SocketPath
counter.add("actual_state_of_world", socketPath)
}
for _, pluginToRegister := range c.dsw.GetPluginsToRegister() {
socketPath := pluginToRegister.SocketPath
counter.add("desired_state_of_world", socketPath)
}
return counter
}
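// Illustrative sketch (not part of the Kubernetes source): the pluginCount
// counter reproduced standalone, to show how the collector above ends up
// emitting one gauge sample per (state, socket path) pair. The socket paths
// are hypothetical.
package main
import "fmt"
type pluginCount map[string]map[string]int64
func (pc pluginCount) add(state, socketPath string) {
count, ok := pc[state]
if !ok {
count = map[string]int64{}
}
count[socketPath]++
pc[state] = count
}
func main() {
counter := make(pluginCount)
counter.add("actual_state_of_world", "/registry/a.sock")
counter.add("desired_state_of_world", "/registry/a.sock")
counter.add("desired_state_of_world", "/registry/b.sock")
for state, byPath := range counter {
for path, n := range byPath {
fmt.Printf("plugin_manager_total_plugins{socket_path=%q,state=%q} = %d\n", path, state, n)
}
}
}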
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package operationexecutor implements interfaces that enable execution of
// register and unregister operations with a goroutinemap so that no more
// than one operation is ever triggered on the same plugin at a time.
package operationexecutor
import (
"context"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap"
)
// OperationExecutor defines a set of operations for registering and unregistering
// a plugin that are executed with a NewGoRoutineMap which
// prevents more than one operation from being triggered on the same socket path.
//
// These operations should be idempotent (for example, RegisterPlugin should
// still succeed if the plugin is already registered, etc.). However,
// they depend on the plugin handlers (for each plugin type) to implement this
// behavior.
//
// Once an operation completes successfully, the actualStateOfWorld is updated
// to indicate the plugin is registered/unregistered.
//
// Once the operation is started, since it is executed asynchronously,
// errors are simply logged and the goroutine is terminated without updating
// actualStateOfWorld.
type OperationExecutor interface {
// RegisterPlugin registers the given plugin using a handler in the plugin handler map.
// It then updates the actual state of the world to reflect that.
RegisterPlugin(ctx context.Context, socketPath string, UUID types.UID, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error
// UnregisterPlugin deregisters the given plugin using a handler in the given plugin handler map.
// It then updates the actual state of the world to reflect that.
UnregisterPlugin(ctx context.Context, pluginInfo cache.PluginInfo, actualStateOfWorld ActualStateOfWorldUpdater) error
}
// NewOperationExecutor returns a new instance of OperationExecutor.
func NewOperationExecutor(
operationGenerator OperationGenerator) OperationExecutor {
return &operationExecutor{
pendingOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
operationGenerator: operationGenerator,
}
}
// ActualStateOfWorldUpdater defines a set of operations updating the actual
// state of the world cache after successful registration/deregistration.
type ActualStateOfWorldUpdater interface {
// AddPlugin adds the given plugin to the cache if no existing plugin
// in the cache has the same socket path.
// An error will be returned if socketPath is empty.
AddPlugin(ctx context.Context, pluginInfo cache.PluginInfo) error
// RemovePlugin deletes the plugin with the given socket path from the actual
// state of world.
// If a plugin does not exist with the given socket path, this is a no-op.
RemovePlugin(socketPath string)
}
type operationExecutor struct {
// pendingOperations keeps track of pending register and unregister operations so
// multiple operations are not started on the same socket path
pendingOperations goroutinemap.GoRoutineMap
// operationGenerator is an interface that provides implementations for
// generating register and unregister functions
operationGenerator OperationGenerator
}
var _ OperationExecutor = &operationExecutor{}
func (oe *operationExecutor) IsOperationPending(socketPath string) bool {
return oe.pendingOperations.IsOperationPending(socketPath)
}
func (oe *operationExecutor) RegisterPlugin(
ctx context.Context,
socketPath string,
pluginUUID types.UID,
pluginHandlers map[string]cache.PluginHandler,
actualStateOfWorld ActualStateOfWorldUpdater) error {
generatedOperation :=
oe.operationGenerator.GenerateRegisterPluginFunc(ctx, socketPath, pluginUUID, pluginHandlers, actualStateOfWorld)
return oe.pendingOperations.Run(
socketPath, generatedOperation)
}
func (oe *operationExecutor) UnregisterPlugin(
ctx context.Context,
pluginInfo cache.PluginInfo,
actualStateOfWorld ActualStateOfWorldUpdater) error {
generatedOperation :=
oe.operationGenerator.GenerateUnregisterPluginFunc(ctx, pluginInfo, actualStateOfWorld)
return oe.pendingOperations.Run(
pluginInfo.SocketPath, generatedOperation)
}
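// Illustrative sketch (not part of the Kubernetes source): the goroutinemap
// behavior the executor relies on. While an operation keyed by a socket path
// is in flight, a second Run call with the same key returns an error instead
// of starting a duplicate operation. The socket path is hypothetical.
package main
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/util/goroutinemap"
)
func main() {
ops := goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */)
socketPath := "/registry/example.sock"
_ = ops.Run(socketPath, func() error {
time.Sleep(100 * time.Millisecond) // simulate a slow registration
return nil
})
// The first operation is still pending, so this second submission is
// rejected rather than run concurrently on the same plugin.
if err := ops.Run(socketPath, func() error { return nil }); err != nil {
fmt.Println("second operation rejected:", err)
}
for ops.IsOperationPending(socketPath) {
time.Sleep(10 * time.Millisecond)
}
}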
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package operationexecutor implements interfaces that enable execution of
// register and unregister operations with a goroutinemap so that no more
// than one operation is ever triggered on the same plugin at a time.
package operationexecutor
import (
"context"
"errors"
"fmt"
"net"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/client-go/tools/record"
registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
const (
dialTimeoutDuration = 10 * time.Second
notifyTimeoutDuration = 5 * time.Second
)
var _ OperationGenerator = &operationGenerator{}
type operationGenerator struct {
// recorder is used to record events in the API server
recorder record.EventRecorder
}
// NewOperationGenerator returns a new instance of operationGenerator
func NewOperationGenerator(recorder record.EventRecorder) OperationGenerator {
return &operationGenerator{
recorder: recorder,
}
}
// OperationGenerator is an interface that extracts the operation functions from operationExecutor to make them dependency injectable
type OperationGenerator interface {
// Generates the RegisterPlugin function needed to perform the registration of a plugin
GenerateRegisterPluginFunc(
ctx context.Context,
socketPath string,
UUID types.UID,
pluginHandlers map[string]cache.PluginHandler,
actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error
// Generates the UnregisterPlugin function needed to perform the unregistration of a plugin
GenerateUnregisterPluginFunc(
ctx context.Context,
pluginInfo cache.PluginInfo,
actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error
}
func (og *operationGenerator) GenerateRegisterPluginFunc(
ctx context.Context,
socketPath string,
pluginUUID types.UID,
pluginHandlers map[string]cache.PluginHandler,
actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error {
registerPluginFunc := func() error {
logger := klog.FromContext(ctx)
client, conn, err := dial(ctx, socketPath, dialTimeoutDuration)
if err != nil {
return fmt.Errorf("RegisterPlugin error -- dial failed at socket %s, err: %v", socketPath, err)
}
defer conn.Close()
// Create separate context from parent context
ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
infoResp, err := client.GetInfo(ctxWithTimeout, &registerapi.InfoRequest{})
if err != nil {
return fmt.Errorf("RegisterPlugin error -- failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err)
}
handler, ok := pluginHandlers[infoResp.Type]
if !ok {
if err := og.notifyPlugin(ctx, client, false, fmt.Sprintf("RegisterPlugin error -- no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath)); err != nil {
return fmt.Errorf("RegisterPlugin error -- failed to send error at socket %s, err: %v", socketPath, err)
}
return fmt.Errorf("RegisterPlugin error -- no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath)
}
if infoResp.Endpoint == "" {
infoResp.Endpoint = socketPath
}
if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil {
if err = og.notifyPlugin(ctx, client, false, fmt.Sprintf("RegisterPlugin error -- plugin validation failed with err: %v", err)); err != nil {
return fmt.Errorf("RegisterPlugin error -- failed to send error at socket %s, err: %v", socketPath, err)
}
return fmt.Errorf("RegisterPlugin error -- pluginHandler.ValidatePluginFunc failed")
}
// We add the plugin to the actual state of world cache before calling a plugin consumer's Register handle
// so that if we receive a delete event during Register Plugin, we can process it as a DeRegister call.
err = actualStateOfWorldUpdater.AddPlugin(ctx, cache.PluginInfo{
SocketPath: socketPath,
UUID: pluginUUID,
Handler: handler,
Name: infoResp.Name,
Endpoint: infoResp.Endpoint,
})
if err != nil {
logger.Error(err, "RegisterPlugin error -- failed to add plugin", "path", socketPath)
}
if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions, nil); err != nil {
return og.notifyPlugin(ctx, client, false, fmt.Sprintf("RegisterPlugin error -- plugin registration failed with err: %v", err))
}
// Notify is called after Register so that, even if Notify returns an error, Register is still guaranteed to have been called after validation
if err := og.notifyPlugin(ctx, client, true, ""); err != nil {
return fmt.Errorf("RegisterPlugin error -- failed to send registration status at socket %s, err: %v", socketPath, err)
}
return nil
}
return registerPluginFunc
}
func (og *operationGenerator) GenerateUnregisterPluginFunc(
ctx context.Context,
pluginInfo cache.PluginInfo,
actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error {
unregisterPluginFunc := func() error {
logger := klog.FromContext(ctx)
if pluginInfo.Handler == nil {
return fmt.Errorf("UnregisterPlugin error -- failed to get plugin handler for %s", pluginInfo.SocketPath)
}
// We remove the plugin from the actual state of world cache before calling a plugin consumer's Unregister handle
// so that if we receive a register event during Unregister Plugin, we can process it as a Register call.
actualStateOfWorldUpdater.RemovePlugin(pluginInfo.SocketPath)
pluginInfo.Handler.DeRegisterPlugin(pluginInfo.Name, pluginInfo.Endpoint)
logger.V(4).Info("DeRegisterPlugin called", "pluginName", pluginInfo.Name, "pluginHandler", pluginInfo.Handler)
return nil
}
return unregisterPluginFunc
}
func (og *operationGenerator) notifyPlugin(ctx context.Context, client registerapi.RegistrationClient, registered bool, errStr string) error {
ctx, cancel := context.WithTimeout(ctx, notifyTimeoutDuration)
defer cancel()
status := &registerapi.RegistrationStatus{
PluginRegistered: registered,
Error: errStr,
}
if _, err := client.NotifyRegistrationStatus(ctx, status); err != nil {
return fmt.Errorf("%s: %w", errStr, err)
}
if errStr != "" {
return errors.New(errStr)
}
return nil
}
// dial establishes the gRPC communication with the picked-up plugin socket. https://godoc.org/google.golang.org/grpc#Dial
func dial(ctx context.Context, unixSocketPath string, timeout time.Duration) (registerapi.RegistrationClient, *grpc.ClientConn, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
c, err := grpc.DialContext(ctx, unixSocketPath,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "unix", addr)
}),
)
if err != nil {
return nil, nil, fmt.Errorf("failed to dial socket %s, err: %v", unixSocketPath, err)
}
return registerapi.NewRegistrationClient(c), c, nil
}
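// Illustrative sketch (not part of the Kubernetes source): the client side of
// the registration handshake that GenerateRegisterPluginFunc performs, written
// against the public registration API with the same unix-socket dialer as
// above. The socket path is hypothetical.
package main
import (
"context"
"fmt"
"net"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)
func main() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, "/registry/example.sock",
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "unix", addr)
}),
)
if err != nil {
fmt.Println("dial failed:", err)
return
}
defer conn.Close()
client := registerapi.NewRegistrationClient(conn)
// Step 1: ask the plugin what it is (type, name, endpoint, versions).
info, err := client.GetInfo(ctx, &registerapi.InfoRequest{})
if err != nil {
fmt.Println("GetInfo failed:", err)
return
}
// Step 2: after validation and the handler's Register call succeed, report
// the outcome back to the plugin.
_, _ = client.NotifyRegistrationStatus(ctx, &registerapi.RegistrationStatus{PluginRegistered: true})
fmt.Println("registered plugin:", info.Name)
}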
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluginmanager
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler"
)
// PluginManager runs a set of asynchronous loops that figure out which plugins
// need to be registered/deregistered and makes it so.
type PluginManager interface {
// Starts the plugin manager and all the asynchronous loops that it controls
Run(ctx context.Context, sourcesReady config.SourcesReady, stopCh <-chan struct{})
// AddHandler adds the given plugin handler for a specific plugin type; the
// handler is passed to the reconciler and used during plugin
// registration/deregistration
AddHandler(pluginType string, pluginHandler cache.PluginHandler)
}
const (
// loopSleepDuration is the amount of time the reconciler loop waits
// between successive executions
loopSleepDuration = 1 * time.Second
)
// NewPluginManager returns a new concrete instance implementing the
// PluginManager interface.
func NewPluginManager(
sockDir string,
recorder record.EventRecorder) PluginManager {
asw := cache.NewActualStateOfWorld()
dsw := cache.NewDesiredStateOfWorld()
reconciler := reconciler.NewReconciler(
operationexecutor.NewOperationExecutor(
operationexecutor.NewOperationGenerator(
recorder,
),
),
loopSleepDuration,
dsw,
asw,
)
pm := &pluginManager{
desiredStateOfWorldPopulator: pluginwatcher.NewWatcher(
sockDir,
dsw,
),
reconciler: reconciler,
desiredStateOfWorld: dsw,
actualStateOfWorld: asw,
}
return pm
}
// pluginManager implements the PluginManager interface
type pluginManager struct {
// desiredStateOfWorldPopulator (the plugin watcher) runs an asynchronous
// periodic loop to populate the desiredStateOfWorld.
desiredStateOfWorldPopulator *pluginwatcher.Watcher
// reconciler runs an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering register
// and unregister operations using the operationExecutor.
reconciler reconciler.Reconciler
// actualStateOfWorld is a data structure containing the actual state of
// the world according to the manager: i.e. which plugins are registered.
// The data structure is populated upon successful completion of register
// and unregister actions triggered by the reconciler.
actualStateOfWorld cache.ActualStateOfWorld
// desiredStateOfWorld is a data structure containing the desired state of
// the world according to the plugin manager: i.e. what plugins are registered.
// The data structure is populated by the desired state of the world
// populator (plugin watcher).
desiredStateOfWorld cache.DesiredStateOfWorld
}
var _ PluginManager = &pluginManager{}
func (pm *pluginManager) Run(ctx context.Context, sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
defer runtime.HandleCrash()
logger := klog.FromContext(ctx)
if err := pm.desiredStateOfWorldPopulator.Start(ctx, stopCh); err != nil {
logger.Error(err, "The desired_state_of_world populator (plugin watcher) starts failed!")
return
}
logger.V(2).Info("The desired_state_of_world populator (plugin watcher) starts")
logger.Info("Starting Kubelet Plugin Manager")
go pm.reconciler.Run(stopCh)
metrics.Register(pm.actualStateOfWorld, pm.desiredStateOfWorld)
<-stopCh
logger.Info("Shutting down Kubelet Plugin Manager")
}
func (pm *pluginManager) AddHandler(pluginType string, handler cache.PluginHandler) {
pm.reconciler.AddHandler(pluginType, handler)
}
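// Illustrative sketch (not part of the Kubernetes source): wiring the plugin
// manager into a host process. The socket directory is hypothetical, the fake
// recorder stands in for a real event recorder, and sourcesReady is passed as
// nil because this Run implementation does not consult it.
package main
import (
"context"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)
func main() {
pm := pluginmanager.NewPluginManager(
"/var/lib/kubelet/plugins_registry", // hypothetical socket dir
record.NewFakeRecorder(100),
)
// A real caller registers a concrete handler (e.g. the CSI or device plugin
// handler) here; this sketch assumes one is assigned to `handler`.
var handler cache.PluginHandler
pm.AddHandler("CSIPlugin", handler)
stopCh := make(chan struct{})
go pm.Run(context.Background(), nil /* sourcesReady, unused by Run */, stopCh)
// ... run until shutdown ...
close(stopCh)
}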
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluginwatcher
import (
"context"
"errors"
"fmt"
"net"
"reflect"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"k8s.io/klog/v2"
registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2"
)
type exampleHandler struct {
SupportedVersions []string
ExpectedNames map[string]int
eventChans map[string]chan examplePluginEvent // map[pluginName]eventChan
m sync.Mutex
permitDeprecatedDir bool
}
type examplePluginEvent int
const (
exampleEventValidate examplePluginEvent = 0
exampleEventRegister examplePluginEvent = 1
exampleEventDeRegister examplePluginEvent = 2
)
// NewExampleHandler provides an example handler
func NewExampleHandler(supportedVersions []string, permitDeprecatedDir bool) *exampleHandler {
return &exampleHandler{
SupportedVersions: supportedVersions,
ExpectedNames: make(map[string]int),
eventChans: make(map[string]chan examplePluginEvent),
permitDeprecatedDir: permitDeprecatedDir,
}
}
func (p *exampleHandler) ValidatePlugin(ctx context.Context, pluginName string, endpoint string, versions []string) error {
p.SendEvent(ctx, pluginName, exampleEventValidate)
n, ok := p.DecreasePluginCount(pluginName)
if !ok && n > 0 {
return fmt.Errorf("pluginName('%s') wasn't expected (count is %d)", pluginName, n)
}
if !reflect.DeepEqual(versions, p.SupportedVersions) {
return fmt.Errorf("versions('%v') != supported versions('%v')", versions, p.SupportedVersions)
}
// this handler expects non-empty endpoint as an example
if len(endpoint) == 0 {
return errors.New("expecting non empty endpoint")
}
return nil
}
func (p *exampleHandler) RegisterPlugin(ctx context.Context, pluginName, endpoint string, versions []string) error {
p.SendEvent(ctx, pluginName, exampleEventRegister)
// Verifies the grpcServer is ready to serve services.
_, conn, err := dial(ctx, endpoint, time.Second)
if err != nil {
return fmt.Errorf("failed dialing endpoint (%s): %v", endpoint, err)
}
defer conn.Close()
// The plugin handler should be able to use any listed service API version.
v1beta1Client := v1beta1.NewExampleClient(conn)
v1beta2Client := v1beta2.NewExampleClient(conn)
// Tests v1beta1 GetExampleInfo
_, err = v1beta1Client.GetExampleInfo(ctx, &v1beta1.ExampleRequest{})
if err != nil {
return fmt.Errorf("failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
}
// Tests v1beta2 GetExampleInfo
_, err = v1beta2Client.GetExampleInfo(ctx, &v1beta2.ExampleRequest{})
if err != nil {
return fmt.Errorf("failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err)
}
return nil
}
func (p *exampleHandler) DeRegisterPlugin(ctx context.Context, pluginName string) {
p.SendEvent(ctx, pluginName, exampleEventDeRegister)
}
func (p *exampleHandler) SendEvent(ctx context.Context, pluginName string, event examplePluginEvent) {
logger := klog.FromContext(ctx)
logger.V(2).Info("Sending event for plugin", "pluginName", pluginName, "event", event, "channel", p.eventChans[pluginName])
p.eventChans[pluginName] <- event
}
func (p *exampleHandler) DecreasePluginCount(pluginName string) (old int, ok bool) {
p.m.Lock()
defer p.m.Unlock()
v, ok := p.ExpectedNames[pluginName]
if !ok {
v = -1
}
return v, ok
}
// dial establishes the gRPC communication with the picked-up plugin socket. https://godoc.org/google.golang.org/grpc#Dial
func dial(ctx context.Context, unixSocketPath string, timeout time.Duration) (registerapi.RegistrationClient, *grpc.ClientConn, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
c, err := grpc.DialContext(ctx, unixSocketPath,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, "unix", addr)
}),
)
if err != nil {
return nil, nil, fmt.Errorf("failed to dial socket %s, err: %v", unixSocketPath, err)
}
return registerapi.NewRegistrationClient(c), c, nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluginwatcher
import (
"context"
"errors"
"fmt"
"net"
"os"
"sync"
"time"
"google.golang.org/grpc"
"k8s.io/klog/v2"
registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
v1beta1 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1"
v1beta2 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2"
)
// examplePlugin is a sample plugin to work with plugin watcher
type examplePlugin struct {
grpcServer *grpc.Server
wg sync.WaitGroup
registrationStatus chan registerapi.RegistrationStatus // for testing
endpoint string // for testing
pluginName string
pluginType string
versions []string
registerapi.UnsafeRegistrationServer
}
type pluginServiceV1Beta1 struct {
server *examplePlugin
v1beta1.UnsafeExampleServer
}
func (s *pluginServiceV1Beta1) GetExampleInfo(ctx context.Context, rqt *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) {
logger := klog.FromContext(ctx)
logger.Info("GetExampleInfo v1beta1field", "field", rqt.V1Beta1Field)
return &v1beta1.ExampleResponse{}, nil
}
func (s *pluginServiceV1Beta1) RegisterService() {
v1beta1.RegisterExampleServer(s.server.grpcServer, s)
}
type pluginServiceV1Beta2 struct {
server *examplePlugin
v1beta2.UnsafeExampleServer
}
func (s *pluginServiceV1Beta2) GetExampleInfo(ctx context.Context, rqt *v1beta2.ExampleRequest) (*v1beta2.ExampleResponse, error) {
logger := klog.FromContext(ctx)
logger.Info("GetExampleInfo v1beta2_field", "field", rqt.V1Beta2Field)
return &v1beta2.ExampleResponse{}, nil
}
func (s *pluginServiceV1Beta2) RegisterService() {
v1beta2.RegisterExampleServer(s.server.grpcServer, s)
}
// NewExamplePlugin returns an initialized examplePlugin instance
func NewExamplePlugin() *examplePlugin {
return &examplePlugin{}
}
// NewTestExamplePlugin returns an initialized examplePlugin instance for testing
func NewTestExamplePlugin(pluginName string, pluginType string, endpoint string, advertisedVersions ...string) *examplePlugin {
return &examplePlugin{
pluginName: pluginName,
pluginType: pluginType,
endpoint: endpoint,
versions: advertisedVersions,
registrationStatus: make(chan registerapi.RegistrationStatus),
}
}
// GetPluginInfo returns a PluginInfo object
func GetPluginInfo(plugin *examplePlugin) cache.PluginInfo {
return cache.PluginInfo{
SocketPath: plugin.endpoint,
}
}
// GetInfo is the RPC invoked by plugin watcher
func (e *examplePlugin) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
return &registerapi.PluginInfo{
Type: e.pluginType,
Name: e.pluginName,
Endpoint: e.endpoint,
SupportedVersions: e.versions,
}, nil
}
func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
logger := klog.FromContext(ctx)
logger.Info("Notify registration status", "status", status)
if e.registrationStatus != nil {
e.registrationStatus <- *status
}
return &registerapi.RegistrationStatusResponse{}, nil
}
// Serve starts a pluginwatcher server and one or more of the plugin services
func (e *examplePlugin) Serve(ctx context.Context, services ...string) error {
logger := klog.FromContext(ctx)
logger.Info("Starting example server", "endpoint", e.endpoint)
lis, err := net.Listen("unix", e.endpoint)
if err != nil {
return err
}
logger.Info("Example server started", "endpoint", e.endpoint)
e.grpcServer = grpc.NewServer()
// Registers kubelet plugin watcher api.
registerapi.RegisterRegistrationServer(e.grpcServer, e)
for _, service := range services {
switch service {
case "v1beta1":
v1beta1 := &pluginServiceV1Beta1{server: e}
v1beta1.RegisterService()
case "v1beta2":
v1beta2 := &pluginServiceV1Beta2{server: e}
v1beta2.RegisterService()
default:
return fmt.Errorf("unsupported service: '%s'", service)
}
}
// Starts service
e.wg.Add(1)
go func() {
defer e.wg.Done()
// Blocking call to accept incoming connections.
if err := e.grpcServer.Serve(lis); err != nil {
logger.Error(err, "Example server stopped serving")
}
}()
return nil
}
func (e *examplePlugin) Stop(ctx context.Context) error {
logger := klog.FromContext(ctx)
logger.Info("Stopping example server", "endpoint", e.endpoint)
e.grpcServer.Stop()
c := make(chan struct{})
go func() {
defer close(c)
e.wg.Wait()
}()
select {
case <-c:
break
case <-time.After(time.Second):
return errors.New("timed out on waiting for stop completion")
}
if err := os.Remove(e.endpoint); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
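// Illustrative sketch (not part of the Kubernetes source): exercising the test
// plugin above. Serve starts the gRPC server with the requested example
// services and Stop tears it down; the plugin name, type, and endpoint are
// hypothetical.
package main
import (
"context"
"fmt"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher"
)
func main() {
ctx := context.Background()
p := pluginwatcher.NewTestExamplePlugin(
"example-plugin", "ExamplePlugin",
"/tmp/example.sock", // hypothetical endpoint
"v1beta1", "v1beta2",
)
if err := p.Serve(ctx, "v1beta1", "v1beta2"); err != nil {
fmt.Println("serve failed:", err)
return
}
// ... point the plugin watcher at the socket's directory and let the
// registration handshake run ...
if err := p.Stop(ctx); err != nil {
fmt.Println("stop failed:", err)
}
}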
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
//Copyright 2018 The Kubernetes Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.4
// protoc v4.23.4
// source: pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto
package v1beta1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ExampleRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Request string `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
V1Beta1Field string `protobuf:"bytes,2,opt,name=v1beta1_field,json=v1beta1Field,proto3" json:"v1beta1_field,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExampleRequest) Reset() {
*x = ExampleRequest{}
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExampleRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExampleRequest) ProtoMessage() {}
func (x *ExampleRequest) ProtoReflect() protoreflect.Message {
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExampleRequest.ProtoReflect.Descriptor instead.
func (*ExampleRequest) Descriptor() ([]byte, []int) {
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescGZIP(), []int{0}
}
func (x *ExampleRequest) GetRequest() string {
if x != nil {
return x.Request
}
return ""
}
func (x *ExampleRequest) GetV1Beta1Field() string {
if x != nil {
return x.V1Beta1Field
}
return ""
}
type ExampleResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExampleResponse) Reset() {
*x = ExampleResponse{}
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExampleResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExampleResponse) ProtoMessage() {}
func (x *ExampleResponse) ProtoReflect() protoreflect.Message {
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExampleResponse.ProtoReflect.Descriptor instead.
func (*ExampleResponse) Descriptor() ([]byte, []int) {
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescGZIP(), []int{1}
}
func (x *ExampleResponse) GetError() string {
if x != nil {
return x.Error
}
return ""
}
var File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto protoreflect.FileDescriptor
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDesc = string([]byte{
0x0a, 0x4d, 0x70, 0x6b, 0x67, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x6c, 0x65, 0x74, 0x2f, 0x70, 0x6c,
0x75, 0x67, 0x69, 0x6e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67,
0x69, 0x6e, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31,
0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x07, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x22, 0x4f, 0x0a, 0x0e, 0x45, 0x78, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5f,
0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x27, 0x0a, 0x0f, 0x45, 0x78, 0x61,
0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x32, 0x50, 0x0a, 0x07, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x45, 0x0a,
0x0e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x17, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x42, 0x57, 0x5a, 0x55, 0x6b, 0x38, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x6b,
0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6b, 0x75,
0x62, 0x65, 0x6c, 0x65, 0x74, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x6d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65,
0x72, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
0x5f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescOnce sync.Once
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescData []byte
)
func file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescGZIP() []byte {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescOnce.Do(func() {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDesc), len(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDesc)))
})
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDescData
}
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_goTypes = []any{
(*ExampleRequest)(nil), // 0: v1beta1.ExampleRequest
(*ExampleResponse)(nil), // 1: v1beta1.ExampleResponse
}
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_depIdxs = []int32{
0, // 0: v1beta1.Example.GetExampleInfo:input_type -> v1beta1.ExampleRequest
1, // 1: v1beta1.Example.GetExampleInfo:output_type -> v1beta1.ExampleResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_init()
}
func file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_init() {
if File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDesc), len(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_goTypes,
DependencyIndexes: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_depIdxs,
MessageInfos: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_msgTypes,
}.Build()
File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto = out.File
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_goTypes = nil
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta1_api_proto_depIdxs = nil
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
//Copyright 2018 The Kubernetes Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v4.23.4
// source: pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto
package v1beta1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Example_GetExampleInfo_FullMethodName = "/v1beta1.Example/GetExampleInfo"
)
// ExampleClient is the client API for Example service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// Example is a simple example service for general reference on the recommended
// kubelet plugin model and plugin watcher testing.
type ExampleClient interface {
GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error)
}
type exampleClient struct {
cc grpc.ClientConnInterface
}
func NewExampleClient(cc grpc.ClientConnInterface) ExampleClient {
return &exampleClient{cc}
}
func (c *exampleClient) GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExampleResponse)
err := c.cc.Invoke(ctx, Example_GetExampleInfo_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExampleServer is the server API for Example service.
// All implementations must embed UnimplementedExampleServer
// for forward compatibility.
//
// Example is a simple example service for general reference on the recommended
// kubelet plugin model and plugin watcher testing.
type ExampleServer interface {
GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error)
mustEmbedUnimplementedExampleServer()
}
// UnimplementedExampleServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedExampleServer struct{}
func (UnimplementedExampleServer) GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetExampleInfo not implemented")
}
func (UnimplementedExampleServer) mustEmbedUnimplementedExampleServer() {}
func (UnimplementedExampleServer) testEmbeddedByValue() {}
// UnsafeExampleServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ExampleServer will
// result in compilation errors.
type UnsafeExampleServer interface {
mustEmbedUnimplementedExampleServer()
}
func RegisterExampleServer(s grpc.ServiceRegistrar, srv ExampleServer) {
// If the following call panics, it indicates UnimplementedExampleServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Example_ServiceDesc, srv)
}
func _Example_GetExampleInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExampleRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExampleServer).GetExampleInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Example_GetExampleInfo_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExampleServer).GetExampleInfo(ctx, req.(*ExampleRequest))
}
return interceptor(ctx, in, info, handler)
}
// Example_ServiceDesc is the grpc.ServiceDesc for Example service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Example_ServiceDesc = grpc.ServiceDesc{
ServiceName: "v1beta1.Example",
HandlerType: (*ExampleServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetExampleInfo",
Handler: _Example_GetExampleInfo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1/api.proto",
}
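// Illustrative sketch (not part of the Kubernetes source): a minimal server
// implementation of the generated Example service. The exampleServer type and
// socket path are hypothetical.
package main
import (
"context"
"net"
"google.golang.org/grpc"
v1beta1 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1"
)
// exampleServer embeds UnimplementedExampleServer by value, as the generated
// code requires for forward compatibility.
type exampleServer struct {
v1beta1.UnimplementedExampleServer
}
func (s *exampleServer) GetExampleInfo(ctx context.Context, req *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) {
return &v1beta1.ExampleResponse{}, nil
}
func main() {
lis, err := net.Listen("unix", "/tmp/example.sock") // hypothetical socket
if err != nil {
panic(err)
}
s := grpc.NewServer()
v1beta1.RegisterExampleServer(s, &exampleServer{})
_ = s.Serve(lis)
}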
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
//Copyright 2018 The Kubernetes Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.4
// protoc v4.23.4
// source: pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto
package v1beta2
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Renames a field from v1beta1 ExampleRequest.
type ExampleRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Request string `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
V1Beta2Field string `protobuf:"bytes,2,opt,name=v1beta2_field,json=v1beta2Field,proto3" json:"v1beta2_field,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExampleRequest) Reset() {
*x = ExampleRequest{}
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExampleRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExampleRequest) ProtoMessage() {}
func (x *ExampleRequest) ProtoReflect() protoreflect.Message {
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExampleRequest.ProtoReflect.Descriptor instead.
func (*ExampleRequest) Descriptor() ([]byte, []int) {
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescGZIP(), []int{0}
}
func (x *ExampleRequest) GetRequest() string {
if x != nil {
return x.Request
}
return ""
}
func (x *ExampleRequest) GetV1Beta2Field() string {
if x != nil {
return x.V1Beta2Field
}
return ""
}
type ExampleResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExampleResponse) Reset() {
*x = ExampleResponse{}
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExampleResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExampleResponse) ProtoMessage() {}
func (x *ExampleResponse) ProtoReflect() protoreflect.Message {
mi := &file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExampleResponse.ProtoReflect.Descriptor instead.
func (*ExampleResponse) Descriptor() ([]byte, []int) {
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescGZIP(), []int{1}
}
func (x *ExampleResponse) GetError() string {
if x != nil {
return x.Error
}
return ""
}
var File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto protoreflect.FileDescriptor
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDesc = string([]byte{
0x0a, 0x4d, 0x70, 0x6b, 0x67, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x6c, 0x65, 0x74, 0x2f, 0x70, 0x6c,
0x75, 0x67, 0x69, 0x6e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67,
0x69, 0x6e, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31,
0x62, 0x65, 0x74, 0x61, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x07, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x32, 0x22, 0x4f, 0x0a, 0x0e, 0x45, 0x78, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x32, 0x5f,
0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x31, 0x62,
0x65, 0x74, 0x61, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x27, 0x0a, 0x0f, 0x45, 0x78, 0x61,
0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x32, 0x50, 0x0a, 0x07, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x45, 0x0a,
0x0e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x17, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74,
0x61, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x42, 0x57, 0x5a, 0x55, 0x6b, 0x38, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x6b,
0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6b, 0x75,
0x62, 0x65, 0x6c, 0x65, 0x74, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x6d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65,
0x72, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
0x5f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x32, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescOnce sync.Once
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescData []byte
)
func file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescGZIP() []byte {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescOnce.Do(func() {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDesc), len(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDesc)))
})
return file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDescData
}
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_goTypes = []any{
(*ExampleRequest)(nil), // 0: v1beta2.ExampleRequest
(*ExampleResponse)(nil), // 1: v1beta2.ExampleResponse
}
var file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_depIdxs = []int32{
0, // 0: v1beta2.Example.GetExampleInfo:input_type -> v1beta2.ExampleRequest
1, // 1: v1beta2.Example.GetExampleInfo:output_type -> v1beta2.ExampleResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() {
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_init()
}
func file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_init() {
if File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDesc), len(file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_goTypes,
DependencyIndexes: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_depIdxs,
MessageInfos: file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_msgTypes,
}.Build()
File_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto = out.File
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_goTypes = nil
file_pkg_kubelet_pluginmanager_pluginwatcher_example_plugin_apis_v1beta2_api_proto_depIdxs = nil
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
//Copyright 2018 The Kubernetes Authors.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v4.23.4
// source: pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto
package v1beta2
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Example_GetExampleInfo_FullMethodName = "/v1beta2.Example/GetExampleInfo"
)
// ExampleClient is the client API for Example service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// Example is a simple example service for general reference on the recommended
// kubelet plugin model and plugin watcher testing.
type ExampleClient interface {
GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error)
}
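// A minimal client-side sketch (illustrative only; sockPath, ctx, and the
// dial options are assumptions, and the insecure credentials import is
// google.golang.org/grpc/credentials/insecure):
//
//	conn, err := grpc.NewClient("unix://"+sockPath,
//		grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	client := NewExampleClient(conn)
//	resp, err := client.GetExampleInfo(ctx, &ExampleRequest{Request: "hello"})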
type exampleClient struct {
cc grpc.ClientConnInterface
}
func NewExampleClient(cc grpc.ClientConnInterface) ExampleClient {
return &exampleClient{cc}
}
func (c *exampleClient) GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExampleResponse)
err := c.cc.Invoke(ctx, Example_GetExampleInfo_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExampleServer is the server API for Example service.
// All implementations must embed UnimplementedExampleServer
// for forward compatibility.
//
// Example is a simple example service for general reference on the recommended
// kubelet plugin model and plugin watcher testing.
type ExampleServer interface {
GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error)
mustEmbedUnimplementedExampleServer()
}
// UnimplementedExampleServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedExampleServer struct{}
func (UnimplementedExampleServer) GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetExampleInfo not implemented")
}
func (UnimplementedExampleServer) mustEmbedUnimplementedExampleServer() {}
func (UnimplementedExampleServer) testEmbeddedByValue() {}
// UnsafeExampleServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ExampleServer will
// result in compilation errors.
type UnsafeExampleServer interface {
mustEmbedUnimplementedExampleServer()
}
func RegisterExampleServer(s grpc.ServiceRegistrar, srv ExampleServer) {
// If the following call panics, it indicates UnimplementedExampleServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Example_ServiceDesc, srv)
}
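// A minimal server-side sketch (the exampleServer type and server wiring are
// hypothetical). Embedding UnimplementedExampleServer by value satisfies both
// mustEmbedUnimplementedExampleServer and the testEmbeddedByValue check above:
//
//	type exampleServer struct {
//		UnimplementedExampleServer
//	}
//
//	func (s *exampleServer) GetExampleInfo(ctx context.Context, in *ExampleRequest) (*ExampleResponse, error) {
//		return &ExampleResponse{}, nil
//	}
//
//	srv := grpc.NewServer()
//	RegisterExampleServer(srv, &exampleServer{})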
func _Example_GetExampleInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExampleRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExampleServer).GetExampleInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Example_GetExampleInfo_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExampleServer).GetExampleInfo(ctx, req.(*ExampleRequest))
}
return interceptor(ctx, in, info, handler)
}
// Example_ServiceDesc is the grpc.ServiceDesc for Example service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Example_ServiceDesc = grpc.ServiceDesc{
ServiceName: "v1beta2.Example",
HandlerType: (*ExampleServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetExampleInfo",
Handler: _Example_GetExampleInfo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2/api.proto",
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluginwatcher
import (
"context"
"fmt"
"os"
"strings"
"github.com/fsnotify/fsnotify"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/util"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
// Watcher is the plugin watcher
type Watcher struct {
path string
fs utilfs.Filesystem
fsWatcher *fsnotify.Watcher
desiredStateOfWorld cache.DesiredStateOfWorld
}
// NewWatcher provides a new watcher for socket registration
func NewWatcher(sockDir string, desiredStateOfWorld cache.DesiredStateOfWorld) *Watcher {
return &Watcher{
path: sockDir,
fs: &utilfs.DefaultFs{},
desiredStateOfWorld: desiredStateOfWorld,
}
}
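// A minimal wiring sketch (illustrative only; cache.NewDesiredStateOfWorld is
// assumed from the sibling cache package, and the socket directory is just an
// example path):
//
//	dsw := cache.NewDesiredStateOfWorld()
//	w := NewWatcher("/var/lib/kubelet/plugins_registry", dsw)
//	stopCh := make(chan struct{})
//	if err := w.Start(context.Background(), stopCh); err != nil {
//		return err
//	}
//	// ... later, close(stopCh) to stop watching.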
// Start watches for the creation and deletion of plugin sockets at the path
func (w *Watcher) Start(ctx context.Context, stopCh <-chan struct{}) error {
logger := klog.FromContext(ctx)
logger.V(2).Info("Plugin Watcher Start", "path", w.path)
// Create the directory to be watched if it doesn't exist yet; the
// traversal below then walks through it to discover existing plugins.
if err := w.init(ctx); err != nil {
return err
}
fsWatcher, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("failed to start plugin fsWatcher, err: %v", err)
}
w.fsWatcher = fsWatcher
// Traverse plugin dir and add filesystem watchers before starting the plugin processing goroutine.
if err := w.traversePluginDir(ctx, w.path); err != nil {
logger.Error(err, "Failed to traverse plugin socket path", "path", w.path)
}
go func(fsWatcher *fsnotify.Watcher) {
for {
select {
case event := <-fsWatcher.Events:
//TODO: Handle errors by taking corrective measures
if event.Has(fsnotify.Create) {
err := w.handleCreateEvent(ctx, event)
if err != nil {
logger.Error(err, "Error when handling create event", "event", event)
}
} else if event.Has(fsnotify.Remove) {
w.handleDeleteEvent(ctx, event)
}
continue
case err := <-fsWatcher.Errors:
if err != nil {
logger.Error(err, "FsWatcher received error")
}
continue
case <-stopCh:
w.fsWatcher.Close()
return
}
}
}(fsWatcher)
return nil
}
func (w *Watcher) init(ctx context.Context) error {
logger := klog.FromContext(ctx)
logger.V(4).Info("Ensuring Plugin directory", "path", w.path)
if err := w.fs.MkdirAll(w.path, 0755); err != nil {
return fmt.Errorf("error (re-)creating root %s: %v", w.path, err)
}
return nil
}
// traversePluginDir walks through the plugin directory to discover any existing plugin sockets.
// It ignores all errors except the root dir not being walkable.
func (w *Watcher) traversePluginDir(ctx context.Context, dir string) error {
logger := klog.FromContext(ctx)
// watch the new dir
err := w.fsWatcher.Add(dir)
if err != nil {
return fmt.Errorf("failed to watch %s, err: %v", w.path, err)
}
// traverse existing children in the dir
return w.fs.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if path == dir {
return fmt.Errorf("error accessing path: %s error: %v", path, err)
}
logger.Error(err, "Error accessing path", "path", path)
return nil
}
// do not call fsWatcher.Add twice on the root dir to avoid potential problems.
if path == dir {
return nil
}
mode := info.Mode()
if mode.IsDir() {
if err := w.fsWatcher.Add(path); err != nil {
return fmt.Errorf("failed to watch %s, err: %v", path, err)
}
} else if isSocket, _ := util.IsUnixDomainSocket(path); isSocket {
event := fsnotify.Event{
Name: path,
Op: fsnotify.Create,
}
//TODO: Handle errors by taking corrective measures
if err := w.handleCreateEvent(ctx, event); err != nil {
logger.Error(err, "Error when handling create", "event", event)
}
} else {
logger.V(5).Info("Ignoring file", "path", path, "mode", mode)
}
return nil
})
}
// handleCreateEvent handles a filesystem notify event.
// File names:
// - MUST NOT start with a '.'
func (w *Watcher) handleCreateEvent(ctx context.Context, event fsnotify.Event) error {
logger := klog.FromContext(ctx)
logger.V(6).Info("Handling create event", "event", event)
fi, err := getStat(event)
if err != nil {
return fmt.Errorf("stat file %s failed: %v", event.Name, err)
}
if strings.HasPrefix(fi.Name(), ".") {
logger.V(5).Info("Ignoring file (starts with '.')", "path", fi.Name())
return nil
}
if !fi.IsDir() {
isSocket, err := util.IsUnixDomainSocket(util.NormalizePath(event.Name))
if err != nil {
return fmt.Errorf("failed to determine if file: %s is a unix domain socket: %v", event.Name, err)
}
if !isSocket {
logger.V(5).Info("Ignoring non socket file", "path", fi.Name())
return nil
}
return w.handlePluginRegistration(ctx, event.Name)
}
return w.traversePluginDir(ctx, event.Name)
}
func (w *Watcher) handlePluginRegistration(ctx context.Context, socketPath string) error {
logger := klog.FromContext(ctx)
socketPath = getSocketPath(socketPath)
// Update the desired state of world list of plugins.
// Even if the socket path already exists in the desired world cache, the
// socket may have been deleted and recreated before it was removed from
// the cache, so we still need to call AddOrUpdatePlugin in this case to
// update the timestamp.
logger.V(2).Info("Adding socket path or updating timestamp to desired state cache", "path", socketPath)
err := w.desiredStateOfWorld.AddOrUpdatePlugin(ctx, socketPath)
if err != nil {
return fmt.Errorf("error adding socket path %s or updating timestamp to desired state cache: %v", socketPath, err)
}
return nil
}
func (w *Watcher) handleDeleteEvent(ctx context.Context, event fsnotify.Event) {
logger := klog.FromContext(ctx)
logger.V(6).Info("Handling delete event", "event", event)
socketPath := event.Name
logger.V(2).Info("Removing socket path from desired state cache", "path", socketPath)
w.desiredStateOfWorld.RemovePlugin(socketPath)
}
//go:build !windows
// +build !windows
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluginwatcher
import (
"os"
"github.com/fsnotify/fsnotify"
)
func getStat(event fsnotify.Event) (os.FileInfo, error) {
return os.Stat(event.Name)
}
func getSocketPath(socketPath string) string {
return socketPath
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// relevant actions (register/deregister plugins).
package reconciler
import (
"context"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering register and unregister
// operations. Also provides a means to add a handler for a plugin type.
type Reconciler interface {
// Run starts the reconciliation loop, which executes periodically and
// checks whether plugins are correctly registered or unregistered.
// If not, it triggers register/unregister operations to rectify.
Run(stopCh <-chan struct{})
// AddHandler adds the given plugin handler for a specific plugin type,
// which will be added to the actual state of world cache.
AddHandler(pluginType string, pluginHandler cache.PluginHandler)
}
// NewReconciler returns a new instance of Reconciler.
//
// operationExecutor - used to trigger register/unregister operations safely
// (prevents more than one operation from being triggered on the same
// socket path)
//
// loopSleepDuration - the amount of time the reconciler loop sleeps between
// successive executions
//
// desiredStateOfWorld - cache containing the desired state of the world
//
// actualStateOfWorld - cache containing the actual state of the world
func NewReconciler(
operationExecutor operationexecutor.OperationExecutor,
loopSleepDuration time.Duration,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld) Reconciler {
return &reconciler{
operationExecutor: operationExecutor,
loopSleepDuration: loopSleepDuration,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
handlers: make(map[string]cache.PluginHandler),
}
}
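// A minimal wiring sketch (illustrative only; the operation executor
// constructors are assumed from the sibling operationexecutor package,
// recorder is a hypothetical event recorder, and handler is a hypothetical
// cache.PluginHandler implementation):
//
//	rec := NewReconciler(
//		operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(recorder)),
//		time.Second,
//		dsw,
//		asw,
//	)
//	rec.AddHandler("example-plugin-type", handler)
//	go rec.Run(stopCh)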
type reconciler struct {
operationExecutor operationexecutor.OperationExecutor
loopSleepDuration time.Duration
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
handlers map[string]cache.PluginHandler
sync.RWMutex
}
var _ Reconciler = &reconciler{}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
// Use context.TODO() because we currently do not have a proper context to pass in.
// Replace this with an appropriate context when refactoring this function to accept a context parameter.
ctx := context.TODO()
wait.Until(func() {
rc.reconcile(ctx)
},
rc.loopSleepDuration,
stopCh)
}
func (rc *reconciler) AddHandler(pluginType string, pluginHandler cache.PluginHandler) {
rc.Lock()
defer rc.Unlock()
rc.handlers[pluginType] = pluginHandler
}
func (rc *reconciler) getHandlers() map[string]cache.PluginHandler {
rc.RLock()
defer rc.RUnlock()
var copyHandlers = make(map[string]cache.PluginHandler)
for pluginType, handler := range rc.handlers {
copyHandlers[pluginType] = handler
}
return copyHandlers
}
func (rc *reconciler) reconcile(ctx context.Context) {
logger := klog.FromContext(ctx)
// Unregistrations are triggered before registrations.
// Ensure plugins that should be unregistered are unregistered.
for _, registeredPlugin := range rc.actualStateOfWorld.GetRegisteredPlugins() {
unregisterPlugin := false
if !rc.desiredStateOfWorld.PluginExists(registeredPlugin.SocketPath) {
unregisterPlugin = true
} else {
// We also need to unregister the plugins that exist in both the actual and
// desired state of world caches but whose UUIDs don't match.
// Iterate through the desired state of world plugins and see if there's any
// plugin with the same socket path but a different UUID.
for _, dswPlugin := range rc.desiredStateOfWorld.GetPluginsToRegister() {
if dswPlugin.SocketPath == registeredPlugin.SocketPath && dswPlugin.UUID != registeredPlugin.UUID {
logger.V(5).Info("An updated version of plugin has been found, unregistering the plugin first before reregistering", "plugin", registeredPlugin)
unregisterPlugin = true
break
}
}
}
if unregisterPlugin {
logger.V(5).Info("Starting operationExecutor.UnregisterPlugin", "plugin", registeredPlugin)
err := rc.operationExecutor.UnregisterPlugin(ctx, registeredPlugin, rc.actualStateOfWorld)
if err != nil &&
!goroutinemap.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
// Ignore goroutinemap.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors; they are expected.
// Log all other errors.
logger.Error(err, "OperationExecutor.UnregisterPlugin failed", "plugin", registeredPlugin)
}
if err == nil {
logger.V(1).Info("OperationExecutor.UnregisterPlugin started", "plugin", registeredPlugin)
}
}
}
// Ensure plugins that should be registered are registered
for _, pluginToRegister := range rc.desiredStateOfWorld.GetPluginsToRegister() {
if !rc.actualStateOfWorld.PluginExistsWithCorrectUUID(pluginToRegister) {
logger.V(5).Info("Starting operationExecutor.RegisterPlugin", "plugin", pluginToRegister)
err := rc.operationExecutor.RegisterPlugin(ctx, pluginToRegister.SocketPath, pluginToRegister.UUID, rc.getHandlers(), rc.actualStateOfWorld)
if err != nil &&
!goroutinemap.IsAlreadyExists(err) &&
!exponentialbackoff.IsExponentialBackoff(err) {
// Ignore goroutinemap.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors; they are expected.
logger.Error(err, "OperationExecutor.RegisterPlugin failed", "plugin", pluginToRegister)
}
if err == nil {
logger.V(1).Info("OperationExecutor.RegisterPlugin started", "plugin", pluginToRegister)
}
}
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
// MirrorClient knows how to create/delete a mirror pod in the API server.
type MirrorClient interface {
// CreateMirrorPod creates a mirror pod in the API server for the given
// pod or returns an error. The mirror pod will have the same annotations
// as the given pod as well as an extra annotation containing the hash of
// the static pod.
CreateMirrorPod(ctx context.Context, pod *v1.Pod) error
// DeleteMirrorPod deletes the mirror pod with the given full name from
// the API server or returns an error.
DeleteMirrorPod(ctx context.Context, podFullName string, uid *types.UID) (bool, error)
}
// nodeGetter is a subset of NodeLister, simplified for testing.
type nodeGetter interface {
// Get retrieves the Node for a given name.
Get(name string) (*v1.Node, error)
}
// basicMirrorClient is a functional MirrorClient. Mirror pods are stored in
// the kubelet directly because they need to be in sync with the internal
// pods.
type basicMirrorClient struct {
apiserverClient clientset.Interface
nodeGetter nodeGetter
nodeName string
}
// NewBasicMirrorClient returns a new MirrorClient.
func NewBasicMirrorClient(apiserverClient clientset.Interface, nodeName string, nodeGetter nodeGetter) MirrorClient {
return &basicMirrorClient{
apiserverClient: apiserverClient,
nodeName: nodeName,
nodeGetter: nodeGetter,
}
}
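// A minimal usage sketch (kubeClient, nodeLister, ctx, and staticPod are
// assumptions; any clientset.Interface and nodeGetter will do):
//
//	mc := NewBasicMirrorClient(kubeClient, "my-node", nodeLister)
//	if err := mc.CreateMirrorPod(ctx, staticPod); err != nil {
//		return err
//	}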
func (mc *basicMirrorClient) CreateMirrorPod(ctx context.Context, pod *v1.Pod) error {
if mc.apiserverClient == nil {
return nil
}
// Make a copy of the pod.
copyPod := *pod
copyPod.Annotations = make(map[string]string)
for k, v := range pod.Annotations {
copyPod.Annotations[k] = v
}
hash := getPodHash(pod)
copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash
// With the MirrorPodNodeRestriction feature, mirror pods are required to have an owner reference
// to the owning node.
// See https://git.k8s.io/enhancements/keps/sig-auth/1314-node-restriction-pods/README.md
nodeUID, err := mc.getNodeUID()
if err != nil {
return fmt.Errorf("failed to get node UID: %v", err)
}
controller := true
copyPod.OwnerReferences = []metav1.OwnerReference{{
APIVersion: v1.SchemeGroupVersion.String(),
Kind: "Node",
Name: mc.nodeName,
UID: nodeUID,
Controller: &controller,
}}
apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(ctx, &copyPod, metav1.CreateOptions{})
if err != nil && apierrors.IsAlreadyExists(err) {
// Check if the existing pod is the same as the pod we want to create.
if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {
return nil
}
}
return err
}
// DeleteMirrorPod deletes a mirror pod.
// It takes the full name of the pod and optionally a UID. If the UID
// is non-nil, the pod is deleted only if its UID matches the supplied UID.
// It returns whether the pod was actually deleted, and any error returned
// while parsing the name of the pod.
// Non-existence of the pod or UID mismatch is not treated as an error; the
// routine simply returns false in that case.
func (mc *basicMirrorClient) DeleteMirrorPod(ctx context.Context, podFullName string, uid *types.UID) (bool, error) {
if mc.apiserverClient == nil {
return false, nil
}
logger := klog.FromContext(ctx)
name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
if err != nil {
logger.Error(err, "Failed to parse a pod full name", "podFullName", podFullName)
return false, err
}
var uidValue types.UID
if uid != nil {
uidValue = *uid
}
logger.V(2).Info("Deleting a mirror pod", "pod", klog.KRef(namespace, name), "podUID", uidValue)
var gracePeriodSeconds int64
if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil {
// Unfortunately, there's no generic error for failing a precondition
if !(apierrors.IsNotFound(err) || apierrors.IsConflict(err)) {
// We should return the error here, but historically this routine does
// not return an error unless it can't parse the pod name
logger.Error(err, "Failed deleting a mirror pod", "pod", klog.KRef(namespace, name))
}
return false, nil
}
return true, nil
}
func (mc *basicMirrorClient) getNodeUID() (types.UID, error) {
node, err := mc.nodeGetter.Get(mc.nodeName)
if err != nil {
return "", err
}
if node.UID == "" {
return "", fmt.Errorf("UID unset for node %s", mc.nodeName)
}
return node.UID, nil
}
func getHashFromMirrorPod(pod *v1.Pod) (string, bool) {
hash, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey]
return hash, ok
}
func getPodHash(pod *v1.Pod) string {
// The annotation exists for all static pods.
return pod.Annotations[kubetypes.ConfigHashAnnotationKey]
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package pod
import (
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
// Manager stores and manages access to pods, maintaining the mappings
// between static pods and mirror pods.
//
// The kubelet discovers pod updates from 3 sources: file, http, and
// apiserver. Pods from non-apiserver sources are called static pods, and the
// API server is not aware of their existence. In order to monitor
// the status of such pods, the kubelet creates a mirror pod for each static
// pod via the API server.
//
// A mirror pod has the same pod full name (name and namespace) as its static
// counterpart (albeit different metadata such as UID, etc). By leveraging the
// fact that the kubelet reports the pod status using the pod full name, the
// status of the mirror pod always reflects the actual status of the static
// pod. When a static pod gets deleted, the associated orphaned mirror pod
// will also be removed.
type Manager interface {
// GetPodByFullName returns the (non-mirror) pod that matches full name, as well as
// whether the pod was found.
GetPodByFullName(podFullName string) (*v1.Pod, bool)
// GetPodByName provides the (non-mirror) pod that matches namespace and
// name, as well as whether the pod was found.
GetPodByName(namespace, name string) (*v1.Pod, bool)
// GetPodByUID provides the (non-mirror) pod that matches pod UID, as well as
// whether the pod is found.
GetPodByUID(types.UID) (*v1.Pod, bool)
// GetPodByMirrorPod returns the static pod for the given mirror pod and
// whether it was known to the pod manager.
GetPodByMirrorPod(*v1.Pod) (*v1.Pod, bool)
// GetMirrorPodByPod returns the mirror pod for the given static pod and
// whether it was known to the pod manager.
GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool)
// GetPodAndMirrorPod returns the complement for a pod - if a pod was provided
// and a mirror pod can be found, return it. If a mirror pod is provided and
// the pod can be found, return it and true for wasMirror.
GetPodAndMirrorPod(*v1.Pod) (pod, mirrorPod *v1.Pod, wasMirror bool)
// GetPods returns the regular pods bound to the kubelet and their spec.
GetPods() []*v1.Pod
// GetPodsAndMirrorPods returns the set of pods, the set of mirror pods, and
// the pod fullnames of any orphaned mirror pods.
GetPodsAndMirrorPods() (allPods []*v1.Pod, allMirrorPods []*v1.Pod, orphanedMirrorPodFullnames []string)
// GetStaticPodToMirrorPodMap returns a map of static pods to their
// corresponding mirror pods. It is possible that there is no mirror pod
// for a static pod if the kubelet is running in standalone mode or is in
// the process of creating the mirror pod; in that case, the mirror pod is nil.
GetStaticPodToMirrorPodMap() map[*v1.Pod]*v1.Pod
// SetPods replaces the internal pods with the new pods.
// It is currently only used for testing.
SetPods(pods []*v1.Pod)
// AddPod adds the given pod to the manager.
AddPod(pod *v1.Pod)
// UpdatePod updates the given pod in the manager.
UpdatePod(pod *v1.Pod)
// RemovePod deletes the given pod from the manager. For mirror pods,
// this means deleting the mappings related to mirror pods. For non-mirror
// pods, this means deleting the pod from all non-mirror-pod indexes.
RemovePod(pod *v1.Pod)
// TranslatePodUID returns the actual UID of a pod. If the UID belongs to
// a mirror pod, returns the UID of its static pod. Otherwise, returns the
// original UID.
//
// All public-facing functions should perform this translation for UIDs
// because the user may provide a mirror pod UID, which is not recognized by
// internal Kubelet functions.
TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID
// GetUIDTranslations returns the mappings of static pod UIDs to mirror pod
// UIDs and mirror pod UIDs to static pod UIDs.
GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID)
}
// basicManager is a functional Manager.
//
// All fields in basicManager are read-only and are updated by calling SetPods,
// AddPod, UpdatePod, or RemovePod.
type basicManager struct {
// Protects all internal maps.
lock sync.RWMutex
// Regular pods indexed by UID.
podByUID map[kubetypes.ResolvedPodUID]*v1.Pod
// Mirror pods indexed by UID.
mirrorPodByUID map[kubetypes.MirrorPodUID]*v1.Pod
// Pods indexed by full name for easy access.
podByFullName map[string]*v1.Pod
mirrorPodByFullName map[string]*v1.Pod
// Mirror pod UID to pod UID map.
translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID
}
// NewBasicPodManager returns a functional Manager.
func NewBasicPodManager() Manager {
pm := &basicManager{}
pm.SetPods(nil)
return pm
}
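// A minimal usage sketch (staticPod and mirrorPod are hypothetical fixtures;
// pod full names take the form "<name>_<namespace>"):
//
//	pm := NewBasicPodManager()
//	pm.AddPod(staticPod)
//	pm.AddPod(mirrorPod)
//	if p, ok := pm.GetPodByFullName("mypod_kube-system"); ok {
//		// For a mirror pod UID, TranslatePodUID resolves to the static pod's UID.
//		uid := pm.TranslatePodUID(mirrorPod.UID)
//		_, _ = p, uid
//	}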
// SetPods sets the internal pods based on the new pods.
func (pm *basicManager) SetPods(newPods []*v1.Pod) {
pm.lock.Lock()
defer pm.lock.Unlock()
pm.podByUID = make(map[kubetypes.ResolvedPodUID]*v1.Pod)
pm.podByFullName = make(map[string]*v1.Pod)
pm.mirrorPodByUID = make(map[kubetypes.MirrorPodUID]*v1.Pod)
pm.mirrorPodByFullName = make(map[string]*v1.Pod)
pm.translationByUID = make(map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID)
pm.updatePodsInternal(newPods...)
}
func (pm *basicManager) AddPod(pod *v1.Pod) {
pm.UpdatePod(pod)
}
func (pm *basicManager) UpdatePod(pod *v1.Pod) {
pm.lock.Lock()
defer pm.lock.Unlock()
pm.updatePodsInternal(pod)
}
// updateMetrics updates the metrics surfaced by the pod manager.
// oldPod or newPod may be nil to signify creation or deletion.
func updateMetrics(oldPod, newPod *v1.Pod) {
var numEC int
if oldPod != nil {
numEC -= len(oldPod.Spec.EphemeralContainers)
}
if newPod != nil {
numEC += len(newPod.Spec.EphemeralContainers)
}
if numEC != 0 {
metrics.ManagedEphemeralContainers.Add(float64(numEC))
}
}
// updatePodsInternal replaces the given pods in the current state of the
// manager, updating the various indices. The caller is assumed to hold the
// lock.
func (pm *basicManager) updatePodsInternal(pods ...*v1.Pod) {
for _, pod := range pods {
podFullName := kubecontainer.GetPodFullName(pod)
// This logic relies on a static pod and its mirror to have the same name.
// It is safe to type convert here due to the IsMirrorPod guard.
if kubetypes.IsMirrorPod(pod) {
mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
pm.mirrorPodByUID[mirrorPodUID] = pod
pm.mirrorPodByFullName[podFullName] = pod
if p, ok := pm.podByFullName[podFullName]; ok {
pm.translationByUID[mirrorPodUID] = kubetypes.ResolvedPodUID(p.UID)
}
} else {
resolvedPodUID := kubetypes.ResolvedPodUID(pod.UID)
updateMetrics(pm.podByUID[resolvedPodUID], pod)
pm.podByUID[resolvedPodUID] = pod
pm.podByFullName[podFullName] = pod
if mirror, ok := pm.mirrorPodByFullName[podFullName]; ok {
pm.translationByUID[kubetypes.MirrorPodUID(mirror.UID)] = resolvedPodUID
}
}
}
}
func (pm *basicManager) RemovePod(pod *v1.Pod) {
updateMetrics(pod, nil)
pm.lock.Lock()
defer pm.lock.Unlock()
podFullName := kubecontainer.GetPodFullName(pod)
// It is safe to type convert here due to the IsMirrorPod guard.
if kubetypes.IsMirrorPod(pod) {
mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
delete(pm.mirrorPodByUID, mirrorPodUID)
delete(pm.mirrorPodByFullName, podFullName)
delete(pm.translationByUID, mirrorPodUID)
} else {
delete(pm.podByUID, kubetypes.ResolvedPodUID(pod.UID))
delete(pm.podByFullName, podFullName)
}
}
func (pm *basicManager) GetPods() []*v1.Pod {
pm.lock.RLock()
defer pm.lock.RUnlock()
return podsMapToPods(pm.podByUID)
}
func (pm *basicManager) GetPodsAndMirrorPods() (allPods []*v1.Pod, allMirrorPods []*v1.Pod, orphanedMirrorPodFullnames []string) {
pm.lock.RLock()
defer pm.lock.RUnlock()
allPods = podsMapToPods(pm.podByUID)
allMirrorPods = mirrorPodsMapToMirrorPods(pm.mirrorPodByUID)
for podFullName := range pm.mirrorPodByFullName {
if _, ok := pm.podByFullName[podFullName]; !ok {
orphanedMirrorPodFullnames = append(orphanedMirrorPodFullnames, podFullName)
}
}
return allPods, allMirrorPods, orphanedMirrorPodFullnames
}
func (pm *basicManager) GetStaticPodToMirrorPodMap() map[*v1.Pod]*v1.Pod {
pm.lock.RLock()
defer pm.lock.RUnlock()
staticPodsMapToMirrorPods := make(map[*v1.Pod]*v1.Pod)
for _, pod := range podsMapToPods(pm.podByUID) {
if kubetypes.IsStaticPod(pod) {
staticPodsMapToMirrorPods[pod] = pm.mirrorPodByFullName[kubecontainer.GetPodFullName(pod)]
}
}
return staticPodsMapToMirrorPods
}
func (pm *basicManager) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
pm.lock.RLock()
defer pm.lock.RUnlock()
pod, ok := pm.podByUID[kubetypes.ResolvedPodUID(uid)] // Safe conversion, map only holds non-mirrors.
return pod, ok
}
func (pm *basicManager) GetPodByName(namespace, name string) (*v1.Pod, bool) {
podFullName := kubecontainer.BuildPodFullName(name, namespace)
return pm.GetPodByFullName(podFullName)
}
func (pm *basicManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
pm.lock.RLock()
defer pm.lock.RUnlock()
pod, ok := pm.podByFullName[podFullName]
return pod, ok
}
func (pm *basicManager) TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID {
// It is safe to type convert to a resolved UID because type conversion is idempotent.
if uid == "" {
return kubetypes.ResolvedPodUID(uid)
}
pm.lock.RLock()
defer pm.lock.RUnlock()
if translated, ok := pm.translationByUID[kubetypes.MirrorPodUID(uid)]; ok {
return translated
}
return kubetypes.ResolvedPodUID(uid)
}
func (pm *basicManager) GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID,
mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID) {
pm.lock.RLock()
defer pm.lock.RUnlock()
podToMirror = make(map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, len(pm.translationByUID))
mirrorToPod = make(map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID, len(pm.translationByUID))
// Insert empty translation mapping for all static pods.
for uid, pod := range pm.podByUID {
if !kubetypes.IsStaticPod(pod) {
continue
}
podToMirror[uid] = ""
}
// Fill in translations. Notice that if there is no mirror pod for a
// static pod, its UID will be translated into the empty string "". This
// is working as intended: the caller can tell that the static pod doesn't
// have a corresponding mirror pod, instead of using the static pod UID directly.
for k, v := range pm.translationByUID {
mirrorToPod[k] = v
podToMirror[v] = k
}
return podToMirror, mirrorToPod
}
// IsMirrorPodOf returns true if pod and mirrorPod are associated with each other.
func IsMirrorPodOf(mirrorPod, pod *v1.Pod) bool {
// Check name and namespace first.
if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace {
return false
}
hash, ok := getHashFromMirrorPod(mirrorPod)
if !ok {
return false
}
return hash == getPodHash(pod)
}
func podsMapToPods(UIDMap map[kubetypes.ResolvedPodUID]*v1.Pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(UIDMap))
for _, pod := range UIDMap {
pods = append(pods, pod)
}
return pods
}
func mirrorPodsMapToMirrorPods(UIDMap map[kubetypes.MirrorPodUID]*v1.Pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(UIDMap))
for _, pod := range UIDMap {
pods = append(pods, pod)
}
return pods
}
func (pm *basicManager) GetMirrorPodByPod(pod *v1.Pod) (*v1.Pod, bool) {
pm.lock.RLock()
defer pm.lock.RUnlock()
mirrorPod, ok := pm.mirrorPodByFullName[kubecontainer.GetPodFullName(pod)]
return mirrorPod, ok
}
func (pm *basicManager) GetPodByMirrorPod(mirrorPod *v1.Pod) (*v1.Pod, bool) {
pm.lock.RLock()
defer pm.lock.RUnlock()
pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)]
return pod, ok
}
func (pm *basicManager) GetPodAndMirrorPod(aPod *v1.Pod) (pod, mirrorPod *v1.Pod, wasMirror bool) {
pm.lock.RLock()
defer pm.lock.RUnlock()
fullName := kubecontainer.GetPodFullName(aPod)
if kubetypes.IsMirrorPod(aPod) {
return pm.podByFullName[fullName], aPod, true
}
return aPod, pm.mirrorPodByFullName[fullName], false
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type FakeMirrorClient struct {
mirrorPodLock sync.RWMutex
// Note that a real mirror client does not store the mirror pods in
// itself; this fake client does so in order to track calls.
mirrorPods sets.Set[string]
createCounts map[string]int
deleteCounts map[string]int
}
func NewFakeMirrorClient() *FakeMirrorClient {
m := FakeMirrorClient{}
m.mirrorPods = sets.New[string]()
m.createCounts = make(map[string]int)
m.deleteCounts = make(map[string]int)
return &m
}
func (fmc *FakeMirrorClient) CreateMirrorPod(_ context.Context, pod *v1.Pod) error {
fmc.mirrorPodLock.Lock()
defer fmc.mirrorPodLock.Unlock()
podFullName := kubecontainer.GetPodFullName(pod)
fmc.mirrorPods.Insert(podFullName)
fmc.createCounts[podFullName]++
return nil
}
// TODO (Robert Krawitz): Implement UID checking
func (fmc *FakeMirrorClient) DeleteMirrorPod(_ context.Context, podFullName string, _ *types.UID) (bool, error) {
fmc.mirrorPodLock.Lock()
defer fmc.mirrorPodLock.Unlock()
fmc.mirrorPods.Delete(podFullName)
fmc.deleteCounts[podFullName]++
return true, nil
}
func (fmc *FakeMirrorClient) HasPod(podFullName string) bool {
fmc.mirrorPodLock.RLock()
defer fmc.mirrorPodLock.RUnlock()
return fmc.mirrorPods.Has(podFullName)
}
func (fmc *FakeMirrorClient) NumOfPods() int {
fmc.mirrorPodLock.RLock()
defer fmc.mirrorPodLock.RUnlock()
return fmc.mirrorPods.Len()
}
func (fmc *FakeMirrorClient) GetPods() []string {
fmc.mirrorPodLock.RLock()
defer fmc.mirrorPodLock.RUnlock()
return sets.List(fmc.mirrorPods)
}
func (fmc *FakeMirrorClient) GetCounts(podFullName string) (int, int) {
fmc.mirrorPodLock.RLock()
defer fmc.mirrorPodLock.RUnlock()
return fmc.createCounts[podFullName], fmc.deleteCounts[podFullName]
}
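// A minimal test sketch (the pod fixture is an assumption):
//
//	fmc := NewFakeMirrorClient()
//	_ = fmc.CreateMirrorPod(context.Background(), pod)
//	fullName := kubecontainer.GetPodFullName(pod)
//	creates, deletes := fmc.GetCounts(fullName) // 1, 0
//	exists := fmc.HasPod(fullName)              // true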
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
mock "github.com/stretchr/testify/mock"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
types0 "k8s.io/kubernetes/pkg/kubelet/types"
)
// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewMockManager(t interface {
mock.TestingT
Cleanup(func())
}) *MockManager {
mock := &MockManager{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockManager is an autogenerated mock type for the Manager type
type MockManager struct {
mock.Mock
}
type MockManager_Expecter struct {
mock *mock.Mock
}
func (_m *MockManager) EXPECT() *MockManager_Expecter {
return &MockManager_Expecter{mock: &_m.Mock}
}
// AddPod provides a mock function for the type MockManager
func (_mock *MockManager) AddPod(pod *v1.Pod) {
_mock.Called(pod)
return
}
// MockManager_AddPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddPod'
type MockManager_AddPod_Call struct {
*mock.Call
}
// AddPod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) AddPod(pod interface{}) *MockManager_AddPod_Call {
return &MockManager_AddPod_Call{Call: _e.mock.On("AddPod", pod)}
}
func (_c *MockManager_AddPod_Call) Run(run func(pod *v1.Pod)) *MockManager_AddPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_AddPod_Call) Return() *MockManager_AddPod_Call {
_c.Call.Return()
return _c
}
func (_c *MockManager_AddPod_Call) RunAndReturn(run func(pod *v1.Pod)) *MockManager_AddPod_Call {
_c.Run(run)
return _c
}
// GetMirrorPodByPod provides a mock function for the type MockManager
func (_mock *MockManager) GetMirrorPodByPod(pod *v1.Pod) (*v1.Pod, bool) {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for GetMirrorPodByPod")
}
var r0 *v1.Pod
var r1 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) (*v1.Pod, bool)); ok {
return returnFunc(pod)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) *v1.Pod); ok {
r0 = returnFunc(pod)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod) bool); ok {
r1 = returnFunc(pod)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockManager_GetMirrorPodByPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMirrorPodByPod'
type MockManager_GetMirrorPodByPod_Call struct {
*mock.Call
}
// GetMirrorPodByPod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) GetMirrorPodByPod(pod interface{}) *MockManager_GetMirrorPodByPod_Call {
return &MockManager_GetMirrorPodByPod_Call{Call: _e.mock.On("GetMirrorPodByPod", pod)}
}
func (_c *MockManager_GetMirrorPodByPod_Call) Run(run func(pod *v1.Pod)) *MockManager_GetMirrorPodByPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_GetMirrorPodByPod_Call) Return(pod1 *v1.Pod, b bool) *MockManager_GetMirrorPodByPod_Call {
_c.Call.Return(pod1, b)
return _c
}
func (_c *MockManager_GetMirrorPodByPod_Call) RunAndReturn(run func(pod *v1.Pod) (*v1.Pod, bool)) *MockManager_GetMirrorPodByPod_Call {
_c.Call.Return(run)
return _c
}
// GetPodAndMirrorPod provides a mock function for the type MockManager
func (_mock *MockManager) GetPodAndMirrorPod(pod *v1.Pod) (*v1.Pod, *v1.Pod, bool) {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for GetPodAndMirrorPod")
}
var r0 *v1.Pod
var r1 *v1.Pod
var r2 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) (*v1.Pod, *v1.Pod, bool)); ok {
return returnFunc(pod)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) *v1.Pod); ok {
r0 = returnFunc(pod)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod) *v1.Pod); ok {
r1 = returnFunc(pod)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(2).(func(*v1.Pod) bool); ok {
r2 = returnFunc(pod)
} else {
r2 = ret.Get(2).(bool)
}
return r0, r1, r2
}
// MockManager_GetPodAndMirrorPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodAndMirrorPod'
type MockManager_GetPodAndMirrorPod_Call struct {
*mock.Call
}
// GetPodAndMirrorPod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) GetPodAndMirrorPod(pod interface{}) *MockManager_GetPodAndMirrorPod_Call {
return &MockManager_GetPodAndMirrorPod_Call{Call: _e.mock.On("GetPodAndMirrorPod", pod)}
}
func (_c *MockManager_GetPodAndMirrorPod_Call) Run(run func(pod *v1.Pod)) *MockManager_GetPodAndMirrorPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_GetPodAndMirrorPod_Call) Return(pod1 *v1.Pod, mirrorPod *v1.Pod, wasMirror bool) *MockManager_GetPodAndMirrorPod_Call {
_c.Call.Return(pod1, mirrorPod, wasMirror)
return _c
}
func (_c *MockManager_GetPodAndMirrorPod_Call) RunAndReturn(run func(pod *v1.Pod) (*v1.Pod, *v1.Pod, bool)) *MockManager_GetPodAndMirrorPod_Call {
_c.Call.Return(run)
return _c
}
// GetPodByFullName provides a mock function for the type MockManager
func (_mock *MockManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
ret := _mock.Called(podFullName)
if len(ret) == 0 {
panic("no return value specified for GetPodByFullName")
}
var r0 *v1.Pod
var r1 bool
if returnFunc, ok := ret.Get(0).(func(string) (*v1.Pod, bool)); ok {
return returnFunc(podFullName)
}
if returnFunc, ok := ret.Get(0).(func(string) *v1.Pod); ok {
r0 = returnFunc(podFullName)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(string) bool); ok {
r1 = returnFunc(podFullName)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockManager_GetPodByFullName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodByFullName'
type MockManager_GetPodByFullName_Call struct {
*mock.Call
}
// GetPodByFullName is a helper method to define mock.On call
// - podFullName string
func (_e *MockManager_Expecter) GetPodByFullName(podFullName interface{}) *MockManager_GetPodByFullName_Call {
return &MockManager_GetPodByFullName_Call{Call: _e.mock.On("GetPodByFullName", podFullName)}
}
func (_c *MockManager_GetPodByFullName_Call) Run(run func(podFullName string)) *MockManager_GetPodByFullName_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_GetPodByFullName_Call) Return(pod *v1.Pod, b bool) *MockManager_GetPodByFullName_Call {
_c.Call.Return(pod, b)
return _c
}
func (_c *MockManager_GetPodByFullName_Call) RunAndReturn(run func(podFullName string) (*v1.Pod, bool)) *MockManager_GetPodByFullName_Call {
_c.Call.Return(run)
return _c
}
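// A minimal test sketch using the expecter API (the pod fixture and full name
// are assumptions):
//
//	m := NewMockManager(t)
//	m.EXPECT().GetPodByFullName("mypod_kube-system").Return(pod, true)
//	got, ok := m.GetPodByFullName("mypod_kube-system")
//	// got == pod and ok == true; expectations are asserted during test cleanup.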
// GetPodByMirrorPod provides a mock function for the type MockManager
func (_mock *MockManager) GetPodByMirrorPod(pod *v1.Pod) (*v1.Pod, bool) {
ret := _mock.Called(pod)
if len(ret) == 0 {
panic("no return value specified for GetPodByMirrorPod")
}
var r0 *v1.Pod
var r1 bool
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) (*v1.Pod, bool)); ok {
return returnFunc(pod)
}
if returnFunc, ok := ret.Get(0).(func(*v1.Pod) *v1.Pod); ok {
r0 = returnFunc(pod)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(*v1.Pod) bool); ok {
r1 = returnFunc(pod)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockManager_GetPodByMirrorPod_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodByMirrorPod'
type MockManager_GetPodByMirrorPod_Call struct {
*mock.Call
}
// GetPodByMirrorPod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) GetPodByMirrorPod(pod interface{}) *MockManager_GetPodByMirrorPod_Call {
return &MockManager_GetPodByMirrorPod_Call{Call: _e.mock.On("GetPodByMirrorPod", pod)}
}
func (_c *MockManager_GetPodByMirrorPod_Call) Run(run func(pod *v1.Pod)) *MockManager_GetPodByMirrorPod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_GetPodByMirrorPod_Call) Return(pod1 *v1.Pod, b bool) *MockManager_GetPodByMirrorPod_Call {
_c.Call.Return(pod1, b)
return _c
}
func (_c *MockManager_GetPodByMirrorPod_Call) RunAndReturn(run func(pod *v1.Pod) (*v1.Pod, bool)) *MockManager_GetPodByMirrorPod_Call {
_c.Call.Return(run)
return _c
}
// GetPodByName provides a mock function for the type MockManager
func (_mock *MockManager) GetPodByName(namespace string, name string) (*v1.Pod, bool) {
ret := _mock.Called(namespace, name)
if len(ret) == 0 {
panic("no return value specified for GetPodByName")
}
var r0 *v1.Pod
var r1 bool
if returnFunc, ok := ret.Get(0).(func(string, string) (*v1.Pod, bool)); ok {
return returnFunc(namespace, name)
}
if returnFunc, ok := ret.Get(0).(func(string, string) *v1.Pod); ok {
r0 = returnFunc(namespace, name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(string, string) bool); ok {
r1 = returnFunc(namespace, name)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockManager_GetPodByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodByName'
type MockManager_GetPodByName_Call struct {
*mock.Call
}
// GetPodByName is a helper method to define mock.On call
// - namespace string
// - name string
func (_e *MockManager_Expecter) GetPodByName(namespace interface{}, name interface{}) *MockManager_GetPodByName_Call {
return &MockManager_GetPodByName_Call{Call: _e.mock.On("GetPodByName", namespace, name)}
}
func (_c *MockManager_GetPodByName_Call) Run(run func(namespace string, name string)) *MockManager_GetPodByName_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 string
if args[0] != nil {
arg0 = args[0].(string)
}
var arg1 string
if args[1] != nil {
arg1 = args[1].(string)
}
run(
arg0,
arg1,
)
})
return _c
}
func (_c *MockManager_GetPodByName_Call) Return(pod *v1.Pod, b bool) *MockManager_GetPodByName_Call {
_c.Call.Return(pod, b)
return _c
}
func (_c *MockManager_GetPodByName_Call) RunAndReturn(run func(namespace string, name string) (*v1.Pod, bool)) *MockManager_GetPodByName_Call {
_c.Call.Return(run)
return _c
}
// GetPodByUID provides a mock function for the type MockManager
func (_mock *MockManager) GetPodByUID(uID types.UID) (*v1.Pod, bool) {
ret := _mock.Called(uID)
if len(ret) == 0 {
panic("no return value specified for GetPodByUID")
}
var r0 *v1.Pod
var r1 bool
if returnFunc, ok := ret.Get(0).(func(types.UID) (*v1.Pod, bool)); ok {
return returnFunc(uID)
}
if returnFunc, ok := ret.Get(0).(func(types.UID) *v1.Pod); ok {
r0 = returnFunc(uID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func(types.UID) bool); ok {
r1 = returnFunc(uID)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockManager_GetPodByUID_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'GetPodByUID'
type MockManager_GetPodByUID_Call struct {
*mock.Call
}
// GetPodByUID is a helper method to define mock.On call
// - uID types.UID
func (_e *MockManager_Expecter) GetPodByUID(uID interface{}) *MockManager_GetPodByUID_Call {
return &MockManager_GetPodByUID_Call{Call: _e.mock.On("GetPodByUID", uID)}
}
func (_c *MockManager_GetPodByUID_Call) Run(run func(uID types.UID)) *MockManager_GetPodByUID_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 types.UID
if args[0] != nil {
arg0 = args[0].(types.UID)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_GetPodByUID_Call) Return(pod *v1.Pod, b bool) *MockManager_GetPodByUID_Call {
_c.Call.Return(pod, b)
return _c
}
func (_c *MockManager_GetPodByUID_Call) RunAndReturn(run func(uID types.UID) (*v1.Pod, bool)) *MockManager_GetPodByUID_Call {
_c.Call.Return(run)
return _c
}
// GetPods provides a mock function for the type MockManager
func (_mock *MockManager) GetPods() []*v1.Pod {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetPods")
}
var r0 []*v1.Pod
if returnFunc, ok := ret.Get(0).(func() []*v1.Pod); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1.Pod)
}
}
return r0
}
// MockManager_GetPods_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'GetPods'
type MockManager_GetPods_Call struct {
*mock.Call
}
// GetPods is a helper method to define mock.On call
func (_e *MockManager_Expecter) GetPods() *MockManager_GetPods_Call {
return &MockManager_GetPods_Call{Call: _e.mock.On("GetPods")}
}
func (_c *MockManager_GetPods_Call) Run(run func()) *MockManager_GetPods_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockManager_GetPods_Call) Return(pods []*v1.Pod) *MockManager_GetPods_Call {
_c.Call.Return(pods)
return _c
}
func (_c *MockManager_GetPods_Call) RunAndReturn(run func() []*v1.Pod) *MockManager_GetPods_Call {
_c.Call.Return(run)
return _c
}
// GetPodsAndMirrorPods provides a mock function for the type MockManager
func (_mock *MockManager) GetPodsAndMirrorPods() ([]*v1.Pod, []*v1.Pod, []string) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetPodsAndMirrorPods")
}
var r0 []*v1.Pod
var r1 []*v1.Pod
var r2 []string
if returnFunc, ok := ret.Get(0).(func() ([]*v1.Pod, []*v1.Pod, []string)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() []*v1.Pod); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1.Pod)
}
}
if returnFunc, ok := ret.Get(1).(func() []*v1.Pod); ok {
r1 = returnFunc()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).([]*v1.Pod)
}
}
if returnFunc, ok := ret.Get(2).(func() []string); ok {
r2 = returnFunc()
} else {
if ret.Get(2) != nil {
r2 = ret.Get(2).([]string)
}
}
return r0, r1, r2
}
// MockManager_GetPodsAndMirrorPods_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'GetPodsAndMirrorPods'
type MockManager_GetPodsAndMirrorPods_Call struct {
*mock.Call
}
// GetPodsAndMirrorPods is a helper method to define mock.On call
func (_e *MockManager_Expecter) GetPodsAndMirrorPods() *MockManager_GetPodsAndMirrorPods_Call {
return &MockManager_GetPodsAndMirrorPods_Call{Call: _e.mock.On("GetPodsAndMirrorPods")}
}
func (_c *MockManager_GetPodsAndMirrorPods_Call) Run(run func()) *MockManager_GetPodsAndMirrorPods_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockManager_GetPodsAndMirrorPods_Call) Return(allPods []*v1.Pod, allMirrorPods []*v1.Pod, orphanedMirrorPodFullnames []string) *MockManager_GetPodsAndMirrorPods_Call {
_c.Call.Return(allPods, allMirrorPods, orphanedMirrorPodFullnames)
return _c
}
func (_c *MockManager_GetPodsAndMirrorPods_Call) RunAndReturn(run func() ([]*v1.Pod, []*v1.Pod, []string)) *MockManager_GetPodsAndMirrorPods_Call {
_c.Call.Return(run)
return _c
}
// GetStaticPodToMirrorPodMap provides a mock function for the type MockManager
func (_mock *MockManager) GetStaticPodToMirrorPodMap() map[*v1.Pod]*v1.Pod {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetStaticPodToMirrorPodMap")
}
var r0 map[*v1.Pod]*v1.Pod
if returnFunc, ok := ret.Get(0).(func() map[*v1.Pod]*v1.Pod); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[*v1.Pod]*v1.Pod)
}
}
return r0
}
// MockManager_GetStaticPodToMirrorPodMap_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'GetStaticPodToMirrorPodMap'
type MockManager_GetStaticPodToMirrorPodMap_Call struct {
*mock.Call
}
// GetStaticPodToMirrorPodMap is a helper method to define mock.On call
func (_e *MockManager_Expecter) GetStaticPodToMirrorPodMap() *MockManager_GetStaticPodToMirrorPodMap_Call {
return &MockManager_GetStaticPodToMirrorPodMap_Call{Call: _e.mock.On("GetStaticPodToMirrorPodMap")}
}
func (_c *MockManager_GetStaticPodToMirrorPodMap_Call) Run(run func()) *MockManager_GetStaticPodToMirrorPodMap_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockManager_GetStaticPodToMirrorPodMap_Call) Return(podToPod map[*v1.Pod]*v1.Pod) *MockManager_GetStaticPodToMirrorPodMap_Call {
_c.Call.Return(podToPod)
return _c
}
func (_c *MockManager_GetStaticPodToMirrorPodMap_Call) RunAndReturn(run func() map[*v1.Pod]*v1.Pod) *MockManager_GetStaticPodToMirrorPodMap_Call {
_c.Call.Return(run)
return _c
}
// GetUIDTranslations provides a mock function for the type MockManager
func (_mock *MockManager) GetUIDTranslations() (map[types0.ResolvedPodUID]types0.MirrorPodUID, map[types0.MirrorPodUID]types0.ResolvedPodUID) {
ret := _mock.Called()
if len(ret) == 0 {
panic("no return value specified for GetUIDTranslations")
}
var r0 map[types0.ResolvedPodUID]types0.MirrorPodUID
var r1 map[types0.MirrorPodUID]types0.ResolvedPodUID
if returnFunc, ok := ret.Get(0).(func() (map[types0.ResolvedPodUID]types0.MirrorPodUID, map[types0.MirrorPodUID]types0.ResolvedPodUID)); ok {
return returnFunc()
}
if returnFunc, ok := ret.Get(0).(func() map[types0.ResolvedPodUID]types0.MirrorPodUID); ok {
r0 = returnFunc()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[types0.ResolvedPodUID]types0.MirrorPodUID)
}
}
if returnFunc, ok := ret.Get(1).(func() map[types0.MirrorPodUID]types0.ResolvedPodUID); ok {
r1 = returnFunc()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(map[types0.MirrorPodUID]types0.ResolvedPodUID)
}
}
return r0, r1
}
// MockManager_GetUIDTranslations_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'GetUIDTranslations'
type MockManager_GetUIDTranslations_Call struct {
*mock.Call
}
// GetUIDTranslations is a helper method to define mock.On call
func (_e *MockManager_Expecter) GetUIDTranslations() *MockManager_GetUIDTranslations_Call {
return &MockManager_GetUIDTranslations_Call{Call: _e.mock.On("GetUIDTranslations")}
}
func (_c *MockManager_GetUIDTranslations_Call) Run(run func()) *MockManager_GetUIDTranslations_Call {
_c.Call.Run(func(args mock.Arguments) {
run()
})
return _c
}
func (_c *MockManager_GetUIDTranslations_Call) Return(podToMirror map[types0.ResolvedPodUID]types0.MirrorPodUID, mirrorToPod map[types0.MirrorPodUID]types0.ResolvedPodUID) *MockManager_GetUIDTranslations_Call {
_c.Call.Return(podToMirror, mirrorToPod)
return _c
}
func (_c *MockManager_GetUIDTranslations_Call) RunAndReturn(run func() (map[types0.ResolvedPodUID]types0.MirrorPodUID, map[types0.MirrorPodUID]types0.ResolvedPodUID)) *MockManager_GetUIDTranslations_Call {
_c.Call.Return(run)
return _c
}
// RemovePod provides a mock function for the type MockManager
func (_mock *MockManager) RemovePod(pod *v1.Pod) {
_mock.Called(pod)
return
}
// MockManager_RemovePod_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'RemovePod'
type MockManager_RemovePod_Call struct {
*mock.Call
}
// RemovePod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) RemovePod(pod interface{}) *MockManager_RemovePod_Call {
return &MockManager_RemovePod_Call{Call: _e.mock.On("RemovePod", pod)}
}
func (_c *MockManager_RemovePod_Call) Run(run func(pod *v1.Pod)) *MockManager_RemovePod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_RemovePod_Call) Return() *MockManager_RemovePod_Call {
_c.Call.Return()
return _c
}
func (_c *MockManager_RemovePod_Call) RunAndReturn(run func(pod *v1.Pod)) *MockManager_RemovePod_Call {
_c.Run(run)
return _c
}
// SetPods provides a mock function for the type MockManager
func (_mock *MockManager) SetPods(pods []*v1.Pod) {
_mock.Called(pods)
return
}
// MockManager_SetPods_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'SetPods'
type MockManager_SetPods_Call struct {
*mock.Call
}
// SetPods is a helper method to define mock.On call
// - pods []*v1.Pod
func (_e *MockManager_Expecter) SetPods(pods interface{}) *MockManager_SetPods_Call {
return &MockManager_SetPods_Call{Call: _e.mock.On("SetPods", pods)}
}
func (_c *MockManager_SetPods_Call) Run(run func(pods []*v1.Pod)) *MockManager_SetPods_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 []*v1.Pod
if args[0] != nil {
arg0 = args[0].([]*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_SetPods_Call) Return() *MockManager_SetPods_Call {
_c.Call.Return()
return _c
}
func (_c *MockManager_SetPods_Call) RunAndReturn(run func(pods []*v1.Pod)) *MockManager_SetPods_Call {
_c.Run(run)
return _c
}
// TranslatePodUID provides a mock function for the type MockManager
func (_mock *MockManager) TranslatePodUID(uid types.UID) types0.ResolvedPodUID {
ret := _mock.Called(uid)
if len(ret) == 0 {
panic("no return value specified for TranslatePodUID")
}
var r0 types0.ResolvedPodUID
if returnFunc, ok := ret.Get(0).(func(types.UID) types0.ResolvedPodUID); ok {
r0 = returnFunc(uid)
} else {
r0 = ret.Get(0).(types0.ResolvedPodUID)
}
return r0
}
// MockManager_TranslatePodUID_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'TranslatePodUID'
type MockManager_TranslatePodUID_Call struct {
*mock.Call
}
// TranslatePodUID is a helper method to define mock.On call
// - uid types.UID
func (_e *MockManager_Expecter) TranslatePodUID(uid interface{}) *MockManager_TranslatePodUID_Call {
return &MockManager_TranslatePodUID_Call{Call: _e.mock.On("TranslatePodUID", uid)}
}
func (_c *MockManager_TranslatePodUID_Call) Run(run func(uid types.UID)) *MockManager_TranslatePodUID_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 types.UID
if args[0] != nil {
arg0 = args[0].(types.UID)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_TranslatePodUID_Call) Return(resolvedPodUID types0.ResolvedPodUID) *MockManager_TranslatePodUID_Call {
_c.Call.Return(resolvedPodUID)
return _c
}
func (_c *MockManager_TranslatePodUID_Call) RunAndReturn(run func(uid types.UID) types0.ResolvedPodUID) *MockManager_TranslatePodUID_Call {
_c.Call.Return(run)
return _c
}
// UpdatePod provides a mock function for the type MockManager
func (_mock *MockManager) UpdatePod(pod *v1.Pod) {
_mock.Called(pod)
return
}
// MockManager_UpdatePod_Call is a *mock.Call that shadows Run/Return methods with a type-explicit version for method 'UpdatePod'
type MockManager_UpdatePod_Call struct {
*mock.Call
}
// UpdatePod is a helper method to define mock.On call
// - pod *v1.Pod
func (_e *MockManager_Expecter) UpdatePod(pod interface{}) *MockManager_UpdatePod_Call {
return &MockManager_UpdatePod_Call{Call: _e.mock.On("UpdatePod", pod)}
}
func (_c *MockManager_UpdatePod_Call) Run(run func(pod *v1.Pod)) *MockManager_UpdatePod_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 *v1.Pod
if args[0] != nil {
arg0 = args[0].(*v1.Pod)
}
run(
arg0,
)
})
return _c
}
func (_c *MockManager_UpdatePod_Call) Return() *MockManager_UpdatePod_Call {
_c.Call.Return()
return _c
}
func (_c *MockManager_UpdatePod_Call) RunAndReturn(run func(pod *v1.Pod)) *MockManager_UpdatePod_Call {
_c.Run(run)
return _c
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"sort"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
const (
// The limit on the number of buffered container deletion requests
// This number is a bit arbitrary and may be adjusted in the future.
containerDeletorBufferLimit = 50
)
type containerStatusbyCreatedList []*kubecontainer.Status
type podContainerDeletor struct {
worker chan<- kubecontainer.ContainerID
containersToKeep int
}
func (a containerStatusbyCreatedList) Len() int { return len(a) }
func (a containerStatusbyCreatedList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a containerStatusbyCreatedList) Less(i, j int) bool {
return a[i].CreatedAt.After(a[j].CreatedAt)
}
func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int) *podContainerDeletor {
buffer := make(chan kubecontainer.ContainerID, containerDeletorBufferLimit)
go wait.Until(func() {
for {
id := <-buffer
if err := runtime.DeleteContainer(context.Background(), id); err != nil {
klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err)
}
}
}, 0, wait.NeverStop)
return &podContainerDeletor{
worker: buffer,
containersToKeep: containersToKeep,
}
}
// getContainersToDeleteInPod returns the exited containers in a pod whose name matches that of the container with ID filterContainerID (if non-empty), ordered by creation time from latest to earliest.
// If filterContainerID is empty, all dead containers in the pod are returned.
func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
matchedContainer := func(filterContainerID string, podStatus *kubecontainer.PodStatus) *kubecontainer.Status {
if filterContainerID == "" {
return nil
}
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.ID.ID == filterContainerID {
return containerStatus
}
}
return nil
}(filterContainerID, podStatus)
if filterContainerID != "" && matchedContainer == nil {
klog.InfoS("Container not found in pod's containers", "containerID", filterContainerID)
return containerStatusbyCreatedList{}
}
// Find the exited containers whose name matches the name of the container with id being filterContainerId
var candidates containerStatusbyCreatedList
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.State != kubecontainer.ContainerStateExited {
continue
}
if matchedContainer == nil || matchedContainer.Name == containerStatus.Name {
candidates = append(candidates, containerStatus)
}
}
if len(candidates) <= containersToKeep {
return containerStatusbyCreatedList{}
}
sort.Sort(candidates)
return candidates[containersToKeep:]
}
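// Illustrative sketch (hypothetical values): with containersToKeep = 1, only
// the older of two exited containers with the same name is selected for
// deletion, because candidates are sorted newest-first before truncation.
//
//	now := time.Now()
//	podStatus := &kubecontainer.PodStatus{
//		ContainerStatuses: []*kubecontainer.Status{
//			{ID: kubecontainer.ContainerID{ID: "old"}, Name: "app", State: kubecontainer.ContainerStateExited, CreatedAt: now.Add(-2 * time.Hour)},
//			{ID: kubecontainer.ContainerID{ID: "new"}, Name: "app", State: kubecontainer.ContainerStateExited, CreatedAt: now.Add(-time.Hour)},
//		},
//	}
//	toDelete := getContainersToDeleteInPod("", podStatus, 1)
//	// toDelete holds only the "old" container status.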
// deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod.
func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, removeAll bool) {
containersToKeep := p.containersToKeep
if removeAll {
containersToKeep = 0
filterContainerID = ""
}
for _, candidate := range getContainersToDeleteInPod(filterContainerID, podStatus, containersToKeep) {
select {
case p.worker <- candidate.ID:
default:
klog.InfoS("Failed to issue the request to remove container", "containerID", candidate.ID)
}
}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/utils/clock"
)
// OnCompleteFunc is a function that is invoked when an operation completes.
// If err is non-nil, the operation did not complete successfully.
type OnCompleteFunc func(err error)
// PodStatusFunc is a function that is invoked to override the pod status when a pod is killed.
type PodStatusFunc func(podStatus *v1.PodStatus)
// KillPodOptions are options when performing a pod update whose update type is kill.
type KillPodOptions struct {
// CompletedCh is closed when the kill request completes (syncTerminatingPod has completed
// without error) or if the pod does not exist, or if the pod has already terminated. This
// could take an arbitrary amount of time to be closed, but is never left open once
// CouldHaveRunningContainers() returns false.
CompletedCh chan<- struct{}
// Evict is true if this is a pod triggered eviction - once a pod is evicted some resources are
// more aggressively reaped than during normal pod operation (stopped containers).
Evict bool
// PodStatusFunc is invoked (if set) and overrides the status of the pod at the time the pod is killed.
// The provided status is populated from the latest state.
PodStatusFunc PodStatusFunc
// PodTerminationGracePeriodSecondsOverride is optional override to use if a pod is being killed as part of kill operation.
PodTerminationGracePeriodSecondsOverride *int64
}
func (k *KillPodOptions) MarshalJSON() ([]byte, error) {
t := struct {
Evict bool
PodTerminationGracePeriodSecondsOverride *int64
}{
Evict: k.Evict,
PodTerminationGracePeriodSecondsOverride: k.PodTerminationGracePeriodSecondsOverride,
}
return json.Marshal(t)
}
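// For illustration: because MarshalJSON serializes only Evict and the grace
// period override, marshaling KillPodOptions{Evict: true} yields
//
//	{"Evict":true,"PodTerminationGracePeriodSecondsOverride":null}
//
// The channel and status function fields are intentionally omitted as they
// are not meaningfully serializable.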
// UpdatePodOptions is an options struct to pass to an UpdatePod operation.
type UpdatePodOptions struct {
// The type of update (create, update, sync, kill).
UpdateType kubetypes.SyncPodType
// StartTime is an optional timestamp for when this update was created. If set,
// when this update is fully realized by the pod worker it will be recorded in
// the PodWorkerDuration metric.
StartTime time.Time
// Pod to update. Required.
Pod *v1.Pod
// MirrorPod is the mirror pod if Pod is a static pod. Optional when UpdateType
// is kill or terminated.
MirrorPod *v1.Pod
// RunningPod is a runtime pod that is no longer present in config. Required
// if Pod is nil, ignored if Pod is set.
RunningPod *kubecontainer.Pod
// KillPodOptions is used to override the default termination behavior of the
// pod or to update the pod status after an operation is completed. Since a
// pod can be killed for multiple reasons, PodStatusFunc is invoked in order
// and later kills have an opportunity to override the status (i.e. a preemption
// may be later turned into an eviction).
KillPodOptions *KillPodOptions
}
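// Illustrative sketch (assumed caller-side usage): an eviction-style kill with
// a shortened grace period could be requested as follows; podWorkers and pod
// are assumed to be in scope.
//
//	grace := int64(5)
//	podWorkers.UpdatePod(UpdatePodOptions{
//		UpdateType: kubetypes.SyncPodKill,
//		Pod:        pod,
//		KillPodOptions: &KillPodOptions{
//			Evict:                                    true,
//			PodTerminationGracePeriodSecondsOverride: &grace,
//		},
//	})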
// PodWorkerState classifies the status of a pod as seen by the pod worker - setup (sync),
// teardown of containers (terminating), or cleanup (terminated).
type PodWorkerState int
const (
// SyncPod is when the pod is expected to be started and running.
SyncPod PodWorkerState = iota
// TerminatingPod is when the pod is no longer being set up, but some
// containers may be running and are being torn down.
TerminatingPod
// TerminatedPod indicates the pod is stopped, can have no more running
// containers, and any foreground cleanup can be executed.
TerminatedPod
)
func (state PodWorkerState) String() string {
switch state {
case SyncPod:
return "sync"
case TerminatingPod:
return "terminating"
case TerminatedPod:
return "terminated"
default:
panic(fmt.Sprintf("the state %d is not defined", state))
}
}
// PodWorkerSync is the summary of a single pod worker for sync. Values
// besides state are used to provide metric counts for operators.
type PodWorkerSync struct {
// State of the pod.
State PodWorkerState
// Orphan is true if the pod is no longer in the desired set passed to SyncKnownPods.
Orphan bool
// HasConfig is true if we have a historical pod spec for this pod.
HasConfig bool
// Static is true if we have config and the pod came from a static source.
Static bool
}
// podWork describes the internal changes to be processed by a pod worker.
type podWork struct {
// WorkType is the type of sync to perform - sync (create), terminating (stop
// containers), terminated (clean up and write status).
WorkType PodWorkerState
// Options contains the data to sync.
Options UpdatePodOptions
}
// PodWorkers is an abstract interface for testability.
type PodWorkers interface {
// UpdatePod notifies the pod worker of a change to a pod, which will then
// be processed in FIFO order by a goroutine per pod UID. The state of the
// pod will be passed to the syncPod method until either the pod is marked
// as deleted, it reaches a terminal phase (Succeeded/Failed), or the pod
// is evicted by the kubelet. Once that occurs the syncTerminatingPod method
// will be called until it exits successfully, and after that all further
// UpdatePod() calls will be ignored for that pod until it has been forgotten
// due to significant time passing. A pod that is terminated will never be
// restarted.
UpdatePod(options UpdatePodOptions)
// SyncKnownPods removes workers for pods that are not in the desiredPods set
// and have been terminated for a significant period of time. Once this method
// has been called once, the workers are assumed to be fully initialized and
// subsequent calls to ShouldPodContentBeRemoved on unknown pods will return
// true. It returns a map describing the state of each known pod worker. It
// is the responsibility of the caller to re-add any desired pods that are not
// returned as knownPods.
SyncKnownPods(desiredPods []*v1.Pod) (knownPods map[types.UID]PodWorkerSync)
// IsPodKnownTerminated returns true once SyncTerminatingPod completes
// successfully - the pod with the provided UID is known by the pod
// worker to be terminated. If the pod has been force deleted and the pod worker
// has completed termination this method will return false, so this method should
// only be used to filter out pods from the desired set such as in admission.
//
// Intended for use by the kubelet config loops, but not subsystems, which should
// use ShouldPod*().
IsPodKnownTerminated(uid types.UID) bool
// CouldHaveRunningContainers returns true before the pod workers have synced,
// once the pod workers see the pod (syncPod could be called), and returns false
// after the pod has been terminated (running containers guaranteed stopped).
//
// Intended for use by the kubelet config loops, but not subsystems, which should
// use ShouldPod*().
CouldHaveRunningContainers(uid types.UID) bool
// ShouldPodBeFinished returns true once SyncTerminatedPod completes
// successfully - the pod with the provided UID is known to the pod worker to
// be terminated and have resources reclaimed. It returns false before the
// pod workers have synced (syncPod could be called). Once the pod workers
// have synced it returns false if the pod has a sync status until
// SyncTerminatedPod completes successfully. If the pod workers have synced,
// but the pod does not have a status, it returns true.
//
// Intended for use by subsystem sync loops to avoid performing background setup
// after termination has been requested for a pod. Callers must ensure that the
// syncPod method is non-blocking when their data is absent.
ShouldPodBeFinished(uid types.UID) bool
// IsPodTerminationRequested returns true when pod termination has been requested
// until the termination completes and the pod is removed from config. This should
// not be used in cleanup loops because it will return false if the pod has already
// been cleaned up - use ShouldPodContainersBeTerminating instead. Also, this method
// may return true while containers are still being initialized by the pod worker.
//
// Intended for use by the kubelet sync* methods, but not subsystems, which should
// use ShouldPod*().
IsPodTerminationRequested(uid types.UID) bool
// ShouldPodContainersBeTerminating returns false before pod workers have synced,
// or once a pod has started terminating. This check is similar to
// ShouldPodRuntimeBeRemoved but is also true after pod termination is requested.
//
// Intended for use by subsystem sync loops to avoid performing background setup
// after termination has been requested for a pod. Callers must ensure that the
// syncPod method is non-blocking when their data is absent.
ShouldPodContainersBeTerminating(uid types.UID) bool
// ShouldPodRuntimeBeRemoved returns true if runtime managers within the Kubelet
// should aggressively cleanup pod resources that are not containers or on disk
// content, like attached volumes. This is true when a pod is not yet observed
// by a worker after the first sync (meaning it can't be running yet) or after
// all running containers are stopped.
// TODO: Once pod logs are separated from running containers, this method should
// be used to gate whether containers are kept.
//
// Intended for use by subsystem sync loops to know when to start tearing down
// resources that are used by running containers. Callers should ensure that
// runtime content they own is not required for post-termination - for instance
// containers are required in docker to preserve pod logs until after the pod
// is deleted.
ShouldPodRuntimeBeRemoved(uid types.UID) bool
// ShouldPodContentBeRemoved returns true if resource managers within the Kubelet
// should aggressively cleanup all content related to the pod. This is true
// during pod eviction (when we wish to remove that content to free resources)
// as well as after the request to delete a pod has resulted in containers being
// stopped (which is a more graceful action). Note that a deleting pod can still
// be evicted.
//
// Intended for use by subsystem sync loops to know when to start tearing down
// resources that are used by non-deleted pods. Content is generally preserved
// until deletion+removal_from_etcd or eviction, although garbage collection
// can free content when this method returns false.
ShouldPodContentBeRemoved(uid types.UID) bool
// IsPodForMirrorPodTerminatingByFullName returns true if a static pod with the
// provided pod name is currently terminating and has yet to complete. It is
// intended to be used only during orphan mirror pod cleanup to prevent us from
// deleting a terminating static pod from the apiserver before the pod is shut
// down.
IsPodForMirrorPodTerminatingByFullName(podFullname string) bool
}
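// Illustrative sketch (hypothetical cleanup loop): subsystems are expected to
// consult the Should* methods before tearing down per-pod state; knownUIDs and
// the helper removePodContent below are hypothetical.
//
//	for _, uid := range knownUIDs {
//		if podWorkers.ShouldPodContentBeRemoved(uid) {
//			removePodContent(uid)
//		}
//	}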
// podSyncer describes the core lifecycle operations of the pod state machine. A pod is first
// synced until it naturally reaches termination (true is returned) or an external agent decides
// the pod should be terminated. Once a pod should be terminating, SyncTerminatingPod is invoked
// until it returns no error. Then the SyncTerminatedPod method is invoked until it exits without
// error, and the pod is considered terminal. Implementations of this interface must be threadsafe
// for simultaneous invocation of these methods for multiple pods.
type podSyncer interface {
// SyncPod configures the pod and starts and restarts all containers. If it returns true, the
// pod has reached a terminal state and the presence or absence of an error indicates whether it failed or succeeded.
// If an error is returned, the sync was not successful and should be rerun in the future. This
// is a long running method and should exit early with context.Canceled if the context is canceled.
SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error)
// SyncTerminatingPod attempts to ensure the pod's containers are no longer running and to collect
// any final status. This method is repeatedly invoked with diminishing grace periods until it exits
// without error. Once this method exits with no error other components are allowed to tear down
// supporting resources like volumes and devices. If the context is canceled, the method should
// return context.Canceled unless it has successfully finished, which may occur when a shorter
// grace period is detected.
SyncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error
// SyncTerminatingRuntimePod is invoked when running containers are found that correspond to
// a pod that is no longer known to the kubelet to terminate those containers. It should not
// exit without error unless all containers are known to be stopped.
SyncTerminatingRuntimePod(ctx context.Context, runningPod *kubecontainer.Pod) error
// SyncTerminatedPod is invoked after all running containers are stopped and is responsible
// for releasing resources that should be executed right away rather than in the background.
// Once it exits without error the pod is considered finished on the node.
SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error
}
type syncPodFnType func(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error)
type syncTerminatingPodFnType func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error
type syncTerminatingRuntimePodFnType func(ctx context.Context, runningPod *kubecontainer.Pod) error
type syncTerminatedPodFnType func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error
// podSyncerFuncs implements podSyncer and accepts functions for each method.
type podSyncerFuncs struct {
syncPod syncPodFnType
syncTerminatingPod syncTerminatingPodFnType
syncTerminatingRuntimePod syncTerminatingRuntimePodFnType
syncTerminatedPod syncTerminatedPodFnType
}
func newPodSyncerFuncs(s podSyncer) podSyncerFuncs {
return podSyncerFuncs{
syncPod: s.SyncPod,
syncTerminatingPod: s.SyncTerminatingPod,
syncTerminatingRuntimePod: s.SyncTerminatingRuntimePod,
syncTerminatedPod: s.SyncTerminatedPod,
}
}
var _ podSyncer = podSyncerFuncs{}
func (f podSyncerFuncs) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
return f.syncPod(ctx, updateType, pod, mirrorPod, podStatus)
}
func (f podSyncerFuncs) SyncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
return f.syncTerminatingPod(ctx, pod, podStatus, gracePeriod, podStatusFn)
}
func (f podSyncerFuncs) SyncTerminatingRuntimePod(ctx context.Context, runningPod *kubecontainer.Pod) error {
return f.syncTerminatingRuntimePod(ctx, runningPod)
}
func (f podSyncerFuncs) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
return f.syncTerminatedPod(ctx, pod, podStatus)
}
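// Illustrative sketch (test-only stub): podSyncerFuncs allows faking
// individual lifecycle methods without writing a full podSyncer
// implementation; the zero-effect behaviors below are hypothetical.
//
//	stub := podSyncerFuncs{
//		syncPod: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
//			return false, nil // never terminal, always successful
//		},
//		syncTerminatingPod:        func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error { return nil },
//		syncTerminatingRuntimePod: func(ctx context.Context, runningPod *kubecontainer.Pod) error { return nil },
//		syncTerminatedPod:         func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error { return nil },
//	}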
const (
// jitter factor for resyncInterval
workerResyncIntervalJitterFactor = 0.5
// jitter factor for backOffPeriod and backOffOnTransientErrorPeriod
workerBackOffPeriodJitterFactor = 0.5
// backoff period when transient error occurred.
backOffOnTransientErrorPeriod = time.Second
)
// podSyncStatus tracks per-pod transitions through the three phases of pod
// worker sync (setup, terminating, terminated).
type podSyncStatus struct {
// ctx is the context that is associated with the current pod sync.
// TODO: remove this from the struct by having the context initialized
// in startPodSync, the cancelFn used by UpdatePod, and cancellation of
// a parent context for tearing down workers (if needed) on shutdown
ctx context.Context
// cancelFn if set is expected to cancel the current podSyncer operation.
cancelFn context.CancelFunc
// fullname of the pod
fullname string
// working is true if an update is pending or being worked by a pod worker
// goroutine.
working bool
// pendingUpdate is the updated state the pod worker should observe. It is
// cleared and moved to activeUpdate when a pod worker reads it. A new update
// may always replace a pending update as the pod worker does not guarantee
// that all intermediate states are synced to a worker, only the most recent.
// This state will not be visible to downstream components until a pod worker
// has begun processing it.
pendingUpdate *UpdatePodOptions
// activeUpdate is the most recent version of the pod's state that will be
// passed to a sync*Pod function. A pod becomes visible to downstream components
// once a worker decides to start a pod (startedAt is set). The pod and mirror
// pod fields are accumulated if they are missing on a particular call (the last
// known version), and the value of KillPodOptions is accumulated as pods cannot
// have their grace period shortened. This is the source of truth for the pod spec
// the kubelet is reconciling towards for all components that act on running pods.
activeUpdate *UpdatePodOptions
// syncedAt is the time at which the pod worker first observed this pod.
syncedAt time.Time
// startedAt is the time at which the pod worker allowed the pod to start.
startedAt time.Time
// terminatingAt is set once the pod is requested to be killed - note that
// this can be set before the pod worker starts terminating the pod, see
// terminating.
terminatingAt time.Time
// terminatedAt is set once the pod worker has completed a successful
// syncTerminatingPod call and means all running containers are stopped.
terminatedAt time.Time
// gracePeriod is the requested gracePeriod once terminatingAt is nonzero.
gracePeriod int64
// notifyPostTerminating will be closed once the pod transitions to
// terminated. After the pod is in terminated state, nothing should be
// added to this list.
notifyPostTerminating []chan<- struct{}
// statusPostTerminating is a list of the status changes associated
// with kill pod requests. After the pod is in terminated state, nothing
// should be added to this list. The worker will execute the last function
// in this list on each termination attempt.
statusPostTerminating []PodStatusFunc
// startedTerminating is true once the pod worker has observed the request to
// stop a pod (exited syncPod and observed a podWork with WorkType
// TerminatingPod). Once this is set, it is safe for other components
// of the kubelet to assume that no other containers may be started.
startedTerminating bool
// deleted is true if the pod has been marked for deletion on the apiserver
// or has no configuration represented (was deleted before).
deleted bool
// evicted is true if the kill indicated this was an eviction (an evicted
// pod can be more aggressively cleaned up).
evicted bool
// finished is true once the pod worker completes for a pod
// (syncTerminatedPod exited with no errors) until SyncKnownPods is invoked
// to remove the pod. A terminal pod (Succeeded/Failed) will have
// termination status until the pod is deleted.
finished bool
// restartRequested is true if the pod worker was informed the pod is
// expected to exist (update type of create, update, or sync) after
// it has been killed. When known pods are synced, any pod that is
// terminated and has restartRequested will have its history cleared.
restartRequested bool
// observedRuntime is true if the pod has been observed to be present in the
// runtime. A pod that has been observed at runtime must go through either
// SyncTerminatingRuntimePod or SyncTerminatingPod. Otherwise, we can avoid
// invoking the terminating methods if the pod is deleted or orphaned before
// it has been started.
observedRuntime bool
}
func (s *podSyncStatus) IsWorking() bool { return s.working }
func (s *podSyncStatus) IsTerminationRequested() bool { return !s.terminatingAt.IsZero() }
func (s *podSyncStatus) IsTerminationStarted() bool { return s.startedTerminating }
func (s *podSyncStatus) IsTerminated() bool { return !s.terminatedAt.IsZero() }
func (s *podSyncStatus) IsFinished() bool { return s.finished }
func (s *podSyncStatus) IsEvicted() bool { return s.evicted }
func (s *podSyncStatus) IsDeleted() bool { return s.deleted }
func (s *podSyncStatus) IsStarted() bool { return !s.startedAt.IsZero() }
// WorkType returns the pod's current position in the pod lifecycle state machine.
func (s *podSyncStatus) WorkType() PodWorkerState {
if s.IsTerminated() {
return TerminatedPod
}
if s.IsTerminationRequested() {
return TerminatingPod
}
return SyncPod
}
// mergeLastUpdate records the most recent state from a new update. Pod and MirrorPod are
// updated to the latest values observed. KillPodOptions is accumulated. If RunningPod is set, Pod is synthetic and
// will *not* be used as the last pod state unless no previous pod state exists (because
// the pod worker may be responsible for terminating a pod from a previous run of the
// kubelet where no config state is visible). The contents of activeUpdate are used as the
// source of truth for components downstream of the pod workers.
func (s *podSyncStatus) mergeLastUpdate(other UpdatePodOptions) {
opts := s.activeUpdate
if opts == nil {
opts = &UpdatePodOptions{}
s.activeUpdate = opts
}
// UpdatePodOptions states (and UpdatePod enforces) that either Pod or RunningPod
// is set, and we wish to preserve the most recent Pod we have observed, so only
// overwrite our Pod when we have no Pod or when RunningPod is nil.
if opts.Pod == nil || other.RunningPod == nil {
opts.Pod = other.Pod
}
// running pods will not persist but will be remembered for replay
opts.RunningPod = other.RunningPod
// if mirrorPod was not provided, remember the last one for replay
if other.MirrorPod != nil {
opts.MirrorPod = other.MirrorPod
}
// accumulate kill pod options
if other.KillPodOptions != nil {
opts.KillPodOptions = &KillPodOptions{}
if other.KillPodOptions.Evict {
opts.KillPodOptions.Evict = true
}
if override := other.KillPodOptions.PodTerminationGracePeriodSecondsOverride; override != nil {
value := *override
opts.KillPodOptions.PodTerminationGracePeriodSecondsOverride = &value
}
}
// StartTime is not copied - that is purely for tracking latency of config propagation
// from kubelet to pod worker.
}
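// Illustrative sketch (hypothetical sequence): a later runtime-only kill does
// not clobber the most recently observed real pod spec.
//
//	s.mergeLastUpdate(UpdatePodOptions{Pod: realPod})
//	s.mergeLastUpdate(UpdatePodOptions{Pod: runningPod.ToAPIPod(), RunningPod: runningPod})
//	// s.activeUpdate.Pod is still realPod; RunningPod is remembered for replay.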
// podWorkers keeps track of operations on pods and ensures each pod is
// reconciled with the container runtime and other subsystems. The worker
// also tracks which pods are in flight for starting, which pods are
// shutting down but still have running containers, and which pods have
// terminated recently and are guaranteed to have no running containers.
//
// podWorkers is the source of truth for what pods should be active on a
// node at any time, and is kept up to date with the desired state of the
// node (tracked by the kubelet pod config loops and the state in the
// kubelet's podManager) via the UpdatePod method. Components that act
// upon running pods should look to the pod worker for state instead of the
// kubelet podManager. The pod worker is periodically reconciled with the
// state of the podManager via SyncKnownPods() and is responsible for
// ensuring the completion of all observed pods no longer present in
// the podManager (no longer part of the node's desired config).
//
// A pod passed to a pod worker is either being synced (expected to be
// running), terminating (has running containers but no new containers are
// expected to start), terminated (has no running containers but may still
// have resources being consumed), or cleaned up (no resources remaining).
// Once a pod is set to be "torn down" it cannot be started again for that
// UID (corresponding to a delete or eviction) until:
//
// 1. The pod worker is finalized (syncTerminatingPod and
// syncTerminatedPod exit without error sequentially)
// 2. The SyncKnownPods method is invoked by kubelet housekeeping and the pod
// is not part of the known config.
//
// Pod workers provide a consistent source of information to other kubelet
// loops about the status of the pod and whether containers can be
// running. The ShouldPodContentBeRemoved() method tracks whether a pod's
// contents should still exist, which includes non-existent pods after
// SyncKnownPods() has been called once (as per the contract, all existing
// pods should be provided via UpdatePod before SyncKnownPods is invoked).
// Generally other sync loops are expected to separate "setup" and
// "teardown" responsibilities and the information methods here assist in
// each by centralizing that state. A simple visualization of the time
// intervals involved might look like:
//
// ---| = kubelet config has synced at least once
// -------| |- = pod exists in apiserver config
// --------| |---------------- = CouldHaveRunningContainers() is true
//
// ^- pod is observed by pod worker .
// . .
//
// ----------| |------------------------- = syncPod is running
//
// . ^- pod worker loop sees change and invokes syncPod
// . . .
//
// --------------| |------- = ShouldPodContainersBeTerminating() returns true
// --------------| |------- = IsPodTerminationRequested() returns true (pod is known)
//
// . . ^- Kubelet evicts pod .
// . . .
//
// -------------------| |---------------- = syncTerminatingPod runs then exits without error
//
// . . ^ pod worker loop exits syncPod, sees pod is terminating,
// . . invokes syncTerminatingPod
// . . .
//
// ---| |------------------| . = ShouldPodRuntimeBeRemoved() returns true (post-sync)
//
// . ^ syncTerminatingPod has exited successfully
// . .
//
// ----------------------------| |------- = syncTerminatedPod runs then exits without error
//
// . ^ other loops can tear down
// . .
//
// ------------------------------------| |---- = status manager is waiting for SyncTerminatedPod() to finish
//
// . ^ .
//
// ----------| |- = status manager can be writing pod status
//
// ^ status manager deletes pod because no longer exists in config
//
// Other components in the Kubelet can request a termination of the pod
// via the UpdatePod method or the killPodNow wrapper - this will ensure
// the components of the pod are stopped until the kubelet is restarted
// or permanently (if the phase of the pod is set to a terminal phase
// in the pod status change).
type podWorkers struct {
// Protects all per worker fields.
podLock sync.Mutex
// podsSynced is true once the pod worker has been synced at least once,
// which means that all working pods have been started via UpdatePod().
podsSynced bool
// Tracks all running per-pod goroutines - per-pod goroutine will be
// processing updates received through its corresponding channel. Sending
// a message on this channel will signal the corresponding goroutine to
// consume podSyncStatuses[uid].pendingUpdate if set.
podUpdates map[types.UID]chan struct{}
// Tracks by UID the termination status of a pod - syncing, terminating,
// terminated, and evicted.
podSyncStatuses map[types.UID]*podSyncStatus
// Tracks all uids for started static pods by full name
startedStaticPodsByFullname map[string]types.UID
// Tracks all uids for static pods that are waiting to start by full name
waitingToStartStaticPodsByFullname map[string][]types.UID
workQueue queue.WorkQueue
// This function is run to sync the desired state of pod.
// NOTE: This function has to be thread-safe - it can be called for
// different pods at the same time.
podSyncer podSyncer
// workerChannelFn is exposed for testing to allow unit tests to impose delays
// in channel communication. The function is invoked once each time a new worker
// goroutine starts.
workerChannelFn func(uid types.UID, in chan struct{}) (out <-chan struct{})
// The EventRecorder to use
recorder record.EventRecorder
// backOffPeriod is the duration to back off when there is a sync error.
backOffPeriod time.Duration
// resyncInterval is the duration to wait until the next sync.
resyncInterval time.Duration
// podCache stores kubecontainer.PodStatus for all pods.
podCache kubecontainer.Cache
// allocationManager is used to allocate resources for pods
allocationManager allocation.Manager
// clock is used for testing timing
clock clock.PassiveClock
}
func newPodWorkers(
podSyncer podSyncer,
recorder record.EventRecorder,
workQueue queue.WorkQueue,
resyncInterval, backOffPeriod time.Duration,
podCache kubecontainer.Cache,
allocationManager allocation.Manager,
) PodWorkers {
return &podWorkers{
podSyncStatuses: map[types.UID]*podSyncStatus{},
podUpdates: map[types.UID]chan struct{}{},
startedStaticPodsByFullname: map[string]types.UID{},
waitingToStartStaticPodsByFullname: map[string][]types.UID{},
podSyncer: podSyncer,
recorder: recorder,
workQueue: workQueue,
resyncInterval: resyncInterval,
backOffPeriod: backOffPeriod,
podCache: podCache,
allocationManager: allocationManager,
clock: clock.RealClock{},
}
}
func (p *podWorkers) IsPodKnownTerminated(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
return status.IsTerminated()
}
// if the pod is not known, we return false (pod worker is not aware of it)
return false
}
func (p *podWorkers) CouldHaveRunningContainers(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
return !status.IsTerminated()
}
// once all pods are synced, any pod without sync status is known to not be running.
return !p.podsSynced
}
func (p *podWorkers) ShouldPodBeFinished(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
return status.IsFinished()
}
// once all pods are synced, any pod without sync status is assumed to
// have SyncTerminatedPod finished.
return p.podsSynced
}
func (p *podWorkers) IsPodTerminationRequested(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
// the pod may still be setting up at this point.
return status.IsTerminationRequested()
}
// an unknown pod is considered not to be terminating (use ShouldPodContainersBeTerminating in
// cleanup loops to avoid failing to cleanup pods that have already been removed from config)
return false
}
func (p *podWorkers) ShouldPodContainersBeTerminating(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
// we wait until the pod worker goroutine observes the termination, which means syncPod will not
// be executed again, which means no new containers can be started
return status.IsTerminationStarted()
}
// once we've synced, if the pod isn't known to the workers we should be tearing them
// down
return p.podsSynced
}
func (p *podWorkers) ShouldPodRuntimeBeRemoved(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
return status.IsTerminated()
}
// a pod that hasn't been sent to the pod worker yet should have no runtime components once we have
// synced all content.
return p.podsSynced
}
func (p *podWorkers) ShouldPodContentBeRemoved(uid types.UID) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[uid]; ok {
return status.IsEvicted() || (status.IsDeleted() && status.IsTerminated())
}
// a pod that hasn't been sent to the pod worker yet should have no content on disk once we have
// synced all content.
return p.podsSynced
}
func (p *podWorkers) IsPodForMirrorPodTerminatingByFullName(podFullName string) bool {
p.podLock.Lock()
defer p.podLock.Unlock()
uid, started := p.startedStaticPodsByFullname[podFullName]
if !started {
return false
}
status, exists := p.podSyncStatuses[uid]
if !exists {
return false
}
if !status.IsTerminationRequested() || status.IsTerminated() {
return false
}
return true
}
func isPodStatusCacheTerminal(status *kubecontainer.PodStatus) bool {
for _, container := range status.ContainerStatuses {
if container.State == kubecontainer.ContainerStateRunning {
return false
}
}
for _, sb := range status.SandboxStatuses {
if sb.State == runtimeapi.PodSandboxState_SANDBOX_READY {
return false
}
}
return true
}
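// Illustrative sketch: a cached status is terminal only when no container is
// running and no sandbox is ready, e.g.
//
//	terminal := isPodStatusCacheTerminal(&kubecontainer.PodStatus{
//		ContainerStatuses: []*kubecontainer.Status{{State: kubecontainer.ContainerStateExited}},
//	})
//	// terminal == true: the only container has exited and no sandbox is ready.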
// UpdatePod carries a configuration change or termination state to a pod. A pod is either runnable,
// terminating, or terminated, and will transition to terminating if: deleted on the apiserver,
// discovered to have a terminal phase (Succeeded or Failed), or evicted by the kubelet.
func (p *podWorkers) UpdatePod(options UpdatePodOptions) {
// Handle when the pod is an orphan (no config) and we only have runtime status by running only
// the terminating part of the lifecycle. A running pod contains only a minimal set of information
// about the pod.
var isRuntimePod bool
var uid types.UID
var name, ns string
if runningPod := options.RunningPod; runningPod != nil {
if options.Pod == nil {
// the synthetic pod created here is used only as a placeholder and not tracked
if options.UpdateType != kubetypes.SyncPodKill {
klog.InfoS("Pod update is ignored, runtime pods can only be killed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID, "updateType", options.UpdateType)
return
}
uid, ns, name = runningPod.ID, runningPod.Namespace, runningPod.Name
isRuntimePod = true
} else {
options.RunningPod = nil
uid, ns, name = options.Pod.UID, options.Pod.Namespace, options.Pod.Name
klog.InfoS("Pod update included RunningPod which is only valid when Pod is not specified", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
}
} else {
uid, ns, name = options.Pod.UID, options.Pod.Namespace, options.Pod.Name
}
p.podLock.Lock()
defer p.podLock.Unlock()
// decide what to do with this pod - we are either setting it up, tearing it down, or ignoring it
var firstTime bool
now := p.clock.Now()
status, ok := p.podSyncStatuses[uid]
if !ok {
klog.V(4).InfoS("Pod is being synced for the first time", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
firstTime = true
status = &podSyncStatus{
syncedAt: now,
fullname: kubecontainer.BuildPodFullName(name, ns),
}
// if this pod is being synced for the first time, we need to make sure it is an active pod
if options.Pod != nil && (options.Pod.Status.Phase == v1.PodFailed || options.Pod.Status.Phase == v1.PodSucceeded) {
// Check to see if the pod is not running and the pod is terminal; if this succeeds then record in the podWorker that it is terminated.
// This is needed because after a kubelet restart, we need to ensure terminal pods will NOT be considered active in Pod Admission. See http://issues.k8s.io/105523
// However, `filterOutInactivePods` considers pods that are actively terminating as active. As a result, `IsPodKnownTerminated()` needs to return true and thus `terminatedAt` needs to be set.
if statusCache, err := p.podCache.Get(uid); err == nil {
if isPodStatusCacheTerminal(statusCache) {
// At this point we know:
// (1) The pod is terminal based on the config source.
// (2) The pod is terminal based on the runtime cache.
// This implies that this pod had already completed `SyncTerminatingPod` sometime in the past. The pod is likely being synced for the first time due to a kubelet restart.
// These pods need to complete SyncTerminatedPod to ensure that all resources are cleaned and that the status manager makes the final status updates for the pod.
// As a result, set finished: false, to ensure a Terminated event will be sent and `SyncTerminatedPod` will run.
status = &podSyncStatus{
terminatedAt: now,
terminatingAt: now,
syncedAt: now,
startedTerminating: true,
finished: false,
fullname: kubecontainer.BuildPodFullName(name, ns),
}
}
}
}
p.podSyncStatuses[uid] = status
}
// RunningPods represent an unknown pod execution and don't contain pod spec information
// sufficient to perform any action other than termination. If we received a RunningPod
// after a real pod has already been provided, use the most recent spec instead. Also,
// once we observe a runtime pod we must drive it to completion, even if we weren't the
// ones who started it.
pod := options.Pod
if isRuntimePod {
status.observedRuntime = true
switch {
case status.pendingUpdate != nil && status.pendingUpdate.Pod != nil:
pod = status.pendingUpdate.Pod
options.Pod = pod
options.RunningPod = nil
case status.activeUpdate != nil && status.activeUpdate.Pod != nil:
pod = status.activeUpdate.Pod
options.Pod = pod
options.RunningPod = nil
default:
// we will continue to use RunningPod.ToAPIPod() as pod here, but
// options.Pod will be nil and other methods must handle that appropriately.
pod = options.RunningPod.ToAPIPod()
}
}
// When we see a create update on an already terminating pod, that implies two pods with the same UID were created in
// close temporal proximity (usually a static pod, though in extremely rare cases an apiserver might do something
// similar) - flag the sync status to indicate that after the pod terminates it should be reset to "not running" to
// allow a subsequent add/update to start the pod worker again. This does not apply to the first time we see a pod,
// such as when the kubelet restarts and we see already terminated pods for the first time.
if !firstTime && status.IsTerminationRequested() {
if options.UpdateType == kubetypes.SyncPodCreate {
status.restartRequested = true
klog.V(4).InfoS("Pod is terminating but has been requested to restart with same UID, will be reconciled later", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
return
}
}
// once a pod is terminated by UID, it cannot reenter the pod worker (until the UID is purged by housekeeping)
if status.IsFinished() {
klog.V(4).InfoS("Pod is finished processing, no further updates", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
return
}
// check for a transition to terminating
var becameTerminating bool
if !status.IsTerminationRequested() {
switch {
case isRuntimePod:
klog.V(4).InfoS("Pod is orphaned and must be torn down", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
status.deleted = true
status.terminatingAt = now
becameTerminating = true
case pod.DeletionTimestamp != nil:
klog.V(4).InfoS("Pod is marked for graceful deletion, begin teardown", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
status.deleted = true
status.terminatingAt = now
becameTerminating = true
case pod.Status.Phase == v1.PodFailed, pod.Status.Phase == v1.PodSucceeded:
klog.V(4).InfoS("Pod is in a terminal phase (success/failed), begin teardown", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
status.terminatingAt = now
becameTerminating = true
case options.UpdateType == kubetypes.SyncPodKill:
if options.KillPodOptions != nil && options.KillPodOptions.Evict {
klog.V(4).InfoS("Pod is being evicted by the kubelet, begin teardown", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
status.evicted = true
} else {
klog.V(4).InfoS("Pod is being removed by the kubelet, begin teardown", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
}
status.terminatingAt = now
becameTerminating = true
}
}
// once a pod is terminating, all updates are kills and the grace period can only decrease
var wasGracePeriodShortened bool
switch {
case status.IsTerminated():
// A terminated pod may still be waiting for cleanup - if we receive a runtime pod kill request
// due to housekeeping seeing an older cached version of the runtime pod simply ignore it until
// after the pod worker completes.
if isRuntimePod {
klog.V(3).InfoS("Pod is waiting for termination, ignoring runtime-only kill until after pod worker is fully terminated", "pod", klog.KRef(ns, name), "podUID", uid, "updateType", options.UpdateType)
return
}
if options.KillPodOptions != nil {
if ch := options.KillPodOptions.CompletedCh; ch != nil {
close(ch)
}
}
options.KillPodOptions = nil
case status.IsTerminationRequested():
if options.KillPodOptions == nil {
options.KillPodOptions = &KillPodOptions{}
}
if ch := options.KillPodOptions.CompletedCh; ch != nil {
status.notifyPostTerminating = append(status.notifyPostTerminating, ch)
}
if fn := options.KillPodOptions.PodStatusFunc; fn != nil {
status.statusPostTerminating = append(status.statusPostTerminating, fn)
}
gracePeriod, gracePeriodShortened := calculateEffectiveGracePeriod(status, pod, options.KillPodOptions)
wasGracePeriodShortened = gracePeriodShortened
status.gracePeriod = gracePeriod
// always set the grace period for syncTerminatingPod so we don't have to recalculate;
// it will never be zero.
options.KillPodOptions.PodTerminationGracePeriodSecondsOverride = &gracePeriod
default:
// KillPodOptions is not valid for sync actions outside of the terminating phase
if options.KillPodOptions != nil {
if ch := options.KillPodOptions.CompletedCh; ch != nil {
close(ch)
}
options.KillPodOptions = nil
}
}
// start the pod worker goroutine if it doesn't exist
podUpdates, exists := p.podUpdates[uid]
if !exists {
// buffer the channel to avoid blocking this method
podUpdates = make(chan struct{}, 1)
p.podUpdates[uid] = podUpdates
// ensure that static pods start in the order they are received by UpdatePod
if kubetypes.IsStaticPod(pod) {
p.waitingToStartStaticPodsByFullname[status.fullname] =
append(p.waitingToStartStaticPodsByFullname[status.fullname], uid)
}
// allow testing of delays in the pod update channel
var outCh <-chan struct{}
if p.workerChannelFn != nil {
outCh = p.workerChannelFn(uid, podUpdates)
} else {
outCh = podUpdates
}
// spawn a pod worker
go func() {
// TODO: this should be a wait.Until with backoff to handle panics, and
// accept a context for shutdown
defer runtime.HandleCrash()
defer klog.V(3).InfoS("Pod worker has stopped", "podUID", uid)
p.podWorkerLoop(uid, outCh)
}()
}
// measure the maximum latency between a call to UpdatePod and when the pod worker reacts to it
// by preserving the oldest StartTime
if status.pendingUpdate != nil && !status.pendingUpdate.StartTime.IsZero() && status.pendingUpdate.StartTime.Before(options.StartTime) {
options.StartTime = status.pendingUpdate.StartTime
}
// notify the pod worker there is a pending update
status.pendingUpdate = &options
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
status.pendingUpdate.Pod, _ = p.allocationManager.UpdatePodFromAllocation(options.Pod)
}
status.working = true
klog.V(4).InfoS("Notifying pod of pending update", "pod", klog.KRef(ns, name), "podUID", uid, "workType", status.WorkType())
select {
case podUpdates <- struct{}{}:
default:
}
if (becameTerminating || wasGracePeriodShortened) && status.cancelFn != nil {
klog.V(3).InfoS("Cancelling current pod sync", "pod", klog.KRef(ns, name), "podUID", uid, "workType", status.WorkType())
status.cancelFn()
return
}
}
// calculateEffectiveGracePeriod sets the initial grace period for a newly terminating pod or allows a
// shorter grace period to be provided, returning the desired value.
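//
// As a worked example (values assumed for illustration): if the status
// currently tracks a 30s grace period and the apiserver then reports a
// DeletionGracePeriodSeconds of 10, the effective grace period becomes 10 and
// the second return value is true (the period was shortened); a later override
// of 30 would be ignored because the grace period can only decrease.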
func calculateEffectiveGracePeriod(status *podSyncStatus, pod *v1.Pod, options *KillPodOptions) (int64, bool) {
// enforce the restriction that a grace period can only decrease and track whatever our value is,
// then ensure a calculated value is passed down to lower levels
gracePeriod := status.gracePeriod
overridden := false
// this value is bedrock truth - the apiserver owns this value, having already calculated it for us
if override := pod.DeletionGracePeriodSeconds; override != nil {
if gracePeriod == 0 || *override < gracePeriod {
gracePeriod = *override
overridden = true
}
}
// we allow other parts of the kubelet (namely eviction) to request this pod be terminated faster
if options != nil {
if override := options.PodTerminationGracePeriodSecondsOverride; override != nil {
if gracePeriod == 0 || *override < gracePeriod {
gracePeriod = *override
overridden = true
}
}
}
// make a best effort to default this value to the pod's desired intent, in the event
// the kubelet provided no requested value (graceful termination?)
if !overridden && gracePeriod == 0 && pod.Spec.TerminationGracePeriodSeconds != nil {
gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
}
// no matter what, we always supply a grace period of at least 1
if gracePeriod < 1 {
gracePeriod = 1
}
return gracePeriod, status.gracePeriod != 0 && status.gracePeriod != gracePeriod
}
// allowPodStart tries to start the pod and returns true if allowed, otherwise
// it requeues the pod and returns false. If the pod will never be able to start
// because data is missing, or the pod was terminated before start, canEverStart
// is false. This method can only be called while holding the pod lock.
func (p *podWorkers) allowPodStart(pod *v1.Pod) (canStart bool, canEverStart bool) {
if !kubetypes.IsStaticPod(pod) {
// TODO: Do we want to allow non-static pods with the same full name?
// Note that it may disable the force deletion of pods.
return true, true
}
status, ok := p.podSyncStatuses[pod.UID]
if !ok {
klog.ErrorS(nil, "Pod sync status does not exist, the worker should not be running", "pod", klog.KObj(pod), "podUID", pod.UID)
return false, false
}
if status.IsTerminationRequested() {
return false, false
}
if !p.allowStaticPodStart(status.fullname, pod.UID) {
p.workQueue.Enqueue(pod.UID, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor))
return false, true
}
return true, true
}
// allowStaticPodStart tries to start the static pod and returns true if either
// 1. this uid is already the started static pod for this fullname, or
// 2. no static pod with this fullname has started yet and the uid matches that
// of the first valid static pod waiting to start
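//
// As a sketch (a hypothetical sequence, not taken from a real trace): suppose
// pods A, B, and C share a fullname and were queued in that order. While A is
// started, allowStaticPodStart(fullname, B) returns false because the started
// UID is A's. Once A is fully terminated and removed from
// startedStaticPodsByFullname, B is the first valid waiting UID and is allowed
// to start; C must in turn wait for B.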
func (p *podWorkers) allowStaticPodStart(fullname string, uid types.UID) bool {
startedUID, started := p.startedStaticPodsByFullname[fullname]
if started {
return startedUID == uid
}
waitingPods := p.waitingToStartStaticPodsByFullname[fullname]
// TODO: This is O(N) with respect to the number of updates to static pods
// with overlapping full names, and ideally would be O(1).
for i, waitingUID := range waitingPods {
// has pod already terminated or been deleted?
status, ok := p.podSyncStatuses[waitingUID]
if !ok || status.IsTerminationRequested() || status.IsTerminated() {
continue
}
// another pod is next in line
if waitingUID != uid {
p.waitingToStartStaticPodsByFullname[fullname] = waitingPods[i:]
return false
}
// we are up next, remove ourselves
waitingPods = waitingPods[i+1:]
break
}
if len(waitingPods) != 0 {
p.waitingToStartStaticPodsByFullname[fullname] = waitingPods
} else {
delete(p.waitingToStartStaticPodsByFullname, fullname)
}
p.startedStaticPodsByFullname[fullname] = uid
return true
}
// cleanupUnstartedPod is invoked when a pod receives a termination signal
// before it has ever been started. This method must be called holding the pod lock.
func (p *podWorkers) cleanupUnstartedPod(pod *v1.Pod, status *podSyncStatus) {
p.cleanupPodUpdates(pod.UID)
if status.terminatingAt.IsZero() {
klog.V(4).InfoS("Pod worker is complete but did not have terminatingAt set, likely programmer error", "pod", klog.KObj(pod), "podUID", pod.UID)
}
if !status.terminatedAt.IsZero() {
klog.V(4).InfoS("Pod worker is complete and had terminatedAt set, likely programmer error", "pod", klog.KObj(pod), "podUID", pod.UID)
}
status.finished = true
status.working = false
status.terminatedAt = p.clock.Now()
if p.startedStaticPodsByFullname[status.fullname] == pod.UID {
delete(p.startedStaticPodsByFullname, status.fullname)
}
}
// startPodSync is invoked by each pod worker goroutine when a message arrives on the pod update channel.
// This method consumes a pending update, initializes a context, decides whether the pod is already started
// or can be started, and updates the cached pod state so that downstream components can observe what the
// pod worker goroutine is currently attempting to do. If ok is false, there is no available event. If any
// of the boolean values is false, ensure the appropriate cleanup happens before returning.
//
// This method should ensure that either status.pendingUpdate is cleared and merged into status.activeUpdate,
// or, when a pod cannot be started, status.pendingUpdate remains the same. Pods that have not been started
// should never have an activeUpdate because that is exposed to downstream components on started pods.
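//
// In sketch form, the caller (podWorkerLoop below) treats the return values as:
//
//	ctx, update, canStart, canEverStart, ok := p.startPodSync(podUID)
//	if !ok { continue }         // no pending work to consume
//	if !canEverStart { return } // the worker exits permanently
//	if !canStart { continue }   // requeued; wait for the next signal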
func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update podWork, canStart, canEverStart, ok bool) {
p.podLock.Lock()
defer p.podLock.Unlock()
// verify we are known to the pod worker still
status, ok := p.podSyncStatuses[podUID]
if !ok {
// pod status has disappeared, the worker should exit
klog.V(4).InfoS("Pod worker no longer has status, worker should exit", "podUID", podUID)
return nil, update, false, false, false
}
if !status.working {
// working is used by unit tests to observe whether a worker is currently acting on this pod
klog.V(4).InfoS("Pod should be marked as working by the pod worker, programmer error", "podUID", podUID)
}
if status.pendingUpdate == nil {
// no update available, this means we were queued without work being added or there is a
// race condition, both of which are unexpected
status.working = false
klog.V(4).InfoS("Pod worker received no pending work, programmer error?", "podUID", podUID)
return nil, update, false, false, false
}
// consume the pending update
update.WorkType = status.WorkType()
update.Options = *status.pendingUpdate
status.pendingUpdate = nil
select {
case <-p.podUpdates[podUID]:
// ensure the pod update channel is empty (it is only ever written to under lock)
default:
}
// initialize a context for the worker if one does not exist
if status.ctx == nil || status.ctx.Err() == context.Canceled {
status.ctx, status.cancelFn = context.WithCancel(context.Background())
}
ctx = status.ctx
// if we are already started, make our state visible to downstream components
if status.IsStarted() {
status.mergeLastUpdate(update.Options)
return ctx, update, true, true, true
}
// if we are already terminating and we only have a running pod, allow the worker
// to "start" since we are immediately moving to terminating
if update.Options.RunningPod != nil && update.WorkType == TerminatingPod {
status.mergeLastUpdate(update.Options)
return ctx, update, true, true, true
}
// If we receive an update where Pod is nil (running pod is set) but haven't
// started yet, we can only terminate the pod, not start it. We should not be
// asked to start such a pod, but guard here just in case an accident occurs.
if update.Options.Pod == nil {
status.mergeLastUpdate(update.Options)
klog.V(4).InfoS("Running pod cannot start ever, programmer error", "pod", klog.KObj(update.Options.Pod), "podUID", podUID, "updateType", update.WorkType)
return ctx, update, false, false, true
}
// verify we can start
canStart, canEverStart = p.allowPodStart(update.Options.Pod)
switch {
case !canEverStart:
p.cleanupUnstartedPod(update.Options.Pod, status)
status.working = false
if start := update.Options.StartTime; !start.IsZero() {
metrics.PodWorkerDuration.WithLabelValues("terminated").Observe(metrics.SinceInSeconds(start))
}
klog.V(4).InfoS("Pod cannot start ever", "pod", klog.KObj(update.Options.Pod), "podUID", podUID, "updateType", update.WorkType)
return ctx, update, canStart, canEverStart, true
case !canStart:
// this is the only path we don't start the pod, so we need to put the change back in pendingUpdate
status.pendingUpdate = &update.Options
status.working = false
klog.V(4).InfoS("Pod cannot start yet", "pod", klog.KObj(update.Options.Pod), "podUID", podUID)
return ctx, update, canStart, canEverStart, true
}
// mark the pod as started
status.startedAt = p.clock.Now()
status.mergeLastUpdate(update.Options)
// If we are admitting the pod and it is new, record the count of containers
// TODO: We should probably move this into syncPod and add an execution count
// to the syncPod arguments, and this should be recorded on the first sync.
// Leaving it here complicates a particularly important loop.
metrics.ContainersPerPodCount.Observe(float64(len(update.Options.Pod.Spec.Containers)))
return ctx, update, true, true, true
}
func podUIDAndRefForUpdate(update UpdatePodOptions) (types.UID, klog.ObjectRef) {
if update.RunningPod != nil {
return update.RunningPod.ID, klog.KObj(update.RunningPod.ToAPIPod())
}
return update.Pod.UID, klog.KObj(update.Pod)
}
// podWorkerLoop manages sequential state updates to a pod in a goroutine, exiting once the final
// state is reached. The loop is responsible for driving the pod through four main phases:
//
// 1. Wait to start, guaranteeing no two pods with the same UID or same fullname are running at the same time
// 2. Sync, orchestrating pod setup by reconciling the desired pod spec with the runtime state of the pod
// 3. Terminating, ensuring all running containers in the pod are stopped
// 4. Terminated, cleaning up any resources that must be released before the pod can be deleted
//
// The podWorkerLoop is driven by updates delivered to UpdatePod and by SyncKnownPods. If a particular
// sync method fails, p.workerQueue is updated with backoff but it is the responsibility of the kubelet
// to trigger new UpdatePod calls. SyncKnownPods will only retry pods that are no longer known to the
// caller. When a pod transitions working->terminating or terminating->terminated, the next update is
// queued immediately and no kubelet action is required.
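//
// In sketch form, each iteration of the loop performs:
// startPodSync (consume the pending update) -> SyncPod / SyncTerminatingPod /
// SyncTerminatedPod -> completeSync / completeTerminating / completeTerminated
// -> completeWork (requeue and redeliver any pending update).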
func (p *podWorkers) podWorkerLoop(podUID types.UID, podUpdates <-chan struct{}) {
var lastSyncTime time.Time
for range podUpdates {
ctx, update, canStart, canEverStart, ok := p.startPodSync(podUID)
// If we had no update waiting, it means someone initialized the channel without filling out pendingUpdate.
if !ok {
continue
}
// If the pod was terminated prior to the pod being allowed to start, we exit the loop.
if !canEverStart {
return
}
// If the pod is not yet ready to start, continue and wait for more updates.
if !canStart {
continue
}
podUID, podRef := podUIDAndRefForUpdate(update.Options)
klog.V(4).InfoS("Processing pod event", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
var isTerminal bool
err := func() error {
// The worker is responsible for ensuring the sync method sees the appropriate
// status updates on resyncs (the result of the last sync), transitions to
// terminating (no wait), or on terminated (whatever the most recent state is).
// Only syncing and terminating can generate pod status changes, while terminated
// pods ensure the most recent status makes it to the api server.
var status *kubecontainer.PodStatus
var err error
switch {
case update.Options.RunningPod != nil:
// when we receive a running pod, we don't need status at all because we are
// guaranteed to be terminating and we skip updates to the pod
default:
// wait until we see the next refresh from the PLEG via the cache (max 2s)
// TODO: this adds ~1s of latency on all transitions from sync to terminating
// to terminated, and on all termination retries (including evictions). We should
// improve latency by making the pleg continuous and by allowing pod status
// changes to be refreshed when key events happen (killPod, sync->terminating).
// Improving this latency also reduces the possibility that a terminated
// container's status is garbage collected before we have a chance to update the
// API server (thus losing the exit code).
status, err = p.podCache.GetNewerThan(update.Options.Pod.UID, lastSyncTime)
if err != nil {
// This is the legacy event thrown by the manage pod loop; all other events are now
// dispatched from syncPodFn
p.recorder.Eventf(update.Options.Pod, v1.EventTypeWarning, events.FailedSync, "error determining status: %v", err)
return err
}
}
// Take the appropriate action (illegal phases are prevented by UpdatePod)
switch {
case update.WorkType == TerminatedPod:
err = p.podSyncer.SyncTerminatedPod(ctx, update.Options.Pod, status)
case update.WorkType == TerminatingPod:
var gracePeriod *int64
if opt := update.Options.KillPodOptions; opt != nil {
gracePeriod = opt.PodTerminationGracePeriodSecondsOverride
}
podStatusFn := p.acknowledgeTerminating(podUID)
// if we only have a running pod, terminate it directly
if update.Options.RunningPod != nil {
err = p.podSyncer.SyncTerminatingRuntimePod(ctx, update.Options.RunningPod)
} else {
err = p.podSyncer.SyncTerminatingPod(ctx, update.Options.Pod, status, gracePeriod, podStatusFn)
}
default:
isTerminal, err = p.podSyncer.SyncPod(ctx, update.Options.UpdateType, update.Options.Pod, update.Options.MirrorPod, status)
}
lastSyncTime = p.clock.Now()
return err
}()
var phaseTransition bool
switch {
case err == context.Canceled:
// when the context is cancelled we expect an update to already be queued
klog.V(2).InfoS("Sync exited with context cancellation error", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
case err != nil:
// we will queue a retry
klog.ErrorS(err, "Error syncing pod, skipping", "pod", podRef, "podUID", podUID)
case update.WorkType == TerminatedPod:
// we can shut down the worker
p.completeTerminated(podUID)
if start := update.Options.StartTime; !start.IsZero() {
metrics.PodWorkerDuration.WithLabelValues("terminated").Observe(metrics.SinceInSeconds(start))
}
klog.V(4).InfoS("Processing pod event done", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
return
case update.WorkType == TerminatingPod:
// pods that don't exist in config don't need to be terminated; other loops will clean them up
if update.Options.RunningPod != nil {
p.completeTerminatingRuntimePod(podUID)
if start := update.Options.StartTime; !start.IsZero() {
metrics.PodWorkerDuration.WithLabelValues(update.Options.UpdateType.String()).Observe(metrics.SinceInSeconds(start))
}
klog.V(4).InfoS("Processing pod event done", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
return
}
// otherwise we move to the terminating phase
p.completeTerminating(podUID)
phaseTransition = true
case isTerminal:
// if syncPod indicated we are now terminal, set the appropriate pod status to move to terminating
klog.V(4).InfoS("Pod is terminal", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
p.completeSync(podUID)
phaseTransition = true
}
// queue a retry if necessary, then put the next event in the channel if any
p.completeWork(podUID, phaseTransition, err)
if start := update.Options.StartTime; !start.IsZero() {
metrics.PodWorkerDuration.WithLabelValues(update.Options.UpdateType.String()).Observe(metrics.SinceInSeconds(start))
}
klog.V(4).InfoS("Processing pod event done", "pod", podRef, "podUID", podUID, "updateType", update.WorkType)
}
}
// acknowledgeTerminating sets the terminating flag on the pod status once the pod worker sees
// the termination state so that other components know no new containers will be started in this
// pod. It then returns the status function, if any, that applies to this pod.
func (p *podWorkers) acknowledgeTerminating(podUID types.UID) PodStatusFunc {
p.podLock.Lock()
defer p.podLock.Unlock()
status, ok := p.podSyncStatuses[podUID]
if !ok {
return nil
}
if !status.terminatingAt.IsZero() && !status.startedTerminating {
klog.V(4).InfoS("Pod worker has observed request to terminate", "podUID", podUID)
status.startedTerminating = true
}
if l := len(status.statusPostTerminating); l > 0 {
return status.statusPostTerminating[l-1]
}
return nil
}
// completeSync is invoked when syncPod completes successfully and indicates the pod is now terminal and should
// be terminated. This happens when the natural pod lifecycle completes - any pod which is not RestartAlways
// exits. Unnatural completions, such as evictions, API driven deletion or phase transition, are handled by
// UpdatePod.
func (p *podWorkers) completeSync(podUID types.UID) {
p.podLock.Lock()
defer p.podLock.Unlock()
klog.V(4).InfoS("Pod indicated lifecycle completed naturally and should now terminate", "podUID", podUID)
status, ok := p.podSyncStatuses[podUID]
if !ok {
klog.V(4).InfoS("Pod had no status in completeSync, programmer error?", "podUID", podUID)
return
}
// update the status of the pod
if status.terminatingAt.IsZero() {
status.terminatingAt = p.clock.Now()
} else {
klog.V(4).InfoS("Pod worker attempted to set terminatingAt twice, likely programmer error", "podUID", podUID)
}
status.startedTerminating = true
// the pod has now transitioned to terminating and we want to run syncTerminatingPod
// as soon as possible, so if no update is already waiting, queue a synthetic update
p.requeueLastPodUpdate(podUID, status)
}
// completeTerminating is invoked when syncTerminatingPod completes successfully, which means
// no container is running, no container will be started in the future, and we are ready for
// cleanup. This updates the termination state which prevents future syncs and will ensure
// other kubelet loops know this pod is not running any containers.
func (p *podWorkers) completeTerminating(podUID types.UID) {
p.podLock.Lock()
defer p.podLock.Unlock()
klog.V(4).InfoS("Pod terminated all containers successfully", "podUID", podUID)
status, ok := p.podSyncStatuses[podUID]
if !ok {
return
}
// update the status of the pod
if status.terminatingAt.IsZero() {
klog.V(4).InfoS("Pod worker was terminated but did not have terminatingAt set, likely programmer error", "podUID", podUID)
}
status.terminatedAt = p.clock.Now()
for _, ch := range status.notifyPostTerminating {
close(ch)
}
status.notifyPostTerminating = nil
status.statusPostTerminating = nil
// the pod has now transitioned to terminated and we want to run syncTerminatedPod
// as soon as possible, so if no update is already waiting, queue a synthetic update
p.requeueLastPodUpdate(podUID, status)
}
// completeTerminatingRuntimePod is invoked when syncTerminatingPod completes successfully,
// which means an orphaned pod (no config) is terminated and we can exit. Since orphaned
// pods have no API representation, we want to exit the loop at this point and ensure no
// status is present afterwards - the running pod is truly terminated when this is invoked.
func (p *podWorkers) completeTerminatingRuntimePod(podUID types.UID) {
p.podLock.Lock()
defer p.podLock.Unlock()
klog.V(4).InfoS("Pod terminated all orphaned containers successfully and worker can now stop", "podUID", podUID)
p.cleanupPodUpdates(podUID)
status, ok := p.podSyncStatuses[podUID]
if !ok {
return
}
if status.terminatingAt.IsZero() {
klog.V(4).InfoS("Pod worker was terminated but did not have terminatingAt set, likely programmer error", "podUID", podUID)
}
status.terminatedAt = p.clock.Now()
status.finished = true
status.working = false
if p.startedStaticPodsByFullname[status.fullname] == podUID {
delete(p.startedStaticPodsByFullname, status.fullname)
}
// A runtime pod is transient and not part of the desired state - once it has reached
// terminated we can abandon tracking it.
delete(p.podSyncStatuses, podUID)
}
// completeTerminated is invoked after syncTerminatedPod completes successfully and means we
// can stop the pod worker. The pod is finalized at this point.
func (p *podWorkers) completeTerminated(podUID types.UID) {
p.podLock.Lock()
defer p.podLock.Unlock()
klog.V(4).InfoS("Pod is complete and the worker can now stop", "podUID", podUID)
p.cleanupPodUpdates(podUID)
status, ok := p.podSyncStatuses[podUID]
if !ok {
return
}
if status.terminatingAt.IsZero() {
klog.V(4).InfoS("Pod worker is complete but did not have terminatingAt set, likely programmer error", "podUID", podUID)
}
if status.terminatedAt.IsZero() {
klog.V(4).InfoS("Pod worker is complete but did not have terminatedAt set, likely programmer error", "podUID", podUID)
}
status.finished = true
status.working = false
if p.startedStaticPodsByFullname[status.fullname] == podUID {
delete(p.startedStaticPodsByFullname, status.fullname)
}
}
// completeWork requeues on error or the next sync interval and then immediately executes any pending
// work.
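// In sketch form, the requeue delay chosen below is: zero on a phase
// transition, resyncInterval (jittered) on success, a short transient backoff
// when the network is not ready, and backOffPeriod (jittered) on any other
// sync error.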
func (p *podWorkers) completeWork(podUID types.UID, phaseTransition bool, syncErr error) {
// Requeue the last update if the last sync returned error.
switch {
case phaseTransition:
p.workQueue.Enqueue(podUID, 0)
case syncErr == nil:
// No error; requeue at the regular resync interval.
p.workQueue.Enqueue(podUID, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor))
case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg):
// Network is not ready; back off for short period of time and retry as network might be ready soon.
p.workQueue.Enqueue(podUID, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor))
default:
// Error occurred during the sync; back off and then retry.
p.workQueue.Enqueue(podUID, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor))
}
// if there is a pending update for this worker, requeue immediately, otherwise
// clear working status
p.podLock.Lock()
defer p.podLock.Unlock()
if status, ok := p.podSyncStatuses[podUID]; ok {
if status.pendingUpdate != nil {
select {
case p.podUpdates[podUID] <- struct{}{}:
klog.V(4).InfoS("Requeuing pod due to pending update", "podUID", podUID)
default:
klog.V(4).InfoS("Pending update already queued", "podUID", podUID)
}
} else {
status.working = false
}
}
}
// SyncKnownPods will purge any fully terminated pods that are not in the desiredPods
// list, which means SyncKnownPods must be called in a threadsafe manner from calls
// to UpdatePod for new pods. Because the pod worker is dependent on UpdatePod being
// invoked to drive a pod's state machine, if a pod is missing in the desired list the
// pod worker must be responsible for delivering that update. The method returns a map
// of known workers that are not finished with a value of SyncPodTerminated,
// SyncPodKill, or SyncPodSync depending on whether the pod is terminated, terminating,
// or syncing.
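//
// A minimal caller sketch (receiver names assumed for illustration):
//
//	known := kl.podWorkers.SyncKnownPods(desiredPods)
//	for uid, sync := range known {
//		if sync.Orphan {
//			klog.V(4).InfoS("Worker is terminating an orphaned pod", "podUID", uid)
//		}
//	}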
func (p *podWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorkerSync {
workers := make(map[types.UID]PodWorkerSync)
known := make(map[types.UID]struct{})
for _, pod := range desiredPods {
known[pod.UID] = struct{}{}
}
p.podLock.Lock()
defer p.podLock.Unlock()
p.podsSynced = true
for uid, status := range p.podSyncStatuses {
// We retain the worker history of any pod that is still desired according to
// its UID. However, there are two scenarios during a sync that result in us
// needing to purge the history:
//
// 1. The pod is no longer desired (the local version is orphaned)
// 2. The pod received a kill update and then a subsequent create, which means
// the UID was reused in the source config (vanishingly rare for API servers,
// common for static pods that have specified a fixed UID)
//
// In the former case we wish to bound the amount of information we store for
// deleted pods. In the latter case we wish to minimize the amount of time before
// we restart the static pod. If we succeed at removing the worker, then we
// omit it from the returned map of known workers, and the caller of SyncKnownPods
// is expected to send a new UpdatePod({UpdateType: Create}).
_, knownPod := known[uid]
orphan := !knownPod
if status.restartRequested || orphan {
if p.removeTerminatedWorker(uid, status, orphan) {
// no worker running, we won't return it
continue
}
}
sync := PodWorkerSync{
State: status.WorkType(),
Orphan: orphan,
}
switch {
case status.activeUpdate != nil:
if status.activeUpdate.Pod != nil {
sync.HasConfig = true
sync.Static = kubetypes.IsStaticPod(status.activeUpdate.Pod)
}
case status.pendingUpdate != nil:
if status.pendingUpdate.Pod != nil {
sync.HasConfig = true
sync.Static = kubetypes.IsStaticPod(status.pendingUpdate.Pod)
}
}
workers[uid] = sync
}
return workers
}
// removeTerminatedWorker cleans up and removes the worker status for a worker
// that has reached a terminal state of "finished" - has successfully exited
// syncTerminatedPod. This "forgets" a pod by UID and allows another pod to be
// recreated with the same UID. The kubelet preserves state about recently
// terminated pods to prevent accidentally restarting a terminal pod; the amount
// of state retained is proportional to the number of pods described in the pod
// config. The method returns true if the worker was completely removed.
func (p *podWorkers) removeTerminatedWorker(uid types.UID, status *podSyncStatus, orphaned bool) bool {
if !status.finished {
// If the pod worker has not reached terminal state and the pod is still known, we wait.
if !orphaned {
klog.V(4).InfoS("Pod worker has been requested for removal but is still not fully terminated", "podUID", uid)
return false
}
// all orphaned pods are considered deleted
status.deleted = true
// When a pod is no longer in the desired set, the pod is considered orphaned and the
// pod worker becomes responsible for driving the pod to completion (there is no
// guarantee another component will notify us of updates).
switch {
case !status.IsStarted() && !status.observedRuntime:
// The pod has not been started, which means we can safely clean up the pod - the
// pod worker will shutdown as a result of this change without executing a sync.
klog.V(4).InfoS("Pod is orphaned and has not been started", "podUID", uid)
case !status.IsTerminationRequested():
// The pod has been started but termination has not been requested - set the appropriate
// timestamp and notify the pod worker. Because the pod has been synced at least once,
// the value of status.activeUpdate will be the fallback for the next sync.
status.terminatingAt = p.clock.Now()
if status.activeUpdate != nil && status.activeUpdate.Pod != nil {
status.gracePeriod, _ = calculateEffectiveGracePeriod(status, status.activeUpdate.Pod, nil)
} else {
status.gracePeriod = 1
}
p.requeueLastPodUpdate(uid, status)
klog.V(4).InfoS("Pod is orphaned and still running, began terminating", "podUID", uid)
return false
default:
// The pod is already moving towards termination, notify the pod worker. Because the pod
// has been synced at least once, the value of status.activeUpdate will be the fallback for
// the next sync.
p.requeueLastPodUpdate(uid, status)
klog.V(4).InfoS("Pod is orphaned and still terminating, notified the pod worker", "podUID", uid)
return false
}
}
if status.restartRequested {
klog.V(4).InfoS("Pod has been terminated but another pod with the same UID was created, remove history to allow restart", "podUID", uid)
} else {
klog.V(4).InfoS("Pod has been terminated and is no longer known to the kubelet, remove all history", "podUID", uid)
}
delete(p.podSyncStatuses, uid)
p.cleanupPodUpdates(uid)
if p.startedStaticPodsByFullname[status.fullname] == uid {
delete(p.startedStaticPodsByFullname, status.fullname)
}
return true
}
// killPodNow returns a KillPodFunc that can be used to kill a pod.
// It is intended to be injected into other modules that need to kill a pod.
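//
// A minimal usage sketch (receiver names assumed for illustration):
//
//	killFn := killPodNow(kl.podWorkers, kl.recorder)
//	if err := killFn(pod, true /* evicted */, nil, func(status *v1.PodStatus) {
//		status.Phase = v1.PodFailed
//	}); err != nil {
//		klog.ErrorS(err, "Failed to kill pod", "pod", klog.KObj(pod))
//	}
//
// Note the timeout math below: the wait is gracePeriod + gracePeriod/2 with a
// 10s floor, so a 30s grace period waits up to 45s while a 2s grace period
// waits the minimum 10s.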
func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.KillPodFunc {
return func(pod *v1.Pod, isEvicted bool, gracePeriodOverride *int64, statusFn func(*v1.PodStatus)) error {
// determine the grace period to use when killing the pod
gracePeriod := int64(0)
if gracePeriodOverride != nil {
gracePeriod = *gracePeriodOverride
} else if pod.Spec.TerminationGracePeriodSeconds != nil {
gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
}
// we time out and return an error if we don't get a callback within a reasonable time.
// The default timeout is relative to the grace period (we settle on 10s to wait for kubelet->runtime traffic to complete in sigkill).
timeout := gracePeriod + (gracePeriod / 2)
minTimeout := int64(10)
if timeout < minTimeout {
timeout = minTimeout
}
timeoutDuration := time.Duration(timeout) * time.Second
// open a channel we block against until we get a result
ch := make(chan struct{}, 1)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodKill,
KillPodOptions: &KillPodOptions{
CompletedCh: ch,
Evict: isEvicted,
PodStatusFunc: statusFn,
PodTerminationGracePeriodSecondsOverride: gracePeriodOverride,
},
})
// wait for either a response, or a timeout
select {
case <-ch:
return nil
case <-time.After(timeoutDuration):
recorder.Eventf(pod, v1.EventTypeWarning, events.ExceededGracePeriod, "Container runtime did not kill the pod within specified grace period.")
return fmt.Errorf("timeout waiting to kill pod")
}
}
}
// cleanupPodUpdates closes the podUpdates channel and removes it from
// podUpdates map so that the corresponding pod worker can stop. It also
// removes any undelivered work. This method must be called holding the
// pod lock.
func (p *podWorkers) cleanupPodUpdates(uid types.UID) {
if ch, ok := p.podUpdates[uid]; ok {
close(ch)
}
delete(p.podUpdates, uid)
}
// requeueLastPodUpdate creates a new pending pod update from the most recently
// executed update if no update is already queued, and then notifies the pod
// worker goroutine of the update. This method must be called while holding
// the pod lock.
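// For example, after completeSync marks a pod as terminating, this replays the
// last activeUpdate so the worker immediately runs syncTerminatingPod instead
// of waiting for the kubelet to call UpdatePod again.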
func (p *podWorkers) requeueLastPodUpdate(podUID types.UID, status *podSyncStatus) {
// if an update is already queued we can use that instead; if we have no
// previously executed update, there is nothing to replay.
if status.pendingUpdate != nil || status.activeUpdate == nil {
return
}
copied := *status.activeUpdate
status.pendingUpdate = &copied
// notify the pod worker
status.working = true
select {
case p.podUpdates[podUID] <- struct{}{}:
default:
}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"reflect"
"strconv"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/allocation"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/utils/clock"
clocktesting "k8s.io/utils/clock/testing"
)
// fakePodWorkers runs the sync pod function serially, so we can have
// deterministic behavior in testing.
type fakePodWorkers struct {
lock sync.Mutex
syncPodFn syncPodFnType
cache kubecontainer.Cache
t TestingInterface
triggeredDeletion []types.UID
triggeredTerminal []types.UID
statusLock sync.Mutex
running map[types.UID]bool
terminating map[types.UID]bool
terminated map[types.UID]bool
terminationRequested map[types.UID]bool
finished map[types.UID]bool
removeRuntime map[types.UID]bool
removeContent map[types.UID]bool
terminatingStaticPods map[string]bool
}
func (f *fakePodWorkers) UpdatePod(options UpdatePodOptions) {
f.lock.Lock()
defer f.lock.Unlock()
var uid types.UID
switch {
case options.Pod != nil:
uid = options.Pod.UID
case options.RunningPod != nil:
uid = options.RunningPod.ID
default:
return
}
status, err := f.cache.Get(uid)
if err != nil {
f.t.Errorf("Unexpected error: %v", err)
}
switch options.UpdateType {
case kubetypes.SyncPodKill:
f.triggeredDeletion = append(f.triggeredDeletion, uid)
default:
isTerminal, err := f.syncPodFn(context.Background(), options.UpdateType, options.Pod, options.MirrorPod, status)
if err != nil {
f.t.Errorf("Unexpected error: %v", err)
}
if isTerminal {
f.triggeredTerminal = append(f.triggeredTerminal, uid)
}
}
}
func (f *fakePodWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorkerSync {
return map[types.UID]PodWorkerSync{}
}
func (f *fakePodWorkers) IsPodKnownTerminated(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.terminated[uid]
}
func (f *fakePodWorkers) CouldHaveRunningContainers(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.running[uid]
}
func (f *fakePodWorkers) ShouldPodBeFinished(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.finished[uid]
}
func (f *fakePodWorkers) IsPodTerminationRequested(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.terminationRequested[uid]
}
func (f *fakePodWorkers) ShouldPodContainersBeTerminating(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.terminating[uid]
}
func (f *fakePodWorkers) ShouldPodRuntimeBeRemoved(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.removeRuntime[uid]
}
func (f *fakePodWorkers) setPodRuntimeBeRemoved(uid types.UID) {
f.statusLock.Lock()
defer f.statusLock.Unlock()
f.removeRuntime = map[types.UID]bool{uid: true}
}
func (f *fakePodWorkers) ShouldPodContentBeRemoved(uid types.UID) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.removeContent[uid]
}
func (f *fakePodWorkers) IsPodForMirrorPodTerminatingByFullName(podFullname string) bool {
f.statusLock.Lock()
defer f.statusLock.Unlock()
return f.terminatingStaticPods[podFullname]
}
type TestingInterface interface {
Errorf(format string, args ...interface{})
}
func newPodWithPhase(uid, name string, phase v1.PodPhase) *v1.Pod {
pod := newNamedPod(uid, "ns", name, false)
pod.Status.Phase = phase
return pod
}
func newStaticPod(uid, name string) *v1.Pod {
thirty := int64(30)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(uid),
Name: name,
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &thirty,
},
}
}
func newNamedPod(uid, namespace, name string, isStatic bool) *v1.Pod {
thirty := int64(30)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(uid),
Namespace: namespace,
Name: name,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &thirty,
},
}
if isStatic {
pod.Annotations = map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
}
}
return pod
}
// syncPodRecord is a record of a sync pod call
type syncPodRecord struct {
name string
updateType kubetypes.SyncPodType
runningPod *kubecontainer.Pod
terminated bool
gracePeriod *int64
}
type FakeQueueItem struct {
UID types.UID
Delay time.Duration
}
type fakeQueue struct {
lock sync.Mutex
queue []FakeQueueItem
currentStart int
}
func (q *fakeQueue) Empty() bool {
q.lock.Lock()
defer q.lock.Unlock()
return (len(q.queue) - q.currentStart) == 0
}
func (q *fakeQueue) Items() []FakeQueueItem {
q.lock.Lock()
defer q.lock.Unlock()
return append(make([]FakeQueueItem, 0, len(q.queue)), q.queue...)
}
func (q *fakeQueue) Set() sets.Set[string] {
q.lock.Lock()
defer q.lock.Unlock()
work := sets.New[string]()
for _, item := range q.queue[q.currentStart:] {
work.Insert(string(item.UID))
}
return work
}
func (q *fakeQueue) Enqueue(uid types.UID, delay time.Duration) {
q.lock.Lock()
defer q.lock.Unlock()
q.queue = append(q.queue, FakeQueueItem{UID: uid, Delay: delay})
}
func (q *fakeQueue) GetWork() []types.UID {
q.lock.Lock()
defer q.lock.Unlock()
work := make([]types.UID, 0, len(q.queue)-q.currentStart)
for _, item := range q.queue[q.currentStart:] {
work = append(work, item.UID)
}
q.currentStart = len(q.queue)
return work
}
type timeIncrementingWorkers struct {
lock sync.Mutex
w *podWorkers
runtime *containertest.FakeRuntime
holds map[types.UID]chan struct{}
}
// UpdatePod increments the clock after UpdatePod is called, but before the workers
// are invoked, and then drains all workers before returning. The provided functions
// are invoked while holding the lock to prevent workers from receiving updates.
func (w *timeIncrementingWorkers) UpdatePod(options UpdatePodOptions, afterFns ...func()) {
func() {
w.lock.Lock()
defer w.lock.Unlock()
w.w.UpdatePod(options)
w.w.clock.(*clocktesting.FakePassiveClock).SetTime(w.w.clock.Now().Add(time.Second))
for _, fn := range afterFns {
fn()
}
}()
w.drainUnpausedWorkers()
}
// SyncKnownPods increments the clock after SyncKnownPods is called, but before the workers
// are invoked, and then drains all workers before returning.
func (w *timeIncrementingWorkers) SyncKnownPods(desiredPods []*v1.Pod) (knownPods map[types.UID]PodWorkerSync) {
func() {
w.lock.Lock()
defer w.lock.Unlock()
knownPods = w.w.SyncKnownPods(desiredPods)
w.w.clock.(*clocktesting.FakePassiveClock).SetTime(w.w.clock.Now().Add(time.Second))
}()
w.drainUnpausedWorkers()
return
}
func (w *timeIncrementingWorkers) PauseWorkers(uids ...types.UID) {
w.lock.Lock()
defer w.lock.Unlock()
if w.holds == nil {
w.holds = make(map[types.UID]chan struct{})
}
for _, uid := range uids {
if _, ok := w.holds[uid]; !ok {
w.holds[uid] = make(chan struct{})
}
}
}
func (w *timeIncrementingWorkers) ReleaseWorkers(uids ...types.UID) {
w.lock.Lock()
defer w.lock.Unlock()
w.ReleaseWorkersUnderLock(uids...)
}
func (w *timeIncrementingWorkers) ReleaseWorkersUnderLock(uids ...types.UID) {
for _, uid := range uids {
if ch, ok := w.holds[uid]; ok {
close(ch)
delete(w.holds, uid)
}
}
}
func (w *timeIncrementingWorkers) waitForPod(uid types.UID) {
w.lock.Lock()
ch, ok := w.holds[uid]
w.lock.Unlock()
if !ok {
return
}
<-ch
}
func (w *timeIncrementingWorkers) drainUnpausedWorkers() {
pausedWorkers := make(map[types.UID]struct{})
for {
for uid := range pausedWorkers {
delete(pausedWorkers, uid)
}
stillWorking := false
// ignore held workers
w.lock.Lock()
for uid := range w.holds {
pausedWorkers[uid] = struct{}{}
}
w.lock.Unlock()
// check for at least one still working non-paused worker
w.w.podLock.Lock()
for uid, worker := range w.w.podSyncStatuses {
if _, ok := pausedWorkers[uid]; ok {
continue
}
if worker.working {
stillWorking = true
break
}
}
w.w.podLock.Unlock()
if !stillWorking {
break
}
time.Sleep(time.Millisecond)
}
}
func (w *timeIncrementingWorkers) tick() {
w.lock.Lock()
defer w.lock.Unlock()
w.w.clock.(*clocktesting.FakePassiveClock).SetTime(w.w.clock.Now().Add(time.Second))
}
// createTimeIncrementingPodWorkers guarantees that each call to UpdatePod and each worker goroutine invocation advances the clock by one second,
// although multiple workers will advance the clock in an unpredictable order. Use it to observe
// the successive internal updates to a pod's state when only a single pod is being updated.
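//
// A typical test sketch:
//
//	workers, processed := createTimeIncrementingPodWorkers()
//	workers.UpdatePod(UpdatePodOptions{
//		UpdateType: kubetypes.SyncPodCreate,
//		Pod:        pod,
//	})
//	records := processed[pod.UID] // one syncPodRecord per sync call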
func createTimeIncrementingPodWorkers() (*timeIncrementingWorkers, map[types.UID][]syncPodRecord) {
nested, runtime, processed := createPodWorkers()
w := &timeIncrementingWorkers{
w: nested,
runtime: runtime,
}
nested.workerChannelFn = func(uid types.UID, in chan struct{}) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
// TODO: this is an eager loop; we might want to read lazily from in only once
// ch is empty
for range in {
w.waitForPod(uid)
w.tick()
ch <- struct{}{}
}
}()
return ch
}
return w, processed
}
func createPodWorkers() (*podWorkers, *containertest.FakeRuntime, map[types.UID][]syncPodRecord) {
lock := sync.Mutex{}
processed := make(map[types.UID][]syncPodRecord)
fakeRecorder := &record.FakeRecorder{}
fakeRuntime := &containertest.FakeRuntime{}
fakeCache := containertest.NewFakeCache(fakeRuntime)
fakeQueue := &fakeQueue{}
clock := clocktesting.NewFakePassiveClock(time.Unix(1, 0))
w := newPodWorkers(
&podSyncerFuncs{
syncPod: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
func() {
lock.Lock()
defer lock.Unlock()
processed[pod.UID] = append(processed[pod.UID], syncPodRecord{
name: pod.Name,
updateType: updateType,
})
}()
return false, nil
},
syncTerminatingPod: func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
func() {
lock.Lock()
defer lock.Unlock()
processed[pod.UID] = append(processed[pod.UID], syncPodRecord{
name: pod.Name,
updateType: kubetypes.SyncPodKill,
gracePeriod: gracePeriod,
})
}()
return nil
},
syncTerminatingRuntimePod: func(ctx context.Context, runningPod *kubecontainer.Pod) error {
func() {
lock.Lock()
defer lock.Unlock()
processed[runningPod.ID] = append(processed[runningPod.ID], syncPodRecord{
name: runningPod.Name,
updateType: kubetypes.SyncPodKill,
runningPod: runningPod,
})
}()
return nil
},
syncTerminatedPod: func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
func() {
lock.Lock()
defer lock.Unlock()
processed[pod.UID] = append(processed[pod.UID], syncPodRecord{
name: pod.Name,
terminated: true,
})
}()
return nil
},
},
fakeRecorder,
fakeQueue,
time.Second,
time.Millisecond,
fakeCache,
allocation.NewInMemoryManager(cm.NodeConfig{}, nil, nil, nil, nil, nil, nil),
)
workers := w.(*podWorkers)
workers.clock = clock
return workers, fakeRuntime, processed
}
func drainWorkers(podWorkers *podWorkers, numPods int) {
for {
stillWorking := false
podWorkers.podLock.Lock()
for i := 0; i < numPods; i++ {
if s, ok := podWorkers.podSyncStatuses[types.UID(strconv.Itoa(i))]; ok && s.working {
stillWorking = true
break
}
}
podWorkers.podLock.Unlock()
if !stillWorking {
break
}
time.Sleep(50 * time.Millisecond)
}
}
func drainWorkersExcept(podWorkers *podWorkers, uids ...types.UID) {
set := sets.New[string]()
for _, uid := range uids {
set.Insert(string(uid))
}
for {
stillWorking := false
podWorkers.podLock.Lock()
for k, v := range podWorkers.podSyncStatuses {
if set.Has(string(k)) {
continue
}
if v.working {
stillWorking = true
break
}
}
podWorkers.podLock.Unlock()
if !stillWorking {
break
}
time.Sleep(50 * time.Millisecond)
}
}
func drainAllWorkers(podWorkers *podWorkers) {
for {
stillWorking := false
podWorkers.podLock.Lock()
for _, worker := range podWorkers.podSyncStatuses {
if worker.working {
stillWorking = true
break
}
}
podWorkers.podLock.Unlock()
if !stillWorking {
break
}
time.Sleep(50 * time.Millisecond)
}
}
func TestUpdatePodParallel(t *testing.T) {
podWorkers, _, processed := createPodWorkers()
numPods := 20
for i := 0; i < numPods; i++ {
for j := i; j < numPods; j++ {
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod(strconv.Itoa(j), "ns", strconv.Itoa(i), false),
UpdateType: kubetypes.SyncPodCreate,
})
}
}
drainWorkers(podWorkers, numPods)
if len(processed) != numPods {
t.Fatalf("Not all pods processed: %v", len(processed))
}
for i := 0; i < numPods; i++ {
uid := types.UID(strconv.Itoa(i))
events := processed[uid]
if len(events) < 1 || len(events) > i+1 {
t.Errorf("Pod %v processed %v times", i, len(events))
continue
}
// PodWorker guarantees the last event will be processed
last := len(events) - 1
if events[last].name != strconv.Itoa(i) {
t.Errorf("Pod %v: incorrect order %v, %#v", i, last, events)
}
}
}
func TestUpdatePod(t *testing.T) {
one := int64(1)
hasContext := func(status *podSyncStatus) *podSyncStatus {
status.ctx, status.cancelFn = context.Background(), func() {}
return status
}
withLabel := func(pod *v1.Pod, label, value string) *v1.Pod {
if pod.Labels == nil {
pod.Labels = make(map[string]string)
}
pod.Labels[label] = value
return pod
}
withDeletionTimestamp := func(pod *v1.Pod, ts time.Time, gracePeriod *int64) *v1.Pod {
pod.DeletionTimestamp = &metav1.Time{Time: ts}
pod.DeletionGracePeriodSeconds = gracePeriod
return pod
}
intp := func(i int64) *int64 {
return &i
}
expectPodSyncStatus := func(t *testing.T, expected, status *podSyncStatus) {
t.Helper()
// handle special non-comparable fields
if status != nil {
if e, a := expected.ctx != nil, status.ctx != nil; e != a {
t.Errorf("expected context %t, has context %t", e, a)
} else {
expected.ctx, status.ctx = nil, nil
}
if e, a := expected.cancelFn != nil, status.cancelFn != nil; e != a {
t.Errorf("expected cancelFn %t, has cancelFn %t", e, a)
} else {
expected.cancelFn, status.cancelFn = nil, nil
}
}
if e, a := expected, status; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected status: %s", cmp.Diff(e, a, cmp.AllowUnexported(podSyncStatus{})))
}
}
for _, tc := range []struct {
name string
update UpdatePodOptions
runtimeStatus *kubecontainer.PodStatus
prepare func(t *testing.T, w *timeIncrementingWorkers) (afterUpdateFn func())
expect *podSyncStatus
expectBeforeWorker *podSyncStatus
expectKnownTerminated bool
}{
{
name: "a new pod is recorded and started",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newNamedPod("1", "ns", "running-pod", false),
},
expect: hasContext(&podSyncStatus{
fullname: "running-pod_ns",
syncedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
activeUpdate: &UpdatePodOptions{
Pod: newNamedPod("1", "ns", "running-pod", false),
},
}),
},
{
name: "a new pod is recorded and started unless it is a duplicate of an existing terminating pod UID",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: withLabel(newNamedPod("1", "ns", "running-pod", false), "updated", "value"),
},
prepare: func(t *testing.T, w *timeIncrementingWorkers) func() {
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newNamedPod("1", "ns", "running-pod", false),
})
w.PauseWorkers("1")
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
Pod: newNamedPod("1", "ns", "running-pod", false),
})
return func() { w.ReleaseWorkersUnderLock("1") }
},
expect: hasContext(&podSyncStatus{
fullname: "running-pod_ns",
syncedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
terminatingAt: time.Unix(3, 0),
terminatedAt: time.Unix(6, 0),
gracePeriod: 30,
startedTerminating: true,
restartRequested: true, // because we received a create during termination
finished: true,
activeUpdate: &UpdatePodOptions{
Pod: newNamedPod("1", "ns", "running-pod", false),
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: intp(30)},
},
}),
expectKnownTerminated: true,
},
{
name: "a new pod is recorded and started and running pod is ignored",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newNamedPod("1", "ns", "running-pod", false),
RunningPod: &kubecontainer.Pod{ID: "1", Name: "orphaned-pod", Namespace: "ns"},
},
expect: hasContext(&podSyncStatus{
fullname: "running-pod_ns",
syncedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
activeUpdate: &UpdatePodOptions{
Pod: newNamedPod("1", "ns", "running-pod", false),
},
}),
},
{
name: "a running pod is terminated when an update contains a deletionTimestamp",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodUpdate,
Pod: withDeletionTimestamp(newNamedPod("1", "ns", "running-pod", false), time.Unix(1, 0), intp(15)),
},
prepare: func(t *testing.T, w *timeIncrementingWorkers) func() {
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newNamedPod("1", "ns", "running-pod", false),
})
return nil
},
expect: hasContext(&podSyncStatus{
fullname: "running-pod_ns",
syncedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
terminatingAt: time.Unix(3, 0),
terminatedAt: time.Unix(5, 0),
gracePeriod: 15,
startedTerminating: true,
finished: true,
deleted: true,
activeUpdate: &UpdatePodOptions{
Pod: withDeletionTimestamp(newNamedPod("1", "ns", "running-pod", false), time.Unix(1, 0), intp(15)),
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: intp(15)},
},
}),
expectKnownTerminated: true,
},
{
name: "a running pod is terminated when an eviction is requested",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
Pod: newNamedPod("1", "ns", "running-pod", false),
KillPodOptions: &KillPodOptions{Evict: true},
},
prepare: func(t *testing.T, w *timeIncrementingWorkers) func() {
w.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newNamedPod("1", "ns", "running-pod", false),
})
return nil
},
expect: hasContext(&podSyncStatus{
fullname: "running-pod_ns",
syncedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
terminatingAt: time.Unix(3, 0),
terminatedAt: time.Unix(5, 0),
gracePeriod: 30,
startedTerminating: true,
finished: true,
evicted: true,
activeUpdate: &UpdatePodOptions{
Pod: newNamedPod("1", "ns", "running-pod", false),
KillPodOptions: &KillPodOptions{
PodTerminationGracePeriodSecondsOverride: intp(30),
Evict: true,
},
},
}),
expectKnownTerminated: true,
},
{
name: "a pod that is terminal and has never started must be terminated if the runtime does not have a cached terminal state",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newPodWithPhase("1", "done-pod", v1.PodSucceeded),
},
expect: hasContext(&podSyncStatus{
fullname: "done-pod_ns",
syncedAt: time.Unix(1, 0),
terminatingAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
terminatedAt: time.Unix(3, 0),
activeUpdate: &UpdatePodOptions{
Pod: newPodWithPhase("1", "done-pod", v1.PodSucceeded),
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: intp(30)},
},
gracePeriod: 30,
startedTerminating: true,
finished: true,
}),
expectKnownTerminated: true,
},
{
name: "a pod that is terminal and has never started advances to finished if the runtime has a cached terminal state",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newPodWithPhase("1", "done-pod", v1.PodSucceeded),
},
runtimeStatus: &kubecontainer.PodStatus{ /* we know about this pod */ },
expectBeforeWorker: &podSyncStatus{
fullname: "done-pod_ns",
syncedAt: time.Unix(1, 0),
terminatingAt: time.Unix(1, 0),
terminatedAt: time.Unix(1, 0),
pendingUpdate: &UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
Pod: newPodWithPhase("1", "done-pod", v1.PodSucceeded),
},
finished: false, // Should be marked as not finished initially (to ensure `SyncTerminatedPod` will run) and status will progress to terminated.
startedTerminating: true,
working: true,
},
expect: hasContext(&podSyncStatus{
fullname: "done-pod_ns",
syncedAt: time.Unix(1, 0),
terminatingAt: time.Unix(1, 0),
terminatedAt: time.Unix(1, 0),
startedAt: time.Unix(3, 0),
startedTerminating: true,
finished: true,
activeUpdate: &UpdatePodOptions{
UpdateType: kubetypes.SyncPodSync,
Pod: newPodWithPhase("1", "done-pod", v1.PodSucceeded),
},
// if we have never seen the pod before, a restart makes no sense
restartRequested: false,
}),
expectKnownTerminated: true,
},
{
name: "an orphaned running pod we have not seen is marked terminating and advances to finished and then is removed",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "orphaned-pod", Namespace: "ns"},
},
expectBeforeWorker: &podSyncStatus{
fullname: "orphaned-pod_ns",
syncedAt: time.Unix(1, 0),
terminatingAt: time.Unix(1, 0),
pendingUpdate: &UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "orphaned-pod", Namespace: "ns"},
KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
},
gracePeriod: 1,
deleted: true,
observedRuntime: true,
working: true,
},
// Once a running pod is fully terminated, we stop tracking it in history, and so it
// is deliberately expected not to be known outside the pod worker since the source of
// the pod is also not in the desired pod set.
expectKnownTerminated: false,
},
{
name: "an orphaned running pod with a non-kill update type does nothing",
update: UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "orphaned-pod", Namespace: "ns"},
},
expect: nil,
},
} {
t.Run(tc.name, func(t *testing.T) {
var uid types.UID
switch {
case tc.update.Pod != nil:
uid = tc.update.Pod.UID
case tc.update.RunningPod != nil:
uid = tc.update.RunningPod.ID
default:
t.Fatalf("unable to find uid for update")
}
var fns []func()
podWorkers, _ := createTimeIncrementingPodWorkers()
if tc.expectBeforeWorker != nil {
fns = append(fns, func() {
expectPodSyncStatus(t, tc.expectBeforeWorker, podWorkers.w.podSyncStatuses[uid])
})
}
if tc.prepare != nil {
if fn := tc.prepare(t, podWorkers); fn != nil {
fns = append(fns, fn)
}
}
// set up an initial pod status for the UpdatePod invocation which is
// reset before workers call the podCache
if tc.runtimeStatus != nil {
podWorkers.runtime.PodStatus = *tc.runtimeStatus
podWorkers.runtime.Err = nil
} else {
podWorkers.runtime.PodStatus = kubecontainer.PodStatus{}
podWorkers.runtime.Err = status.Error(codes.NotFound, "No such pod")
}
fns = append(fns, func() {
podWorkers.runtime.PodStatus = kubecontainer.PodStatus{}
podWorkers.runtime.Err = nil
})
podWorkers.UpdatePod(tc.update, fns...)
if podWorkers.w.IsPodKnownTerminated(uid) != tc.expectKnownTerminated {
t.Errorf("podWorker.IsPodKnownTerminated expected to be %t", tc.expectKnownTerminated)
}
expectPodSyncStatus(t, tc.expect, podWorkers.w.podSyncStatuses[uid])
// TODO: validate processed records for the pod based on the test case, which reduces
// the amount of testing we need to do in kubelet_pods_test.go
})
}
}
func TestUpdatePodForRuntimePod(t *testing.T) {
podWorkers, _, processed := createPodWorkers()
// ignores running pod of wrong sync type
podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodCreate,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "1", Namespace: "test"},
})
drainAllWorkers(podWorkers)
if len(processed) != 0 {
t.Fatalf("Unexpected pods processed: %v", len(processed))
}
// creates synthetic pod
podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "1", Namespace: "test"},
})
drainAllWorkers(podWorkers)
if len(processed) != 1 {
t.Fatalf("Not all pods processed: %v", processed)
}
updates := processed["1"]
if len(updates) != 1 {
t.Fatalf("unexpected updates: %v", updates)
}
if updates[0].runningPod == nil || updates[0].updateType != kubetypes.SyncPodKill || updates[0].name != "1" {
t.Fatalf("unexpected update: %v", updates)
}
}
func TestUpdatePodForTerminatedRuntimePod(t *testing.T) {
podWorkers, _, processed := createPodWorkers()
now := time.Now()
podWorkers.podSyncStatuses[types.UID("1")] = &podSyncStatus{
startedTerminating: true,
terminatedAt: now.Add(-time.Second),
terminatingAt: now.Add(-2 * time.Second),
gracePeriod: 1,
}
// a synthetic kill for an already-terminated runtime pod should be ignored
podWorkers.UpdatePod(UpdatePodOptions{
UpdateType: kubetypes.SyncPodKill,
RunningPod: &kubecontainer.Pod{ID: "1", Name: "1", Namespace: "test"},
})
drainAllWorkers(podWorkers)
if len(processed) != 0 {
t.Fatalf("Unexpected pods processed: %v", processed)
}
updates := processed["1"]
if len(updates) != 0 {
t.Fatalf("unexpected updates: %v", updates)
}
}
func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) {
podWorkers, _, processed := createPodWorkers()
numPods := 20
for i := 0; i < numPods; i++ {
pod := newNamedPod(strconv.Itoa(i), "ns", strconv.Itoa(i), false)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodCreate,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodKill,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodUpdate,
})
}
drainWorkers(podWorkers, numPods)
if len(processed) != numPods {
t.Errorf("Not all pods processed: %v", len(processed))
return
}
for i := 0; i < numPods; i++ {
uid := types.UID(strconv.Itoa(i))
// each pod should be processed two or three times (kill, terminate) or (create, kill, terminate),
// because we buffer pending updates and the pod worker may compress the create and kill
syncPodRecords := processed[uid]
var match bool
grace := int64(30)
for _, possible := range [][]syncPodRecord{
{{name: string(uid), updateType: kubetypes.SyncPodKill, gracePeriod: &grace}, {name: string(uid), terminated: true}},
{{name: string(uid), updateType: kubetypes.SyncPodCreate}, {name: string(uid), updateType: kubetypes.SyncPodKill, gracePeriod: &grace}, {name: string(uid), terminated: true}},
} {
if reflect.DeepEqual(possible, syncPodRecords) {
match = true
break
}
}
if !match {
t.Fatalf("unexpected history for pod %v: %#v", i, syncPodRecords)
}
}
}
func newUIDSet(uids ...types.UID) sets.Set[string] {
set := sets.New[string]()
for _, uid := range uids {
set.Insert(string(uid))
}
return set
}
type terminalPhaseSync struct {
lock sync.Mutex
fn syncPodFnType
terminal sets.Set[string]
}
func (s *terminalPhaseSync) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
isTerminal, err := s.fn(ctx, updateType, pod, mirrorPod, podStatus)
if err != nil {
return false, err
}
if !isTerminal {
s.lock.Lock()
defer s.lock.Unlock()
isTerminal = s.terminal.Has(string(pod.UID))
}
return isTerminal, nil
}
func (s *terminalPhaseSync) SetTerminal(uid types.UID) {
s.lock.Lock()
defer s.lock.Unlock()
s.terminal.Insert(string(uid))
}
func newTerminalPhaseSync(fn syncPodFnType) *terminalPhaseSync {
return &terminalPhaseSync{
fn: fn,
terminal: sets.New[string](),
}
}
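// terminalPhaseSync is wired in by wrapping the pod workers' existing syncPod
// function; SetTerminal then forces the next sync of a pod to report it as
// terminal (see TestTerminalPhaseTransition below):
//
//	syncer := newTerminalPhaseSync(podWorkers.podSyncer.(*podSyncerFuncs).syncPod)
//	podWorkers.podSyncer.(*podSyncerFuncs).syncPod = syncer.SyncPod
//	syncer.SetTerminal(types.UID("1"))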
func TestTerminalPhaseTransition(t *testing.T) {
podWorkers, _, _ := createPodWorkers()
var channels WorkChannel
podWorkers.workerChannelFn = channels.Intercept
terminalPhaseSyncer := newTerminalPhaseSync(podWorkers.podSyncer.(*podSyncerFuncs).syncPod)
podWorkers.podSyncer.(*podSyncerFuncs).syncPod = terminalPhaseSyncer.SyncPod
// start pod
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("1", "test1", "pod1", false),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// should observe pod running
pod1 := podWorkers.podSyncStatuses[types.UID("1")]
if pod1.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod1)
}
// send another update to the pod
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("1", "test1", "pod1", false),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// should observe pod still running
pod1 = podWorkers.podSyncStatuses[types.UID("1")]
if pod1.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod1)
}
// the next sync should result in a transition to terminal
terminalPhaseSyncer.SetTerminal(types.UID("1"))
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("1", "test1", "pod1", false),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// should observe pod terminating
pod1 = podWorkers.podSyncStatuses[types.UID("1")]
if !pod1.IsTerminationRequested() || !pod1.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod1)
}
}
func TestStaticPodExclusion(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
podWorkers, _, processed := createPodWorkers()
var channels WorkChannel
podWorkers.workerChannelFn = channels.Intercept
testPod := newNamedPod("2-static", "test1", "pod1", true)
if !kubetypes.IsStaticPod(testPod) {
t.Fatalf("unable to test static pod")
}
// start two pods with the same name, one static, one apiserver
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("1-normal", "test1", "pod1", false),
UpdateType: kubetypes.SyncPodUpdate,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("2-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// should observe both pods running
pod1 := podWorkers.podSyncStatuses[types.UID("1-normal")]
if pod1.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod1)
}
pod2 := podWorkers.podSyncStatuses[types.UID("2-static")]
if pod2.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod2)
}
if len(processed) != 2 {
t.Fatalf("unexpected synced pods: %#v", processed)
}
if e, a :=
[]syncPodRecord{{name: "pod1", updateType: kubetypes.SyncPodUpdate}},
processed[types.UID("2-static")]; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(e, a))
}
if e, a := map[string]types.UID{"pod1_test1": "2-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
// attempt to start a second and third static pod, which should not start
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("3-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("4-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// should observe both pods running but last pod shouldn't have synced
pod1 = podWorkers.podSyncStatuses[types.UID("1-normal")]
if pod1.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod1)
}
pod2 = podWorkers.podSyncStatuses[types.UID("2-static")]
if pod2.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod2)
}
pod3 := podWorkers.podSyncStatuses[types.UID("3-static")]
if pod3.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod3)
}
pod4 := podWorkers.podSyncStatuses[types.UID("4-static")]
if pod4.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod4)
}
if len(processed) != 2 {
t.Fatalf("unexpected synced pods: %#v", processed)
}
if expected, actual :=
[]syncPodRecord{{name: "pod1", updateType: kubetypes.SyncPodUpdate}},
processed[types.UID("2-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
if expected, actual :=
[]syncPodRecord(nil),
processed[types.UID("3-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
if expected, actual :=
[]syncPodRecord(nil),
processed[types.UID("4-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
if e, a := map[string]types.UID{"pod1_test1": "2-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
if e, a := map[string][]types.UID{"pod1_test1": {"3-static", "4-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// verify all are enqueued
if e, a := sets.New[string]("1-normal", "2-static", "4-static", "3-static"), podWorkers.workQueue.(*fakeQueue).Set(); !e.Equal(a) {
t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
}
// send a basic update for 3-static
podWorkers.workQueue.GetWork()
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("3-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// 3-static should not be started because 2-static is still running
if e, a := map[string]types.UID{"pod1_test1": "2-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
if e, a := map[string][]types.UID{"pod1_test1": {"3-static", "4-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// the queue should include a single item for 3-static (indicating we need to retry later)
if e, a := sets.New[string]("3-static"), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
}
// mark 3-static as deleted while 2-static is still running
podWorkers.workQueue.GetWork()
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("3-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
drainAllWorkers(podWorkers)
// should observe 3-static as terminated because it never started; all other state should be unchanged
pod3 = podWorkers.podSyncStatuses[types.UID("3-static")]
if !pod3.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod3)
}
// the queue should be empty because the worker is now done
if e, a := sets.New[string](), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
}
// 2-static is still running
if e, a := map[string]types.UID{"pod1_test1": "2-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
// 3-static and 4-static are both still queued
if e, a := map[string][]types.UID{"pod1_test1": {"3-static", "4-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// terminate 2-static
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("2-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
drainAllWorkers(podWorkers)
// should observe 2-static as terminated, and 2-static should no longer be reported as the started static pod
pod2 = podWorkers.podSyncStatuses[types.UID("2-static")]
if !pod2.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod2)
}
if e, a := map[string]types.UID{}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
if e, a := map[string][]types.UID{"pod1_test1": {"3-static", "4-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// simulate a periodic event from the work queue for 4-static
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("4-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// 4-static should be started because 3-static has already terminated
pod4 = podWorkers.podSyncStatuses[types.UID("4-static")]
if pod4.IsTerminated() {
t.Fatalf("unexpected pod state: %#v", pod4)
}
if e, a := map[string]types.UID{"pod1_test1": "4-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
if e, a := map[string][]types.UID{}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// initiate a sync with all pods remaining
state := podWorkers.SyncKnownPods([]*v1.Pod{
newNamedPod("1-normal", "test1", "pod1", false),
newNamedPod("2-static", "test1", "pod1", true),
newNamedPod("3-static", "test1", "pod1", true),
newNamedPod("4-static", "test1", "pod1", true),
})
drainAllWorkers(podWorkers)
// 2-static and 3-static should both be listed as terminated
if e, a := map[types.UID]PodWorkerSync{
"1-normal": {State: SyncPod, HasConfig: true},
"2-static": {State: TerminatedPod, HasConfig: true, Static: true},
"3-static": {State: TerminatedPod},
"4-static": {State: SyncPod, HasConfig: true, Static: true},
}, state; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected actual state: %s", cmp.Diff(e, a))
}
// 3-static is still in the config, it should still be in our status
if status, ok := podWorkers.podSyncStatuses["3-static"]; !ok || status.terminatedAt.IsZero() || !status.finished || status.working {
t.Fatalf("unexpected post termination status: %#v", status)
}
// initiate a sync with 3-static removed
state = podWorkers.SyncKnownPods([]*v1.Pod{
newNamedPod("1-normal", "test1", "pod1", false),
newNamedPod("2-static", "test1", "pod1", true),
newNamedPod("4-static", "test1", "pod1", true),
})
drainAllWorkers(podWorkers)
// expect sync to put 3-static into final state and remove the status
if e, a := map[types.UID]PodWorkerSync{
"1-normal": {State: SyncPod, HasConfig: true},
"2-static": {State: TerminatedPod, HasConfig: true, Static: true},
"4-static": {State: SyncPod, HasConfig: true, Static: true},
}, state; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected actual state: %s", cmp.Diff(e, a))
}
if status, ok := podWorkers.podSyncStatuses["3-static"]; ok {
t.Fatalf("unexpected post termination status: %#v", status)
}
// start a static pod, kill it, then add another one, but ensure the pod worker
// for pod 5 doesn't see the kill event (so it remains waiting to start)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("5-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
// Wait for the previous work to be delivered to the worker
drainAllWorkers(podWorkers)
channels.Channel("5-static").Hold()
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("5-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("6-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainWorkersExcept(podWorkers, "5-static")
// pod 5 should have termination requested, but hasn't cleaned up
pod5 := podWorkers.podSyncStatuses[types.UID("5-static")]
if !pod5.IsTerminationRequested() || pod5.IsTerminated() {
t.Fatalf("unexpected status for pod 5: %#v", pod5)
}
if e, a := map[string]types.UID{"pod1_test1": "4-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
if e, a := map[string][]types.UID{"pod1_test1": {"5-static", "6-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// terminate 4-static and wake 6-static
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("4-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
drainWorkersExcept(podWorkers, "5-static")
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("6-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainWorkersExcept(podWorkers, "5-static")
// 5-static should still be waiting, 6-static should have started and synced
pod5 = podWorkers.podSyncStatuses[types.UID("5-static")]
if !pod5.IsTerminationRequested() || pod5.IsTerminated() {
t.Fatalf("unexpected status for pod 5: %#v", pod5)
}
if e, a := map[string]types.UID{"pod1_test1": "6-static"}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
// no static pods should be waiting
if e, a := map[string][]types.UID{}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// prove 6-static synced
if expected, actual :=
[]syncPodRecord{{name: "pod1", updateType: kubetypes.SyncPodUpdate}},
processed[types.UID("6-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
// ensure 5-static exits when we deliver the event out of order
channels.Channel("5-static").Release()
drainAllWorkers(podWorkers)
pod5 = podWorkers.podSyncStatuses[types.UID("5-static")]
if !pod5.IsTerminated() {
t.Fatalf("unexpected status for pod 5: %#v", pod5)
}
// start three more static pods, kill the previous static pod blocking start,
// and simulate the second pod of three (8) getting to run first
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("7-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("8-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("9-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("6-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
drainAllWorkers(podWorkers)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("6-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodCreate,
})
drainAllWorkers(podWorkers)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("8-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// 6 should have been detected as restartable
if status := podWorkers.podSyncStatuses["6-static"]; !status.restartRequested {
t.Fatalf("unexpected restarted static pod: %#v", status)
}
// 7 and 8 should both be waiting still with no syncs
if e, a := map[string]types.UID{}, podWorkers.startedStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected started static pods: %s", cmp.Diff(e, a))
}
// only 7-static can start now, but it hasn't received an event
if e, a := map[string][]types.UID{"pod1_test1": {"7-static", "8-static", "9-static"}}, podWorkers.waitingToStartStaticPodsByFullname; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
}
// none of the new pods have synced
if expected, actual :=
[]syncPodRecord(nil),
processed[types.UID("7-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
if expected, actual :=
[]syncPodRecord(nil),
processed[types.UID("8-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
if expected, actual :=
[]syncPodRecord(nil),
processed[types.UID("9-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
// terminate 7-static and wake 8-static
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("7-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodKill,
})
drainAllWorkers(podWorkers)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod("8-static", "test1", "pod1", true),
UpdateType: kubetypes.SyncPodUpdate,
})
drainAllWorkers(podWorkers)
// 8 should have synced
if expected, actual :=
[]syncPodRecord{{name: "pod1", updateType: kubetypes.SyncPodUpdate}},
processed[types.UID("8-static")]; !reflect.DeepEqual(expected, actual) {
t.Fatalf("unexpected sync pod calls: %s", cmp.Diff(expected, actual))
}
// initiate a sync with all but 8-static pods undesired
state = podWorkers.SyncKnownPods([]*v1.Pod{
newNamedPod("8-static", "test1", "pod1", true),
})
drainAllWorkers(podWorkers)
if e, a := map[types.UID]PodWorkerSync{
"1-normal": {State: TerminatingPod, Orphan: true, HasConfig: true},
"8-static": {State: SyncPod, HasConfig: true, Static: true},
}, state; !reflect.DeepEqual(e, a) {
t.Fatalf("unexpected actual restartable: %s", cmp.Diff(e, a))
}
}
type WorkChannelItem struct {
out chan struct{}
lock sync.Mutex
pause bool
queue int
}
func (item *WorkChannelItem) Handle() {
item.lock.Lock()
defer item.lock.Unlock()
if item.pause {
item.queue++
return
}
item.out <- struct{}{}
}
func (item *WorkChannelItem) Hold() {
item.lock.Lock()
defer item.lock.Unlock()
item.pause = true
}
func (item *WorkChannelItem) Close() {
item.lock.Lock()
defer item.lock.Unlock()
if item.out != nil {
close(item.out)
item.out = nil
}
}
// Release blocks until all buffered work has been passed on the channel
func (item *WorkChannelItem) Release() {
item.lock.Lock()
defer item.lock.Unlock()
item.pause = false
for i := 0; i < item.queue; i++ {
item.out <- struct{}{}
}
item.queue = 0
}
// WorkChannel intercepts podWork channels between the pod worker and its child
// goroutines and allows tests to pause or release the flow of podWork to the
// workers.
type WorkChannel struct {
lock sync.Mutex
channels map[types.UID]*WorkChannelItem
}
func (w *WorkChannel) Channel(uid types.UID) *WorkChannelItem {
w.lock.Lock()
defer w.lock.Unlock()
if w.channels == nil {
w.channels = make(map[types.UID]*WorkChannelItem)
}
channel, ok := w.channels[uid]
if !ok {
channel = &WorkChannelItem{
out: make(chan struct{}, 1),
}
w.channels[uid] = channel
}
return channel
}
func (w *WorkChannel) Intercept(uid types.UID, ch chan struct{}) (outCh <-chan struct{}) {
channel := w.Channel(uid)
w.lock.Lock()
defer w.lock.Unlock()
go func() {
defer func() {
channel.Close()
w.lock.Lock()
defer w.lock.Unlock()
delete(w.channels, uid)
}()
for range ch {
channel.Handle()
}
}()
return channel.out
}
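// A typical interception in a test (see TestStaticPodExclusion above) wires
// the WorkChannel in before pausing, and later replays a single pod's work:
//
//	var channels WorkChannel
//	podWorkers.workerChannelFn = channels.Intercept
//	channels.Channel("5-static").Hold()    // buffer podWork for this UID
//	// ... deliver updates that must not reach the worker yet ...
//	channels.Channel("5-static").Release() // replay the buffered podWork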
func TestSyncKnownPods(t *testing.T) {
podWorkers, _, _ := createPodWorkers()
numPods := 20
for i := 0; i < numPods; i++ {
podWorkers.UpdatePod(UpdatePodOptions{
Pod: newNamedPod(strconv.Itoa(i), "ns", "name", false),
UpdateType: kubetypes.SyncPodUpdate,
})
}
drainWorkers(podWorkers, numPods)
if len(podWorkers.podUpdates) != numPods {
t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
}
desiredPods := map[types.UID]sets.Empty{}
desiredPods[types.UID("2")] = sets.Empty{}
desiredPods[types.UID("14")] = sets.Empty{}
desiredPodList := []*v1.Pod{newNamedPod("2", "ns", "name", false), newNamedPod("14", "ns", "name", false)}
// kill all but the requested pods
for i := 0; i < numPods; i++ {
pod := newNamedPod(strconv.Itoa(i), "ns", "name", false)
if _, ok := desiredPods[pod.UID]; ok {
continue
}
if (i % 2) == 0 {
now := metav1.Now()
pod.DeletionTimestamp = &now
}
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodKill,
})
}
drainWorkers(podWorkers, numPods)
if !podWorkers.ShouldPodContainersBeTerminating(types.UID("0")) {
t.Errorf("Expected pod to be terminating")
}
if !podWorkers.ShouldPodContainersBeTerminating(types.UID("1")) {
t.Errorf("Expected pod to be terminating")
}
if podWorkers.ShouldPodContainersBeTerminating(types.UID("2")) {
t.Errorf("Expected pod to not be terminating")
}
if !podWorkers.IsPodTerminationRequested(types.UID("0")) {
t.Errorf("Expected pod to be terminating")
}
if podWorkers.IsPodTerminationRequested(types.UID("2")) {
t.Errorf("Expected pod to not be terminating")
}
if podWorkers.CouldHaveRunningContainers(types.UID("0")) {
t.Errorf("Expected pod to be terminated (deleted and terminated)")
}
if podWorkers.CouldHaveRunningContainers(types.UID("1")) {
t.Errorf("Expected pod to be terminated")
}
if !podWorkers.CouldHaveRunningContainers(types.UID("2")) {
t.Errorf("Expected pod to not be terminated")
}
if !podWorkers.ShouldPodContentBeRemoved(types.UID("0")) {
t.Errorf("Expected pod to be suitable for removal (deleted and terminated)")
}
if podWorkers.ShouldPodContentBeRemoved(types.UID("1")) {
t.Errorf("Expected pod to not be suitable for removal (terminated but not deleted)")
}
if podWorkers.ShouldPodContentBeRemoved(types.UID("2")) {
t.Errorf("Expected pod to not be suitable for removal (not terminated)")
}
if podWorkers.ShouldPodContainersBeTerminating(types.UID("abc")) {
t.Errorf("Expected pod to not be known to be terminating (does not exist but not yet synced)")
}
if !podWorkers.CouldHaveRunningContainers(types.UID("abc")) {
t.Errorf("Expected pod to potentially have running containers (does not exist but not yet synced)")
}
if podWorkers.ShouldPodContentBeRemoved(types.UID("abc")) {
t.Errorf("Expected pod to not be suitable for removal (does not exist but not yet synced)")
}
podWorkers.SyncKnownPods(desiredPodList)
if len(podWorkers.podUpdates) != 2 {
t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
}
if _, exists := podWorkers.podUpdates[types.UID("2")]; !exists {
t.Errorf("No updates channel for pod 2")
}
if _, exists := podWorkers.podUpdates[types.UID("14")]; !exists {
t.Errorf("No updates channel for pod 14")
}
if podWorkers.IsPodTerminationRequested(types.UID("2")) {
t.Errorf("Expected pod termination request to be cleared after sync")
}
if !podWorkers.ShouldPodContainersBeTerminating(types.UID("abc")) {
t.Errorf("Expected pod to be expected to terminate containers (does not exist and synced at least once)")
}
if podWorkers.CouldHaveRunningContainers(types.UID("abc")) {
t.Errorf("Expected pod to be known not to have running containers (does not exist and synced at least once)")
}
if !podWorkers.ShouldPodContentBeRemoved(types.UID("abc")) {
t.Errorf("Expected pod to be suitable for removal (does not exist and synced at least once)")
}
// verify that workers that have not terminated keep their tracked status even
// if config no longer sees the pods (their update channels are torn down)
podWorkers.SyncKnownPods(nil)
drainAllWorkers(podWorkers)
if len(podWorkers.podUpdates) != 0 {
t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
}
if len(podWorkers.podSyncStatuses) != 2 {
t.Errorf("Incorrect number of tracked statuses: %#v", podWorkers.podSyncStatuses)
}
for uid := range desiredPods {
pod := newNamedPod(string(uid), "ns", "name", false)
podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
UpdateType: kubetypes.SyncPodKill,
})
}
drainWorkers(podWorkers, numPods)
// verify once those pods terminate (via some other flow) the workers are cleared
podWorkers.SyncKnownPods(nil)
if len(podWorkers.podUpdates) != 0 {
t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
}
if len(podWorkers.podSyncStatuses) != 0 {
t.Errorf("Incorrect number of tracked statuses: %#v", podWorkers.podSyncStatuses)
}
}
func Test_removeTerminatedWorker(t *testing.T) {
podUID := types.UID("pod-uid")
testCases := []struct {
desc string
orphan bool
podSyncStatus *podSyncStatus
startedStaticPodsByFullname map[string]types.UID
waitingToStartStaticPodsByFullname map[string][]types.UID
removed bool
expectGracePeriod int64
expectPending *UpdatePodOptions
}{
{
desc: "finished worker",
podSyncStatus: &podSyncStatus{
finished: true,
},
removed: true,
},
{
desc: "waiting to start worker because of another started pod with the same fullname",
podSyncStatus: &podSyncStatus{
finished: false,
fullname: "fake-fullname",
},
startedStaticPodsByFullname: map[string]types.UID{
"fake-fullname": "another-pod-uid",
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"fake-fullname": {podUID},
},
removed: false,
},
{
desc: "not yet started worker",
podSyncStatus: &podSyncStatus{
finished: false,
fullname: "fake-fullname",
},
startedStaticPodsByFullname: make(map[string]types.UID),
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"fake-fullname": {podUID},
},
removed: false,
},
{
desc: "orphaned not started worker",
podSyncStatus: &podSyncStatus{
finished: false,
fullname: "fake-fullname",
},
orphan: true,
removed: true,
},
{
desc: "orphaned started worker",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
finished: false,
fullname: "fake-fullname",
},
orphan: true,
removed: false,
},
{
desc: "orphaned terminating worker with no activeUpdate",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
finished: false,
fullname: "fake-fullname",
},
orphan: true,
removed: false,
},
{
desc: "orphaned terminating worker",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
finished: false,
fullname: "fake-fullname",
activeUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
orphan: true,
removed: false,
expectPending: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
{
desc: "orphaned terminating worker with pendingUpdate",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
finished: false,
fullname: "fake-fullname",
working: true,
pendingUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "2"}},
},
activeUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
orphan: true,
removed: false,
expectPending: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "2"}},
},
},
{
desc: "orphaned terminated worker with no activeUpdate",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
terminatedAt: time.Unix(3, 0),
finished: false,
fullname: "fake-fullname",
},
orphan: true,
removed: false,
},
{
desc: "orphaned terminated worker",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
terminatedAt: time.Unix(3, 0),
finished: false,
fullname: "fake-fullname",
activeUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
orphan: true,
removed: false,
expectPending: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
{
desc: "orphaned terminated worker with pendingUpdate",
podSyncStatus: &podSyncStatus{
startedAt: time.Unix(1, 0),
terminatingAt: time.Unix(2, 0),
terminatedAt: time.Unix(3, 0),
finished: false,
working: true,
fullname: "fake-fullname",
pendingUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "2"}},
},
activeUpdate: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "1"}},
},
},
orphan: true,
removed: false,
expectPending: &UpdatePodOptions{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID, Name: "2"}},
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
podWorkers, _, _ := createPodWorkers()
podWorkers.podSyncStatuses[podUID] = tc.podSyncStatus
podWorkers.podUpdates[podUID] = make(chan struct{}, 1)
if tc.podSyncStatus.working {
podWorkers.podUpdates[podUID] <- struct{}{}
}
podWorkers.startedStaticPodsByFullname = tc.startedStaticPodsByFullname
podWorkers.waitingToStartStaticPodsByFullname = tc.waitingToStartStaticPodsByFullname
podWorkers.removeTerminatedWorker(podUID, podWorkers.podSyncStatuses[podUID], tc.orphan)
status, exists := podWorkers.podSyncStatuses[podUID]
if tc.removed && exists {
t.Fatalf("Expected pod worker to be removed")
}
if !tc.removed && !exists {
t.Fatalf("Expected pod worker to not be removed")
}
if tc.removed {
return
}
if tc.expectGracePeriod > 0 && status.gracePeriod != tc.expectGracePeriod {
t.Errorf("Unexpected grace period %d", status.gracePeriod)
}
if !reflect.DeepEqual(tc.expectPending, status.pendingUpdate) {
t.Errorf("Unexpected pending: %s", cmp.Diff(tc.expectPending, status.pendingUpdate))
}
if tc.expectPending != nil {
if !status.working {
t.Errorf("Should be working")
}
if len(podWorkers.podUpdates[podUID]) != 1 {
t.Errorf("Should have one entry in podUpdates")
}
}
})
}
}
type simpleFakeKubelet struct {
pod *v1.Pod
mirrorPod *v1.Pod
podStatus *kubecontainer.PodStatus
wg sync.WaitGroup
}
func (kl *simpleFakeKubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
kl.pod, kl.mirrorPod, kl.podStatus = pod, mirrorPod, podStatus
return false, nil
}
func (kl *simpleFakeKubelet) SyncPodWithWaitGroup(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
kl.pod, kl.mirrorPod, kl.podStatus = pod, mirrorPod, podStatus
kl.wg.Done()
return false, nil
}
func (kl *simpleFakeKubelet) SyncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
return nil
}
func (kl *simpleFakeKubelet) SyncTerminatingRuntimePod(ctx context.Context, runningPod *kubecontainer.Pod) error {
return nil
}
func (kl *simpleFakeKubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
return nil
}
// TestFakePodWorkers verifies that fakePodWorkers invokes the syncPodFn in the
// same way as the real podWorkers.
func TestFakePodWorkers(t *testing.T) {
fakeRecorder := &record.FakeRecorder{}
fakeRuntime := &containertest.FakeRuntime{}
fakeCache := containertest.NewFakeCache(fakeRuntime)
kubeletForRealWorkers := &simpleFakeKubelet{}
kubeletForFakeWorkers := &simpleFakeKubelet{}
realPodSyncer := newPodSyncerFuncs(kubeletForRealWorkers)
realPodSyncer.syncPod = kubeletForRealWorkers.SyncPodWithWaitGroup
realPodWorkers := newPodWorkers(
realPodSyncer,
fakeRecorder,
queue.NewBasicWorkQueue(&clock.RealClock{}),
time.Second,
time.Second,
fakeCache,
allocation.NewInMemoryManager(cm.NodeConfig{}, nil, nil, nil, nil, nil, nil),
)
fakePodWorkers := &fakePodWorkers{
syncPodFn: kubeletForFakeWorkers.SyncPod,
cache: fakeCache,
t: t,
}
tests := []struct {
pod *v1.Pod
mirrorPod *v1.Pod
}{
{
&v1.Pod{},
&v1.Pod{},
},
{
podWithUIDNameNs("12345678", "foo", "new"),
podWithUIDNameNs("12345678", "fooMirror", "new"),
},
{
podWithUIDNameNs("98765", "bar", "new"),
podWithUIDNameNs("98765", "barMirror", "new"),
},
}
for i, tt := range tests {
kubeletForRealWorkers.wg.Add(1)
realPodWorkers.UpdatePod(UpdatePodOptions{
Pod: tt.pod,
MirrorPod: tt.mirrorPod,
UpdateType: kubetypes.SyncPodUpdate,
})
fakePodWorkers.UpdatePod(UpdatePodOptions{
Pod: tt.pod,
MirrorPod: tt.mirrorPod,
UpdateType: kubetypes.SyncPodUpdate,
})
kubeletForRealWorkers.wg.Wait()
if !reflect.DeepEqual(kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod) {
t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod)
}
if !reflect.DeepEqual(kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod) {
t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod)
}
if !reflect.DeepEqual(kubeletForRealWorkers.podStatus, kubeletForFakeWorkers.podStatus) {
t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.podStatus, kubeletForFakeWorkers.podStatus)
}
}
}
// TestKillPodNowFunc tests the blocking kill pod function works with pod workers as expected.
func TestKillPodNowFunc(t *testing.T) {
fakeRecorder := &record.FakeRecorder{}
podWorkers, _, processed := createPodWorkers()
killPodFunc := killPodNow(podWorkers, fakeRecorder)
pod := newNamedPod("test", "ns", "test", false)
gracePeriodOverride := int64(0)
err := killPodFunc(pod, false, &gracePeriodOverride, func(status *v1.PodStatus) {
status.Phase = v1.PodFailed
status.Reason = "reason"
status.Message = "message"
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
drainAllWorkers(podWorkers)
if len(processed) != 1 {
t.Fatalf("len(processed) expected: %v, actual: %#v", 1, processed)
}
syncPodRecords := processed[pod.UID]
if len(syncPodRecords) != 2 {
t.Fatalf("Pod processed expected %v times, got %#v", 2, syncPodRecords)
}
if syncPodRecords[0].updateType != kubetypes.SyncPodKill {
t.Errorf("Pod update type was %v, but expected %v", syncPodRecords[0].updateType, kubetypes.SyncPodKill)
}
if !syncPodRecords[1].terminated {
t.Errorf("Pod terminated %v, but expected %v", syncPodRecords[1].terminated, true)
}
}
func Test_allowPodStart(t *testing.T) {
testCases := []struct {
desc string
pod *v1.Pod
podSyncStatuses map[types.UID]*podSyncStatus
startedStaticPodsByFullname map[string]types.UID
waitingToStartStaticPodsByFullname map[string][]types.UID
expectedStartedStaticPodsByFullname map[string]types.UID
expectedWaitingToStartStaticPodsByFullname map[string][]types.UID
allowed bool
allowedEver bool
}{
{
// TODO: Do we want to allow non-static pods with the same full name?
// Note that it may disable the force deletion of pods.
desc: "non-static pod",
pod: newNamedPod("uid-0", "ns", "test", false),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "test_",
},
"uid-1": {
fullname: "test_",
},
},
allowed: true,
allowedEver: true,
},
{
// TODO: Do we want to allow a non-static pod with the same full name
// as the started static pod?
desc: "non-static pod when there is a started static pod with the same full name",
pod: newNamedPod("uid-0", "ns", "test", false),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "test_",
},
"uid-1": {
fullname: "test_",
},
},
startedStaticPodsByFullname: map[string]types.UID{
"test_": types.UID("uid-1"),
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"test_": types.UID("uid-1"),
},
allowed: true,
allowedEver: true,
},
{
// TODO: Do we want to allow a static pod with the same full name as the
// started non-static pod?
desc: "static pod when there is a started non-static pod with the same full name",
pod: newNamedPod("uid-0", "ns", "test", false),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "test_",
},
"uid-1": {
fullname: "test_",
},
},
allowed: true,
allowedEver: true,
},
{
desc: "static pod when there are no started static pods with the same full name",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
"uid-1": {
fullname: "bar_",
},
},
startedStaticPodsByFullname: map[string]types.UID{
"bar_": types.UID("uid-1"),
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
"bar_": types.UID("uid-1"),
},
allowed: true,
allowedEver: true,
},
{
desc: "static pod when there is a started static pod with the same full name",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
"uid-1": {
fullname: "foo_",
},
},
startedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-1"),
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-1"),
},
allowed: false,
allowedEver: true,
},
{
desc: "static pod if the static pod has already started",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
},
startedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
},
allowed: true,
allowedEver: true,
},
{
desc: "static pod if the static pod is the first pod waiting to start",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-0"),
},
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
},
expectedWaitingToStartStaticPodsByFullname: make(map[string][]types.UID),
allowed: true,
allowedEver: true,
},
{
desc: "static pod if the static pod is not the first pod waiting to start",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
"uid-1": {
fullname: "foo_",
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
types.UID("uid-0"),
},
},
expectedStartedStaticPodsByFullname: make(map[string]types.UID),
expectedWaitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
types.UID("uid-0"),
},
},
allowed: false,
allowedEver: true,
},
{
desc: "static pod if the static pod is the first valid pod waiting to start / clean up until picking the first valid pod",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
"uid-1": {
fullname: "foo_",
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-2"),
types.UID("uid-2"),
types.UID("uid-3"),
types.UID("uid-0"),
types.UID("uid-1"),
},
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
},
expectedWaitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
},
},
allowed: true,
allowedEver: true,
},
{
desc: "static pod if the static pod is the first pod that is not termination requested and waiting to start",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
},
"uid-1": {
fullname: "foo_",
},
"uid-2": {
fullname: "foo_",
terminatingAt: time.Now(),
},
"uid-3": {
fullname: "foo_",
terminatedAt: time.Now(),
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-2"),
types.UID("uid-3"),
types.UID("uid-0"),
types.UID("uid-1"),
},
},
expectedStartedStaticPodsByFullname: map[string]types.UID{
"foo_": types.UID("uid-0"),
},
expectedWaitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
},
},
allowed: true,
allowedEver: true,
},
{
desc: "static pod if there is no sync status for the pod should be denied",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-1": {
fullname: "foo_",
},
"uid-2": {
fullname: "foo_",
terminatingAt: time.Now(),
},
"uid-3": {
fullname: "foo_",
terminatedAt: time.Now(),
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
},
},
expectedStartedStaticPodsByFullname: map[string]types.UID{},
expectedWaitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-1"),
},
},
allowed: false,
allowedEver: false,
},
{
desc: "static pod if the static pod is terminated should not be allowed",
pod: newStaticPod("uid-0", "foo"),
podSyncStatuses: map[types.UID]*podSyncStatus{
"uid-0": {
fullname: "foo_",
terminatingAt: time.Now(),
},
},
waitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-2"),
types.UID("uid-3"),
types.UID("uid-0"),
types.UID("uid-1"),
},
},
expectedStartedStaticPodsByFullname: map[string]types.UID{},
expectedWaitingToStartStaticPodsByFullname: map[string][]types.UID{
"foo_": {
types.UID("uid-2"),
types.UID("uid-3"),
types.UID("uid-0"),
types.UID("uid-1"),
},
},
allowed: false,
allowedEver: false,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
podWorkers, _, _ := createPodWorkers()
if tc.podSyncStatuses != nil {
podWorkers.podSyncStatuses = tc.podSyncStatuses
}
if tc.startedStaticPodsByFullname != nil {
podWorkers.startedStaticPodsByFullname = tc.startedStaticPodsByFullname
}
if tc.waitingToStartStaticPodsByFullname != nil {
podWorkers.waitingToStartStaticPodsByFullname = tc.waitingToStartStaticPodsByFullname
}
allowed, allowedEver := podWorkers.allowPodStart(tc.pod)
if allowed != tc.allowed {
if tc.allowed {
t.Errorf("Pod should be allowed")
} else {
t.Errorf("Pod should not be allowed")
}
}
if allowedEver != tc.allowedEver {
if tc.allowedEver {
t.Errorf("Pod should be allowed ever")
} else {
t.Errorf("Pod should not be allowed ever")
}
}
// compare unless both the actual and expected maps are empty
if len(podWorkers.startedStaticPodsByFullname) != 0 ||
len(podWorkers.startedStaticPodsByFullname) != len(tc.expectedStartedStaticPodsByFullname) {
if !reflect.DeepEqual(
podWorkers.startedStaticPodsByFullname,
tc.expectedStartedStaticPodsByFullname) {
t.Errorf("startedStaticPodsByFullname: expected %v, got %v",
tc.expectedStartedStaticPodsByFullname,
podWorkers.startedStaticPodsByFullname)
}
}
// compare unless both the actual and expected maps are empty
if len(podWorkers.waitingToStartStaticPodsByFullname) != 0 ||
len(podWorkers.waitingToStartStaticPodsByFullname) != len(tc.expectedWaitingToStartStaticPodsByFullname) {
if !reflect.DeepEqual(
podWorkers.waitingToStartStaticPodsByFullname,
tc.expectedWaitingToStartStaticPodsByFullname) {
t.Errorf("waitingToStartStaticPodsByFullname: expected %v, got %v",
tc.expectedWaitingToStartStaticPodsByFullname,
podWorkers.waitingToStartStaticPodsByFullname)
}
}
})
}
}
func Test_calculateEffectiveGracePeriod(t *testing.T) {
zero := int64(0)
two := int64(2)
five := int64(5)
thirty := int64(30)
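// The precedence exercised by these cases: gracePeriodOverride takes priority
// over pod.DeletionGracePeriodSeconds, which takes priority over
// pod.Spec.TerminationGracePeriodSeconds; a zero value from any source is
// clamped to a 1-second grace period.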
testCases := []struct {
desc string
podSpecTerminationGracePeriodSeconds *int64
podDeletionGracePeriodSeconds *int64
gracePeriodOverride *int64
expectedGracePeriod int64
}{
{
desc: "use termination grace period from the spec when no overrides",
podSpecTerminationGracePeriodSeconds: &thirty,
expectedGracePeriod: thirty,
},
{
desc: "use pod DeletionGracePeriodSeconds when set",
podSpecTerminationGracePeriodSeconds: &thirty,
podDeletionGracePeriodSeconds: &five,
expectedGracePeriod: five,
},
{
desc: "use grace period override when set",
podSpecTerminationGracePeriodSeconds: &thirty,
podDeletionGracePeriodSeconds: &five,
gracePeriodOverride: &two,
expectedGracePeriod: two,
},
{
desc: "use 1 when pod DeletionGracePeriodSeconds is zero",
podSpecTerminationGracePeriodSeconds: &thirty,
podDeletionGracePeriodSeconds: &zero,
expectedGracePeriod: 1,
},
{
desc: "use 1 when grace period override is zero",
podSpecTerminationGracePeriodSeconds: &thirty,
podDeletionGracePeriodSeconds: &five,
gracePeriodOverride: &zero,
expectedGracePeriod: 1,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
pod := newNamedPod("1", "ns", "running-pod", false)
pod.Spec.TerminationGracePeriodSeconds = tc.podSpecTerminationGracePeriodSeconds
pod.DeletionGracePeriodSeconds = tc.podDeletionGracePeriodSeconds
gracePeriod, _ := calculateEffectiveGracePeriod(&podSyncStatus{}, pod, &KillPodOptions{
PodTerminationGracePeriodSecondsOverride: tc.gracePeriodOverride,
})
if gracePeriod != tc.expectedGracePeriod {
t.Errorf("Expected a grace period of %v, but was %v", tc.expectedGracePeriod, gracePeriod)
}
})
}
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podcertificate
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"fmt"
mathrand "math/rand/v2"
"sync"
"time"
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
coreinformersv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
certlistersv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
corelistersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
// PodManager is a local wrapper interface for pod.Manager.
type PodManager interface {
GetPodByUID(uid types.UID) (*corev1.Pod, bool)
GetPods() []*corev1.Pod
}
// Manager abstracts the functionality needed by Kubelet and the volume host in
// order to provide pod certificate functionality.
type Manager interface {
// TrackPod is called by Kubelet every time a new pod is assigned to the node.
TrackPod(ctx context.Context, pod *corev1.Pod)
// ForgetPod is called by Kubelet every time a pod is dropped from the node.
ForgetPod(ctx context.Context, pod *corev1.Pod)
// GetPodCertificateCredentialBundle is called by the volume host to
// retrieve the credential bundle for a given pod certificate volume.
GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) (privKey []byte, certChain []byte, err error)
}
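// A volume host fetching the bundle for the i-th projected source of a volume
// would call (a sketch; pod, vol, and i are assumed to come from the caller):
//
//	key, chain, err := mgr.GetPodCertificateCredentialBundle(
//	    ctx, pod.Namespace, pod.Name, string(pod.UID), vol.Name, i)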
// After this amount of time (plus jitter), we can assume that a PCR that we
// created, but isn't showing up on our informer, must have been deleted.
const assumeDeletedThreshold = 10 * time.Minute
// IssuingManager is the main implementation of Manager.
//
// The core construct is a workqueue that contains one entry for each
// PodCertificateProjection (tracked with a podname/volumename/sourceindex
// tuple) in each non-mirror Pod scheduled to the node. Every time anything
// interesting happens to a PodCertificateRequest or Pod, we redrive all of the
// potentially-affected PodCertificateProjections into the workqueue.
//
// State is not preserved across restarts: if Kubelet or the node restarts,
// then all PodCertificateProjections will be queued for immediate refresh.
//
// Refresh is handled by periodically redriving all PodCertificateProjections
// into the queue.
type IssuingManager struct {
kc kubernetes.Interface
podManager PodManager
projectionQueue workqueue.TypedRateLimitingInterface[projectionKey]
pcrInformer cache.SharedIndexInformer
pcrLister certlistersv1alpha1.PodCertificateRequestLister
nodeInformer cache.SharedIndexInformer
nodeLister corelistersv1.NodeLister
nodeName types.NodeName
clock clock.PassiveClock
// lock covers credStore
lock sync.Mutex
credStore map[projectionKey]*projectionRecord
}
type projectionKey struct {
Namespace string
PodName string
PodUID string
VolumeName string
SourceIndex int
}
type projectionRecord struct {
// lock covers all fields within projectionRecord.
lock sync.Mutex
// The state machine for this projection:
//
//                      ┌───────────────┐
//                      ▼               │
// initial ─► wait ─► fresh ─► waitrefresh
//             │                   │
//             ├────► denied ◄─────┤
//             │                   │
//             └────► failed ◄─────┘
curState credState
}
// Interface type for all projection record states.
type credState interface {
getCredBundle() (privKey, certChain []byte, err error)
}
type credStateInitial struct {
}
func (c *credStateInitial) getCredBundle() ([]byte, []byte, error) {
return nil, nil, fmt.Errorf("credential bundle is not issued yet")
}
type credStateWait struct {
privateKey []byte
pcrName string
// If it has reached this time and the PCR isn't showing up on the informer,
// assume that it was deleted.
pcrAbandonAt time.Time
}
func (c *credStateWait) getCredBundle() ([]byte, []byte, error) {
return nil, nil, fmt.Errorf("credential bundle is not issued yet")
}
type credStateDenied struct {
Reason string
Message string
}
func (c *credStateDenied) getCredBundle() ([]byte, []byte, error) {
return nil, nil, fmt.Errorf("PodCertificateRequest was permanently denied: reason=%q message=%q", c.Reason, c.Message)
}
type credStateFailed struct {
Reason string
Message string
}
func (c *credStateFailed) getCredBundle() ([]byte, []byte, error) {
return nil, nil, fmt.Errorf("PodCertificateRequest was permanently failed: reason=%q message=%q", c.Reason, c.Message)
}
type credStateFresh struct {
privateKey []byte
certChain []byte
beginRefreshAt time.Time
}
func (c *credStateFresh) getCredBundle() ([]byte, []byte, error) {
return c.privateKey, c.certChain, nil
}
type credStateWaitRefresh struct {
privateKey []byte
certChain []byte
beginRefreshAt time.Time
refreshPrivateKey []byte
refreshPCRName string
// If it has reached this time and the PCR isn't showing up on the informer,
// assume that it was deleted.
refreshPCRAbandonAt time.Time
}
func (c *credStateWaitRefresh) getCredBundle() ([]byte, []byte, error) {
return c.privateKey, c.certChain, nil
}
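// Note that credStateFresh and credStateWaitRefresh both serve the previously
// issued bundle, so a pod keeps working credentials while a refresh request is
// still in flight.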
var _ Manager = (*IssuingManager)(nil)
func NewIssuingManager(kc kubernetes.Interface, podManager PodManager, pcrInformer certinformersv1alpha1.PodCertificateRequestInformer, nodeInformer coreinformersv1.NodeInformer, nodeName types.NodeName, clock clock.WithTicker) *IssuingManager {
m := &IssuingManager{
kc: kc,
podManager: podManager,
projectionQueue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[projectionKey]()),
pcrInformer: pcrInformer.Informer(),
pcrLister: pcrInformer.Lister(),
nodeInformer: nodeInformer.Informer(),
nodeLister: nodeInformer.Lister(),
nodeName: nodeName,
clock: clock,
credStore: map[projectionKey]*projectionRecord{},
}
// Add informer functions for PodCertificateRequests. In all cases, we just
// queue the corresponding PodCertificateProjections for re-processing.
// This is not needed for correctness, since projectionQueue backoffs will
// eventually trigger the volume to be inspected. However, it's a better UX
// for us to notice immediately once the certificate is issued.
m.pcrInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
pcr := obj.(*certificatesv1alpha1.PodCertificateRequest)
m.queueAllProjectionsForPod(pcr.Spec.PodUID)
},
UpdateFunc: func(oldObj, newObj any) {
pcr := newObj.(*certificatesv1alpha1.PodCertificateRequest)
m.queueAllProjectionsForPod(pcr.Spec.PodUID)
},
DeleteFunc: func(obj any) {
pcr := obj.(*certificatesv1alpha1.PodCertificateRequest)
m.queueAllProjectionsForPod(pcr.Spec.PodUID)
},
})
return m
}
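// A minimal construction sketch (assumes a client-go shared informer factory
// and a real clock; not taken from this file):
//
//	factory := informers.NewSharedInformerFactory(client, 0)
//	mgr := NewIssuingManager(client, podManager,
//	    factory.Certificates().V1alpha1().PodCertificateRequests(),
//	    factory.Core().V1().Nodes(),
//	    nodeName, clock.RealClock{})
//	go mgr.Run(ctx)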
func (m *IssuingManager) queueAllProjectionsForPod(uid types.UID) {
pod, ok := m.podManager.GetPodByUID(uid)
if !ok {
return
}
for _, v := range pod.Spec.Volumes {
if v.Projected == nil {
continue
}
for sourceIndex, source := range v.Projected.Sources {
if source.PodCertificate == nil {
continue
}
key := projectionKey{
Namespace: pod.ObjectMeta.Namespace,
PodName: pod.ObjectMeta.Name,
PodUID: string(pod.ObjectMeta.UID),
VolumeName: v.Name,
SourceIndex: sourceIndex,
}
m.projectionQueue.Add(key)
}
}
}
func (m *IssuingManager) Run(ctx context.Context) {
klog.InfoS("podcertificate.IssuingManager starting up")
if !cache.WaitForCacheSync(ctx.Done(), m.pcrInformer.HasSynced, m.nodeInformer.HasSynced) {
return
}
go wait.JitterUntilWithContext(ctx, m.runRefreshPass, 1*time.Minute, 1.0, false)
go wait.UntilWithContext(ctx, m.runProjectionProcessor, time.Second)
<-ctx.Done()
m.projectionQueue.ShutDown()
klog.InfoS("podcertificate.IssuingManager shut down")
}
func (m *IssuingManager) runProjectionProcessor(ctx context.Context) {
for m.processNextProjection(ctx) {
}
}
func (m *IssuingManager) processNextProjection(ctx context.Context) bool {
key, quit := m.projectionQueue.Get()
if quit {
return false
}
defer m.projectionQueue.Done(key)
err := m.handleProjection(ctx, key)
if err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "while handling podCertificate projected volume source", "namespace", key.Namespace, "pod", key.PodName, "volume", key.VolumeName, "sourceIndex", key.SourceIndex)
m.projectionQueue.AddRateLimited(key)
return true
}
m.projectionQueue.Forget(key)
return true
}
func (m *IssuingManager) handleProjection(ctx context.Context, key projectionKey) error {
// Remember, returning nil from this function indicates that the work item
// was successfully processed, and should be dropped from the queue.
pod, ok := m.podManager.GetPodByUID(types.UID(key.PodUID))
if !ok {
// If we can't find the pod anymore, it's been deleted. Clear all our
// internal state associated with the pod and return a nil error so it
// is forgotten from the queue.
m.lock.Lock()
defer m.lock.Unlock()
for k := range m.credStore {
if k.Namespace == key.Namespace && k.PodName == key.PodName && k.PodUID == key.PodUID {
delete(m.credStore, k)
}
}
return nil
}
var source *corev1.PodCertificateProjection
for _, vol := range pod.Spec.Volumes {
if vol.Name == key.VolumeName && vol.Projected != nil {
for i, volumeSource := range vol.Projected.Sources {
if i == key.SourceIndex && volumeSource.PodCertificate != nil {
source = volumeSource.PodCertificate
}
}
}
}
if source == nil {
// No amount of retrying will fix this problem. Log it and return nil.
utilruntime.HandleErrorWithContext(ctx, nil, "pod does not contain the named podCertificate projected volume source", "key", key)
return nil
}
var rec *projectionRecord
func() {
m.lock.Lock()
defer m.lock.Unlock()
rec = m.credStore[key]
if rec == nil {
rec = &projectionRecord{
curState: &credStateInitial{},
}
m.credStore[key] = rec
}
}()
// Lock the record for the remainder of the function.
rec.lock.Lock()
defer rec.lock.Unlock()
switch state := rec.curState.(type) {
case *credStateInitial:
// We have not started the initial issuance. We need to create a PCR
// and record it in credStore.
// We fetch the service account so we can know its UID. Ideally, Kubelet
// would have a central component that tracks all service accounts related
// to pods on the node using a single-item watch.
serviceAccount, err := m.kc.CoreV1().ServiceAccounts(pod.ObjectMeta.Namespace).Get(ctx, pod.Spec.ServiceAccountName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("while fetching service account: %w", err)
}
node, err := m.nodeLister.Get(string(m.nodeName))
if err != nil {
return fmt.Errorf("while getting node object from local cache: %w", err)
}
privKey, pcr, err := m.createPodCertificateRequest(
ctx,
pod.ObjectMeta.Namespace,
pod.ObjectMeta.Name, pod.ObjectMeta.UID,
pod.Spec.ServiceAccountName, serviceAccount.ObjectMeta.UID,
m.nodeName, node.ObjectMeta.UID,
source.SignerName, source.KeyType, source.MaxExpirationSeconds,
)
if err != nil {
return fmt.Errorf("while creating initial PodCertificateRequest: %w", err)
}
rec.curState = &credStateWait{
privateKey: privKey,
pcrName: pcr.ObjectMeta.Name,
pcrAbandonAt: pcr.ObjectMeta.CreationTimestamp.Time.Add(assumeDeletedThreshold + jitterDuration()),
}
// Return nil to remove the projection from the workqueue; it will be
// re-added once the PodCertificateRequest appears in the informer cache
// and goes through status updates.
klog.V(4).InfoS("PodCertificateRequest created, moving to credStateWait", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case *credStateWait:
// We are working through the initial issuance. We created a PCR, now
// we need to wait for it to reach a terminal state.
pcr, err := m.pcrLister.PodCertificateRequests(key.Namespace).Get(state.pcrName)
if k8serrors.IsNotFound(err) && m.clock.Now().After(state.pcrAbandonAt) {
// "Not Found" could be due to informer lag, or because someone
// deleted the PodCertificateRequest. In the first case, the
// correct action is to continue waiting. In the second case, the
// correct action is to recreate the PCR. Properly disambiguating
// the cases will require resourceVersions to be ordered, and for
// the lister to report the highest resource version it has seen. In
// the meantime, assume that if it has been 10 minutes since we
// remember creating the PCR, then we must be in case 2. Return to
// credStateInitial so we create a new PCR.
rec.curState = &credStateInitial{}
return fmt.Errorf("PodCertificateRequest %q appears to have been deleted", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
} else if err != nil {
return fmt.Errorf("while getting PodCertificateRequest %q: %w", key.Namespace+"/"+state.pcrName, err)
}
// If the PodCertificateRequest has moved to a terminal state, update
// our state machine accordingly.
for _, cond := range pcr.Status.Conditions {
switch cond.Type {
case certificatesv1alpha1.PodCertificateRequestConditionTypeDenied:
rec.curState = &credStateDenied{
Reason: cond.Reason,
Message: cond.Message,
}
klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateDenied", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case certificatesv1alpha1.PodCertificateRequestConditionTypeFailed:
rec.curState = &credStateFailed{
Reason: cond.Reason,
Message: cond.Message,
}
klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateFailed", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case certificatesv1alpha1.PodCertificateRequestConditionTypeIssued:
rec.curState = &credStateFresh{
privateKey: state.privateKey,
certChain: cleanCertificateChain([]byte(pcr.Status.CertificateChain)),
beginRefreshAt: pcr.Status.BeginRefreshAt.Time.Add(jitterDuration()),
}
klog.V(4).InfoS("PodCertificateRequest issued, moving to credStateFresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
}
}
// Nothing -- the request is still pending. Return nil to remove the
// projection from the workqueue. It will be redriven when the
// PodCertificateRequest gets an update.
klog.V(4).InfoS("PodCertificateRequest not in terminal state, remaining in credStateWait", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case *credStateDenied:
// Nothing to do; this is a permanent error state for the pod.
klog.V(4).InfoS("staying in credStateDenied", "key", key)
return nil
case *credStateFailed:
// Nothing to do; this is a permanent error state for the pod.
klog.V(4).InfoS("staying in credStateFailed", "key", key)
return nil
case *credStateFresh:
// Do nothing until it is time to refresh, then create a new PCR and
// switch to credStateWaitRefresh.
if m.clock.Now().Before(state.beginRefreshAt) {
// If it's not time to refresh yet, do nothing.
return nil
}
klog.V(4).InfoS("Time to refresh", "key", key)
// We fetch the service account so we can know its UID. Ideally, Kubelet
// would have a central component that tracks all service accounts related
// to pods on the node using a single-item watch.
serviceAccount, err := m.kc.CoreV1().ServiceAccounts(pod.ObjectMeta.Namespace).Get(ctx, pod.Spec.ServiceAccountName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("while fetching service account: %w", err)
}
node, err := m.nodeLister.Get(string(m.nodeName))
if err != nil {
return fmt.Errorf("while getting node object from local cache: %w", err)
}
privKey, pcr, err := m.createPodCertificateRequest(
ctx,
pod.ObjectMeta.Namespace,
pod.ObjectMeta.Name, pod.ObjectMeta.UID,
pod.Spec.ServiceAccountName, serviceAccount.ObjectMeta.UID,
m.nodeName, node.ObjectMeta.UID,
source.SignerName, source.KeyType, source.MaxExpirationSeconds,
)
if err != nil {
return fmt.Errorf("while creating refresh PodCertificateRequest: %w", err)
}
rec.curState = &credStateWaitRefresh{
privateKey: state.privateKey,
certChain: state.certChain,
beginRefreshAt: state.beginRefreshAt,
refreshPrivateKey: privKey,
refreshPCRName: pcr.ObjectMeta.Name,
refreshPCRAbandonAt: pcr.ObjectMeta.CreationTimestamp.Time.Add(assumeDeletedThreshold + jitterDuration()),
}
// Return nil to remove the projection from the workqueue; it will be
// re-added once the PodCertificateRequest appears in the informer cache
// and goes through status updates.
klog.V(4).InfoS("PodCertificateRequest created, moving to credStateWaitRefresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case *credStateWaitRefresh:
// Check the refresh PodCertificateRequest
pcr, err := m.pcrLister.PodCertificateRequests(key.Namespace).Get(state.refreshPCRName)
if k8serrors.IsNotFound(err) && m.clock.Now().After(state.refreshPCRAbandonAt) {
// "Not Found" could be due to informer lag, or because someone
// deleted the PodCertificateRequest. In the first case, the
// correct action is to continue waiting. In the second case, the
// correct action is to recreate the PCR. Properly disambiguating
// the cases will require resourceVersions to be ordered, and for
// the lister to report the highest resource version it has seen. In
// the meantime, assume that if it has been 10 minutes since we
// remember creating the PCR, then we must be in case 2. Return to
// credStateFresh so we create a new PCR.
rec.curState = &credStateFresh{
privateKey: state.privateKey,
certChain: state.certChain,
beginRefreshAt: state.beginRefreshAt,
}
return fmt.Errorf("PodCertificateRequest appears to have been deleted")
} else if err != nil {
return fmt.Errorf("while getting PodCertificateRequest %q: %w", key.Namespace+"/"+state.refreshPCRName, err)
}
// If the PodCertificateRequest has moved to a terminal state, update
// our state machine accordingly.
for _, cond := range pcr.Status.Conditions {
switch cond.Type {
case certificatesv1alpha1.PodCertificateRequestConditionTypeDenied:
rec.curState = &credStateDenied{
Reason: cond.Reason,
Message: cond.Message,
}
klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateDenied", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case certificatesv1alpha1.PodCertificateRequestConditionTypeFailed:
rec.curState = &credStateFailed{
Reason: cond.Reason,
Message: cond.Message,
}
klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateFailed", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
case certificatesv1alpha1.PodCertificateRequestConditionTypeIssued:
rec.curState = &credStateFresh{
privateKey: state.refreshPrivateKey,
certChain: cleanCertificateChain([]byte(pcr.Status.CertificateChain)),
beginRefreshAt: pcr.Status.BeginRefreshAt.Time.Add(jitterDuration()),
}
klog.V(4).InfoS("PodCertificateRequest issued, moving to credStateFresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
}
}
// Nothing -- the request is still pending. Return nil to remove the
// projection from the workqueue. It will be redriven when the
// PodCertificateRequest gets an update.
klog.V(4).InfoS("PodCertificateRequest not in terminal state, remaining in credStateWaitRefresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
return nil
}
return nil
}
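// For orientation, a sketch of the state transitions implemented by the
// switch above (derived from the cases; illustrative summary only):
//
//	credStateInitial     --create PCR--->  credStateWait
//	credStateWait        --issued------->  credStateFresh
//	credStateWait        --denied/failed-> credStateDenied / credStateFailed (terminal)
//	credStateWait        --PCR deleted-->  credStateInitial
//	credStateFresh       --refresh due-->  credStateWaitRefresh
//	credStateWaitRefresh --issued------->  credStateFresh (with the new key and chain)
//	credStateWaitRefresh --denied/failed-> credStateDenied / credStateFailed (terminal)
//	credStateWaitRefresh --PCR deleted-->  credStateFresh (retaining the old credentials)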
// jitterDuration returns a random duration in [0, 5 minutes), used to jitter
// deadlines so that multiple PodCertificateProjections do not synchronize
// their PCR creations.
func jitterDuration() time.Duration {
return time.Duration(mathrand.Int64N(int64(5 * time.Minute)))
}
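// jitterDurationExample is an illustrative sketch (hypothetical helper, not
// part of the original file): because every projection draws its jitter
// independently from [0, 5m), projections whose certificates share the same
// BeginRefreshAt spread their refresh PCR creations across a five-minute
// window instead of hitting the API server simultaneously.
func jitterDurationExample(beginRefreshAt time.Time) []time.Time {
	refreshTimes := make([]time.Time, 0, 3)
	for i := 0; i < 3; i++ {
		// Each call returns an independent random offset in [0, 5m).
		refreshTimes = append(refreshTimes, beginRefreshAt.Add(jitterDuration()))
	}
	return refreshTimes
}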
// runRefreshPass adds every non-mirror pod on the node back to the volume
// source processing queue.
func (m *IssuingManager) runRefreshPass(ctx context.Context) {
allPods := m.podManager.GetPods()
for _, pod := range allPods {
m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
}
}
// TrackPod queues the pod's podCertificate projected volume sources for
// processing.
func (m *IssuingManager) TrackPod(ctx context.Context, pod *corev1.Pod) {
m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
}
// ForgetPod queues the pod's podCertificate projected volume sources for processing.
//
// The pod worker will notice that the pod no longer exists and clear any
// pending and live credentials associated with it.
func (m *IssuingManager) ForgetPod(ctx context.Context, pod *corev1.Pod) {
m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
}
// createPodCertificateRequest creates a PodCertificateRequest.
func (m *IssuingManager) createPodCertificateRequest(
ctx context.Context,
namespace string,
podName string, podUID types.UID,
serviceAccountName string, serviceAccountUID types.UID,
nodeName types.NodeName, nodeUID types.UID,
signerName, keyType string, maxExpirationSeconds *int32) ([]byte, *certificatesv1alpha1.PodCertificateRequest, error) {
privateKey, publicKey, proof, err := generateKeyAndProof(keyType, []byte(podUID))
if err != nil {
return nil, nil, fmt.Errorf("while generating keypair: %w", err)
}
pkixPublicKey, err := x509.MarshalPKIXPublicKey(publicKey)
if err != nil {
return nil, nil, fmt.Errorf("while marshaling public key: %w", err)
}
keyPEM, err := pemEncodeKey(privateKey)
if err != nil {
return nil, nil, fmt.Errorf("while PEM-encoding private key: %w", err)
}
req := &certificatesv1alpha1.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
GenerateName: "req-",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "core/v1",
Kind: "Pod",
Name: podName,
UID: podUID,
},
},
},
Spec: certificatesv1alpha1.PodCertificateRequestSpec{
SignerName: signerName,
PodName: podName,
PodUID: podUID,
ServiceAccountName: serviceAccountName,
ServiceAccountUID: serviceAccountUID,
NodeName: nodeName,
NodeUID: nodeUID,
MaxExpirationSeconds: maxExpirationSeconds,
PKIXPublicKey: pkixPublicKey,
ProofOfPossession: proof,
},
}
req, err = m.kc.CertificatesV1alpha1().PodCertificateRequests(namespace).Create(ctx, req, metav1.CreateOptions{})
if err != nil {
return nil, nil, fmt.Errorf("while creating on API: %w", err)
}
return keyPEM, req, nil
}
func (m *IssuingManager) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
credKey := projectionKey{
Namespace: namespace,
PodName: podName,
PodUID: podUID,
VolumeName: volumeName,
SourceIndex: sourceIndex,
}
var rec *projectionRecord
func() {
m.lock.Lock()
defer m.lock.Unlock()
rec = m.credStore[credKey]
}()
if rec == nil {
return nil, nil, fmt.Errorf("no credentials yet for key=%v", credKey)
}
rec.lock.Lock()
defer rec.lock.Unlock()
return rec.curState.getCredBundle()
}
func hashBytes(in []byte) []byte {
out := sha256.Sum256(in)
return out[:]
}
func generateKeyAndProof(keyType string, toBeSigned []byte) (privKey crypto.PrivateKey, pubKey crypto.PublicKey, sig []byte, err error) {
switch keyType {
case "RSA3072":
key, err := rsa.GenerateKey(rand.Reader, 3072)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating RSA 3072 key: %w", err)
}
sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, hashBytes(toBeSigned), nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
}
return key, &key.PublicKey, sig, nil
case "RSA4096":
key, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating RSA 4096 key: %w", err)
}
sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, hashBytes(toBeSigned), nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
}
return key, &key.PublicKey, sig, nil
case "ECDSAP256":
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating ECDSA P256 key: %w", err)
}
sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
if err != nil {
return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
}
return key, &key.PublicKey, sig, nil
case "ECDSAP384":
key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating ECDSA P384 key: %w", err)
}
sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
if err != nil {
return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
}
return key, &key.PublicKey, sig, nil
case "ECDSAP521":
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating ECDSA P521 key: %w", err)
}
sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
if err != nil {
return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
}
return key, &key.PublicKey, sig, nil
case "ED25519":
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, nil, fmt.Errorf("while generating Ed25519 key: %w", err)
}
sig := ed25519.Sign(priv, toBeSigned)
return priv, pub, sig, nil
default:
return nil, nil, nil, fmt.Errorf("unknown key type %q", keyType)
}
}
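// verifyProofSketch is an illustrative sketch (hypothetical helper, not part
// of the original file) of how a signer could verify the proof-of-possession
// produced above for the ECDSA key types: the proof is an ASN.1 ECDSA
// signature over sha256(podUID), checked against the submitted public key.
func verifyProofSketch(pub *ecdsa.PublicKey, podUID types.UID, proof []byte) bool {
	return ecdsa.VerifyASN1(pub, hashBytes([]byte(podUID)), proof)
}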
func pemEncodeKey(key crypto.PrivateKey) ([]byte, error) {
keyDER, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("while marshaling key to PKCS#8: %w", err)
}
return pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: keyDER,
}), nil
}
// cleanCertificateChain ensures that all inter-block data and block headers
// are dropped from the certificate chain.
func cleanCertificateChain(in []byte) []byte {
outChain := &bytes.Buffer{}
rest := in
var b *pem.Block
for {
b, rest = pem.Decode(rest)
if b == nil {
break
}
cleanedBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: b.Bytes,
}
outChain.Write(pem.EncodeToMemory(cleanedBlock))
}
return outChain.Bytes()
}
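// cleanCertificateChainSketch is an illustrative sketch (hypothetical helper,
// not part of the original file) of the cleaning behavior: text between
// blocks and any in-block headers are dropped, and only re-encoded
// CERTIFICATE blocks survive.
func cleanCertificateChainSketch() {
	in := []byte("issuer: CN=example\n" + // inter-block text, dropped
		"-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----\n" +
		"trailing commentary\n") // dropped as well
	fmt.Printf("%s", cleanCertificateChain(in)) // prints exactly one clean CERTIFICATE block
}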
// NoOpManager is an implementation of Manager that just returns errors, meant
// for use in static/detached Kubelet mode.
type NoOpManager struct{}
var _ Manager = (*NoOpManager)(nil)
func (m *NoOpManager) TrackPod(ctx context.Context, pod *corev1.Pod) {
}
func (m *NoOpManager) ForgetPod(ctx context.Context, pod *corev1.Pod) {
}
func (m *NoOpManager) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
return nil, nil, fmt.Errorf("unimplemented")
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package preemption
import (
"context"
"fmt"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
const message = "Preempted in order to admit critical pod"
// CriticalPodAdmissionHandler is an AdmissionFailureHandler that handles admission failure for Critical Pods.
// If the ONLY admission failures are due to insufficient resources, then CriticalPodAdmissionHandler evicts pods
// so that the critical pod can be admitted. For evictions, the CriticalPodAdmissionHandler evicts a set of pods that
// frees up the required resource requests. The set of pods is designed to minimize impact, and is prioritized according to the ordering:
// minimal impact for guaranteed pods > minimal impact for burstable pods > minimal impact for besteffort pods.
// minimal impact is defined as follows: fewest pods evicted > fewest total requests of pods.
// finding the fewest total requests of pods is considered besteffort.
type CriticalPodAdmissionHandler struct {
getPodsFunc eviction.ActivePodsFunc
killPodFunc eviction.KillPodFunc
recorder record.EventRecorder
}
var _ lifecycle.AdmissionFailureHandler = &CriticalPodAdmissionHandler{}
func NewCriticalPodAdmissionHandler(getPodsFunc eviction.ActivePodsFunc, killPodFunc eviction.KillPodFunc, recorder record.EventRecorder) *CriticalPodAdmissionHandler {
return &CriticalPodAdmissionHandler{
getPodsFunc: getPodsFunc,
killPodFunc: killPodFunc,
recorder: recorder,
}
}
// HandleAdmissionFailure gracefully handles admission rejection and, in some
// cases, frees up resources to allow admission of the pod despite its previous failure.
func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(ctx context.Context, admitPod *v1.Pod, failureReasons []lifecycle.PredicateFailureReason) ([]lifecycle.PredicateFailureReason, error) {
if !kubetypes.IsCriticalPod(admitPod) {
return failureReasons, nil
}
// InsufficientResourceError is not a reason to reject a critical pod.
// Instead of rejecting, we free up resources to admit it, if no other reasons for rejection exist.
nonResourceReasons := []lifecycle.PredicateFailureReason{}
resourceReasons := []*admissionRequirement{}
for _, reason := range failureReasons {
if r, ok := reason.(*lifecycle.InsufficientResourceError); ok {
resourceReasons = append(resourceReasons, &admissionRequirement{
resourceName: r.ResourceName,
quantity: r.GetInsufficientAmount(),
})
} else {
nonResourceReasons = append(nonResourceReasons, reason)
}
}
if len(nonResourceReasons) > 0 {
// Return only reasons that are not resource related, since critical pods cannot fail admission for resource reasons.
return nonResourceReasons, nil
}
err := c.evictPodsToFreeRequests(ctx, admitPod, admissionRequirementList(resourceReasons))
// if no error is returned, preemption succeeded and the pod is safe to admit.
return nil, err
}
// evictPodsToFreeRequests takes a list of insufficient resources, and attempts to free them by evicting pods
// based on requests. For example, if the only insufficient resource is 200Mb of memory, this function could
// evict a pod with request=250Mb.
func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(ctx context.Context, admitPod *v1.Pod, insufficientResources admissionRequirementList) error {
logger := klog.FromContext(ctx)
podsToPreempt, err := getPodsToPreempt(admitPod, c.getPodsFunc(), insufficientResources)
if err != nil {
return fmt.Errorf("preemption: error finding a set of pods to preempt: %v", err)
}
for _, pod := range podsToPreempt {
// record that we are evicting the pod
c.recorder.Eventf(pod, v1.EventTypeWarning, events.PreemptContainer, message)
// this is a blocking call and should only return when the pod and its containers are killed.
logger.V(2).Info("Preempting pod to free up resources", "pod", klog.KObj(pod), "podUID", pod.UID, "insufficientResources", insufficientResources.toString(), "requestingPod", klog.KObj(admitPod))
err := c.killPodFunc(pod, true, nil, func(status *v1.PodStatus) {
status.Phase = v1.PodFailed
status.Reason = events.PreemptContainer
status.Message = message
podutil.UpdatePodCondition(status, &v1.PodCondition{
Type: v1.DisruptionTarget,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(status, pod.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: v1.PodReasonTerminationByKubelet,
Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
})
})
if err != nil {
logger.Error(err, "Failed to evict pod", "pod", klog.KObj(pod))
// In future syncPod loops, the kubelet will retry the pod deletion steps that it was stuck on.
continue
}
if len(insufficientResources) > 0 {
metrics.Preemptions.WithLabelValues(insufficientResources[0].resourceName.String()).Inc()
} else {
metrics.Preemptions.WithLabelValues("").Inc()
}
logger.Info("Pod evicted successfully", "pod", klog.KObj(pod))
}
return nil
}
// getPodsToPreempt returns a list of pods that could be preempted to free requests >= requirements
func getPodsToPreempt(pod *v1.Pod, pods []*v1.Pod, requirements admissionRequirementList) ([]*v1.Pod, error) {
bestEffortPods, burstablePods, guaranteedPods := sortPodsByQOS(pod, pods)
// make sure that pods exist to reclaim the requirements
unableToMeetRequirements := requirements.subtract(append(append(bestEffortPods, burstablePods...), guaranteedPods...)...)
if len(unableToMeetRequirements) > 0 {
return nil, fmt.Errorf("no set of running pods found to reclaim resources: %v", unableToMeetRequirements.toString())
}
// find the guaranteed pods we would need to evict if we already evicted ALL burstable and besteffort pods.
guaranteedToEvict, err := getPodsToPreemptByDistance(guaranteedPods, requirements.subtract(append(bestEffortPods, burstablePods...)...))
if err != nil {
return nil, err
}
// Find the burstable pods we would need to evict if we already evicted ALL besteffort pods, and the required guaranteed pods.
burstableToEvict, err := getPodsToPreemptByDistance(burstablePods, requirements.subtract(append(bestEffortPods, guaranteedToEvict...)...))
if err != nil {
return nil, err
}
// Find the besteffort pods we would need to evict if we already evicted the required guaranteed and burstable pods.
bestEffortToEvict, err := getPodsToPreemptByDistance(bestEffortPods, requirements.subtract(append(burstableToEvict, guaranteedToEvict...)...))
if err != nil {
return nil, err
}
return append(append(bestEffortToEvict, burstableToEvict...), guaranteedToEvict...), nil
}
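// Worked example (illustrative): suppose the critical pod needs 1Gi of memory
// and the node runs a 512Mi besteffort pod, a 512Mi burstable pod, and a 2Gi
// guaranteed pod. Evicting ALL besteffort and burstable pods already covers
// the requirement, so guaranteedToEvict is empty. The burstable pass then
// still needs 512Mi beyond the besteffort pods and selects the burstable pod,
// and the besteffort pass selects the besteffort pod. Both small pods are
// preempted and the guaranteed pod survives.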
// getPodsToPreemptByDistance finds the pods that have pod requests >= admission requirements.
// Chooses pods that minimize "distance" to the requirements.
// If more than one pod exists that fulfills the remaining requirements,
// it chooses the pod that has the "smaller resource request"
// This method, by repeatedly choosing the pod that fulfills as much of the requirements as possible,
// attempts to minimize the number of pods returned.
func getPodsToPreemptByDistance(pods []*v1.Pod, requirements admissionRequirementList) ([]*v1.Pod, error) {
podsToEvict := []*v1.Pod{}
// evict pods by shortest distance from remaining requirements, updating requirements every round.
for len(requirements) > 0 {
if len(pods) == 0 {
return nil, fmt.Errorf("no set of running pods found to reclaim resources: %v", requirements.toString())
}
// all distances must be less than len(requirements), because the max distance for a single requirement is 1
bestDistance := float64(len(requirements) + 1)
bestPodIndex := 0
// Find the pod with the smallest distance from requirements
// Or, in the case of two equidistant pods, find the pod with "smaller" resource requests.
for i, pod := range pods {
dist := requirements.distance(pod)
if dist < bestDistance || (bestDistance == dist && smallerResourceRequest(pod, pods[bestPodIndex])) {
bestDistance = dist
bestPodIndex = i
}
}
// subtract the pod from requirements, and transfer it from the input pods to the pods-to-evict list
requirements = requirements.subtract(pods[bestPodIndex])
podsToEvict = append(podsToEvict, pods[bestPodIndex])
pods[bestPodIndex] = pods[len(pods)-1]
pods = pods[:len(pods)-1]
}
return podsToEvict, nil
}
type admissionRequirement struct {
resourceName v1.ResourceName
quantity int64
}
type admissionRequirementList []*admissionRequirement
// distance returns distance of the pods requests from the admissionRequirements.
// The distance is measured by the fraction of the requirement satisfied by the pod,
// so that each requirement is weighted equally, regardless of absolute magnitude.
func (a admissionRequirementList) distance(pod *v1.Pod) float64 {
dist := float64(0)
for _, req := range a {
remainingRequest := float64(req.quantity - resource.GetResourceRequest(pod, req.resourceName))
if remainingRequest > 0 {
dist += math.Pow(remainingRequest/float64(req.quantity), 2)
}
}
return dist
}
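// Worked example (illustrative): for requirements [(cpu, 1000), (memory, 100)],
// a pod requesting cpu=500 and memory=100 leaves remaining cpu=500 and
// memory=0, so distance = (500/1000)^2 + 0 = 0.25. A pod that fully covers
// every requirement has distance 0, and distances are comparable across
// requirements of different magnitudes because each term is a fraction.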
// subtract returns a new admissionRequirementList containing remaining requirements if the provided pod
// were to be preempted
func (a admissionRequirementList) subtract(pods ...*v1.Pod) admissionRequirementList {
newList := []*admissionRequirement{}
for _, req := range a {
newQuantity := req.quantity
for _, pod := range pods {
newQuantity -= resource.GetResourceRequest(pod, req.resourceName)
if newQuantity <= 0 {
break
}
}
if newQuantity > 0 {
newList = append(newList, &admissionRequirement{
resourceName: req.resourceName,
quantity: newQuantity,
})
}
}
return newList
}
func (a admissionRequirementList) toString() string {
s := "["
for _, req := range a {
s += fmt.Sprintf("(res: %v, q: %d), ", req.resourceName, req.quantity)
}
return s + "]"
}
// sortPodsByQOS returns lists containing besteffort, burstable, and guaranteed pods that
// can be preempted by preemptor pod.
func sortPodsByQOS(preemptor *v1.Pod, pods []*v1.Pod) (bestEffort, burstable, guaranteed []*v1.Pod) {
for _, pod := range pods {
if kubetypes.Preemptable(preemptor, pod) {
switch v1qos.GetPodQOS(pod) {
case v1.PodQOSBestEffort:
bestEffort = append(bestEffort, pod)
case v1.PodQOSBurstable:
burstable = append(burstable, pod)
case v1.PodQOSGuaranteed:
guaranteed = append(guaranteed, pod)
default:
}
}
}
return
}
// smallerResourceRequest returns true if pod1 has a smaller request than pod2
func smallerResourceRequest(pod1 *v1.Pod, pod2 *v1.Pod) bool {
priorityList := []v1.ResourceName{
v1.ResourceMemory,
v1.ResourceCPU,
}
for _, res := range priorityList {
req1 := resource.GetResourceRequest(pod1, res)
req2 := resource.GetResourceRequest(pod2, res)
if req1 < req2 {
return true
} else if req1 > req2 {
return false
}
}
return true
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"context"
"fmt"
"io"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/probe"
execprobe "k8s.io/kubernetes/pkg/probe/exec"
grpcprobe "k8s.io/kubernetes/pkg/probe/grpc"
httpprobe "k8s.io/kubernetes/pkg/probe/http"
tcpprobe "k8s.io/kubernetes/pkg/probe/tcp"
"k8s.io/utils/exec"
"k8s.io/klog/v2"
)
const maxProbeRetries = 3
// Prober helps to check the liveness/readiness/startup of a container.
type prober struct {
exec execprobe.Prober
http httpprobe.Prober
tcp tcpprobe.Prober
grpc grpcprobe.Prober
runner kubecontainer.CommandRunner
recorder record.EventRecorder
}
// newProber creates a prober. It takes a command runner for exec probes and
// an event recorder for emitting probe-related container events.
func newProber(
runner kubecontainer.CommandRunner,
recorder record.EventRecorder) *prober {
const followNonLocalRedirects = false
return &prober{
exec: execprobe.New(),
http: httpprobe.New(followNonLocalRedirects),
tcp: tcpprobe.New(),
grpc: grpcprobe.New(),
runner: runner,
recorder: recorder,
}
}
// recordContainerEvent should be used by the prober for all container-related events.
func (pb *prober) recordContainerEvent(ctx context.Context, pod *v1.Pod, container *v1.Container, eventType, reason, message string, args ...interface{}) {
logger := klog.FromContext(ctx)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
logger.Error(err, "Can't make a ref to pod and container", "pod", klog.KObj(pod), "containerName", container.Name)
return
}
pb.recorder.Eventf(ref, eventType, reason, message, args...)
}
// probe probes the container.
func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) {
var probeSpec *v1.Probe
switch probeType {
case readiness:
probeSpec = container.ReadinessProbe
case liveness:
probeSpec = container.LivenessProbe
case startup:
probeSpec = container.StartupProbe
default:
return results.Failure, fmt.Errorf("unknown probe type: %q", probeType)
}
logger := klog.FromContext(ctx)
if probeSpec == nil {
logger.Info("Probe is nil", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
return results.Success, nil
}
result, output, err := pb.runProbeWithRetries(ctx, probeType, probeSpec, pod, status, container, containerID, maxProbeRetries)
if err != nil {
// Handle probe error
logger.V(1).Error(err, "Probe errored", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "probeResult", result)
pb.recordContainerEvent(ctx, pod, &container, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored and resulted in %s state: %s", probeType, result, err)
return results.Failure, err
}
switch result {
case probe.Success:
logger.V(3).Info("Probe succeeded", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
return results.Success, nil
case probe.Warning:
pb.recordContainerEvent(ctx, pod, &container, v1.EventTypeWarning, events.ContainerProbeWarning, "%s probe warning: %s", probeType, output)
logger.V(3).Info("Probe succeeded with a warning", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "output", output)
return results.Success, nil
case probe.Failure:
logger.V(1).Info("Probe failed", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "probeResult", result, "output", output)
pb.recordContainerEvent(ctx, pod, &container, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
return results.Failure, nil
case probe.Unknown:
logger.V(1).Info("Probe unknown without error", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "probeResult", result)
return results.Failure, nil
default:
logger.V(1).Info("Unsupported probe result", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "probeResult", result)
return results.Failure, nil
}
}
// runProbeWithRetries tries to probe the container in a finite loop; it returns
// the last result if it never succeeds.
func (pb *prober) runProbeWithRetries(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) {
var err error
var result probe.Result
var output string
for i := 0; i < retries; i++ {
result, output, err = pb.runProbe(ctx, probeType, p, pod, status, container, containerID)
if err == nil {
return result, output, nil
}
}
return result, output, err
}
func (pb *prober) runProbe(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
logger := klog.FromContext(ctx)
timeout := time.Duration(p.TimeoutSeconds) * time.Second
switch {
case p.Exec != nil:
logger.V(4).Info("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command)
command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
return pb.exec.Probe(pb.newExecInContainer(ctx, pod, container, containerID, command, timeout))
case p.HTTPGet != nil:
req, err := httpprobe.NewRequestForHTTPGetAction(p.HTTPGet, &container, status.PodIP, "probe")
if err != nil {
// Log and record event for Unknown result
logger.V(4).Info("HTTP-Probe failed to create request", "error", err)
return probe.Unknown, "", err
}
if loggerV4 := logger.V(4); loggerV4.Enabled() {
port := req.URL.Port()
host := req.URL.Hostname()
path := req.URL.Path
scheme := req.URL.Scheme
headers := p.HTTPGet.HTTPHeaders
loggerV4.Info("HTTP-Probe", "scheme", scheme, "host", host, "port", port, "path", path, "timeout", timeout, "headers", headers, "probeType", probeType)
}
return pb.http.Probe(req, timeout)
case p.TCPSocket != nil:
port, err := probe.ResolveContainerPort(p.TCPSocket.Port, &container)
if err != nil {
logger.V(4).Info("TCP-Probe failed to resolve port", "error", err)
return probe.Unknown, "", err
}
host := p.TCPSocket.Host
if host == "" {
host = status.PodIP
}
logger.V(4).Info("TCP-Probe", "host", host, "port", port, "timeout", timeout)
return pb.tcp.Probe(host, port, timeout)
case p.GRPC != nil:
host := status.PodIP
service := ""
if p.GRPC.Service != nil {
service = *p.GRPC.Service
}
logger.V(4).Info("GRPC-Probe", "host", host, "service", service, "port", p.GRPC.Port, "timeout", timeout)
return pb.grpc.Probe(host, service, int(p.GRPC.Port), timeout)
default:
logger.V(4).Info("Failed to find probe builder for container", "containerName", container.Name)
return probe.Unknown, "", fmt.Errorf("missing probe handler for %s:%s", format.Pod(pod), container.Name)
}
}
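// probeSpecSketch is an illustrative sketch (hypothetical helper, not part of
// the original file): an exec probe that runProbe above would dispatch to the
// exec prober, running the command inside the container with a one-second
// timeout.
func probeSpecSketch() *v1.Probe {
	return &v1.Probe{
		ProbeHandler:   v1.ProbeHandler{Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/ready"}}},
		TimeoutSeconds: 1,
	}
}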
type execInContainer struct {
// run executes a command in a container. Combined stdout and stderr output is always returned. An
// error is returned if one occurred.
run func() ([]byte, error)
writer io.Writer
pod *v1.Pod
container v1.Container
}
func (pb *prober) newExecInContainer(ctx context.Context, pod *v1.Pod, container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
return &execInContainer{
run: func() ([]byte, error) { return pb.runner.RunInContainer(ctx, containerID, cmd, timeout) },
pod: pod,
container: container,
}
}
func (eic *execInContainer) Run() error {
return nil
}
func (eic *execInContainer) CombinedOutput() ([]byte, error) {
return eic.run()
}
func (eic *execInContainer) Output() ([]byte, error) {
return nil, fmt.Errorf("unimplemented")
}
func (eic *execInContainer) SetDir(dir string) {
// unimplemented
}
func (eic *execInContainer) SetStdin(in io.Reader) {
// unimplemented
}
func (eic *execInContainer) SetStdout(out io.Writer) {
eic.writer = out
}
func (eic *execInContainer) SetStderr(out io.Writer) {
eic.writer = out
}
func (eic *execInContainer) SetEnv(env []string) {
// unimplemented
}
func (eic *execInContainer) Stop() {
// unimplemented
}
func (eic *execInContainer) Start() error {
data, err := eic.run()
if eic.writer != nil {
// only record the write error, do not cover the command run error
if p, err := eic.writer.Write(data); err != nil {
// Use klog.TODO() because we currently do not have a proper context/logger to pass in.
// Replace this with an appropriate context/logger when refactoring this function to accept a context parameter.
klog.TODO().Error(err, "Unable to write all bytes from execInContainer", "expectedBytes", len(data), "actualBytes", p, "pod", klog.KObj(eic.pod), "containerName", eic.container.Name)
}
}
return err
}
func (eic *execInContainer) Wait() error {
return nil
}
func (eic *execInContainer) StdoutPipe() (io.ReadCloser, error) {
return nil, fmt.Errorf("unimplemented")
}
func (eic *execInContainer) StderrPipe() (io.ReadCloser, error) {
return nil, fmt.Errorf("unimplemented")
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"context"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/utils/clock"
)
// ProberResults stores the cumulative number of a probe by result as prometheus metrics.
var ProberResults = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: "prober",
Name: "probe_total",
Help: "Cumulative number of a liveness, readiness or startup probe for a container by result.",
StabilityLevel: metrics.BETA,
},
[]string{"probe_type",
"result",
"container",
"pod",
"namespace",
"pod_uid"},
)
// ProberDuration stores the duration of a successful probe lifecycle by result as prometheus metrics.
var ProberDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: "prober",
Name: "probe_duration_seconds",
Help: "Duration in seconds for a probe response.",
StabilityLevel: metrics.ALPHA,
},
[]string{"probe_type",
"container",
"pod",
"namespace"},
)
// Manager manages pod probing. It creates a probe "worker" for every container that specifies a
// probe (AddPod). The worker periodically probes its assigned container and caches the results. The
// manager uses the cached probe results to set the appropriate Ready state in the PodStatus when
// requested (UpdatePodStatus). Updating probe parameters is not currently supported.
type Manager interface {
// AddPod creates new probe workers for every container probe. This should be called for every
// pod created.
AddPod(ctx context.Context, pod *v1.Pod)
// StopLivenessAndStartup handles stopping liveness and startup probes during termination.
StopLivenessAndStartup(pod *v1.Pod)
// RemovePod handles cleaning up the removed pod state, including terminating probe workers and
// deleting cached results.
RemovePod(pod *v1.Pod)
// CleanupPods handles cleaning up pods which should no longer be running.
// It takes a map of "desired pods" which should not be cleaned up.
CleanupPods(desiredPods map[types.UID]sets.Empty)
// UpdatePodStatus modifies the given PodStatus with the appropriate Ready state for each
// container based on container running status, cached probe results and worker states.
UpdatePodStatus(context.Context, *v1.Pod, *v1.PodStatus)
}
type manager struct {
// Map of active workers for probes
workers map[probeKey]*worker
// Lock for accessing & mutating workers
workerLock sync.RWMutex
// The statusManager cache provides pod IP and container IDs for probing.
statusManager status.Manager
// readinessManager manages the results of readiness probes
readinessManager results.Manager
// livenessManager manages the results of liveness probes
livenessManager results.Manager
// startupManager manages the results of startup probes
startupManager results.Manager
// prober executes the probe actions.
prober *prober
start time.Time
}
// NewManager creates a Manager for pod probing.
func NewManager(
statusManager status.Manager,
livenessManager results.Manager,
readinessManager results.Manager,
startupManager results.Manager,
runner kubecontainer.CommandRunner,
recorder record.EventRecorder) Manager {
prober := newProber(runner, recorder)
return &manager{
statusManager: statusManager,
prober: prober,
readinessManager: readinessManager,
livenessManager: livenessManager,
startupManager: startupManager,
workers: make(map[probeKey]*worker),
start: clock.RealClock{}.Now(),
}
}
// Key uniquely identifying container probes
type probeKey struct {
podUID types.UID
containerName string
probeType probeType
}
// Type of probe (liveness, readiness or startup)
type probeType int
const (
liveness probeType = iota
readiness
startup
probeResultSuccessful string = "successful"
probeResultFailed string = "failed"
probeResultUnknown string = "unknown"
)
// For debugging.
func (t probeType) String() string {
switch t {
case readiness:
return "Readiness"
case liveness:
return "Liveness"
case startup:
return "Startup"
default:
return "UNKNOWN"
}
}
func getRestartableInitContainers(pod *v1.Pod) []v1.Container {
var restartableInitContainers []v1.Container
for _, c := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&c) {
restartableInitContainers = append(restartableInitContainers, c)
}
}
return restartableInitContainers
}
func (m *manager) AddPod(ctx context.Context, pod *v1.Pod) {
m.workerLock.Lock()
defer m.workerLock.Unlock()
logger := klog.FromContext(ctx)
key := probeKey{podUID: pod.UID}
for _, c := range append(pod.Spec.Containers, getRestartableInitContainers(pod)...) {
key.containerName = c.Name
if c.StartupProbe != nil {
key.probeType = startup
if _, ok := m.workers[key]; ok {
logger.V(8).Error(nil, "Startup probe already exists for container",
"pod", klog.KObj(pod), "containerName", c.Name)
return
}
w := newWorker(m, startup, pod, c)
m.workers[key] = w
go w.run(ctx)
}
if c.ReadinessProbe != nil {
key.probeType = readiness
if _, ok := m.workers[key]; ok {
logger.V(8).Error(nil, "Readiness probe already exists for container",
"pod", klog.KObj(pod), "containerName", c.Name)
return
}
w := newWorker(m, readiness, pod, c)
m.workers[key] = w
go w.run(ctx)
}
if c.LivenessProbe != nil {
key.probeType = liveness
if _, ok := m.workers[key]; ok {
logger.V(8).Error(nil, "Liveness probe already exists for container",
"pod", klog.KObj(pod), "containerName", c.Name)
return
}
w := newWorker(m, liveness, pod, c)
m.workers[key] = w
go w.run(ctx)
}
}
}
func (m *manager) StopLivenessAndStartup(pod *v1.Pod) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
key := probeKey{podUID: pod.UID}
for _, c := range pod.Spec.Containers {
key.containerName = c.Name
for _, probeType := range [...]probeType{liveness, startup} {
key.probeType = probeType
if worker, ok := m.workers[key]; ok {
worker.stop()
}
}
}
}
func (m *manager) RemovePod(pod *v1.Pod) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
key := probeKey{podUID: pod.UID}
for _, c := range append(pod.Spec.Containers, getRestartableInitContainers(pod)...) {
key.containerName = c.Name
for _, probeType := range [...]probeType{readiness, liveness, startup} {
key.probeType = probeType
if worker, ok := m.workers[key]; ok {
worker.stop()
}
}
}
}
func (m *manager) CleanupPods(desiredPods map[types.UID]sets.Empty) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
for key, worker := range m.workers {
if _, ok := desiredPods[key.podUID]; !ok {
worker.stop()
}
}
}
func (m *manager) isContainerStarted(pod *v1.Pod, containerStatus *v1.ContainerStatus) bool {
if containerStatus.State.Running == nil {
return false
}
if result, ok := m.startupManager.Get(kubecontainer.ParseContainerID(containerStatus.ContainerID)); ok {
return result == results.Success
}
// if there is a startup probe which hasn't run yet, the container is not
// started.
if _, exists := m.getWorker(pod.UID, containerStatus.Name, startup); exists {
return false
}
// there is no startup probe, so the container is started.
return true
}
func (m *manager) UpdatePodStatus(ctx context.Context, pod *v1.Pod, podStatus *v1.PodStatus) {
logger := klog.FromContext(ctx)
for i, c := range podStatus.ContainerStatuses {
started := m.isContainerStarted(pod, &podStatus.ContainerStatuses[i])
podStatus.ContainerStatuses[i].Started = &started
if !started {
continue
}
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok && result == results.Success {
ready = true
} else {
// Check whether there is a readiness probe which hasn't run yet.
w, exists := m.getWorker(pod.UID, c.Name, readiness)
ready = !exists // no readinessProbe -> always ready
if exists {
// Trigger an immediate run of the readinessProbe to update ready state
select {
case w.manualTriggerCh <- struct{}{}:
default: // Non-blocking.
logger.Info("Failed to trigger a manual run", "probe", w.probeType.String())
}
}
}
podStatus.ContainerStatuses[i].Ready = ready
}
for i, c := range podStatus.InitContainerStatuses {
started := m.isContainerStarted(pod, &podStatus.InitContainerStatuses[i])
podStatus.InitContainerStatuses[i].Started = &started
initContainer, ok := kubeutil.GetContainerByIndex(pod.Spec.InitContainers, podStatus.InitContainerStatuses, i)
if !ok {
logger.V(4).Info("Mismatch between pod spec and status, likely programmer error", "pod", klog.KObj(pod), "containerName", c.Name)
continue
}
if !podutil.IsRestartableInitContainer(&initContainer) {
if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 {
podStatus.InitContainerStatuses[i].Ready = true
}
continue
}
if !started {
continue
}
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok && result == results.Success {
ready = true
} else {
// Check whether there is a readiness probe which hasn't run yet.
w, exists := m.getWorker(pod.UID, c.Name, readiness)
ready = !exists // no readinessProbe -> always ready
if exists {
// Trigger an immediate run of the readinessProbe to update ready state
select {
case w.manualTriggerCh <- struct{}{}:
default: // Non-blocking.
logger.Info("Failed to trigger a manual run", "probe", w.probeType.String())
}
}
}
podStatus.InitContainerStatuses[i].Ready = ready
}
}
func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
worker, ok := m.workers[probeKey{podUID, containerName, probeType}]
return worker, ok
}
// Called by the worker after exiting.
func (m *manager) removeWorker(podUID types.UID, containerName string, probeType probeType) {
m.workerLock.Lock()
defer m.workerLock.Unlock()
delete(m.workers, probeKey{podUID, containerName, probeType})
}
// workerCount returns the total number of probe workers. For testing.
func (m *manager) workerCount() int {
m.workerLock.RLock()
defer m.workerLock.RUnlock()
return len(m.workers)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package results
import (
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// Manager provides a probe results cache and channel of updates.
type Manager interface {
// Get returns the cached result for the container with the given ID.
Get(kubecontainer.ContainerID) (Result, bool)
// Set sets the cached result for the container with the given ID.
// The pod is only included to be sent with the update.
Set(kubecontainer.ContainerID, Result, *v1.Pod)
// Remove clears the cached result for the container with the given ID.
Remove(kubecontainer.ContainerID)
// Updates creates a channel that receives an Update whenever a cached result
// changes (removals are not sent).
// NOTE: The current implementation only supports a single updates channel.
Updates() <-chan Update
}
// Result is the type for probe results.
type Result int
const (
// Unknown is encoded as -1 (type Result)
Unknown Result = iota - 1
// Success is encoded as 0 (type Result)
Success
// Failure is encoded as 1 (type Result)
Failure
)
func (r Result) String() string {
switch r {
case Success:
return "Success"
case Failure:
return "Failure"
default:
return "UNKNOWN"
}
}
// ToPrometheusType translates a Result to a form which is better understood by prometheus.
func (r Result) ToPrometheusType() float64 {
switch r {
case Success:
return 0
case Failure:
return 1
default:
return -1
}
}
// Update describes a probe result change sent over the Updates channel.
type Update struct {
ContainerID kubecontainer.ContainerID
Result Result
PodUID types.UID
}
// Manager implementation.
type manager struct {
// guards the cache
sync.RWMutex
// map of container ID -> probe Result
cache map[kubecontainer.ContainerID]Result
// channel of updates
updates chan Update
}
var _ Manager = &manager{}
// NewManager creates and returns an empty results manager.
func NewManager() Manager {
return &manager{
cache: make(map[kubecontainer.ContainerID]Result),
updates: make(chan Update, 20),
}
}
func (m *manager) Get(id kubecontainer.ContainerID) (Result, bool) {
m.RLock()
defer m.RUnlock()
result, found := m.cache[id]
return result, found
}
func (m *manager) Set(id kubecontainer.ContainerID, result Result, pod *v1.Pod) {
if m.setInternal(id, result) {
m.updates <- Update{id, result, pod.UID}
}
}
// Internal helper for locked portion of set. Returns whether an update should be sent.
func (m *manager) setInternal(id kubecontainer.ContainerID, result Result) bool {
m.Lock()
defer m.Unlock()
prev, exists := m.cache[id]
if !exists || prev != result {
m.cache[id] = result
return true
}
return false
}
func (m *manager) Remove(id kubecontainer.ContainerID) {
m.Lock()
defer m.Unlock()
delete(m.cache, id)
}
func (m *manager) Updates() <-chan Update {
return m.updates
}
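// resultsManagerSketch is an illustrative sketch (hypothetical helper, not
// part of the original file): Set only publishes on the Updates channel when
// the cached result actually changes, so the second Set below sends nothing.
func resultsManagerSketch(pod *v1.Pod) {
	m := NewManager()
	id := kubecontainer.ContainerID{Type: "test", ID: "c1"}
	m.Set(id, Success, pod) // cached and published on Updates()
	m.Set(id, Success, pod) // unchanged; nothing is published
}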
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
// FakeManager simulates a prober.Manager for testing.
type FakeManager struct{}
// Unused methods below.
// AddPod simulates adding a Pod.
func (FakeManager) AddPod(_ context.Context, _ *v1.Pod) {}
// RemovePod simulates removing a Pod.
func (FakeManager) RemovePod(_ *v1.Pod) {}
// StopLivenessAndStartup simulates stopping liveness and startup probes.
func (FakeManager) StopLivenessAndStartup(_ *v1.Pod) {}
// CleanupPods simulates cleaning up Pods.
func (FakeManager) CleanupPods(_ map[types.UID]sets.Empty) {}
// Start simulates start syncing the probe status
func (FakeManager) Start() {}
// UpdatePodStatus simulates updating the Pod Status.
func (FakeManager) UpdatePodStatus(_ context.Context, _ *v1.Pod, podStatus *v1.PodStatus) {
for i := range podStatus.ContainerStatuses {
podStatus.ContainerStatuses[i].Ready = true
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober
import (
"context"
"math/rand"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
)
// worker handles the periodic probing of its assigned container. Each worker has a go-routine
// associated with it which runs the probe loop until the container permanently terminates, or the
// stop channel is closed. The worker uses the probe Manager's statusManager to get up-to-date
// container IDs.
type worker struct {
// Channel for stopping the probe.
stopCh chan struct{}
// Channel for triggering the probe manually.
manualTriggerCh chan struct{}
// The pod containing this probe (read-only)
pod *v1.Pod
// The container to probe (read-only)
container v1.Container
// Describes the probe configuration (read-only)
spec *v1.Probe
// The type of the worker.
probeType probeType
// The probe value during the initial delay.
initialValue results.Result
// Where to store this worker's results.
resultsManager results.Manager
probeManager *manager
// The last known container ID for this worker.
containerID kubecontainer.ContainerID
// The last probe result for this worker.
lastResult results.Result
// How many times in a row the probe has returned the same result.
resultRun int
// If set, skip probing.
onHold bool
// proberResultsMetricLabels holds the labels attached to this worker
// for the ProberResults metric by result.
proberResultsSuccessfulMetricLabels metrics.Labels
proberResultsFailedMetricLabels metrics.Labels
proberResultsUnknownMetricLabels metrics.Labels
// proberDurationMetricLabels holds the labels attached to this worker
// for the ProberDuration metric by result.
proberDurationSuccessfulMetricLabels metrics.Labels
proberDurationUnknownMetricLabels metrics.Labels
}
// Creates and starts a new probe worker.
func newWorker(
m *manager,
probeType probeType,
pod *v1.Pod,
container v1.Container) *worker {
w := &worker{
stopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.
manualTriggerCh: make(chan struct{}, 1), // Buffer so prober_manager can do non-blocking calls to doProbe.
pod: pod,
container: container,
probeType: probeType,
probeManager: m,
}
switch probeType {
case readiness:
w.spec = container.ReadinessProbe
w.resultsManager = m.readinessManager
w.initialValue = results.Failure
case liveness:
w.spec = container.LivenessProbe
w.resultsManager = m.livenessManager
w.initialValue = results.Success
case startup:
w.spec = container.StartupProbe
w.resultsManager = m.startupManager
w.initialValue = results.Unknown
}
basicMetricLabels := metrics.Labels{
"probe_type": w.probeType.String(),
"container": w.container.Name,
"pod": w.pod.Name,
"namespace": w.pod.Namespace,
"pod_uid": string(w.pod.UID),
}
proberDurationLabels := metrics.Labels{
"probe_type": w.probeType.String(),
"container": w.container.Name,
"pod": w.pod.Name,
"namespace": w.pod.Namespace,
}
w.proberResultsSuccessfulMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
w.proberResultsSuccessfulMetricLabels["result"] = probeResultSuccessful
w.proberResultsFailedMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
w.proberResultsFailedMetricLabels["result"] = probeResultFailed
w.proberResultsUnknownMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
w.proberResultsUnknownMetricLabels["result"] = probeResultUnknown
w.proberDurationSuccessfulMetricLabels = deepCopyPrometheusLabels(proberDurationLabels)
w.proberDurationUnknownMetricLabels = deepCopyPrometheusLabels(proberDurationLabels)
return w
}
// run periodically probes the container.
func (w *worker) run(ctx context.Context) {
logger := klog.FromContext(ctx)
probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
// If the kubelet restarted, the probes could be started in rapid succession.
// Let the worker wait for a random portion of tickerPeriod before probing.
// Do it only if the kubelet started recently.
if probeTickerPeriod > time.Since(w.probeManager.start) {
time.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))
}
probeTicker := time.NewTicker(probeTickerPeriod)
defer func() {
// Clean up.
probeTicker.Stop()
if !w.containerID.IsEmpty() {
w.resultsManager.Remove(w.containerID)
}
w.probeManager.removeWorker(w.pod.UID, w.container.Name, w.probeType)
ProberResults.Delete(w.proberResultsSuccessfulMetricLabels)
ProberResults.Delete(w.proberResultsFailedMetricLabels)
ProberResults.Delete(w.proberResultsUnknownMetricLabels)
ProberDuration.Delete(w.proberDurationSuccessfulMetricLabels)
ProberDuration.Delete(w.proberDurationUnknownMetricLabels)
}()
probeLoop:
for w.doProbe(ctx) {
// Wait for next probe tick.
select {
case <-w.stopCh:
break probeLoop
case <-probeTicker.C:
// continue
case <-w.manualTriggerCh:
// Reset the periodic timer so that the next scheduled probe runs
// probeTickerPeriod after this manual run.
probeTicker.Reset(probeTickerPeriod)
logger.V(4).Info("Triggered Probe by manual run", "probeType", w.probeType, "pod", klog.KObj(w.pod), "podUID", w.pod.UID, "containerName", w.container.Name)
// continue
}
}
}
// stop stops the probe worker. The worker handles cleanup and removes itself from its manager.
// It is safe to call stop multiple times.
func (w *worker) stop() {
select {
case w.stopCh <- struct{}{}:
default: // Non-blocking.
}
}
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe(ctx context.Context) (keepGoing bool) {
defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })
logger := klog.FromContext(ctx)
startTime := time.Now()
status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
if !ok {
// Either the pod has not been created yet, or it was already deleted.
logger.V(3).Info("No status for pod", "pod", klog.KObj(w.pod))
return true
}
// Worker should terminate if pod is terminated.
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
logger.V(3).Info("Pod is terminated, exiting probe worker",
"pod", klog.KObj(w.pod), "phase", status.Phase)
return false
}
c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name)
if !ok || len(c.ContainerID) == 0 {
c, ok = podutil.GetContainerStatus(status.InitContainerStatuses, w.container.Name)
if !ok || len(c.ContainerID) == 0 {
// Either the container has not been created yet, or it was deleted.
logger.V(3).Info("Probe target container not found",
"pod", klog.KObj(w.pod), "containerName", w.container.Name)
return true // Wait for more information.
}
}
if w.containerID.String() != c.ContainerID {
if !w.containerID.IsEmpty() {
w.resultsManager.Remove(w.containerID)
}
w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
// We've got a new container; resume probing.
w.onHold = false
}
if w.onHold {
// Worker is on hold until there is a new container.
return true
}
if c.State.Running == nil {
logger.V(3).Info("Non-running container probed",
"pod", klog.KObj(w.pod), "containerName", w.container.Name)
if !w.containerID.IsEmpty() {
w.resultsManager.Set(w.containerID, results.Failure, w.pod)
}
// Abort if the container will not be restarted.
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
return c.State.Terminated != nil || podutil.IsContainerRestartable(w.pod.Spec, w.container)
}
return c.State.Terminated == nil ||
w.pod.Spec.RestartPolicy != v1.RestartPolicyNever
}
// Graceful shutdown of the pod.
if w.pod.ObjectMeta.DeletionTimestamp != nil && (w.probeType == liveness || w.probeType == startup) {
logger.V(3).Info("Pod deletion requested, setting probe result to success",
"probeType", w.probeType, "pod", klog.KObj(w.pod), "containerName", w.container.Name)
if w.probeType == startup {
logger.Info("Pod deletion requested before container has fully started",
"pod", klog.KObj(w.pod), "containerName", w.container.Name)
}
// Set a last result to ensure quiet shutdown.
w.resultsManager.Set(w.containerID, results.Success, w.pod)
// Stop probing at this point.
return false
}
// Probe disabled for InitialDelaySeconds.
if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
return true
}
if c.Started != nil && *c.Started {
// Stop probing for startup once the container has started.
// The worker keeps running so probing resumes if the container restarts.
if w.probeType == startup {
return true
}
} else {
// Disable other probes until container has started.
if w.probeType != startup {
return true
}
}
// Note: the exec probe does NOT have access to pod environment variables or the downward API.
result, err := w.probeManager.prober.probe(ctx, w.probeType, w.pod, status, w.container, w.containerID)
if err != nil {
// Prober error, throw away the result.
return true
}
switch result {
case results.Success:
ProberResults.With(w.proberResultsSuccessfulMetricLabels).Inc()
ProberDuration.With(w.proberDurationSuccessfulMetricLabels).Observe(time.Since(startTime).Seconds())
case results.Failure:
ProberResults.With(w.proberResultsFailedMetricLabels).Inc()
default:
ProberResults.With(w.proberResultsUnknownMetricLabels).Inc()
ProberDuration.With(w.proberDurationUnknownMetricLabels).Observe(time.Since(startTime).Seconds())
}
if w.lastResult == result {
w.resultRun++
} else {
w.lastResult = result
w.resultRun = 1
}
if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
// Success or failure is below threshold - leave the probe state unchanged.
return true
}
w.resultsManager.Set(w.containerID, result, w.pod)
if (w.probeType == liveness && result == results.Failure) || w.probeType == startup {
// If the container fails a liveness/startup check, it will need to be restarted.
// Stop probing until we see a new container ID. This reduces the
// chance of hitting #21751, where running `docker exec` while a
// container is being stopped may lead to corrupted container state.
// In addition, once a startup probe's result threshold is exceeded, we stop
// probing until the container is restarted, to prevent extra probe
// executions (#117153).
w.onHold = true
w.resultRun = 0
}
return true
}
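// A sketch of the threshold handling above, with hypothetical values: with
// spec.FailureThreshold = 3 and the probe returning
// Success, Failure, Failure, Failure, resultRun counts 1, 1, 2, 3, and only
// the third consecutive Failure (resultRun == FailureThreshold) is pushed to
// the resultsManager. A single flaky result therefore never flips the
// reported state.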
func deepCopyPrometheusLabels(m metrics.Labels) metrics.Labels {
ret := make(metrics.Labels, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package qos contains helper functions for quality of service.
// For each resource (memory, CPU) the kubelet supports three classes of containers.
// Guaranteed containers receive the highest priority and get all the resources
// they need.
// Burstable containers are guaranteed their request and can "burst" to use more
// resources when available.
// Best-Effort containers, which don't specify a request, can use resources only
// when they are not being used by other pods.
package qos
import (
v1 "k8s.io/api/core/v1"
resourcehelper "k8s.io/component-helpers/resource"
)
// minRegularContainerMemory returns the minimum memory resource quantity
// across all regular containers in pod.Spec.Containers.
// It does not include initContainers (both restartable and non-restartable).
func minRegularContainerMemory(pod v1.Pod) int64 {
memoryValue := pod.Spec.Containers[0].Resources.Requests.Memory().Value()
for _, container := range pod.Spec.Containers[1:] {
if container.Resources.Requests.Memory().Value() < memoryValue {
memoryValue = container.Resources.Requests.Memory().Value()
}
}
return memoryValue
}
// remainingPodMemReqPerContainer calculates the remaining pod memory request per
// container by:
// 1. Taking the total pod memory requests
// 2. Subtracting total container memory requests from pod memory requests
// 3. Dividing the remainder by the number of containers.
// This gives us the additional memory request that is not allocated to any
// container in the pod. This value is divided equally among all containers to
// calculate the OOM score adjustment.
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment
// for more details.
func remainingPodMemReqPerContainer(pod *v1.Pod) int64 {
var remainingMemory int64
if pod.Spec.Resources.Requests.Memory().IsZero() {
return remainingMemory
}
numContainers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers)
// Aggregated requests of all containers.
aggrContainerReqs := resourcehelper.AggregateContainerRequests(pod, resourcehelper.PodResourcesOptions{})
remainingMemory = pod.Spec.Resources.Requests.Memory().Value() - aggrContainerReqs.Memory().Value()
remainingMemoryPerContainer := remainingMemory / int64(numContainers)
return remainingMemoryPerContainer
}
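// A worked example of the division above, with hypothetical numbers: a pod-level
// memory request of 1Gi, two regular containers requesting 256Mi each, and one
// init container requesting 128Mi give aggregated container requests of 640Mi.
// The remainder is 1024Mi - 640Mi = 384Mi, split across 3 containers:
// 128Mi of additional request attributed to each container.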
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qos
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
resourcehelper "k8s.io/component-helpers/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/types"
)
const (
// KubeletOOMScoreAdj is the OOM score adjustment for Kubelet
KubeletOOMScoreAdj int = -999
// KubeProxyOOMScoreAdj is the OOM score adjustment for kube-proxy
KubeProxyOOMScoreAdj int = -999
guaranteedOOMScoreAdj int = -997
besteffortOOMScoreAdj int = 1000
)
// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the
// container should be adjusted.
// The OOM score of a process is the percentage of memory it consumes
// multiplied by 10 (barring exceptional cases), plus a configurable adjustment
// between -1000 and 1000. Containers with higher OOM scores are killed first if
// the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
// OOMScoreAdjust should be calculated based on the allocated resources, so the pod argument should
// contain the allocated resources in the spec.
func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
if types.IsNodeCriticalPod(pod) {
// Only node critical pod should be the last to get killed.
return guaranteedOOMScoreAdj
}
switch v1qos.GetPodQOS(pod) {
case v1.PodQOSGuaranteed:
// Guaranteed containers should be the last to get killed.
return guaranteedOOMScoreAdj
case v1.PodQOSBestEffort:
return besteffortOOMScoreAdj
}
// Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
// we want to protect Burstable containers that consume less memory than requested.
// The formula below is a heuristic. A container requesting 10% of a system's
// memory will have an OOM score adjust of 900. If a process in container Y
// uses over 10% of memory, its OOM score will be 1000. The idea is that containers
// which use more than their request will have an OOM score of 1000 and will be prime
// targets for OOM kills.
// Note that this is a heuristic, it won't work if a container has many small processes.
containerMemReq := container.Resources.Requests.Memory().Value()
var oomScoreAdjust, remainingReqPerContainer int64
// When PodLevelResources feature is enabled, the OOM score adjustment formula is modified
// to account for pod-level memory requests. Any extra pod memory request that's
// not allocated to the containers is divided equally among all containers and
// added to their individual memory requests when calculating the OOM score
// adjustment. Otherwise, only container-level memory requests are used. See
// https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md#oom-score-adjustment
// for more details.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
resourcehelper.IsPodLevelRequestsSet(pod) {
// TODO(ndixita): Refactor to use this formula in all cases, as
// remainingReqPerContainer will be 0 when pod-level resources are not set.
remainingReqPerContainer = remainingPodMemReqPerContainer(pod)
oomScoreAdjust = 1000 - (1000 * (containerMemReq + remainingReqPerContainer) / memoryCapacity)
} else {
oomScoreAdjust = 1000 - (1000*containerMemReq)/memoryCapacity
}
// Adapt the sidecar container's memory request for the OOM score adjustment:
// use max(sidecar container's memory request, min(regular containers' memory requests)).
if isSidecarContainer(pod, container) {
// Find the minimum memory request across regular containers.
minMemoryRequest := minRegularContainerMemory(*pod)
// When PodLevelResources is enabled, we add the per-container share of
// unallocated pod memory requests to the minimum memory request before
// computing minMemoryOomScoreAdjust. This keeps the calculation consistent
// with how pod-level memory requests are handled for regular containers.
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) &&
resourcehelper.IsPodLevelRequestsSet(pod) {
minMemoryRequest += remainingReqPerContainer
}
minMemoryOomScoreAdjust := 1000 - (1000*minMemoryRequest)/memoryCapacity
// The OOM score adjustment for a sidecar container will match
// or fall below the OOM score adjustment of the regular containers in the pod.
if oomScoreAdjust > minMemoryOomScoreAdjust {
oomScoreAdjust = minMemoryOomScoreAdjust
}
}
// A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure
// that burstable pods have a higher OOM score adjustment.
if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) {
return (1000 + guaranteedOOMScoreAdj)
}
// Give burstable pods a higher chance of survival over besteffort pods.
if int(oomScoreAdjust) == besteffortOOMScoreAdj {
return int(oomScoreAdjust - 1)
}
return int(oomScoreAdjust)
}
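// A worked example of the burstable formula above, with hypothetical numbers
// and PodLevelResources disabled: with memoryCapacity = 10Gi and
// containerMemReq = 1Gi, oomScoreAdjust = 1000 - (1000*1Gi)/10Gi = 900.
// A container requesting half the node's memory lands at 500, so larger
// requests translate into stronger protection from the OOM killer.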
// isSidecarContainer returns a boolean indicating whether a container is a sidecar or not.
// Since v1.Container does not directly specify whether a container is a sidecar,
// this function uses available indicators (container.RestartPolicy == v1.ContainerRestartPolicyAlways)
// to make that determination.
func isSidecarContainer(pod *v1.Pod, container *v1.Container) bool {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
for _, initContainer := range pod.Spec.InitContainers {
if initContainer.Name == container.Name {
return true
}
}
}
return false
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/lru"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// ReasonCache stores the failure reason of the latest container start
// in a string, keyed by <pod_UID>_<container_name>. The goal is to
// propagate this reason to the container status. This endeavor is
// "best-effort" for two reasons:
// 1. The cache is not persisted.
// 2. We use an LRU cache to avoid extra garbage collection work. This
// means that some entries may be recycled before a pod has been
// deleted.
//
// TODO(random-liu): Use more reliable cache which could collect garbage of failed pod.
// TODO(random-liu): Move reason cache to somewhere better.
type ReasonCache struct {
cache *lru.Cache
}
// ReasonItem is the cached item in ReasonCache
type ReasonItem struct {
Err error
Message string
}
// maxReasonCacheEntries is the number of entries in the LRU cache. 1000 is adequate
// for our target of 100 pods per node. If we support more pods per node in the
// future, we may want to increase it.
const maxReasonCacheEntries = 1000
// NewReasonCache creates an instance of 'ReasonCache'.
func NewReasonCache() *ReasonCache {
return &ReasonCache{cache: lru.New(maxReasonCacheEntries)}
}
func (c *ReasonCache) composeKey(uid types.UID, name string) string {
return fmt.Sprintf("%s_%s", uid, name)
}
// add adds error reason into the cache
func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) {
c.cache.Add(c.composeKey(uid, name), ReasonItem{reason, message})
}
// Update updates the reason cache with the SyncPodResult. Only SyncResult with
// StartContainer action will change the cache.
func (c *ReasonCache) Update(uid types.UID, result kubecontainer.PodSyncResult) {
for _, r := range result.SyncResults {
if r.Action != kubecontainer.StartContainer {
continue
}
name := r.Target.(string)
if r.Error != nil {
c.add(uid, name, r.Error, r.Message)
} else {
c.Remove(uid, name)
}
}
}
// Remove removes error reason from the cache
func (c *ReasonCache) Remove(uid types.UID, name string) {
c.cache.Remove(c.composeKey(uid, name))
}
// Get gets the error reason from the cache. It returns the cached ReasonItem and
// whether an entry was found. If no entry is found, it returns nil and false.
func (c *ReasonCache) Get(uid types.UID, name string) (*ReasonItem, bool) {
value, ok := c.cache.Get(c.composeKey(uid, name))
if !ok {
return nil, false
}
info := value.(ReasonItem)
return &info, true
}
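// A minimal usage sketch, with hypothetical UID and container name values:
//
//	cache := NewReasonCache()
//	cache.add("pod-uid", "app", errors.New("ErrImagePull"), "back-off pulling image")
//	if item, ok := cache.Get("pod-uid", "app"); ok {
//		// item.Err and item.Message can be propagated to the container status.
//	}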
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"errors"
"fmt"
"sync"
"time"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
type runtimeState struct {
sync.RWMutex
lastBaseRuntimeSync time.Time
baseRuntimeSyncThreshold time.Duration
networkError error
runtimeError error
storageError error
cidr string
healthChecks []*healthCheck
rtHandlers []kubecontainer.RuntimeHandler
rtFeatures *kubecontainer.RuntimeFeatures
}
// A health check function should be efficient and not rely on external
// components (e.g., container runtime).
type healthCheckFnType func() (bool, error)
type healthCheck struct {
name string
fn healthCheckFnType
}
func (s *runtimeState) addHealthCheck(name string, f healthCheckFnType) {
s.Lock()
defer s.Unlock()
s.healthChecks = append(s.healthChecks, &healthCheck{name: name, fn: f})
}
func (s *runtimeState) setRuntimeSync(t time.Time) {
s.Lock()
defer s.Unlock()
s.lastBaseRuntimeSync = t
}
func (s *runtimeState) setNetworkState(err error) {
s.Lock()
defer s.Unlock()
s.networkError = err
}
func (s *runtimeState) setRuntimeState(err error) {
s.Lock()
defer s.Unlock()
s.runtimeError = err
}
func (s *runtimeState) setRuntimeHandlers(rtHandlers []kubecontainer.RuntimeHandler) {
s.Lock()
defer s.Unlock()
s.rtHandlers = rtHandlers
}
func (s *runtimeState) runtimeHandlers() []kubecontainer.RuntimeHandler {
s.RLock()
defer s.RUnlock()
return s.rtHandlers
}
func (s *runtimeState) setRuntimeFeatures(features *kubecontainer.RuntimeFeatures) {
s.Lock()
defer s.Unlock()
s.rtFeatures = features
}
func (s *runtimeState) runtimeFeatures() *kubecontainer.RuntimeFeatures {
s.RLock()
defer s.RUnlock()
return s.rtFeatures
}
func (s *runtimeState) setStorageState(err error) {
s.Lock()
defer s.Unlock()
s.storageError = err
}
func (s *runtimeState) setPodCIDR(cidr string) {
s.Lock()
defer s.Unlock()
s.cidr = cidr
}
func (s *runtimeState) podCIDR() string {
s.RLock()
defer s.RUnlock()
return s.cidr
}
func (s *runtimeState) runtimeErrors() error {
s.RLock()
defer s.RUnlock()
errs := []error{}
if s.lastBaseRuntimeSync.IsZero() {
errs = append(errs, errors.New("container runtime status check may not have completed yet"))
} else if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
errs = append(errs, errors.New("container runtime is down"))
}
for _, hc := range s.healthChecks {
if ok, err := hc.fn(); !ok {
errs = append(errs, fmt.Errorf("%s is not healthy: %v", hc.name, err))
}
}
if s.runtimeError != nil {
errs = append(errs, s.runtimeError)
}
return utilerrors.NewAggregate(errs)
}
func (s *runtimeState) networkErrors() error {
s.RLock()
defer s.RUnlock()
errs := []error{}
if s.networkError != nil {
errs = append(errs, s.networkError)
}
return utilerrors.NewAggregate(errs)
}
func (s *runtimeState) storageErrors() error {
s.RLock()
defer s.RUnlock()
errs := []error{}
if s.storageError != nil {
errs = append(errs, s.storageError)
}
return utilerrors.NewAggregate(errs)
}
func newRuntimeState(runtimeSyncThreshold time.Duration) *runtimeState {
return &runtimeState{
lastBaseRuntimeSync: time.Time{},
baseRuntimeSyncThreshold: runtimeSyncThreshold,
networkError: ErrNetworkUnknown,
}
}
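// A sketch of the liveness arithmetic in runtimeErrors, with a hypothetical
// threshold: with baseRuntimeSyncThreshold = 30s and the last successful sync
// at time t0, the runtime is reported as down once the current time reaches
// t0+30s, i.e. once lastBaseRuntimeSync.Add(threshold).After(time.Now())
// is no longer true.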
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtimeclass
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
nodev1 "k8s.io/client-go/listers/node/v1"
)
// Manager caches RuntimeClass API objects, and provides accessors to the Kubelet.
type Manager struct {
informerFactory informers.SharedInformerFactory
lister nodev1.RuntimeClassLister
}
// NewManager returns a new RuntimeClass Manager. Run must be called before the manager can be used.
func NewManager(client clientset.Interface) *Manager {
const resyncPeriod = 0
factory := informers.NewSharedInformerFactory(client, resyncPeriod)
lister := factory.Node().V1().RuntimeClasses().Lister()
return &Manager{
informerFactory: factory,
lister: lister,
}
}
// Start starts syncing the RuntimeClass cache with the apiserver.
func (m *Manager) Start(stopCh <-chan struct{}) {
m.informerFactory.Start(stopCh)
}
// WaitForCacheSync exposes the WaitForCacheSync method on the informer factory for testing
// purposes.
func (m *Manager) WaitForCacheSync(stopCh <-chan struct{}) {
m.informerFactory.WaitForCacheSync(stopCh)
}
// LookupRuntimeHandler returns the RuntimeHandler string associated with the given RuntimeClass
// name (or the default of "" for nil). If the RuntimeClass is not found, it returns an
// errors.NotFound error.
func (m *Manager) LookupRuntimeHandler(runtimeClassName *string) (string, error) {
if runtimeClassName == nil || *runtimeClassName == "" {
// The default RuntimeClass always resolves to the empty runtime handler.
return "", nil
}
name := *runtimeClassName
rc, err := m.lister.Get(name)
if err != nil {
if errors.IsNotFound(err) {
return "", err
}
return "", fmt.Errorf("failed to lookup RuntimeClass %s: %v", name, err)
}
return rc.Handler, nil
}
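// A minimal usage sketch, assuming a started and synced manager; handling of
// the NotFound case is left to the caller:
//
//	handler, err := m.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
//	if err != nil {
//		// errors.IsNotFound(err) means the RuntimeClass does not exist.
//	}
//	// handler is "" for the default RuntimeClass.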
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
nodev1 "k8s.io/api/node/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
)
const (
// SandboxRuntimeClass is a valid RuntimeClass pre-populated in the fake client.
SandboxRuntimeClass = "sandbox"
// SandboxRuntimeHandler is the handler associated with the SandboxRuntimeClass.
SandboxRuntimeHandler = "kata-containers"
// EmptyRuntimeClass is a valid RuntimeClass without a handler, pre-populated in the fake client.
EmptyRuntimeClass = "native"
)
// NewPopulatedClient creates a fake client for use with the runtimeclass.Manager,
// and populates it with a few test RuntimeClass objects.
func NewPopulatedClient() clientset.Interface {
return fake.NewSimpleClientset(
NewRuntimeClass(EmptyRuntimeClass, ""),
NewRuntimeClass(SandboxRuntimeClass, SandboxRuntimeHandler),
)
}
// StartManagerSync starts the manager and waits for the informer cache to sync.
// It returns a function to stop the manager, which should be called with a defer:
//
//	defer StartManagerSync(m)()
func StartManagerSync(m *runtimeclass.Manager) func() {
stopCh := make(chan struct{})
m.Start(stopCh)
m.WaitForCacheSync(stopCh)
return func() {
close(stopCh)
}
}
// NewRuntimeClass is a helper to generate a RuntimeClass resource with
// the given name & handler.
func NewRuntimeClass(name, handler string) *nodev1.RuntimeClass {
return &nodev1.RuntimeClass{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Handler: handler,
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secret
import (
"fmt"
v1 "k8s.io/api/core/v1"
)
// fakeManager implements the Manager interface for testing purposes.
type fakeManager struct {
secrets []*v1.Secret
}
// NewFakeManager creates empty/fake secret manager
func NewFakeManager() Manager {
return &fakeManager{}
}
// NewFakeManagerWithSecrets creates a fake secret manager with the provided secrets
func NewFakeManagerWithSecrets(secrets []*v1.Secret) Manager {
return &fakeManager{
secrets: secrets,
}
}
// GetSecret returns the requested secret if it was provided during manager
// initialization; otherwise it returns an error. If the manager was initialized
// without any secrets, it returns a nil secret and no error.
func (s *fakeManager) GetSecret(namespace, name string) (*v1.Secret, error) {
if s.secrets == nil {
return nil, nil
}
for _, secret := range s.secrets {
if secret.Name == name {
return secret, nil
}
}
return nil, fmt.Errorf("secret %s not found", name)
}
// RegisterPod implements the RegisterPod method for testing purposes.
func (s *fakeManager) RegisterPod(pod *v1.Pod) {
}
// UnregisterPod implements the UnregisterPod method for testing purposes.
func (s *fakeManager) UnregisterPod(pod *v1.Pod) {
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secret
import (
"context"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
// Manager manages Kubernetes secrets. This includes retrieving
// secrets or registering/unregistering them via Pods.
type Manager interface {
// Get secret by secret namespace and name.
GetSecret(namespace, name string) (*v1.Secret, error)
// WARNING: Register/UnregisterPod functions should be efficient,
// i.e. should not block on network operations.
// RegisterPod registers all secrets from a given pod.
RegisterPod(pod *v1.Pod)
// UnregisterPod unregisters secrets from a given pod that are not
// used by any other registered pod.
UnregisterPod(pod *v1.Pod)
}
// simpleSecretManager implements the Manager interface with
// simple operations against the apiserver.
type simpleSecretManager struct {
kubeClient clientset.Interface
}
// NewSimpleSecretManager creates a new SecretManager instance.
func NewSimpleSecretManager(kubeClient clientset.Interface) Manager {
return &simpleSecretManager{kubeClient: kubeClient}
}
func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) {
return s.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) {
}
func (s *simpleSecretManager) UnregisterPod(pod *v1.Pod) {
}
// secretManager keeps a store with secrets necessary
// for registered pods. Different implementations of the store
// may result in different semantics for freshness of secrets
// (e.g. ttl-based implementation vs watch-based implementation).
type secretManager struct {
manager manager.Manager
}
func (s *secretManager) GetSecret(namespace, name string) (*v1.Secret, error) {
object, err := s.manager.GetObject(namespace, name)
if err != nil {
return nil, err
}
if secret, ok := object.(*v1.Secret); ok {
return secret, nil
}
return nil, fmt.Errorf("unexpected object type: %v", object)
}
func (s *secretManager) RegisterPod(pod *v1.Pod) {
s.manager.RegisterPod(pod)
}
func (s *secretManager) UnregisterPod(pod *v1.Pod) {
s.manager.UnregisterPod(pod)
}
func getSecretNames(pod *v1.Pod) sets.Set[string] {
result := sets.New[string]()
podutil.VisitPodSecretNames(pod, func(name string) bool {
result.Insert(name)
return true
})
return result
}
const (
defaultTTL = time.Minute
)
// NewCachingSecretManager creates a manager that keeps a cache of all secrets
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, the cached versions of all secrets
// are invalidated
// - every GetObject() call tries to fetch the value from local cache; if it is
// not there, invalidated or too old, we fetch it from apiserver and refresh the
// value in cache; otherwise it is just fetched from cache
func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager {
getSecret := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts)
}
secretStore := manager.NewObjectStore(getSecret, clock.RealClock{}, getTTL, defaultTTL)
return &secretManager{
manager: manager.NewCacheBasedManager(secretStore, getSecretNames),
}
}
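// A minimal usage sketch, with a hypothetical TTL function that always defers
// to defaultTTL (one minute):
//
//	getTTL := func() (time.Duration, bool) { return 0, false }
//	mgr := NewCachingSecretManager(kubeClient, getTTL)
//	mgr.RegisterPod(pod)
//	secret, err := mgr.GetSecret(pod.Namespace, "my-secret")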
// NewWatchingSecretManager creates a manager that keeps a cache of all secrets
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, we start individual watches for all
// referenced objects that aren't referenced from other registered pods
// - every GetObject() returns a value from local cache propagated via watches
func NewWatchingSecretManager(kubeClient clientset.Interface, resyncInterval time.Duration) Manager {
listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) {
return kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts)
}
watchSecret := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
return kubeClient.CoreV1().Secrets(namespace).Watch(context.TODO(), opts)
}
newSecret := func() runtime.Object {
return &v1.Secret{}
}
isImmutable := func(object runtime.Object) bool {
if secret, ok := object.(*v1.Secret); ok {
return secret.Immutable != nil && *secret.Immutable
}
return false
}
gr := corev1.Resource("secret")
return &secretManager{
manager: manager.NewWatchBasedManager(listSecret, watchSecret, newSecret, isImmutable, gr, resyncInterval, getSecretNames),
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"net/http"
"strings"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/configz"
"k8s.io/component-base/zpages/flagz"
"k8s.io/component-base/zpages/statusz"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
)
// KubeletAuth implements AuthInterface
type KubeletAuth struct {
// authenticator identifies the user for requests to the Kubelet API
authenticator.Request
// NodeRequestAttributesGetter builds authorizer.Attributes for a request to the Kubelet API
NodeRequestAttributesGetter
// authorizer determines whether a given authorization.Attributes is allowed
authorizer.Authorizer
}
// NewKubeletAuth returns a kubelet.AuthInterface composed of the given authenticator, attribute getter, and authorizer
func NewKubeletAuth(authenticator authenticator.Request, authorizerAttributeGetter NodeRequestAttributesGetter, authorizer authorizer.Authorizer) AuthInterface {
return &KubeletAuth{authenticator, authorizerAttributeGetter, authorizer}
}
// NewNodeAuthorizerAttributesGetter creates a new authorizer.RequestAttributesGetter for the node.
func NewNodeAuthorizerAttributesGetter(nodeName types.NodeName) NodeRequestAttributesGetter {
return nodeAuthorizerAttributesGetter{nodeName: nodeName}
}
type nodeAuthorizerAttributesGetter struct {
nodeName types.NodeName
}
func isSubpath(subpath, path string) bool {
path = strings.TrimSuffix(path, "/")
return subpath == path || (strings.HasPrefix(subpath, path) && subpath[len(path)] == '/')
}
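// For example, isSubpath("/stats/summary", "/stats/") is true: the trailing
// slash is trimmed and "/stats/summary" extends "/stats" at a path boundary.
// isSubpath("/statsz", "/stats") is false because the next byte is not '/'.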
// GetRequestAttributes populates authorizer attributes for the requests to the kubelet API.
// Default attributes are: {apiVersion=v1,verb=<http verb from request>,resource=nodes,name=<node name>,subresource=proxy}
// More specific verb/resource is set for the following request patterns:
//
// /stats/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=stats
// /metrics/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=metrics
// /logs/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=log
// /checkpoint/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=checkpoint
// /statusz => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=statusz
// /pods/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=pods,proxy
// /runningPods/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=pods,proxy
// /healthz/* => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=healthz,proxy
// /configz => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=configz,proxy
// /flagz => verb=<api verb from request>, resource=nodes, name=<node name>, subresource(s)=configz,proxy
func (n nodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *http.Request) []authorizer.Attributes {
apiVerb := ""
switch r.Method {
case "POST":
apiVerb = "create"
case "GET":
apiVerb = "get"
case "PUT":
apiVerb = "update"
case "PATCH":
apiVerb = "patch"
case "DELETE":
apiVerb = "delete"
}
requestPath := r.URL.Path
var subresources []string
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletFineGrainedAuthz) {
switch {
case isSubpath(requestPath, podsPath):
subresources = append(subresources, "pods")
case isSubpath(requestPath, healthz.DefaultHealthzPath):
subresources = append(subresources, "healthz")
case isSubpath(requestPath, configz.DefaultConfigzPath):
subresources = append(subresources, "configz")
// We check runningpods last since, because the handler path has a trailing
// slash, isSubpath allocates a new string on every check.
case isSubpath(requestPath, runningPodsPath):
subresources = append(subresources, "pods")
}
}
switch {
case isSubpath(requestPath, statsPath):
subresources = append(subresources, "stats")
case isSubpath(requestPath, metricsPath):
subresources = append(subresources, "metrics")
case isSubpath(requestPath, logsPath):
// "log" to match other log subresources (pods/log, etc)
subresources = append(subresources, "log")
case isSubpath(requestPath, checkpointPath):
subresources = append(subresources, "checkpoint")
case isSubpath(requestPath, statusz.DefaultStatuszPath):
subresources = append(subresources, "statusz")
case isSubpath(requestPath, flagz.DefaultFlagzPath):
subresources = append(subresources, "configz")
default:
subresources = append(subresources, "proxy")
}
var attrs []authorizer.Attributes
for _, subresource := range subresources {
attr := authorizer.AttributesRecord{
User: u,
Verb: apiVerb,
Namespace: "",
APIGroup: "",
APIVersion: "v1",
Resource: "nodes",
Subresource: subresource,
Name: string(n.nodeName),
ResourceRequest: true,
Path: requestPath,
}
attrs = append(attrs, attr)
}
klog.V(5).InfoS("Node request attributes", "user", attrs[0].GetUser().GetName(), "verb", attrs[0].GetVerb(), "resource", attrs[0].GetResource(), "subresource(s)", subresources)
return attrs
}
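// As a concrete example of the mapping above: GET /stats/summary yields
// verb=get, resource=nodes, name=<node name>, subresource=stats, while
// GET /foo (an unrecognized path) falls through to subresource=proxy.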
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
)
func TestIsSubPath(t *testing.T) {
testcases := map[string]struct {
subpath string
path string
expected bool
}{
"empty": {subpath: "", path: "", expected: true},
"match 1": {subpath: "foo", path: "foo", expected: true},
"match 2": {subpath: "/foo", path: "/foo", expected: true},
"match 3": {subpath: "/foo/", path: "/foo/", expected: true},
"match 4": {subpath: "/foo/bar", path: "/foo/bar", expected: true},
"subpath of root 1": {subpath: "/foo", path: "/", expected: true},
"subpath of root 2": {subpath: "/foo/", path: "/", expected: true},
"subpath of root 3": {subpath: "/foo/bar", path: "/", expected: true},
"subpath of path 1": {subpath: "/foo", path: "/foo", expected: true},
"subpath of path 2": {subpath: "/foo/", path: "/foo", expected: true},
"subpath of path 3": {subpath: "/foo/bar", path: "/foo", expected: true},
"mismatch 1": {subpath: "/foo", path: "/bar", expected: false},
"mismatch 2": {subpath: "/foo", path: "/foobar", expected: false},
"mismatch 3": {subpath: "/foobar", path: "/foo", expected: false},
}
for k, tc := range testcases {
result := isSubpath(tc.subpath, tc.path)
if result != tc.expected {
t.Errorf("%s: expected %v, got %v", k, tc.expected, result)
}
}
}
func TestGetRequestAttributes(t *testing.T) {
for _, fineGrained := range []bool{false, true} {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletFineGrainedAuthz, fineGrained)
for _, test := range AuthzTestCases(fineGrained) {
t.Run(test.Method+":"+test.Path, func(t *testing.T) {
getter := NewNodeAuthorizerAttributesGetter(authzTestNodeName)
req, err := http.NewRequest(test.Method, "https://localhost:1234"+test.Path, nil)
require.NoError(t, err)
attrs := getter.GetRequestAttributes(AuthzTestUser(), req)
test.AssertAttributes(t, attrs)
})
}
}
}
const (
authzTestNodeName = "test"
authzTestUserName = "phibby"
)
type AuthzTestCase struct {
Method, Path string
ExpectedVerb string
ExpectedSubresources []string
}
func (a *AuthzTestCase) AssertAttributes(t *testing.T, attrs []authorizer.Attributes) {
var expectedAttributes []authorizer.AttributesRecord
for _, subresource := range a.ExpectedSubresources {
expectedAttributes = append(expectedAttributes, authorizer.AttributesRecord{
User: AuthzTestUser(),
APIGroup: "",
APIVersion: "v1",
Verb: a.ExpectedVerb,
Resource: "nodes",
Name: authzTestNodeName,
Subresource: subresource,
ResourceRequest: true,
Path: a.Path,
})
}
assert.Equal(t, len(attrs), len(expectedAttributes))
for i := range attrs {
assert.Equal(t, attrs[i], expectedAttributes[i])
}
}
func AuthzTestUser() user.Info {
return &user.DefaultInfo{Name: authzTestUserName}
}
func AuthzTestCases(fineGrained bool) []AuthzTestCase {
// Path -> ExpectedSubresource
testPaths := map[string][]string{
"/attach/{podNamespace}/{podID}/{containerName}": {"proxy"},
"/attach/{podNamespace}/{podID}/{uid}/{containerName}": {"proxy"},
"/checkpoint/{podNamespace}/{podID}/{containerName}": {"checkpoint"},
"/configz": {"proxy"},
"/flagz": {"configz"},
"/statusz": {"statusz"},
"/containerLogs/{podNamespace}/{podID}/{containerName}": {"proxy"},
"/debug/flags/v": {"proxy"},
"/debug/pprof/{subpath:*}": {"proxy"},
"/exec/{podNamespace}/{podID}/{containerName}": {"proxy"},
"/exec/{podNamespace}/{podID}/{uid}/{containerName}": {"proxy"},
"/healthz": {"proxy"},
"/healthz/log": {"proxy"},
"/healthz/ping": {"proxy"},
"/healthz/syncloop": {"proxy"},
"/logs/": {"log"},
"/logs/{logpath:*}": {"log"},
"/metrics": {"metrics"},
"/metrics/slis": {"metrics"},
"/metrics/cadvisor": {"metrics"},
"/metrics/probes": {"metrics"},
"/metrics/resource": {"metrics"},
"/pods/": {"proxy"},
"/portForward/{podNamespace}/{podID}": {"proxy"},
"/portForward/{podNamespace}/{podID}/{uid}": {"proxy"},
"/run/{podNamespace}/{podID}/{containerName}": {"proxy"},
"/run/{podNamespace}/{podID}/{uid}/{containerName}": {"proxy"},
"/runningpods/": {"proxy"},
"/stats/": {"stats"},
"/stats/summary": {"stats"},
}
if fineGrained {
testPaths["/healthz"] = append([]string{"healthz"}, testPaths["/healthz"]...)
testPaths["/healthz/log"] = append([]string{"healthz"}, testPaths["/healthz/log"]...)
testPaths["/healthz/ping"] = append([]string{"healthz"}, testPaths["/healthz/ping"]...)
testPaths["/healthz/syncloop"] = append([]string{"healthz"}, testPaths["/healthz/syncloop"]...)
testPaths["/pods/"] = append([]string{"pods"}, testPaths["/pods/"]...)
testPaths["/runningpods/"] = append([]string{"pods"}, testPaths["/runningpods/"]...)
testPaths["/configz"] = append([]string{"configz"}, testPaths["/configz"]...)
}
testCases := []AuthzTestCase{}
for path, subresource := range testPaths {
testCases = append(testCases,
AuthzTestCase{"POST", path, "create", subresource},
AuthzTestCase{"GET", path, "get", subresource},
AuthzTestCase{"PUT", path, "update", subresource},
AuthzTestCase{"PATCH", path, "patch", subresource},
AuthzTestCase{"DELETE", path, "delete", subresource})
}
return testCases
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package server
import (
"fmt"
"net/http"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
func init() {
testing.Init()
}
func errorHandler() {
if r := recover(); r != nil {
fmt.Println(r)
}
}
func FuzzRequest(data []byte) int {
defer errorHandler()
t := &testing.T{}
f := fuzz.NewConsumer(data)
urlString, err := f.GetString()
if err != nil {
return 0
}
ss, err := newTestStreamingServer(100 * time.Millisecond)
if err != nil {
return 0
}
defer ss.testHTTPServer.Close()
fw := newServerTestWithDebug(true, ss)
defer fw.testHTTPServer.Close()
url := fw.testHTTPServer.URL + urlString
upgradeRoundTripper, err := spdy.NewRoundTripper(nil)
if err != nil {
return 0
}
c := &http.Client{Transport: upgradeRoundTripper}
resp, err := c.Do(makeReq(t, "POST", url, "v4.channel.k8s.io"))
if err != nil {
return 0
}
defer resp.Body.Close()
return 1
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const (
kubeletSubsystem = "kubelet"
)
var (
// HTTPRequests tracks the number of the http requests received since the server started.
HTTPRequests = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: kubeletSubsystem,
Name: "http_requests_total",
Help: "Number of the http requests received since the server started",
StabilityLevel: metrics.ALPHA,
},
// server_type aims to differentiate the readonly server and the readwrite server.
// long_running marks whether the request is long-running or not.
// Currently, long-running requests include exec/attach/portforward/debug.
[]string{"method", "path", "server_type", "long_running"},
)
// HTTPRequestsDuration tracks the duration in seconds to serve http requests.
HTTPRequestsDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: kubeletSubsystem,
Name: "http_requests_duration_seconds",
Help: "Duration in seconds to serve http requests",
// Use DefBuckets for now, will customize the buckets if necessary.
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"method", "path", "server_type", "long_running"},
)
// HTTPInflightRequests tracks the number of the inflight http requests.
HTTPInflightRequests = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: kubeletSubsystem,
Name: "http_inflight_requests",
Help: "Number of the inflight http requests",
StabilityLevel: metrics.ALPHA,
},
[]string{"method", "path", "server_type", "long_running"},
)
// VolumeStatCalDuration tracks the duration in seconds to calculate volume stats.
// This metric is mainly for comparing fsquota monitoring with `du` for disk usage.
VolumeStatCalDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: kubeletSubsystem,
Name: "volume_metric_collection_duration_seconds",
Help: "Duration in seconds to calculate volume stats",
Buckets: metrics.DefBuckets,
StabilityLevel: metrics.ALPHA,
},
[]string{"metric_source"},
)
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(HTTPRequests)
legacyregistry.MustRegister(HTTPRequestsDuration)
legacyregistry.MustRegister(HTTPInflightRequests)
legacyregistry.MustRegister(VolumeStatCalDuration)
})
}
// SinceInSeconds gets the time since the specified start in seconds.
func SinceInSeconds(start time.Time) float64 {
return time.Since(start).Seconds()
}
// CollectVolumeStatCalDuration collects the duration in seconds to calculate volume stats.
func CollectVolumeStatCalDuration(metricSource string, start time.Time) {
VolumeStatCalDuration.WithLabelValues(metricSource).Observe(SinceInSeconds(start))
}
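// A typical call pattern is to capture the start time and defer the
// observation; "fsquota" here is an illustrative metric_source label:
//
//	func calcVolumeStats() {
//		defer CollectVolumeStatCalDuration("fsquota", time.Now())
//		// ... expensive fsquota/du walk ...
//	}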
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"reflect"
goruntime "runtime"
"strconv"
"strings"
"time"
"github.com/emicklei/go-restful/v3"
cadvisormetrics "github.com/google/cadvisor/container"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorv2 "github.com/google/cadvisor/info/v2"
"github.com/google/cadvisor/metrics"
"go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful"
oteltrace "go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/proxy"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/apiserver/pkg/util/compatibility"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apiserver/pkg/util/flushwriter"
"k8s.io/component-base/configz"
"k8s.io/component-base/logs"
compbasemetrics "k8s.io/component-base/metrics"
metricsfeatures "k8s.io/component-base/metrics/features"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/prometheus/slis"
zpagesfeatures "k8s.io/component-base/zpages/features"
"k8s.io/component-base/zpages/flagz"
"k8s.io/component-base/zpages/statusz"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/cri-client/pkg/util"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
podresourcesapiv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
"k8s.io/kubelet/pkg/cri/streaming"
"k8s.io/kubelet/pkg/cri/streaming/portforward"
remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand"
kubelettypes "k8s.io/kubelet/pkg/types"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/v1/validation"
"k8s.io/kubernetes/pkg/features"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
apisgrpc "k8s.io/kubernetes/pkg/kubelet/apis/grpc"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/prober"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
)
func init() {
utilruntime.Must(metricsfeatures.AddFeatureGates(utilfeature.DefaultMutableFeatureGate))
}
const (
metricsPath = "/metrics"
cadvisorMetricsPath = "/metrics/cadvisor"
resourceMetricsPath = "/metrics/resource"
proberMetricsPath = "/metrics/probes"
statsPath = "/stats/"
logsPath = "/logs/"
checkpointPath = "/checkpoint/"
pprofBasePath = "/debug/pprof/"
debugFlagPath = "/debug/flags/v"
podsPath = "/pods"
runningPodsPath = "/runningpods/"
)
const (
// Kubelet component name
ComponentKubelet = "kubelet"
)
// Server is a http.Handler which exposes kubelet functionality over HTTP.
type Server struct {
flagz flagz.Reader
auth AuthInterface
host HostInterface
restfulCont containerInterface
metricsBuckets sets.Set[string]
metricsMethodBuckets sets.Set[string]
resourceAnalyzer stats.ResourceAnalyzer
extendedCheckers []healthz.HealthChecker
}
// TLSOptions holds the TLS options.
type TLSOptions struct {
Config *tls.Config
CertFile string
KeyFile string
}
// containerInterface defines the restful.Container functions used on the root container
type containerInterface interface {
Add(service *restful.WebService) *restful.Container
Handle(path string, handler http.Handler)
Filter(filter restful.FilterFunction)
ServeHTTP(w http.ResponseWriter, r *http.Request)
RegisteredWebServices() []*restful.WebService
// RegisteredHandlePaths returns the paths of handlers registered directly with the container (non-web-services)
// Used to test that filters are applied to non-web-service handlers
RegisteredHandlePaths() []string
}
// filteringContainer delegates all Handle(...) calls to Container.HandleWithFilter(...),
// so we can ensure restful.FilterFunctions are used for all handlers
type filteringContainer struct {
*restful.Container
registeredHandlePaths []string
}
func (a *filteringContainer) Handle(path string, handler http.Handler) {
a.HandleWithFilter(path, handler)
a.registeredHandlePaths = append(a.registeredHandlePaths, path)
}
func (a *filteringContainer) RegisteredHandlePaths() []string {
return a.registeredHandlePaths
}
// ListenAndServeKubeletServer initializes a server to respond to HTTP network requests on the Kubelet.
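//
// Illustrative call sketch (hypothetical names: "kl", "analyzer", "cfg",
// "tlsOpts", and "auth" stand for an initialized HostInterface,
// ResourceAnalyzer, KubeletConfiguration, TLSOptions, and AuthInterface;
// tracing uses a no-op provider here):
//
//	tp := noop.NewTracerProvider() // go.opentelemetry.io/otel/trace/noop
//	ListenAndServeKubeletServer(kl, analyzer, nil, nil, cfg, tlsOpts, auth, tp)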
func ListenAndServeKubeletServer(
host HostInterface,
resourceAnalyzer stats.ResourceAnalyzer,
checkers []healthz.HealthChecker,
flagz flagz.Reader,
kubeCfg *kubeletconfiginternal.KubeletConfiguration,
tlsOptions *TLSOptions,
auth AuthInterface,
tp oteltrace.TracerProvider) {
address := netutils.ParseIPSloppy(kubeCfg.Address)
port := uint(kubeCfg.Port)
klog.InfoS("Starting to listen", "address", address, "port", port)
handler := NewServer(host, resourceAnalyzer, checkers, flagz, auth, kubeCfg)
handler.InstallTracingFilter(tp)
s := &http.Server{
Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)),
Handler: &handler,
IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout
ReadTimeout: 4 * 60 * time.Minute,
WriteTimeout: 4 * 60 * time.Minute,
MaxHeaderBytes: 1 << 20,
}
if tlsOptions != nil {
s.TLSConfig = tlsOptions.Config
// Passing empty strings as the cert and key files means no
// cert/keys are specified and GetCertificate in the TLSConfig
// should be called instead.
if err := s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile); err != nil {
klog.ErrorS(err, "Failed to listen and serve")
os.Exit(1)
}
} else if err := s.ListenAndServe(); err != nil {
klog.ErrorS(err, "Failed to listen and serve")
os.Exit(1)
}
}
// ListenAndServeKubeletReadOnlyServer initializes a read-only server to respond to HTTP network requests on the Kubelet.
func ListenAndServeKubeletReadOnlyServer(
host HostInterface,
resourceAnalyzer stats.ResourceAnalyzer,
checkers []healthz.HealthChecker,
flagz flagz.Reader,
address net.IP,
port uint,
tp oteltrace.TracerProvider) {
klog.InfoS("Starting to listen read-only", "address", address, "port", port)
s := NewServer(host, resourceAnalyzer, checkers, nil, nil, nil)
s.InstallTracingFilter(tp, otelrestful.WithPublicEndpoint())
server := &http.Server{
Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)),
Handler: &s,
IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout
ReadTimeout: 4 * 60 * time.Minute,
WriteTimeout: 4 * 60 * time.Minute,
MaxHeaderBytes: 1 << 20,
}
if err := server.ListenAndServe(); err != nil {
klog.ErrorS(err, "Failed to listen and serve")
os.Exit(1)
}
}
// ListenAndServePodResources initializes a gRPC server to serve the PodResources service
func ListenAndServePodResources(ctx context.Context, endpoint string, providers podresources.PodResourcesProviders) {
server := grpc.NewServer(apisgrpc.WithRateLimiter(ctx, "podresources", podresources.DefaultQPS, podresources.DefaultBurstTokens))
podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(providers))
podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(ctx, providers))
l, err := util.CreateListener(endpoint)
if err != nil {
klog.ErrorS(err, "Failed to create listener for podResources endpoint")
os.Exit(1)
}
klog.InfoS("Starting to serve the podresources API", "endpoint", endpoint)
if err := server.Serve(l); err != nil {
klog.ErrorS(err, "Failed to serve")
os.Exit(1)
}
}
type NodeRequestAttributesGetter interface {
GetRequestAttributes(u user.Info, r *http.Request) []authorizer.Attributes
}
// AuthInterface contains all methods required by the auth filters
type AuthInterface interface {
authenticator.Request
NodeRequestAttributesGetter
authorizer.Authorizer
}
// HostInterface contains all the kubelet methods required by the server.
// For testability.
type HostInterface interface {
stats.Provider
GetVersionInfo() (*cadvisorapi.VersionInfo, error)
GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error)
GetRunningPods(ctx context.Context) ([]*v1.Pod, error)
RunInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string) ([]byte, error)
CheckpointContainer(ctx context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error
GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error
ServeLogs(w http.ResponseWriter, req *http.Request)
SyncLoopHealthCheck(req *http.Request) error
GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error)
GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error)
GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error)
ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error)
ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error)
}
// NewServer initializes and configures a kubelet.Server object to handle HTTP requests.
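//
// A minimal construction sketch (mirroring the test framework in this
// package; "host", "resourceAnalyzer", "auth", and "kubeCfg" are assumed to
// be initialized elsewhere):
//
//	server := NewServer(host, resourceAnalyzer, nil, nil, auth, kubeCfg)
//	testServer := httptest.NewServer(&server) // net/http/httptest
//	defer testServer.Close()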
func NewServer(
host HostInterface,
resourceAnalyzer stats.ResourceAnalyzer,
checkers []healthz.HealthChecker,
flagz flagz.Reader,
auth AuthInterface,
kubeCfg *kubeletconfiginternal.KubeletConfiguration) Server {
server := Server{
flagz: flagz,
host: host,
resourceAnalyzer: resourceAnalyzer,
auth: auth,
restfulCont: &filteringContainer{Container: restful.NewContainer()},
metricsBuckets: sets.New[string](),
metricsMethodBuckets: sets.New[string]("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"),
extendedCheckers: checkers,
}
if auth != nil {
server.InstallAuthFilter()
}
server.InstallAuthNotRequiredHandlers()
if kubeCfg != nil && kubeCfg.EnableDebuggingHandlers {
klog.InfoS("Adding debug handlers to kubelet server")
server.InstallAuthRequiredHandlers()
// To maintain backward compatibility, serve logs and pprof only when enableDebuggingHandlers is also enabled
// see https://github.com/kubernetes/kubernetes/pull/87273
server.InstallSystemLogHandler(kubeCfg.EnableSystemLogHandler, kubeCfg.EnableSystemLogQuery)
server.InstallProfilingHandler(kubeCfg.EnableProfilingHandler, kubeCfg.EnableContentionProfiling)
server.InstallDebugFlagsHandler(kubeCfg.EnableDebugFlagsHandler)
} else {
server.InstallDebuggingDisabledHandlers()
}
server.installStatusZ()
return server
}
// InstallAuthFilter installs authentication filters with the restful Container.
func (s *Server) InstallAuthFilter() {
s.restfulCont.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
// Authenticate
info, ok, err := s.auth.AuthenticateRequest(req.Request)
if err != nil {
klog.ErrorS(err, "Unable to authenticate the request due to an error")
resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized")
return
}
if !ok {
resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized")
return
}
// Get authorization attributes
attrs := s.auth.GetRequestAttributes(info.User, req.Request)
var allowed bool
var msg string
var subresources []string
for _, attr := range attrs {
subresources = append(subresources, attr.GetSubresource())
decision, _, err := s.auth.Authorize(req.Request.Context(), attr)
if err != nil {
klog.ErrorS(err, "Authorization error", "user", attr.GetUser().GetName(), "verb", attr.GetVerb(), "resource", attr.GetResource(), "subresource", attr.GetSubresource())
msg = fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", attr.GetUser().GetName(), attr.GetVerb(), attr.GetResource(), attr.GetSubresource())
resp.WriteErrorString(http.StatusInternalServerError, msg)
return
}
if decision == authorizer.DecisionAllow {
allowed = true
break
}
}
if !allowed {
if len(attrs) == 0 {
klog.ErrorS(fmt.Errorf("could not determine attributes for request"), "Authorization error")
resp.WriteErrorString(http.StatusForbidden, "Authorization error: could not determine attributes for request")
return
}
// The attributes only differ by subresource so we just use the first one.
klog.V(2).InfoS("Forbidden", "user", attrs[0].GetUser().GetName(), "verb", attrs[0].GetVerb(), "resource", attrs[0].GetResource(), "subresource(s)", subresources)
resp.WriteErrorString(http.StatusForbidden, fmt.Sprintf("Forbidden (user=%s, verb=%s, resource=%s, subresource(s)=%v)\n", attrs[0].GetUser().GetName(), attrs[0].GetVerb(), attrs[0].GetResource(), subresources))
return
}
// Continue
chain.ProcessFilter(req, resp)
})
}
// InstallTracingFilter installs OpenTelemetry tracing filter with the restful Container.
func (s *Server) InstallTracingFilter(tp oteltrace.TracerProvider, opts ...otelrestful.Option) {
s.restfulCont.Filter(otelrestful.OTelFilter("kubelet", append(opts, otelrestful.WithTracerProvider(tp))...))
}
func (s *Server) installStatusZ() {
if utilfeature.DefaultFeatureGate.Enabled(zpagesfeatures.ComponentStatusz) {
s.addMetricsBucketMatcher("statusz")
statusz.Install(s.restfulCont, ComponentKubelet, statusz.NewRegistry(compatibility.DefaultBuildEffectiveVersion(), statusz.WithListedPaths(s.restfulCont.RegisteredHandlePaths())))
}
}
// addMetricsBucketMatcher registers a bucket to use when the root of a request
// path matches it (see getMetricBucket). Please be aware this is not
// thread-safe and should not be used dynamically.
func (s *Server) addMetricsBucketMatcher(bucket string) {
s.metricsBuckets.Insert(bucket)
}
// getMetricBucket finds the appropriate metrics reporting bucket for the given path
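//
// For example, once "pods" has been registered via addMetricsBucketMatcher,
// "/pods" maps to the "pods" bucket, while an unregistered root such as
// "/foo/bar" maps to "other".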
func (s *Server) getMetricBucket(path string) string {
root := getURLRootPath(path)
if s.metricsBuckets.Has(root) {
return root
}
return "other"
}
// getMetricMethodBucket checks for unknown or invalid HTTP verbs
func (s *Server) getMetricMethodBucket(method string) string {
if s.metricsMethodBuckets.Has(method) {
return method
}
return "other"
}
// InstallAuthNotRequiredHandlers registers request handlers that do not require authorization, which are
// installed on both the unsecured and secured (TLS) servers.
// NOTE: This method is maintained for backwards compatibility, but no new endpoints should be added
// to this set. New handlers should be added under InstallAuthRequiredHandlers.
func (s *Server) InstallAuthNotRequiredHandlers() {
s.addMetricsBucketMatcher("healthz")
checkers := []healthz.HealthChecker{
healthz.PingHealthz,
healthz.LogHealthz,
healthz.NamedCheck("syncloop", s.host.SyncLoopHealthCheck),
}
checkers = append(checkers, s.extendedCheckers...)
healthz.InstallHandler(s.restfulCont, checkers...)
slis.SLIMetricsWithReset{}.Install(s.restfulCont)
s.addMetricsBucketMatcher("pods")
ws := new(restful.WebService)
ws.
Path(podsPath).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("").
To(s.getPods).
Operation("getPods"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("stats")
s.restfulCont.Add(stats.CreateHandlers(statsPath, s.host, s.resourceAnalyzer))
s.addMetricsBucketMatcher("metrics")
s.addMetricsBucketMatcher("metrics/cadvisor")
s.addMetricsBucketMatcher("metrics/probes")
s.addMetricsBucketMatcher("metrics/resource")
s.restfulCont.Handle(metricsPath, legacyregistry.Handler())
includedMetrics := cadvisormetrics.MetricSet{
cadvisormetrics.CpuUsageMetrics: struct{}{},
cadvisormetrics.MemoryUsageMetrics: struct{}{},
cadvisormetrics.CpuLoadMetrics: struct{}{},
cadvisormetrics.DiskIOMetrics: struct{}{},
cadvisormetrics.DiskUsageMetrics: struct{}{},
cadvisormetrics.NetworkUsageMetrics: struct{}{},
cadvisormetrics.AppMetrics: struct{}{},
cadvisormetrics.ProcessMetrics: struct{}{},
cadvisormetrics.OOMMetrics: struct{}{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
includedMetrics[cadvisormetrics.PressureMetrics] = struct{}{}
}
// cAdvisor metrics are exposed under the secured handler as well
r := compbasemetrics.NewKubeRegistry()
r.RawMustRegister(metrics.NewPrometheusMachineCollector(prometheusHostAdapter{s.host}, includedMetrics))
if utilfeature.DefaultFeatureGate.Enabled(features.PodAndContainerStatsFromCRI) {
r.CustomRegister(collectors.NewCRIMetricsCollector(context.TODO(), s.host.ListPodSandboxMetrics, s.host.ListMetricDescriptors))
} else {
cadvisorOpts := cadvisorv2.RequestOptions{
IdType: cadvisorv2.TypeName,
Count: 1,
Recursive: true,
}
r.RawMustRegister(metrics.NewPrometheusCollector(prometheusHostAdapter{s.host}, containerPrometheusLabelsFunc(s.host), includedMetrics, clock.RealClock{}, cadvisorOpts))
}
s.restfulCont.Handle(cadvisorMetricsPath,
compbasemetrics.HandlerFor(r, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
)
s.addMetricsBucketMatcher("metrics/resource")
resourceRegistry := compbasemetrics.NewKubeRegistry()
resourceRegistry.CustomMustRegister(collectors.NewResourceMetricsCollector(s.resourceAnalyzer))
s.restfulCont.Handle(resourceMetricsPath,
compbasemetrics.HandlerFor(resourceRegistry, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
)
// prober metrics are exposed under a different endpoint
s.addMetricsBucketMatcher("metrics/probes")
p := compbasemetrics.NewKubeRegistry()
_ = compbasemetrics.RegisterProcessStartTime(p.Register)
p.MustRegister(prober.ProberResults)
p.MustRegister(prober.ProberDuration)
s.restfulCont.Handle(proberMetricsPath,
compbasemetrics.HandlerFor(p, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
)
// DO NOT ADD NEW HANDLERS HERE!
// See note in method comment.
}
// InstallAuthRequiredHandlers registers the HTTP handlers that should only be installed on servers
// with authorization enabled.
// NOTE: New endpoints must require authorization.
func (s *Server) InstallAuthRequiredHandlers() {
s.addMetricsBucketMatcher("run")
ws := new(restful.WebService)
ws.
Path("/run")
ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
To(s.getRun).
Operation("getRun"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
To(s.getRun).
Operation("getRun"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("exec")
ws = new(restful.WebService)
ws.
Path("/exec")
ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
To(s.getExec).
Operation("getExec"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
To(s.getExec).
Operation("getExec"))
ws.Route(ws.GET("/{podNamespace}/{podID}/{uid}/{containerName}").
To(s.getExec).
Operation("getExec"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
To(s.getExec).
Operation("getExec"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("attach")
ws = new(restful.WebService)
ws.
Path("/attach")
ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
To(s.getAttach).
Operation("getAttach"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
To(s.getAttach).
Operation("getAttach"))
ws.Route(ws.GET("/{podNamespace}/{podID}/{uid}/{containerName}").
To(s.getAttach).
Operation("getAttach"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
To(s.getAttach).
Operation("getAttach"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("portForward")
ws = new(restful.WebService)
ws.
Path("/portForward")
ws.Route(ws.GET("/{podNamespace}/{podID}").
To(s.getPortForward).
Operation("getPortForward"))
ws.Route(ws.POST("/{podNamespace}/{podID}").
To(s.getPortForward).
Operation("getPortForward"))
ws.Route(ws.GET("/{podNamespace}/{podID}/{uid}").
To(s.getPortForward).
Operation("getPortForward"))
ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}").
To(s.getPortForward).
Operation("getPortForward"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("containerLogs")
ws = new(restful.WebService)
ws.
Path("/containerLogs")
ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
To(s.getContainerLogs).
Operation("getContainerLogs"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("configz")
configz.InstallHandler(s.restfulCont)
if utilfeature.DefaultFeatureGate.Enabled(zpagesfeatures.ComponentFlagz) {
if s.flagz != nil {
s.addMetricsBucketMatcher("flagz")
flagz.Install(s.restfulCont, ComponentKubelet, s.flagz)
}
}
// The /runningpods endpoint is used for testing only.
s.addMetricsBucketMatcher("runningpods")
ws = new(restful.WebService)
ws.
Path(runningPodsPath).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("").
To(s.getRunningPods).
Operation("getRunningPods"))
s.restfulCont.Add(ws)
// Only enable checkpoint API if the feature is enabled
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint) {
s.addMetricsBucketMatcher("checkpoint")
ws = &restful.WebService{}
ws.Path(checkpointPath).Produces(restful.MIME_JSON)
ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
To(s.checkpoint).
Operation("checkpoint"))
s.restfulCont.Add(ws)
}
}
// InstallDebuggingDisabledHandlers registers the HTTP request patterns that provide a better error message when debugging endpoints are disabled
func (s *Server) InstallDebuggingDisabledHandlers() {
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Debug endpoints are disabled.", http.StatusMethodNotAllowed)
})
s.addMetricsBucketMatcher("run")
s.addMetricsBucketMatcher("exec")
s.addMetricsBucketMatcher("attach")
s.addMetricsBucketMatcher("portForward")
s.addMetricsBucketMatcher("containerLogs")
s.addMetricsBucketMatcher("runningpods")
s.addMetricsBucketMatcher("pprof")
s.addMetricsBucketMatcher("logs")
paths := []string{
"/run/", "/exec/", "/attach/", "/portForward/", "/containerLogs/",
runningPodsPath, pprofBasePath, logsPath}
for _, p := range paths {
s.restfulCont.Handle(p, h)
}
}
// InstallSystemLogHandler registers the HTTP request patterns for logs endpoint.
func (s *Server) InstallSystemLogHandler(enableSystemLogHandler bool, enableSystemLogQuery bool) {
s.addMetricsBucketMatcher("logs")
if enableSystemLogHandler {
ws := new(restful.WebService)
ws.Path(logsPath)
ws.Route(ws.GET("").
To(s.getLogs).
Operation("getLogs"))
if !enableSystemLogQuery {
ws.Route(ws.GET("/{logpath:*}").
To(s.getLogs).
Operation("getLogs").
Param(ws.PathParameter("logpath", "path to the log").DataType("string")))
} else {
ws.Route(ws.GET("/{logpath:*}").
To(s.getLogs).
Operation("getLogs").
Param(ws.PathParameter("logpath", "path to the log").DataType("string")).
Param(ws.QueryParameter("query", "query specifies services(s) or files from which to return logs").DataType("string")).
Param(ws.QueryParameter("sinceTime", "sinceTime is an RFC3339 timestamp from which to show logs").DataType("string")).
Param(ws.QueryParameter("untilTime", "untilTime is an RFC3339 timestamp until which to show logs").DataType("string")).
Param(ws.QueryParameter("tailLines", "tailLines is used to retrieve the specified number of lines from the end of the log").DataType("string")).
Param(ws.QueryParameter("pattern", "pattern filters log entries by the provided regex pattern").DataType("string")).
Param(ws.QueryParameter("boot", "boot show messages from a specific system boot").DataType("string")))
}
s.restfulCont.Add(ws)
} else {
s.restfulCont.Handle(logsPath, getHandlerForDisabledEndpoint("logs endpoint is disabled."))
}
}
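// Illustrative requests against the logs endpoint (hypothetical values; the
// query form requires enableSystemLogQuery, the path form only requires
// enableSystemLogHandler):
//
//	GET /logs/?query=kubelet&sinceTime=2024-01-01T00:00:00Z
//	GET /logs/kubelet.log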
func getHandlerForDisabledEndpoint(errorMessage string) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, errorMessage, http.StatusMethodNotAllowed)
})
}
// InstallDebugFlagsHandler registers the HTTP request patterns for /debug/flags/v endpoint.
func (s *Server) InstallDebugFlagsHandler(enableDebugFlagsHandler bool) {
if enableDebugFlagsHandler {
// Set up flags handlers.
// So far, only logging-related endpoints are considered valid to add for these debug flags.
s.restfulCont.Handle(debugFlagPath, routes.StringFlagPutHandler(logs.GlogSetter))
} else {
s.restfulCont.Handle(debugFlagPath, getHandlerForDisabledEndpoint("flags endpoint is disabled."))
}
}
// InstallProfilingHandler registers the HTTP request patterns for /debug/pprof endpoint.
func (s *Server) InstallProfilingHandler(enableProfilingLogHandler bool, enableContentionProfiling bool) {
s.addMetricsBucketMatcher("debug")
if !enableProfilingLogHandler {
s.restfulCont.Handle(pprofBasePath, getHandlerForDisabledEndpoint("profiling endpoint is disabled."))
return
}
handlePprofEndpoint := func(req *restful.Request, resp *restful.Response) {
name := strings.TrimPrefix(req.Request.URL.Path, pprofBasePath)
switch name {
case "profile":
pprof.Profile(resp, req.Request)
case "symbol":
pprof.Symbol(resp, req.Request)
case "cmdline":
pprof.Cmdline(resp, req.Request)
case "trace":
pprof.Trace(resp, req.Request)
default:
pprof.Index(resp, req.Request)
}
}
// Setup pprof handlers.
ws := new(restful.WebService).Path(pprofBasePath)
ws.Route(ws.GET("/{subpath:*}").To(handlePprofEndpoint)).Doc("pprof endpoint")
s.restfulCont.Add(ws)
if enableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
// getContainerLogs handles containerLogs request against the Kubelet
func (s *Server) getContainerLogs(request *restful.Request, response *restful.Response) {
podNamespace := request.PathParameter("podNamespace")
podID := request.PathParameter("podID")
containerName := request.PathParameter("containerName")
ctx := request.Request.Context()
if len(podID) == 0 {
// TODO: Why return JSON when the rest return plaintext errors?
// TODO: Why return plaintext errors?
response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podID."}`))
return
}
if len(containerName) == 0 {
// TODO: Why return JSON when the rest return plaintext errors?
response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing container name."}`))
return
}
if len(podNamespace) == 0 {
// TODO: Why return JSON when the rest return plaintext errors?
response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podNamespace."}`))
return
}
query := request.Request.URL.Query()
// backwards compatibility for the "tail" query parameter
if tail := request.QueryParameter("tail"); len(tail) > 0 {
query["tailLines"] = []string{tail}
// "all" is the same as omitting tail
if tail == "all" {
delete(query, "tailLines")
}
}
// container logs on the kubelet are locked to the v1 API version of PodLogOptions
logOptions := &v1.PodLogOptions{}
if err := legacyscheme.ParameterCodec.DecodeParameters(query, v1.SchemeGroupVersion, logOptions); err != nil {
response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Unable to decode query."}`))
return
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodLogsQuerySplitStreams) {
// Even with defaulters, logOptions.Stream can be nil if no arguments are provided at all.
if logOptions.Stream == nil {
// Default to "All" to maintain backward compatibility.
logOptions.Stream = ptr.To(v1.LogStreamAll)
}
}
logOptions.TypeMeta = metav1.TypeMeta{}
if errs := validation.ValidatePodLogOptions(logOptions); len(errs) > 0 {
response.WriteError(http.StatusUnprocessableEntity, fmt.Errorf(`{"message": "Invalid request."}`))
return
}
pod, ok := s.host.GetPodByName(podNamespace, podID)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod %q does not exist", podID))
return
}
// Check if containerName is valid.
if kubecontainer.GetContainerSpec(pod, containerName) == nil {
response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q", containerName, podID))
return
}
if _, ok := response.ResponseWriter.(http.Flusher); !ok {
response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher, cannot show logs", reflect.TypeOf(response)))
return
}
var (
stdout io.Writer
stderr io.Writer
fw = flushwriter.Wrap(response.ResponseWriter)
)
if utilfeature.DefaultFeatureGate.Enabled(features.PodLogsQuerySplitStreams) {
wantedStream := logOptions.Stream
// No stream type specified, default to All
if wantedStream == nil {
allStream := v1.LogStreamAll
wantedStream = &allStream
}
switch *wantedStream {
case v1.LogStreamStdout:
stdout, stderr = fw, nil
case v1.LogStreamStderr:
stdout, stderr = nil, fw
case v1.LogStreamAll:
stdout, stderr = fw, fw
default:
_ = response.WriteError(http.StatusBadRequest, fmt.Errorf("invalid stream type %q", *logOptions.Stream))
return
}
} else {
if logOptions.Stream != nil && *logOptions.Stream != v1.LogStreamAll {
_ = response.WriteError(http.StatusBadRequest, fmt.Errorf("unable to return the given log stream: %q. Please enable PodLogsQuerySplitStreams feature gate in kubelet", *logOptions.Stream))
return
}
stdout, stderr = fw, fw
}
response.Header().Set("Transfer-Encoding", "chunked")
if err := s.host.GetKubeletContainerLogs(ctx, kubecontainer.GetPodFullName(pod), containerName, logOptions, stdout, stderr); err != nil {
response.WriteError(http.StatusBadRequest, err)
return
}
}
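// Illustrative requests (hypothetical pod coordinates), including the legacy
// "tail" alias handled above:
//
//	GET /containerLogs/default/mypod/mycontainer?tailLines=100
//	GET /containerLogs/default/mypod/mycontainer?tail=100 // legacy equivalent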
// encodePods creates a v1.PodList object from pods and returns the encoded
// PodList.
func encodePods(pods []*v1.Pod) (data []byte, err error) {
podList := new(v1.PodList)
for _, pod := range pods {
podList.Items = append(podList.Items, *pod)
}
// TODO: this needs to be parameterized to the kubelet, not hardcoded. Depends on Kubelet
// as API server refactor.
// TODO: Locked to v1, needs to be made generic
codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
return runtime.Encode(codec, podList)
}
// getPods returns a list of pods bound to the Kubelet and their spec.
func (s *Server) getPods(request *restful.Request, response *restful.Response) {
pods := s.host.GetPods()
data, err := encodePods(pods)
if err != nil {
response.WriteError(http.StatusInternalServerError, err)
return
}
writeJSONResponse(response, data)
}
// getRunningPods returns a list of pods running on the Kubelet. The list is
// provided by the container runtime, and is different from the list returned
// by getPods, which is a set of desired pods to run.
func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
pods, err := s.host.GetRunningPods(ctx)
if err != nil {
response.WriteError(http.StatusInternalServerError, err)
return
}
data, err := encodePods(pods)
if err != nil {
response.WriteError(http.StatusInternalServerError, err)
return
}
writeJSONResponse(response, data)
}
// getLogs handles logs requests against the Kubelet.
func (s *Server) getLogs(request *restful.Request, response *restful.Response) {
s.host.ServeLogs(response, request.Request)
}
type execRequestParams struct {
podNamespace string
podName string
podUID types.UID
containerName string
cmd []string
}
func getExecRequestParams(req *restful.Request) execRequestParams {
return execRequestParams{
podNamespace: req.PathParameter("podNamespace"),
podName: req.PathParameter("podID"),
podUID: types.UID(req.PathParameter("uid")),
containerName: req.PathParameter("containerName"),
cmd: req.Request.URL.Query()[api.ExecCommandParam],
}
}
type portForwardRequestParams struct {
podNamespace string
podName string
podUID types.UID
}
func getPortForwardRequestParams(req *restful.Request) portForwardRequestParams {
return portForwardRequestParams{
podNamespace: req.PathParameter("podNamespace"),
podName: req.PathParameter("podID"),
podUID: types.UID(req.PathParameter("uid")),
}
}
type responder struct{}
func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) {
klog.ErrorS(err, "Error while proxying request")
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// proxyStream proxies stream to url.
func proxyStream(w http.ResponseWriter, r *http.Request, url *url.URL) {
// TODO(random-liu): Set MaxBytesPerSec to throttle the stream.
handler := proxy.NewUpgradeAwareHandler(url, nil /*transport*/, false /*wrapTransport*/, true /*upgradeRequired*/, &responder{})
handler.ServeHTTP(w, r)
}
// getAttach handles requests to attach to a container.
func (s *Server) getAttach(request *restful.Request, response *restful.Response) {
params := getExecRequestParams(request)
streamOpts, err := remotecommandserver.NewOptions(request.Request)
if err != nil {
utilruntime.HandleError(err)
response.WriteError(http.StatusBadRequest, err)
return
}
pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
return
}
podFullName := kubecontainer.GetPodFullName(pod)
url, err := s.host.GetAttach(request.Request.Context(), podFullName, params.podUID, params.containerName, *streamOpts)
if err != nil {
streaming.WriteError(err, response.ResponseWriter)
return
}
proxyStream(response.ResponseWriter, request.Request, url)
}
// getExec handles requests to run a command inside a container.
func (s *Server) getExec(request *restful.Request, response *restful.Response) {
params := getExecRequestParams(request)
streamOpts, err := remotecommandserver.NewOptions(request.Request)
if err != nil {
utilruntime.HandleError(err)
response.WriteError(http.StatusBadRequest, err)
return
}
pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
return
}
podFullName := kubecontainer.GetPodFullName(pod)
url, err := s.host.GetExec(request.Request.Context(), podFullName, params.podUID, params.containerName, params.cmd, *streamOpts)
if err != nil {
streaming.WriteError(err, response.ResponseWriter)
return
}
proxyStream(response.ResponseWriter, request.Request, url)
}
// getRun handles requests to run a command inside a container.
func (s *Server) getRun(request *restful.Request, response *restful.Response) {
params := getExecRequestParams(request)
pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
return
}
// For legacy reasons, run uses a different query param than exec.
params.cmd = strings.Split(request.QueryParameter("cmd"), " ")
data, err := s.host.RunInContainer(request.Request.Context(), kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd)
if err != nil {
response.WriteError(http.StatusInternalServerError, err)
return
}
writeJSONResponse(response, data)
}
// Derived from go-restful writeJSON.
func writeJSONResponse(response *restful.Response, data []byte) {
if data == nil {
response.WriteHeader(http.StatusOK)
// do not write a nil representation
return
}
response.Header().Set(restful.HEADER_ContentType, restful.MIME_JSON)
response.WriteHeader(http.StatusOK)
if _, err := response.Write(data); err != nil {
klog.ErrorS(err, "Error writing response")
}
}
// getPortForward handles a new restful port forward request. It determines the
// pod name and uid, resolves the port-forward URL from the host, and proxies the stream to it.
func (s *Server) getPortForward(request *restful.Request, response *restful.Response) {
params := getPortForwardRequestParams(request)
portForwardOptions, err := portforward.NewV4Options(request.Request)
if err != nil {
utilruntime.HandleError(err)
response.WriteError(http.StatusBadRequest, err)
return
}
pod, ok := s.host.GetPodByName(params.podNamespace, params.podName)
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
return
}
if len(params.podUID) > 0 && pod.UID != params.podUID {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod not found"))
return
}
url, err := s.host.GetPortForward(request.Request.Context(), pod.Name, pod.Namespace, pod.UID, *portForwardOptions)
if err != nil {
streaming.WriteError(err, response.ResponseWriter)
return
}
proxyStream(response.ResponseWriter, request.Request, url)
}
// checkpoint handles the checkpoint API request. It checks if the requested
// podNamespace, pod and container actually exist and only then calls out
// to the runtime to actually checkpoint the container.
func (s *Server) checkpoint(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
pod, ok := s.host.GetPodByName(request.PathParameter("podNamespace"), request.PathParameter("podID"))
if !ok {
response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist"))
return
}
containerName := request.PathParameter("containerName")
found := false
for _, container := range pod.Spec.Containers {
if container.Name == containerName {
found = true
break
}
}
if !found {
for _, container := range pod.Spec.InitContainers {
if container.Name == containerName {
found = true
break
}
}
}
if !found {
for _, container := range pod.Spec.EphemeralContainers {
if container.Name == containerName {
found = true
break
}
}
}
if !found {
response.WriteError(
http.StatusNotFound,
fmt.Errorf("container %v does not exist", containerName),
)
return
}
options := &runtimeapi.CheckpointContainerRequest{}
// Query parameter to select an optional timeout. Without the timeout parameter
// the checkpoint command will use the default CRI timeout.
timeouts := request.Request.URL.Query()["timeout"]
if len(timeouts) > 0 {
// If the user specified one or more timeout values, use the last
// available value.
timeout, err := strconv.ParseInt(timeouts[len(timeouts)-1], 10, 64)
if err != nil {
response.WriteError(
http.StatusNotFound,
fmt.Errorf("cannot parse value of timeout parameter"),
)
return
}
options.Timeout = timeout
}
if err := s.host.CheckpointContainer(ctx, pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil {
response.WriteError(
http.StatusInternalServerError,
fmt.Errorf(
"checkpointing of %v/%v/%v failed (%v)",
request.PathParameter("podNamespace"),
request.PathParameter("podID"),
containerName,
err,
),
)
return
}
writeJSONResponse(
response,
[]byte(fmt.Sprintf("{\"items\":[\"%s\"]}", options.Location)),
)
}
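// Illustrative request (hypothetical pod coordinates; requires the
// ContainerCheckpoint feature gate):
//
//	POST /checkpoint/default/mypod/mycontainer?timeout=30
//
// On success the body has the form {"items":["<checkpoint archive location>"]},
// per writeJSONResponse above.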
// getURLRootPath trims a URL path.
// For paths in the format of "/metrics/xxx", "metrics/xxx" is returned;
// For all other paths, the first part of the path is returned.
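//
// For example:
//
//	getURLRootPath("/metrics/cadvisor") == "metrics/cadvisor"
//	getURLRootPath("/stats/summary")    == "stats"
//	getURLRootPath("/healthz")          == "healthz"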
func getURLRootPath(path string) string {
parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 3)
if len(parts) == 0 {
return path
}
if parts[0] == "metrics" && len(parts) > 1 {
return fmt.Sprintf("%s/%s", parts[0], parts[1])
}
return parts[0]
}
var longRunningRequestPathMap = map[string]bool{
"exec": true,
"attach": true,
"portforward": true,
"debug": true,
}
// isLongRunningRequest determines whether the request is long-running or not.
func isLongRunningRequest(path string) bool {
_, ok := longRunningRequestPathMap[path]
return ok
}
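// For example, isLongRunningRequest("exec") is true, while
// isLongRunningRequest("containerLogs") is false.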
var statusesNoTracePred = httplog.StatusIsNot(
http.StatusOK,
http.StatusFound,
http.StatusMovedPermanently,
http.StatusTemporaryRedirect,
http.StatusBadRequest,
http.StatusNotFound,
http.StatusSwitchingProtocols,
)
// ServeHTTP responds to HTTP requests on the Kubelet.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
handler := httplog.WithLogging(s.restfulCont, statusesNoTracePred)
// monitor http requests
var serverType string
if s.auth == nil {
serverType = "readonly"
} else {
serverType = "readwrite"
}
method, path := s.getMetricMethodBucket(req.Method), s.getMetricBucket(req.URL.Path)
longRunning := strconv.FormatBool(isLongRunningRequest(path))
servermetrics.HTTPRequests.WithLabelValues(method, path, serverType, longRunning).Inc()
servermetrics.HTTPInflightRequests.WithLabelValues(method, path, serverType, longRunning).Inc()
defer servermetrics.HTTPInflightRequests.WithLabelValues(method, path, serverType, longRunning).Dec()
startTime := time.Now()
defer servermetrics.HTTPRequestsDuration.WithLabelValues(method, path, serverType, longRunning).Observe(servermetrics.SinceInSeconds(startTime))
handler.ServeHTTP(w, req)
}
// prometheusHostAdapter adapts the HostInterface to the interface expected by the
// cAdvisor prometheus collector.
type prometheusHostAdapter struct {
host HostInterface
}
func (a prometheusHostAdapter) GetRequestedContainersInfo(containerName string, options cadvisorv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error) {
return a.host.GetRequestedContainersInfo(containerName, options)
}
func (a prometheusHostAdapter) GetVersionInfo() (*cadvisorapi.VersionInfo, error) {
return a.host.GetVersionInfo()
}
func (a prometheusHostAdapter) GetMachineInfo() (*cadvisorapi.MachineInfo, error) {
return a.host.GetCachedMachineInfo()
}
func containerPrometheusLabelsFunc(s stats.Provider) metrics.ContainerLabelsFunc {
// containerPrometheusLabels maps cAdvisor labels to Prometheus labels.
return func(c *cadvisorapi.ContainerInfo) map[string]string {
// Prometheus requires that all metrics in the same family have the same labels,
// so we arrange to supply blank strings for missing labels
var name, image, podName, namespace, containerName string
if len(c.Aliases) > 0 {
name = c.Aliases[0]
}
image = c.Spec.Image
if v, ok := c.Spec.Labels[kubelettypes.KubernetesPodNameLabel]; ok {
podName = v
}
if v, ok := c.Spec.Labels[kubelettypes.KubernetesPodNamespaceLabel]; ok {
namespace = v
}
if v, ok := c.Spec.Labels[kubelettypes.KubernetesContainerNameLabel]; ok {
containerName = v
}
// Associate the pod cgroup with the pod so we have an accurate accounting of the sandbox
if podName == "" && namespace == "" {
if pod, found := s.GetPodByCgroupfs(c.Name); found {
podName = pod.Name
namespace = pod.Namespace
}
}
set := map[string]string{
metrics.LabelID: c.Name,
metrics.LabelName: name,
metrics.LabelImage: image,
"pod": podName,
"namespace": namespace,
"container": containerName,
}
return set
}
}
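// Sketch of a resulting label set for a container carrying the standard
// Kubernetes labels (all values hypothetical):
//
//	{
//		metrics.LabelID:    "/kubepods/.../abc123",
//		metrics.LabelName:  "k8s_nginx_mypod_default_...",
//		metrics.LabelImage: "nginx:1.25",
//		"pod":              "mypod",
//		"namespace":        "default",
//		"container":        "nginx",
//	}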
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/httpstream/spdy"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/remotecommand"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
api "k8s.io/kubernetes/pkg/apis/core"
// Do some initialization to decode the query parameters correctly.
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
zpagesfeatures "k8s.io/component-base/zpages/features"
"k8s.io/component-base/zpages/flagz"
"k8s.io/kubelet/pkg/cri/streaming"
"k8s.io/kubelet/pkg/cri/streaming/portforward"
remotecommandserver "k8s.io/kubelet/pkg/cri/streaming/remotecommand"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/features"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/volume"
)
const (
testUID = "9b01b80f-8fb4-11e4-95ab-4200af06647"
testContainerID = "container789"
testPodSandboxID = "pod0987"
)
type fakeKubelet struct {
podByNameFunc func(namespace, name string) (*v1.Pod, bool)
machineInfoFunc func() (*cadvisorapi.MachineInfo, error)
podsFunc func() []*v1.Pod
runningPodsFunc func(ctx context.Context) ([]*v1.Pod, error)
logFunc func(w http.ResponseWriter, req *http.Request)
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options)
getAttachCheck func(string, types.UID, string, remotecommandserver.Options)
getPortForwardCheck func(string, string, types.UID, portforward.V4Options)
containerLogsFunc func(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error
resyncInterval time.Duration
loopEntryTime time.Time
plegHealth bool
streamingRuntime streaming.Server
}
func (fk *fakeKubelet) LatestLoopEntryTime() time.Time {
return fk.loopEntryTime
}
func (fk *fakeKubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return fk.podByNameFunc(namespace, name)
}
func (fk *fakeKubelet) GetRequestedContainersInfo(containerName string, options cadvisorapiv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error) {
return map[string]*cadvisorapi.ContainerInfo{}, nil
}
func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) {
return fk.machineInfoFunc()
}
func (*fakeKubelet) GetVersionInfo() (*cadvisorapi.VersionInfo, error) {
return &cadvisorapi.VersionInfo{}, nil
}
func (fk *fakeKubelet) GetPods() []*v1.Pod {
return fk.podsFunc()
}
func (fk *fakeKubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) {
return fk.runningPodsFunc(ctx)
}
func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
fk.logFunc(w, req)
}
func (fk *fakeKubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
return fk.containerLogsFunc(ctx, podFullName, containerName, logOptions, stdout, stderr)
}
func (fk *fakeKubelet) RunInContainer(_ context.Context, podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
return fk.runFunc(podFullName, uid, containerName, cmd)
}
func (fk *fakeKubelet) CheckpointContainer(_ context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error {
if containerName == "checkpointingFailure" {
return fmt.Errorf("Returning error for test")
}
return nil
}
func (fk *fakeKubelet) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
return nil, nil
}
func (fk *fakeKubelet) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
return nil, nil
}
func (fk *fakeKubelet) SyncLoopHealthCheck(req *http.Request) error {
duration := fk.resyncInterval * 2
minDuration := time.Minute * 5
if duration < minDuration {
duration = minDuration
}
enterLoopTime := fk.LatestLoopEntryTime()
if !enterLoopTime.IsZero() && time.Now().After(enterLoopTime.Add(duration)) {
return fmt.Errorf("sync Loop took longer than expected")
}
return nil
}
type fakeRuntime struct {
execFunc func(string, []string, io.Reader, io.WriteCloser, io.WriteCloser, bool, <-chan remotecommand.TerminalSize) error
attachFunc func(string, io.Reader, io.WriteCloser, io.WriteCloser, bool, <-chan remotecommand.TerminalSize) error
portForwardFunc func(string, int32, io.ReadWriteCloser) error
}
func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize)
}
func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize)
}
func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
return f.portForwardFunc(podSandboxID, port, stream)
}
type testStreamingServer struct {
streaming.Server
fakeRuntime *fakeRuntime
testHTTPServer *httptest.Server
}
func newTestStreamingServer(streamIdleTimeout time.Duration) (s *testStreamingServer, err error) {
s = &testStreamingServer{}
s.testHTTPServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
s.ServeHTTP(w, r)
}))
defer func() {
if err != nil {
s.testHTTPServer.Close()
}
}()
testURL, err := url.Parse(s.testHTTPServer.URL)
if err != nil {
return nil, err
}
s.fakeRuntime = &fakeRuntime{}
config := streaming.DefaultConfig
config.BaseURL = testURL
if streamIdleTimeout != 0 {
config.StreamIdleTimeout = streamIdleTimeout
}
s.Server, err = streaming.NewServer(config, s.fakeRuntime)
if err != nil {
return nil, err
}
return s, nil
}
func (fk *fakeKubelet) GetExec(_ context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
if fk.getExecCheck != nil {
fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts)
}
// Always use testContainerID
resp, err := fk.streamingRuntime.GetExec(&runtimeapi.ExecRequest{
ContainerId: testContainerID,
Cmd: cmd,
Tty: streamOpts.TTY,
Stdin: streamOpts.Stdin,
Stdout: streamOpts.Stdout,
Stderr: streamOpts.Stderr,
})
if err != nil {
return nil, err
}
return url.Parse(resp.GetUrl())
}
func (fk *fakeKubelet) GetAttach(_ context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
if fk.getAttachCheck != nil {
fk.getAttachCheck(podFullName, podUID, containerName, streamOpts)
}
// Always use testContainerID
resp, err := fk.streamingRuntime.GetAttach(&runtimeapi.AttachRequest{
ContainerId: testContainerID,
Tty: streamOpts.TTY,
Stdin: streamOpts.Stdin,
Stdout: streamOpts.Stdout,
Stderr: streamOpts.Stderr,
})
if err != nil {
return nil, err
}
return url.Parse(resp.GetUrl())
}
func (fk *fakeKubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
if fk.getPortForwardCheck != nil {
fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts)
}
// Always use testPodSandboxID
resp, err := fk.streamingRuntime.GetPortForward(&runtimeapi.PortForwardRequest{
PodSandboxId: testPodSandboxID,
Port: portForwardOpts.Ports,
})
if err != nil {
return nil, err
}
return url.Parse(resp.GetUrl())
}
// Unused functions
func (*fakeKubelet) GetNode() (*v1.Node, error) { return nil, nil }
func (*fakeKubelet) GetNodeConfig() cm.NodeConfig { return cm.NodeConfig{} }
func (*fakeKubelet) GetPodCgroupRoot() string { return "" }
func (*fakeKubelet) GetPodByCgroupfs(cgroupfs string) (*v1.Pod, bool) { return nil, false }
func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
return map[string]volume.Volume{}, true
}
func (*fakeKubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool) {
return map[string]volume.BlockVolume{}, true
}
func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil }
func (*fakeKubelet) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) { return nil, nil }
func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(_ context.Context) ([]statsapi.PodStats, error) {
return nil, nil
}
func (*fakeKubelet) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) {
return nil, nil
}
func (*fakeKubelet) ImageFsStats(_ context.Context) (*statsapi.FsStats, *statsapi.FsStats, error) {
return nil, nil, nil
}
func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil }
func (*fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
return nil, nil, nil
}
func (*fakeKubelet) GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) {
return nil, nil
}
type fakeAuth struct {
authenticateFunc func(*http.Request) (*authenticator.Response, bool, error)
attributesFunc func(user.Info, *http.Request) []authorizer.Attributes
authorizeFunc func(authorizer.Attributes) (authorized authorizer.Decision, reason string, err error)
}
func (f *fakeAuth) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
return f.authenticateFunc(req)
}
func (f *fakeAuth) GetRequestAttributes(u user.Info, req *http.Request) []authorizer.Attributes {
return f.attributesFunc(u, req)
}
func (f *fakeAuth) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
return f.authorizeFunc(a)
}
type serverTestFramework struct {
serverUnderTest *Server
fakeKubelet *fakeKubelet
fakeAuth *fakeAuth
testHTTPServer *httptest.Server
}
func newServerTest() *serverTestFramework {
return newServerTestWithDebug(true, nil)
}
func newServerTestWithDebug(enableDebugging bool, streamingServer streaming.Server) *serverTestFramework {
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
EnableDebuggingHandlers: enableDebugging,
EnableSystemLogHandler: enableDebugging,
EnableProfilingHandler: enableDebugging,
EnableDebugFlagsHandler: enableDebugging,
}
return newServerTestWithDebuggingHandlers(kubeCfg, streamingServer)
}
func newServerTestWithDebuggingHandlers(kubeCfg *kubeletconfiginternal.KubeletConfiguration, streamingServer streaming.Server) *serverTestFramework {
fw := &serverTestFramework{}
fw.fakeKubelet = &fakeKubelet{
podByNameFunc: func(namespace, name string) (*v1.Pod, bool) {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
UID: testUID,
},
}, true
},
plegHealth: true,
streamingRuntime: streamingServer,
}
fw.fakeAuth = &fakeAuth{
authenticateFunc: func(req *http.Request) (*authenticator.Response, bool, error) {
return &authenticator.Response{User: &user.DefaultInfo{Name: "test"}}, true, nil
},
attributesFunc: func(u user.Info, req *http.Request) []authorizer.Attributes {
return []authorizer.Attributes{&authorizer.AttributesRecord{User: u}}
},
authorizeFunc: func(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
return authorizer.DecisionAllow, "", nil
},
}
server := NewServer(
fw.fakeKubelet,
stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute, &record.FakeRecorder{}),
[]healthz.HealthChecker{},
flagz.NamedFlagSetsReader{},
fw.fakeAuth,
kubeCfg,
)
fw.serverUnderTest = &server
fw.testHTTPServer = httptest.NewServer(fw.serverUnderTest)
return fw
}
// A helper function to return the correct pod name.
func getPodName(name, namespace string) string {
if namespace == "" {
namespace = metav1.NamespaceDefault
}
return name + "_" + namespace
}
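// For example, getPodName("foo", "") == "foo_default" and
// getPodName("foo", "other") == "foo_other".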
func TestServeLogs(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
content := `<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`
fw.fakeKubelet.logFunc = func(w http.ResponseWriter, req *http.Request) {
// Headers must be set before WriteHeader, otherwise they are silently dropped.
w.Header().Add("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
w.Write([]byte(content))
}
resp, err := http.Get(fw.testHTTPServer.URL + "/logs/")
if err != nil {
t.Fatalf("Got error GETing: %v", err)
}
defer resp.Body.Close()
body, err := httputil.DumpResponse(resp, true)
if err != nil {
// copying the response body did not work
t.Errorf("Cannot copy resp: %#v", err)
}
result := string(body)
if !strings.Contains(result, "kubelet.log") || !strings.Contains(result, "google.log") {
t.Errorf("Received wrong data: %s", result)
}
}
func TestServeRunInContainer(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
output := "foo bar"
podNamespace := "other"
podName := "foo"
expectedPodName := getPodName(podName, podNamespace)
expectedContainerName := "baz"
expectedCommand := "ls -a"
fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
if podFullName != expectedPodName {
t.Errorf("expected %s, got %s", expectedPodName, podFullName)
}
if containerName != expectedContainerName {
t.Errorf("expected %s, got %s", expectedContainerName, containerName)
}
if strings.Join(cmd, " ") != expectedCommand {
t.Errorf("expected: %s, got %v", expectedCommand, cmd)
}
return []byte(output), nil
}
resp, err := http.Post(fw.testHTTPServer.URL+"/run/"+podNamespace+"/"+podName+"/"+expectedContainerName+"?cmd=ls%20-a", "", nil)
if err != nil {
t.Fatalf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
// copying the response body did not work
t.Errorf("Cannot copy resp: %#v", err)
}
result := string(body)
if result != output {
t.Errorf("expected %s, got %s", output, result)
}
}
func TestServeRunInContainerWithUID(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
output := "foo bar"
podNamespace := "other"
podName := "foo"
expectedPodName := getPodName(podName, podNamespace)
expectedContainerName := "baz"
expectedCommand := "ls -a"
fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
if podFullName != expectedPodName {
t.Errorf("expected %s, got %s", expectedPodName, podFullName)
}
if string(uid) != testUID {
t.Errorf("expected %s, got %s", testUID, uid)
}
if containerName != expectedContainerName {
t.Errorf("expected %s, got %s", expectedContainerName, containerName)
}
if strings.Join(cmd, " ") != expectedCommand {
t.Errorf("expected: %s, got %v", expectedCommand, cmd)
}
return []byte(output), nil
}
resp, err := http.Post(fw.testHTTPServer.URL+"/run/"+podNamespace+"/"+podName+"/"+testUID+"/"+expectedContainerName+"?cmd=ls%20-a", "", nil)
if err != nil {
t.Fatalf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
// copying the response body did not work
t.Errorf("Cannot copy resp: %#v", err)
}
result := string(body)
if result != output {
t.Errorf("expected %s, got %s", output, result)
}
}
func TestHealthCheck(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz")
}
func assertHealthFails(t *testing.T, httpURL string, expectedErrorCode int) {
resp, err := http.Get(httpURL)
if err != nil {
t.Fatalf("Got error GETing: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != expectedErrorCode {
t.Errorf("expected status code %d, got %d", expectedErrorCode, resp.StatusCode)
}
}
func TestStatusz(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, zpagesfeatures.ComponentStatusz, true)
fw := newServerTest()
defer fw.testHTTPServer.Close()
req, err := http.NewRequest(http.MethodGet, fw.testHTTPServer.URL+"/statusz", nil)
if err != nil {
t.Fatalf("Got error creating request: %v", err)
}
req.Header.Set("Accept", "text/plain")
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("Got error GETing: %v", err)
}
defer func(resp *http.Response) {
closeErr := resp.Body.Close()
if closeErr != nil {
t.Errorf("Got error closing response body: %v", err)
}
}(resp)
if resp.StatusCode != http.StatusOK {
t.Errorf("expected status code %d, got %d", http.StatusOK, resp.StatusCode)
}
resBody, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("Got error reading response body: %v", err)
}
reg := regexp.MustCompile(`Paths([:=\s]+)/configz /debug /healthz /metrics\n$`)
if reg.FindStringSubmatch(string(resBody)) == nil {
t.Errorf("statusz paths missing: %s\n\nExpected: %q", string(resBody), "Paths<delimter> /configz /debug /healthz /metrics")
}
}
// Ensure all registered handlers & services have an associated testcase.
func TestAuthzCoverage(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
for _, fineGrained := range []bool{false, true} {
t.Run(fmt.Sprintf("fineGrained=%v", fineGrained), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletFineGrainedAuthz, fineGrained)
// method:path -> has coverage
expectedCases := map[string]bool{}
// Test all the non-web-service handlers
for _, path := range fw.serverUnderTest.restfulCont.RegisteredHandlePaths() {
expectedCases["GET:"+path] = false
expectedCases["POST:"+path] = false
}
// Test all the generated web-service paths
for _, ws := range fw.serverUnderTest.restfulCont.RegisteredWebServices() {
for _, r := range ws.Routes() {
expectedCases[r.Method+":"+r.Path] = false
}
}
// This is a sanity check that the Handle->HandleWithFilter() delegation is working
// Ideally, these would move to registered web services and this list would get shorter
expectedPaths := []string{"/healthz", "/metrics", "/metrics/cadvisor"}
for _, expectedPath := range expectedPaths {
if _, expected := expectedCases["GET:"+expectedPath]; !expected {
t.Errorf("Expected registered handle path %s was missing", expectedPath)
}
}
for _, tc := range AuthzTestCases(fineGrained) {
expectedCases[tc.Method+":"+tc.Path] = true
}
for tc, found := range expectedCases {
if !found {
t.Errorf("Missing authz test case for %s", tc)
}
}
})
}
}
func TestInstallAuthNotRequiredHandlers(t *testing.T) {
fw := newServerTestWithDebug(false, nil)
defer fw.testHTTPServer.Close()
// No new handlers should be added to this list.
allowedAuthNotRequiredHandlers := sets.NewString(
"/healthz",
"/healthz/log",
"/healthz/ping",
"/healthz/syncloop",
"/metrics",
"/metrics/slis",
"/metrics/cadvisor",
"/metrics/probes",
"/metrics/resource",
"/pods/",
"/stats/",
"/stats/summary",
)
// These handlers are explicitly disabled.
debuggingDisabledHandlers := sets.NewString(
"/run/",
"/exec/",
"/attach/",
"/portForward/",
"/containerLogs/",
"/runningpods/",
"/debug/pprof/",
"/logs/",
)
allowedAuthNotRequiredHandlers.Insert(debuggingDisabledHandlers.UnsortedList()...)
// Test all the non-web-service handlers
for _, path := range fw.serverUnderTest.restfulCont.RegisteredHandlePaths() {
if !allowedAuthNotRequiredHandlers.Has(path) {
t.Errorf("New handler %q must require auth", path)
}
}
// Test all the generated web-service paths
for _, ws := range fw.serverUnderTest.restfulCont.RegisteredWebServices() {
for _, r := range ws.Routes() {
if !allowedAuthNotRequiredHandlers.Has(r.Path) {
t.Errorf("New handler %q must require auth", r.Path)
}
}
}
// Ensure the disabled handlers are in fact disabled.
for path := range debuggingDisabledHandlers {
for _, method := range []string{"GET", "POST", "PUT", "PATCH", "DELETE"} {
t.Run(method+":"+path, func(t *testing.T) {
req, err := http.NewRequest(method, fw.testHTTPServer.URL+path, nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close() //nolint:errcheck
assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
})
}
}
}
func TestAuthFilters(t *testing.T) {
// Enable features.ContainerCheckpoint during test
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerCheckpoint, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, zpagesfeatures.ComponentStatusz, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, zpagesfeatures.ComponentFlagz, true)
fw := newServerTest()
defer fw.testHTTPServer.Close()
attributesGetter := NewNodeAuthorizerAttributesGetter(authzTestNodeName)
for _, fineGrained := range []bool{false, true} {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletFineGrainedAuthz, fineGrained)
for _, tc := range AuthzTestCases(fineGrained) {
t.Run(fmt.Sprintf("method=%v:path=%v:fineGrained=%v", tc.Method, tc.Path, fineGrained), func(t *testing.T) {
var (
expectedUser = AuthzTestUser()
calledAuthenticate = false
calledAuthorize = false
calledAttributes = false
)
fw.fakeAuth.authenticateFunc = func(req *http.Request) (*authenticator.Response, bool, error) {
calledAuthenticate = true
return &authenticator.Response{User: expectedUser}, true, nil
}
fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) []authorizer.Attributes {
calledAttributes = true
require.Equal(t, expectedUser, u)
attrs := attributesGetter.GetRequestAttributes(u, req)
tc.AssertAttributes(t, attrs)
return attrs
}
fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
calledAuthorize = true
return authorizer.DecisionNoOpinion, "", nil
}
req, err := http.NewRequest(tc.Method, fw.testHTTPServer.URL+tc.Path, nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close() //nolint:errcheck
assert.Equal(t, http.StatusForbidden, resp.StatusCode)
assert.True(t, calledAuthenticate, "Authenticate was not called")
assert.True(t, calledAttributes, "Attributes were not called")
assert.True(t, calledAuthorize, "Authorize was not called")
})
}
}
}
func TestAuthenticationError(t *testing.T) {
var (
expectedUser = &user.DefaultInfo{Name: "test"}
expectedAttributes = []authorizer.Attributes{&authorizer.AttributesRecord{User: expectedUser}}
calledAuthenticate = false
calledAuthorize = false
calledAttributes = false
)
fw := newServerTest()
defer fw.testHTTPServer.Close()
fw.fakeAuth.authenticateFunc = func(req *http.Request) (*authenticator.Response, bool, error) {
calledAuthenticate = true
return &authenticator.Response{User: expectedUser}, true, nil
}
fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) []authorizer.Attributes {
calledAttributes = true
return expectedAttributes
}
fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
calledAuthorize = true
return authorizer.DecisionNoOpinion, "", errors.New("Failed")
}
assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusInternalServerError)
if !calledAuthenticate {
t.Fatalf("Authenticate was not called")
}
if !calledAttributes {
t.Fatalf("Attributes was not called")
}
if !calledAuthorize {
t.Fatalf("Authorize was not called")
}
}
func TestAuthenticationFailure(t *testing.T) {
var (
expectedUser = &user.DefaultInfo{Name: "test"}
expectedAttributes = []authorizer.Attributes{&authorizer.AttributesRecord{User: expectedUser}}
calledAuthenticate = false
calledAuthorize = false
calledAttributes = false
)
fw := newServerTest()
defer fw.testHTTPServer.Close()
fw.fakeAuth.authenticateFunc = func(req *http.Request) (*authenticator.Response, bool, error) {
calledAuthenticate = true
return nil, false, nil
}
fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) []authorizer.Attributes {
calledAttributes = true
return expectedAttributes
}
fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
calledAuthorize = true
return authorizer.DecisionNoOpinion, "", nil
}
assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusUnauthorized)
if !calledAuthenticate {
t.Fatalf("Authenticate was not called")
}
if calledAttributes {
t.Fatalf("Attributes was called unexpectedly")
}
if calledAuthorize {
t.Fatalf("Authorize was called unexpectedly")
}
}
func TestAuthorizationSuccess(t *testing.T) {
var (
expectedUser = &user.DefaultInfo{Name: "test"}
expectedAttributes = []authorizer.Attributes{&authorizer.AttributesRecord{User: expectedUser}}
calledAuthenticate = false
calledAuthorize = false
calledAttributes = false
)
fw := newServerTest()
defer fw.testHTTPServer.Close()
fw.fakeAuth.authenticateFunc = func(req *http.Request) (*authenticator.Response, bool, error) {
calledAuthenticate = true
return &authenticator.Response{User: expectedUser}, true, nil
}
fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) []authorizer.Attributes {
calledAttributes = true
return expectedAttributes
}
fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
calledAuthorize = true
return authorizer.DecisionAllow, "", nil
}
assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz")
if !calledAuthenticate {
t.Fatalf("Authenticate was not called")
}
if !calledAttributes {
t.Fatalf("Attributes were not called")
}
if !calledAuthorize {
t.Fatalf("Authorize was not called")
}
}
func TestSyncLoopCheck(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
fw.fakeKubelet.resyncInterval = time.Minute
fw.fakeKubelet.loopEntryTime = time.Now()
// Test with a recent sync loop entry time; the health check should pass.
assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz")
fw.fakeKubelet.loopEntryTime = time.Now().Add(time.Minute * -10)
assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusInternalServerError)
}
// assertHealthIsOk GETs the URL and asserts a 200 response whose body contains "ok".
func assertHealthIsOk(t *testing.T, httpURL string) {
resp, err := http.Get(httpURL)
if err != nil {
t.Fatalf("Got error GETing: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Errorf("expected status code %d, got %d", http.StatusOK, resp.StatusCode)
}
body, readErr := io.ReadAll(resp.Body)
if readErr != nil {
// reading the response body failed
t.Fatalf("Cannot read resp: %v", readErr)
}
result := string(body)
if !strings.Contains(result, "ok") {
t.Errorf("expected body contains ok, got %s", result)
}
}
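// setPodByNameFunc stubs the fake kubelet so that any GetPodByName lookup
// resolves to a pod with the given name and a single container.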
func setPodByNameFunc(fw *serverTestFramework, namespace, pod, container string) {
fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*v1.Pod, bool) {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: pod,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: container,
},
},
},
}, true
}
}
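// setGetContainerLogsFunc stubs the fake kubelet's log getter: it asserts the
// expected pod, container, and log options, then writes output to stdout.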
func setGetContainerLogsFunc(fw *serverTestFramework, t *testing.T, expectedPodName, expectedContainerName string, expectedLogOptions *v1.PodLogOptions, output string) {
fw.fakeKubelet.containerLogsFunc = func(_ context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
if podFullName != expectedPodName {
t.Errorf("expected %s, got %s", expectedPodName, podFullName)
}
if containerName != expectedContainerName {
t.Errorf("expected %s, got %s", expectedContainerName, containerName)
}
if !reflect.DeepEqual(expectedLogOptions, logOptions) {
t.Errorf("expected %#v, got %#v", expectedLogOptions, logOptions)
}
_, _ = io.WriteString(stdout, output)
return nil
}
}
func TestContainerLogs(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
tests := map[string]struct {
query string
podLogOption *v1.PodLogOptions
}{
"without tail": {"", &v1.PodLogOptions{}},
"with tail": {"?tailLines=5", &v1.PodLogOptions{TailLines: ptr.To[int64](5)}},
"with legacy tail": {"?tail=5", &v1.PodLogOptions{TailLines: ptr.To[int64](5)}},
"with tail all": {"?tail=all", &v1.PodLogOptions{}},
"with follow": {"?follow=1", &v1.PodLogOptions{Follow: true}},
}
for desc, test := range tests {
// Verify the original behavior is unchanged whether the PodLogsQuerySplitStreams feature is enabled or not.
for _, enablePodLogsQuerySplitStreams := range []bool{true, false} {
t.Run(fmt.Sprintf("%s (enablePodLogsQuerySplitStreams=%v)", desc, enablePodLogsQuerySplitStreams), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLogsQuerySplitStreams, enablePodLogsQuerySplitStreams)
expectedLogOptions := test.podLogOption.DeepCopy()
if enablePodLogsQuerySplitStreams && expectedLogOptions.Stream == nil {
// The HTTP handler will internally set the default stream value.
expectedLogOptions.Stream = ptr.To(v1.LogStreamAll)
}
output := "foo bar"
podNamespace := "other"
podName := "foo"
expectedPodName := getPodName(podName, podNamespace)
expectedContainerName := "baz"
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, expectedLogOptions, output)
resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + test.query)
if err != nil {
t.Errorf("Got error GETing: %v", err)
}
defer func() {
_ = resp.Body.Close()
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Errorf("Error reading container logs: %v", err)
}
result := string(body)
if result != output {
t.Errorf("Expected: '%v', got: '%v'", output, result)
}
})
}
}
}
func TestContainerLogsWithInvalidTail(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
output := "foo bar"
podNamespace := "other"
podName := "foo"
expectedPodName := getPodName(podName, podNamespace)
expectedContainerName := "baz"
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{}, output)
resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=-1")
if err != nil {
t.Errorf("Got error GETing: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusUnprocessableEntity {
t.Errorf("Unexpected non-error reading container logs: %#v", resp)
}
}
func TestContainerLogsWithSeparateStream(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLogsQuerySplitStreams, true)
type logEntry struct {
stream string
msg string
}
fw := newServerTest()
defer fw.testHTTPServer.Close()
var (
streamStdout = v1.LogStreamStdout
streamStderr = v1.LogStreamStderr
streamAll = v1.LogStreamAll
)
testCases := []struct {
name string
query string
logs []logEntry
expectedOutput string
expectedLogOptions *v1.PodLogOptions
}{
{
// Defaulters don't work if the query is empty.
// See also https://github.com/kubernetes/kubernetes/issues/128589
name: "empty query should return all logs",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "foo\n"},
{stream: v1.LogStreamStderr, msg: "bar\n"},
},
query: "",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamAll,
},
expectedOutput: "foo\nbar\n",
},
{
name: "missing stream param should return all logs",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "foo\n"},
{stream: v1.LogStreamStderr, msg: "bar\n"},
},
query: "?limitBytes=100",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamAll,
LimitBytes: ptr.To[int64](100),
},
expectedOutput: "foo\nbar\n",
},
{
name: "only stdout logs",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "out1\n"},
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out2\n"},
},
query: "?stream=Stdout",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamStdout,
},
expectedOutput: "out1\nout2\n",
},
{
name: "only stderr logs",
logs: []logEntry{
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStderr, msg: "err2\n"},
{stream: v1.LogStreamStdout, msg: "out1\n"},
},
query: "?stream=Stderr",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamStderr,
},
expectedOutput: "err1\nerr2\n",
},
{
name: "return all logs",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "out1\n"},
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out2\n"},
},
query: "?stream=All",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamAll,
},
expectedOutput: "out1\nerr1\nout2\n",
},
{
name: "stdout logs with legacy tail",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "out1\n"},
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out2\n"},
},
query: "?stream=All&tail=1",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamAll,
TailLines: ptr.To[int64](1),
},
expectedOutput: "out2\n",
},
{
name: "return the last 2 lines of logs",
logs: []logEntry{
{stream: v1.LogStreamStdout, msg: "out1\n"},
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out2\n"},
},
query: "?stream=All&tailLines=2",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamAll,
TailLines: ptr.To[int64](2),
},
expectedOutput: "err1\nout2\n",
},
{
name: "return the first 6 bytes of the stdout log stream",
logs: []logEntry{
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out1\n"},
{stream: v1.LogStreamStderr, msg: "err2\n"},
{stream: v1.LogStreamStdout, msg: "out2\n"},
},
query: "?stream=Stdout&limitBytes=6",
expectedLogOptions: &v1.PodLogOptions{
Stream: &streamStdout,
LimitBytes: ptr.To[int64](6),
},
expectedOutput: "out1\no",
},
{
name: "invalid stream",
logs: []logEntry{
{stream: v1.LogStreamStderr, msg: "err1\n"},
{stream: v1.LogStreamStdout, msg: "out1\n"},
},
query: "?stream=invalid",
expectedLogOptions: nil,
expectedOutput: `{"message": "Invalid request."}`,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
podNamespace := "other"
podName := "foo"
expectedContainerName := "baz"
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
fw.fakeKubelet.containerLogsFunc = func(_ context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
if !reflect.DeepEqual(tc.expectedLogOptions, logOptions) {
t.Errorf("expected %#v, got %#v", tc.expectedLogOptions, logOptions)
}
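// Emulate tailLines and limitBytes: replay only the last tailLines entries and
// stop once the remaining byte budget is exhausted.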
var dst io.Writer
tailLines := len(tc.logs)
if logOptions.TailLines != nil {
tailLines = int(*logOptions.TailLines)
}
remain := 0
if logOptions.LimitBytes != nil {
remain = int(*logOptions.LimitBytes)
} else {
for _, log := range tc.logs {
remain += len(log.msg)
}
}
logs := tc.logs[len(tc.logs)-tailLines:]
for _, log := range logs {
switch log.stream {
case v1.LogStreamStdout:
dst = stdout
case v1.LogStreamStderr:
dst = stderr
}
// Skip if the stream is not requested
if dst == nil {
continue
}
line := log.msg
if len(line) > remain {
line = line[:remain]
}
_, _ = io.WriteString(dst, line)
remain -= len(line)
if remain <= 0 {
return nil
}
}
return nil
}
resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + tc.query)
if err != nil {
t.Errorf("Got error GETing: %v", err)
}
defer func() {
_ = resp.Body.Close()
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Errorf("Error reading container logs: %v", err)
}
result := string(body)
if result != tc.expectedOutput {
t.Errorf("Expected: %q, got: %q", tc.expectedOutput, result)
}
})
}
}
func TestCheckpointContainer(t *testing.T) {
podNamespace := "other"
podName := "foo"
expectedContainerName := "baz"
setupTest := func(featureGate bool) *serverTestFramework {
// Enable features.ContainerCheckpoint during test
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ContainerCheckpoint, featureGate)
fw := newServerTest()
// GetPodByName() should always fail
fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*v1.Pod, bool) {
return nil, false
}
return fw
}
fw := setupTest(true)
defer fw.testHTTPServer.Close()
t.Run("wrong pod namespace", func(t *testing.T) {
resp, err := http.Post(fw.testHTTPServer.URL+"/checkpoint/"+podNamespace+"/"+podName+"/"+expectedContainerName, "", nil)
if err != nil {
t.Errorf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNotFound {
t.Errorf("Unexpected non-error checkpointing container: %#v", resp)
}
})
// Let GetPodByName() return a result; the container "wrongContainerName" is not part of the pod.
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
t.Run("wrong container name", func(t *testing.T) {
resp, err := http.Post(fw.testHTTPServer.URL+"/checkpoint/"+podNamespace+"/"+podName+"/wrongContainerName", "", nil)
if err != nil {
t.Errorf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNotFound {
t.Errorf("Unexpected non-error checkpointing container: %#v", resp)
}
})
// Now the checkpointing of the container fails
fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*v1.Pod, bool) {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: podNamespace,
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "checkpointingFailure",
},
},
},
}, true
}
t.Run("checkpointing fails", func(t *testing.T) {
resp, err := http.Post(fw.testHTTPServer.URL+"/checkpoint/"+podNamespace+"/"+podName+"/checkpointingFailure", "", nil)
if err != nil {
t.Errorf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
assert.Equal(t, 500, resp.StatusCode)
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, "checkpointing of other/foo/checkpointingFailure failed (Returning error for test)", string(body))
})
// Now test that checkpointing succeeds
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
t.Run("checkpointing succeeds", func(t *testing.T) {
resp, err := http.Post(fw.testHTTPServer.URL+"/checkpoint/"+podNamespace+"/"+podName+"/"+expectedContainerName, "", nil)
if err != nil {
t.Errorf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
})
// Now test for 404 if checkpointing support is explicitly disabled.
fw.testHTTPServer.Close()
fw = setupTest(false)
defer fw.testHTTPServer.Close()
setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
t.Run("checkpointing fails because disabled", func(t *testing.T) {
resp, err := http.Post(fw.testHTTPServer.URL+"/checkpoint/"+podNamespace+"/"+podName+"/"+expectedContainerName, "", nil)
if err != nil {
t.Errorf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
assert.Equal(t, 404, resp.StatusCode)
})
}
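// makeReq builds a request for the given method and URL, advertising the
// client's streaming protocol version via the X-Stream-Protocol-Version header.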
func makeReq(t *testing.T, method, url, clientProtocol string) *http.Request {
req, err := http.NewRequest(method, url, nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
req.Header.Set("Content-Type", "")
req.Header.Add("X-Stream-Protocol-Version", clientProtocol)
return req
}
func TestServeExecInContainerIdleTimeout(t *testing.T) {
ss, err := newTestStreamingServer(100 * time.Millisecond)
require.NoError(t, err)
defer ss.testHTTPServer.Close()
fw := newServerTestWithDebug(true, ss)
defer fw.testHTTPServer.Close()
podNamespace := "other"
podName := "foo"
expectedContainerName := "baz"
url := fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?c=ls&c=-a&" + api.ExecStdinParam + "=1"
upgradeRoundTripper, err := spdy.NewRoundTripper(&tls.Config{})
if err != nil {
t.Fatalf("Error creating SpdyRoundTripper: %v", err)
}
c := &http.Client{Transport: upgradeRoundTripper}
resp, err := c.Do(makeReq(t, "POST", url, "v4.channel.k8s.io"))
if err != nil {
t.Fatalf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
upgradeRoundTripper.Dialer = &net.Dialer{
Deadline: time.Now().Add(60 * time.Second),
Timeout: 60 * time.Second,
}
conn, err := upgradeRoundTripper.NewConnection(resp)
if err != nil {
t.Fatalf("Unexpected error creating streaming connection: %s", err)
}
if conn == nil {
t.Fatal("Unexpected nil connection")
}
<-conn.CloseChan()
}
func testExecAttach(t *testing.T, verb string) {
tests := map[string]struct {
stdin bool
stdout bool
stderr bool
tty bool
responseStatusCode int
uid bool
}{
"no input or output": {responseStatusCode: http.StatusBadRequest},
"stdin": {stdin: true, responseStatusCode: http.StatusSwitchingProtocols},
"stdout": {stdout: true, responseStatusCode: http.StatusSwitchingProtocols},
"stderr": {stderr: true, responseStatusCode: http.StatusSwitchingProtocols},
"stdout and stderr": {stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols},
"stdin stdout and stderr": {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols},
"stdin stdout stderr with uid": {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols, uid: true},
}
for desc := range tests {
test := tests[desc]
t.Run(desc, func(t *testing.T) {
ss, err := newTestStreamingServer(0)
require.NoError(t, err)
defer ss.testHTTPServer.Close()
fw := newServerTestWithDebug(true, ss)
defer fw.testHTTPServer.Close()
podNamespace := "other"
podName := "foo"
expectedPodName := getPodName(podName, podNamespace)
expectedContainerName := "baz"
expectedCommand := "ls -a"
expectedStdin := "stdin"
expectedStdout := "stdout"
expectedStderr := "stderr"
done := make(chan struct{})
clientStdoutReadDone := make(chan struct{})
clientStderrReadDone := make(chan struct{})
execInvoked := false
attachInvoked := false
checkStream := func(podFullName string, uid types.UID, containerName string, streamOpts remotecommandserver.Options) {
assert.Equal(t, expectedPodName, podFullName, "podFullName")
if test.uid {
assert.Equal(t, testUID, string(uid), "uid")
}
assert.Equal(t, expectedContainerName, containerName, "containerName")
assert.Equal(t, test.stdin, streamOpts.Stdin, "stdin")
assert.Equal(t, test.stdout, streamOpts.Stdout, "stdout")
assert.Equal(t, test.tty, streamOpts.TTY, "tty")
assert.Equal(t, !test.tty && test.stderr, streamOpts.Stderr, "stderr")
}
fw.fakeKubelet.getExecCheck = func(podFullName string, uid types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) {
execInvoked = true
assert.Equal(t, expectedCommand, strings.Join(cmd, " "), "cmd")
checkStream(podFullName, uid, containerName, streamOpts)
}
fw.fakeKubelet.getAttachCheck = func(podFullName string, uid types.UID, containerName string, streamOpts remotecommandserver.Options) {
attachInvoked = true
checkStream(podFullName, uid, containerName, streamOpts)
}
testStream := func(containerID string, in io.Reader, out, stderr io.WriteCloser, tty bool, done chan struct{}) error {
close(done)
assert.Equal(t, testContainerID, containerID, "containerID")
assert.Equal(t, test.tty, tty, "tty")
require.Equal(t, test.stdin, in != nil, "in")
require.Equal(t, test.stdout, out != nil, "out")
require.Equal(t, !test.tty && test.stderr, stderr != nil, "err")
if test.stdin {
b := make([]byte, 10)
n, err := in.Read(b)
assert.NoError(t, err, "reading from stdin")
assert.Equal(t, expectedStdin, string(b[0:n]), "content from stdin")
}
if test.stdout {
_, err := out.Write([]byte(expectedStdout))
assert.NoError(t, err, "writing to stdout")
out.Close()
<-clientStdoutReadDone
}
if !test.tty && test.stderr {
_, err := stderr.Write([]byte(expectedStderr))
assert.NoError(t, err, "writing to stderr")
stderr.Close()
<-clientStderrReadDone
}
return nil
}
ss.fakeRuntime.execFunc = func(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
assert.Equal(t, expectedCommand, strings.Join(cmd, " "), "cmd")
return testStream(containerID, stdin, stdout, stderr, tty, done)
}
ss.fakeRuntime.attachFunc = func(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return testStream(containerID, stdin, stdout, stderr, tty, done)
}
var url string
if test.uid {
url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + testUID + "/" + expectedContainerName + "?ignore=1"
} else {
url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?ignore=1"
}
if verb == "exec" {
url += "&command=ls&command=-a"
}
if test.stdin {
url += "&" + api.ExecStdinParam + "=1"
}
if test.stdout {
url += "&" + api.ExecStdoutParam + "=1"
}
if test.stderr && !test.tty {
url += "&" + api.ExecStderrParam + "=1"
}
if test.tty {
url += "&" + api.ExecTTYParam + "=1"
}
var (
resp *http.Response
upgradeRoundTripper httpstream.UpgradeRoundTripper
c *http.Client
)
upgradeRoundTripper, err = spdy.NewRoundTripper(&tls.Config{})
if err != nil {
t.Fatalf("Error creating SpdyRoundTripper: %v", err)
}
c = &http.Client{Transport: upgradeRoundTripper}
resp, err = c.Do(makeReq(t, "POST", url, "v4.channel.k8s.io"))
require.NoError(t, err, "POSTing")
defer resp.Body.Close()
_, err = io.ReadAll(resp.Body)
assert.NoError(t, err, "reading response body")
require.Equal(t, test.responseStatusCode, resp.StatusCode, "response status")
if test.responseStatusCode != http.StatusSwitchingProtocols {
return
}
conn, err := upgradeRoundTripper.NewConnection(resp)
require.NoError(t, err, "creating streaming connection")
defer conn.Close()
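// Create the mandatory error stream first; the stdin/stdout/stderr streams
// follow below based on the test case.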
h := http.Header{}
h.Set(api.StreamType, api.StreamTypeError)
_, err = conn.CreateStream(h)
require.NoError(t, err, "creating error stream")
if test.stdin {
h.Set(api.StreamType, api.StreamTypeStdin)
stream, err := conn.CreateStream(h)
require.NoError(t, err, "creating stdin stream")
_, err = stream.Write([]byte(expectedStdin))
require.NoError(t, err, "writing to stdin stream")
}
var stdoutStream httpstream.Stream
if test.stdout {
h.Set(api.StreamType, api.StreamTypeStdout)
stdoutStream, err = conn.CreateStream(h)
require.NoError(t, err, "creating stdout stream")
}
var stderrStream httpstream.Stream
if test.stderr && !test.tty {
h.Set(api.StreamType, api.StreamTypeStderr)
stderrStream, err = conn.CreateStream(h)
require.NoError(t, err, "creating stderr stream")
}
if test.stdout {
output := make([]byte, 10)
n, err := stdoutStream.Read(output)
close(clientStdoutReadDone)
assert.NoError(t, err, "reading from stdout stream")
assert.Equal(t, expectedStdout, string(output[0:n]), "stdout")
}
if test.stderr && !test.tty {
output := make([]byte, 10)
n, err := stderrStream.Read(output)
close(clientStderrReadDone)
assert.NoError(t, err, "reading from stderr stream")
assert.Equal(t, expectedStderr, string(output[0:n]), "stderr")
}
// wait for the server to finish before checking if the attach/exec funcs were invoked
<-done
if verb == "exec" {
assert.True(t, execInvoked, "exec should be invoked")
assert.False(t, attachInvoked, "attach should not be invoked")
} else {
assert.True(t, attachInvoked, "attach should be invoked")
assert.False(t, execInvoked, "exec should not be invoked")
}
})
}
}
func TestServeExecInContainer(t *testing.T) {
testExecAttach(t, "exec")
}
func TestServeAttachContainer(t *testing.T) {
testExecAttach(t, "attach")
}
func TestServePortForwardIdleTimeout(t *testing.T) {
ss, err := newTestStreamingServer(100 * time.Millisecond)
require.NoError(t, err)
defer ss.testHTTPServer.Close()
fw := newServerTestWithDebug(true, ss)
defer fw.testHTTPServer.Close()
podNamespace := "other"
podName := "foo"
url := fw.testHTTPServer.URL + "/portForward/" + podNamespace + "/" + podName
upgradeRoundTripper, err := spdy.NewRoundTripper(&tls.Config{})
if err != nil {
t.Fatalf("Error creating SpdyRoundTripper: %v", err)
}
c := &http.Client{Transport: upgradeRoundTripper}
req := makeReq(t, "POST", url, "portforward.k8s.io")
resp, err := c.Do(req)
if err != nil {
t.Fatalf("Got error POSTing: %v", err)
}
defer resp.Body.Close()
conn, err := upgradeRoundTripper.NewConnection(resp)
if err != nil {
t.Fatalf("Unexpected error creating streaming connection: %s", err)
}
if conn == nil {
t.Fatal("Unexpected nil connection")
}
defer conn.Close()
<-conn.CloseChan()
}
func TestServePortForward(t *testing.T) {
tests := map[string]struct {
port string
uid bool
clientData string
containerData string
shouldError bool
}{
"no port": {port: "", shouldError: true},
"none number port": {port: "abc", shouldError: true},
"negative port": {port: "-1", shouldError: true},
"too large port": {port: "65536", shouldError: true},
"0 port": {port: "0", shouldError: true},
"min port": {port: "1", shouldError: false},
"normal port": {port: "8000", shouldError: false},
"normal port with data forward": {port: "8000", clientData: "client data", containerData: "container data", shouldError: false},
"max port": {port: "65535", shouldError: false},
"normal port with uid": {port: "8000", uid: true, shouldError: false},
}
podNamespace := "other"
podName := "foo"
for desc := range tests {
test := tests[desc]
t.Run(desc, func(t *testing.T) {
ss, err := newTestStreamingServer(0)
require.NoError(t, err)
defer ss.testHTTPServer.Close()
fw := newServerTestWithDebug(true, ss)
defer fw.testHTTPServer.Close()
portForwardFuncDone := make(chan struct{})
fw.fakeKubelet.getPortForwardCheck = func(name, namespace string, uid types.UID, opts portforward.V4Options) {
assert.Equal(t, podName, name, "pod name")
assert.Equal(t, podNamespace, namespace, "pod namespace")
if test.uid {
assert.Equal(t, testUID, string(uid), "uid")
}
}
ss.fakeRuntime.portForwardFunc = func(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
defer close(portForwardFuncDone)
assert.Equal(t, testPodSandboxID, podSandboxID, "pod sandbox id")
// The port should be valid if it reaches here.
testPort, err := strconv.ParseInt(test.port, 10, 32)
require.NoError(t, err, "parse port")
assert.Equal(t, int32(testPort), port, "port")
if test.clientData != "" {
fromClient := make([]byte, 32)
n, err := stream.Read(fromClient)
assert.NoError(t, err, "reading client data")
assert.Equal(t, test.clientData, string(fromClient[0:n]), "client data")
}
if test.containerData != "" {
_, err := stream.Write([]byte(test.containerData))
assert.NoError(t, err, "writing container data")
}
return nil
}
var url string
if test.uid {
url = fmt.Sprintf("%s/portForward/%s/%s/%s", fw.testHTTPServer.URL, podNamespace, podName, testUID)
} else {
url = fmt.Sprintf("%s/portForward/%s/%s", fw.testHTTPServer.URL, podNamespace, podName)
}
var (
upgradeRoundTripper httpstream.UpgradeRoundTripper
c *http.Client
)
upgradeRoundTripper, err = spdy.NewRoundTripper(&tls.Config{})
if err != nil {
t.Fatalf("Error creating SpdyRoundTripper: %v", err)
}
c = &http.Client{Transport: upgradeRoundTripper}
req := makeReq(t, "POST", url, "portforward.k8s.io")
resp, err := c.Do(req)
require.NoError(t, err, "POSTing")
defer resp.Body.Close()
assert.Equal(t, http.StatusSwitchingProtocols, resp.StatusCode, "status code")
conn, err := upgradeRoundTripper.NewConnection(resp)
require.NoError(t, err, "creating streaming connection")
defer conn.Close()
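// Port-forward streams are selected via headers: each forwarded port gets an
// error stream and a data stream, identified by streamType and port.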
headers := http.Header{}
headers.Set("streamType", "error")
headers.Set("port", test.port)
_, err = conn.CreateStream(headers)
assert.Equal(t, test.shouldError, err != nil, "expect error")
if test.shouldError {
return
}
headers.Set("streamType", "data")
headers.Set("port", test.port)
dataStream, err := conn.CreateStream(headers)
require.NoError(t, err, "create stream")
if test.clientData != "" {
_, err := dataStream.Write([]byte(test.clientData))
assert.NoError(t, err, "writing client data")
}
if test.containerData != "" {
fromContainer := make([]byte, 32)
n, err := dataStream.Read(fromContainer)
assert.NoError(t, err, "reading container data")
assert.Equal(t, test.containerData, string(fromContainer[0:n]), "container data")
}
<-portForwardFuncDone
})
}
}
func TestMetricBuckets(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, zpagesfeatures.ComponentStatusz, true)
tests := map[string]struct {
url string
bucket string
}{
"healthz endpoint": {url: "/healthz", bucket: "healthz"},
"attach": {url: "/attach/podNamespace/podID/containerName", bucket: "attach"},
"attach with uid": {url: "/attach/podNamespace/podID/uid/containerName", bucket: "attach"},
"configz": {url: "/configz", bucket: "configz"},
"containerLogs": {url: "/containerLogs/podNamespace/podID/containerName", bucket: "containerLogs"},
"debug v flags": {url: "/debug/flags/v", bucket: "debug"},
"pprof with sub": {url: "/debug/pprof/subpath", bucket: "debug"},
"exec": {url: "/exec/podNamespace/podID/containerName", bucket: "exec"},
"exec with uid": {url: "/exec/podNamespace/podID/uid/containerName", bucket: "exec"},
"healthz": {url: "/healthz/", bucket: "healthz"},
"healthz log sub": {url: "/healthz/log", bucket: "healthz"},
"healthz ping": {url: "/healthz/ping", bucket: "healthz"},
"healthz sync loop": {url: "/healthz/syncloop", bucket: "healthz"},
"logs": {url: "/logs/", bucket: "logs"},
"logs with path": {url: "/logs/logpath", bucket: "logs"},
"metrics": {url: "/metrics", bucket: "metrics"},
"metrics cadvisor sub": {url: "/metrics/cadvisor", bucket: "metrics/cadvisor"},
"metrics probes sub": {url: "/metrics/probes", bucket: "metrics/probes"},
"metrics resource sub": {url: "/metrics/resource", bucket: "metrics/resource"},
"pods": {url: "/pods/", bucket: "pods"},
"portForward": {url: "/portForward/podNamespace/podID", bucket: "portForward"},
"portForward with uid": {url: "/portForward/podNamespace/podID/uid", bucket: "portForward"},
"run": {url: "/run/podNamespace/podID/containerName", bucket: "run"},
"run with uid": {url: "/run/podNamespace/podID/uid/containerName", bucket: "run"},
"runningpods": {url: "/runningpods/", bucket: "runningpods"},
"stats": {url: "/stats/", bucket: "stats"},
"stats summary sub": {url: "/stats/summary", bucket: "stats"},
"statusz": {url: "/statusz", bucket: "statusz"},
"/flagz": {url: "/flagz", bucket: "flagz"},
"invalid path": {url: "/junk", bucket: "other"},
"invalid path starting with good": {url: "/healthzjunk", bucket: "other"},
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, zpagesfeatures.ComponentFlagz, true)
fw := newServerTest()
defer fw.testHTTPServer.Close()
for _, test := range tests {
path := test.url
bucket := test.bucket
require.Equal(t, bucket, fw.serverUnderTest.getMetricBucket(path))
}
}
func TestMetricMethodBuckets(t *testing.T) {
tests := map[string]struct {
method string
bucket string
}{
"normal GET": {method: "GET", bucket: "GET"},
"normal POST": {method: "POST", bucket: "POST"},
"invalid method": {method: "WEIRD", bucket: "other"},
}
fw := newServerTest()
defer fw.testHTTPServer.Close()
for _, test := range tests {
method := test.method
bucket := test.bucket
require.Equal(t, bucket, fw.serverUnderTest.getMetricMethodBucket(method))
}
}
func TestDebuggingDisabledHandlers(t *testing.T) {
// For backward compatibility: even if EnableSystemLogHandler or EnableProfilingHandler
// is set, /logs, /pprof and /flags must not be served unless EnableDebuggingHandlers is also set.
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
EnableDebuggingHandlers: false,
EnableSystemLogHandler: true,
EnableDebugFlagsHandler: true,
EnableProfilingHandler: true,
}
fw := newServerTestWithDebuggingHandlers(kubeCfg, nil)
defer fw.testHTTPServer.Close()
paths := []string{
"/run", "/exec", "/attach", "/portForward", "/containerLogs", "/runningpods",
"/run/", "/exec/", "/attach/", "/portForward/", "/containerLogs/", "/runningpods/",
"/run/xxx", "/exec/xxx", "/attach/xxx", "/debug/pprof/profile", "/logs/kubelet.log",
}
for _, p := range paths {
verifyEndpointResponse(t, fw, p, "Debug endpoints are disabled.\n")
}
}
func TestDisablingLogAndProfilingHandler(t *testing.T) {
kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
EnableDebuggingHandlers: true,
}
fw := newServerTestWithDebuggingHandlers(kubeCfg, nil)
defer fw.testHTTPServer.Close()
// verify debug endpoints are disabled
verifyEndpointResponse(t, fw, "/logs/kubelet.log", "logs endpoint is disabled.\n")
verifyEndpointResponse(t, fw, "/debug/pprof/profile?seconds=2", "profiling endpoint is disabled.\n")
verifyEndpointResponse(t, fw, "/debug/flags/v", "flags endpoint is disabled.\n")
}
func TestFailedParseParamsSummaryHandler(t *testing.T) {
fw := newServerTest()
defer fw.testHTTPServer.Close()
resp, err := http.Post(fw.testHTTPServer.URL+"/stats/summary", "invalid/content/type", nil)
assert.NoError(t, err)
defer resp.Body.Close()
v, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
assert.Contains(t, string(v), "parse form failed")
}
func verifyEndpointResponse(t *testing.T, fw *serverTestFramework, path string, expectedResponse string) {
resp, err := http.Get(fw.testHTTPServer.URL + path)
require.NoError(t, err)
assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, expectedResponse, string(body))
resp, err = http.Post(fw.testHTTPServer.URL+path, "", nil)
require.NoError(t, err)
assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
body, err = io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, expectedResponse, string(body))
}
func TestTrimURLPath(t *testing.T) {
tests := []struct {
path, expected string
}{
{"", ""},
{"//", ""},
{"/pods", "pods"},
{"pods", "pods"},
{"pods/", "pods"},
{"good/", "good"},
{"pods/probes", "pods"},
{"metrics", "metrics"},
{"metrics/resource", "metrics/resource"},
{"metrics/hello", "metrics/hello"},
}
for _, test := range tests {
assert.Equalf(t, test.expected, getURLRootPath(test.path), "path is: %s", test.path)
}
}
func TestFineGrainedAuthz(t *testing.T) {
// Enable features.KubeletFineGrainedAuthz during test
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletFineGrainedAuthz, true)
fw := newServerTest()
defer fw.testHTTPServer.Close()
attributesGetter := NewNodeAuthorizerAttributesGetter(authzTestNodeName)
testCases := []struct {
name string
path string
expectedSubResources []string
authorizer func(authorizer.Attributes) (authorized authorizer.Decision, reason string, err error)
wantStatusCode int
wantCalledAuthorizeCount int
}{
{
name: "both subresources rejected",
path: "/configz",
expectedSubResources: []string{"configz", "proxy"},
authorizer: func(authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
return authorizer.DecisionNoOpinion, "", nil
},
wantStatusCode: 403,
wantCalledAuthorizeCount: 2,
},
{
name: "fine grained rejected, proxy accepted",
path: "/configz",
expectedSubResources: []string{"configz", "proxy"},
authorizer: func(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
switch a.GetSubresource() {
case "configz":
return authorizer.DecisionNoOpinion, "", nil
case "proxy":
return authorizer.DecisionAllow, "", nil
default:
return authorizer.DecisionNoOpinion, "", fmt.Errorf("unexpected subresource %v", a.GetSubresource())
}
},
wantStatusCode: 200,
wantCalledAuthorizeCount: 2,
},
{
name: "fine grained accepted",
path: "/configz",
expectedSubResources: []string{"configz", "proxy"},
authorizer: func(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
switch a.GetSubresource() {
case "configz":
return authorizer.DecisionAllow, "", nil
case "proxy":
return authorizer.DecisionNoOpinion, "", fmt.Errorf("did not expect code to reach here")
default:
return authorizer.DecisionNoOpinion, "", fmt.Errorf("unexpected subresource %v", a.GetSubresource())
}
},
wantStatusCode: 200,
wantCalledAuthorizeCount: 1,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
calledAuthenticate := false
calledAuthorizeCount := 0
calledAttributes := false
fw.fakeAuth.authenticateFunc = func(req *http.Request) (*authenticator.Response, bool, error) {
calledAuthenticate = true
return &authenticator.Response{User: AuthzTestUser()}, true, nil
}
fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) []authorizer.Attributes {
calledAttributes = true
attrs := attributesGetter.GetRequestAttributes(u, req)
var gotSubresources []string
for _, attr := range attrs {
gotSubresources = append(gotSubresources, attr.GetSubresource())
}
require.Equal(t, tc.expectedSubResources, gotSubresources)
return attrs
}
fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
calledAuthorizeCount += 1
return tc.authorizer(a)
}
req, err := http.NewRequest("GET", fw.testHTTPServer.URL+tc.path, nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, tc.wantStatusCode, resp.StatusCode)
assert.True(t, calledAuthenticate, "Authenticate was not called")
assert.True(t, calledAttributes, "Attributes were not called")
assert.Equal(t, tc.wantCalledAuthorizeCount, calledAuthorizeCount)
})
}
}
func TestNewServerRegistersMetricsSLIsEndpointTwice(t *testing.T) {
host := &fakeKubelet{}
resourceAnalyzer := stats.NewResourceAnalyzer(nil, time.Minute, &record.FakeRecorder{})
server1 := NewServer(host, resourceAnalyzer, []healthz.HealthChecker{}, flagz.NamedFlagSetsReader{}, nil, nil)
server2 := NewServer(host, resourceAnalyzer, []healthz.HealthChecker{}, flagz.NamedFlagSetsReader{}, nil, nil)
// Check if both servers registered the /metrics/slis endpoint
assert.Contains(t, server1.restfulCont.RegisteredHandlePaths(), "/metrics/slis", "First server should register /metrics/slis")
assert.Contains(t, server2.restfulCont.RegisteredHandlePaths(), "/metrics/slis", "Second server should register /metrics/slis")
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"sync"
"sync/atomic"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
)
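// statCache maps pod UIDs to the volumeStatCalculator tracking that pod's volumes.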
type statCache map[types.UID]*volumeStatCalculator
// fsResourceAnalyzerInterface is for embedding fs functions into ResourceAnalyzer
type fsResourceAnalyzerInterface interface {
GetPodVolumeStats(uid types.UID) (PodVolumeStats, bool)
}
// fsResourceAnalyzer provides stats about fs resource usage
type fsResourceAnalyzer struct {
statsProvider Provider
calcPeriod time.Duration
cachedVolumeStats atomic.Value
startOnce sync.Once
eventRecorder record.EventRecorder
}
var _ fsResourceAnalyzerInterface = &fsResourceAnalyzer{}
// newFsResourceAnalyzer returns a new fsResourceAnalyzer implementation
func newFsResourceAnalyzer(statsProvider Provider, calcVolumePeriod time.Duration, eventRecorder record.EventRecorder) *fsResourceAnalyzer {
r := &fsResourceAnalyzer{
statsProvider: statsProvider,
calcPeriod: calcVolumePeriod,
eventRecorder: eventRecorder,
}
r.cachedVolumeStats.Store(make(statCache))
return r
}
// Start starts eager background caching of volume stats.
func (s *fsResourceAnalyzer) Start() {
s.startOnce.Do(func() {
if s.calcPeriod <= 0 {
klog.InfoS("Volume stats collection disabled")
return
}
klog.InfoS("Starting FS ResourceAnalyzer")
go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod)
})
}
// updateCachedPodVolumeStats calculates and caches the PodVolumeStats for every Pod known to the kubelet.
func (s *fsResourceAnalyzer) updateCachedPodVolumeStats() {
oldCache := s.cachedVolumeStats.Load().(statCache)
newCache := make(statCache)
// Copy existing entries to new map, creating/starting new entries for pods missing from the cache
for _, pod := range s.statsProvider.GetPods() {
if value, found := oldCache[pod.GetUID()]; !found {
newCache[pod.GetUID()] = newVolumeStatCalculator(s.statsProvider, s.calcPeriod, pod, s.eventRecorder).StartOnce()
} else {
newCache[pod.GetUID()] = value
}
}
// Stop entries for pods that have been deleted
for uid, entry := range oldCache {
if _, found := newCache[uid]; !found {
entry.StopOnce()
}
}
// Update the cache reference
s.cachedVolumeStats.Store(newCache)
}
// GetPodVolumeStats returns the PodVolumeStats for a given pod. Results are looked up from a cache that
// is eagerly populated in the background, and never calculated on the fly.
func (s *fsResourceAnalyzer) GetPodVolumeStats(uid types.UID) (PodVolumeStats, bool) {
cache := s.cachedVolumeStats.Load().(statCache)
statCalc, found := cache[uid]
if !found {
// TODO: Differentiate between stats being empty
// See issue #20679
return PodVolumeStats{}, false
}
return statCalc.GetLatest()
}
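// A minimal consumption sketch (hypothetical analyzer and pod values; the
// Volumes field is assumed from the stats API):
//
//	if vs, found := analyzer.GetPodVolumeStats(pod.UID); found {
//		klog.InfoS("Cached volume stats", "pod", pod.Name, "volumes", len(vs.Volumes))
//	}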
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package stats
import (
"context"
"fmt"
"net/http"
restful "github.com/emicklei/go-restful/v3"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorv2 "github.com/google/cadvisor/info/v2"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/volume"
)
// Provider hosts methods required by stats handlers.
type Provider interface {
// The following stats are provided by either CRI or cAdvisor.
//
// ListPodStats returns the stats of all the containers managed by pods.
ListPodStats(ctx context.Context) ([]statsapi.PodStats, error)
// ListPodCPUAndMemoryStats returns the CPU and memory stats of all the
// pod-managed containers.
ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error)
// ListPodStatsAndUpdateCPUNanoCoreUsage returns the stats of all the
// containers managed by pods and force update the cpu usageNanoCores.
// This is a workaround for CRI runtimes that do not integrate with
// cadvisor. See https://github.com/kubernetes/kubernetes/issues/72788
// for more details.
ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error)
// ImageFsStats returns the stats of the image filesystem.
// Kubelet allows three options for container filesystems
// Everything is on node fs (so no image filesystem)
// Container storage is on a dedicated disk (imageFs is separate from root)
// Container Filesystem is on root and Images are stored on ImageFs
// First return parameter is the image filesystem and
// second parameter is the container filesystem
ImageFsStats(ctx context.Context) (imageFs *statsapi.FsStats, containerFs *statsapi.FsStats, callErr error)
// The following stats are provided by cAdvisor.
//
// GetCgroupStats returns the stats and the networking usage of the cgroup
// with the specified cgroupName.
GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error)
// GetCgroupCPUAndMemoryStats returns the CPU and memory stats of the cgroup with the specified cgroupName.
GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error)
// RootFsStats returns the stats of the node root filesystem.
RootFsStats() (*statsapi.FsStats, error)
// GetRequestedContainersInfo returns the information of the container with
// the containerName, and with the specified cAdvisor options.
GetRequestedContainersInfo(containerName string, options cadvisorv2.RequestOptions) (map[string]*cadvisorapi.ContainerInfo, error)
// The following information is provided by Kubelet.
//
// GetPodByName returns the spec of the pod with the name in the specified
// namespace.
GetPodByName(namespace, name string) (*v1.Pod, bool)
// GetNode returns the spec of the local node.
GetNode() (*v1.Node, error)
// GetNodeConfig returns the configuration of the local node.
GetNodeConfig() cm.NodeConfig
// ListVolumesForPod returns the stats of the volume used by the pod with
// the podUID.
ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
// ListBlockVolumesForPod returns the stats of the volume used by the
// pod with the podUID.
ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, bool)
// GetPods returns the specs of all the pods running on this node.
GetPods() []*v1.Pod
// RlimitStats returns the rlimit stats of system.
RlimitStats() (*statsapi.RlimitStats, error)
// GetPodCgroupRoot returns the literal cgroupfs value for the cgroup containing all pods
GetPodCgroupRoot() string
// GetPodByCgroupfs provides the pod that maps to the specified cgroup literal, as well
// as whether the pod was found.
GetPodByCgroupfs(cgroupfs string) (*v1.Pod, bool)
}
type handler struct {
provider Provider
summaryProvider SummaryProvider
}
// CreateHandlers creates the REST handlers for the stats.
func CreateHandlers(rootPath string, provider Provider, summaryProvider SummaryProvider) *restful.WebService {
h := &handler{provider, summaryProvider}
ws := &restful.WebService{}
ws.Path(rootPath).
Produces(restful.MIME_JSON)
endpoints := []struct {
path string
handler restful.RouteFunction
}{
{"/summary", h.handleSummary},
}
for _, e := range endpoints {
for _, method := range []string{"GET", "POST"} {
ws.Route(ws.
Method(method).
Path(e.path).
To(e.handler))
}
}
return ws
}
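// A minimal wiring sketch for the returned WebService (hypothetical provider
// and summaryProvider values; address for illustration only):
//
//	container := restful.NewContainer()
//	container.Add(CreateHandlers("/stats", provider, summaryProvider))
//	_ = http.ListenAndServe("127.0.0.1:10255", container)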
// handleSummary handles stats summary requests to /stats/summary.
// If the "only_cpu_and_memory" GET param is true, only CPU and memory stats are returned.
func (h *handler) handleSummary(request *restful.Request, response *restful.Response) {
ctx := request.Request.Context()
onlyCPUAndMemory := false
err := request.Request.ParseForm()
if err != nil {
handleError(response, "/stats/summary", fmt.Errorf("parse form failed: %w", err))
return
}
if onlyCPUAndMemoryParam, found := request.Request.Form["only_cpu_and_memory"]; found &&
len(onlyCPUAndMemoryParam) == 1 && onlyCPUAndMemoryParam[0] == "true" {
onlyCPUAndMemory = true
}
var summary *statsapi.Summary
if onlyCPUAndMemory {
summary, err = h.summaryProvider.GetCPUAndMemoryStats(ctx)
} else {
// external calls to the summary API use cached stats
forceStatsUpdate := false
summary, err = h.summaryProvider.Get(ctx, forceStatsUpdate)
}
if err != nil {
handleError(response, "/stats/summary", err)
} else {
writeResponse(response, summary)
}
}
func writeResponse(response *restful.Response, stats interface{}) {
if err := response.WriteAsJson(stats); err != nil {
klog.ErrorS(err, "Error writing response")
}
}
// handleError serializes an error object into an HTTP response.
// request is provided for logging.
func handleError(response *restful.Response, request string, err error) {
switch err {
case kubecontainer.ErrContainerNotFound:
response.WriteError(http.StatusNotFound, err)
default:
msg := fmt.Sprintf("Internal Error: %v", err)
klog.ErrorS(err, "HTTP InternalServerError serving", "request", request)
response.WriteErrorString(http.StatusInternalServerError, msg)
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"k8s.io/client-go/tools/record"
"time"
)
// ResourceAnalyzer provides statistics on node resource consumption
type ResourceAnalyzer interface {
Start()
fsResourceAnalyzerInterface
SummaryProvider
}
// resourceAnalyzer implements ResourceAnalyzer
type resourceAnalyzer struct {
*fsResourceAnalyzer
SummaryProvider
}
var _ ResourceAnalyzer = &resourceAnalyzer{}
// NewResourceAnalyzer returns a new ResourceAnalyzer
func NewResourceAnalyzer(statsProvider Provider, calVolumeFrequency time.Duration, eventRecorder record.EventRecorder) ResourceAnalyzer {
fsAnalyzer := newFsResourceAnalyzer(statsProvider, calVolumeFrequency, eventRecorder)
summaryProvider := NewSummaryProvider(statsProvider)
return &resourceAnalyzer{fsAnalyzer, summaryProvider}
}
// Start starts background functions necessary for the ResourceAnalyzer to function
func (ra *resourceAnalyzer) Start() {
ra.fsResourceAnalyzer.Start()
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package stats
import (
"context"
"fmt"
"k8s.io/klog/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/util"
)
// SummaryProvider provides summaries of the stats from Kubelet.
type SummaryProvider interface {
// Get provides a new Summary with the stats from Kubelet,
// and will update some stats if updateStats is true
Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error)
// GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet.
GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error)
}
// summaryProviderImpl implements the SummaryProvider interface.
type summaryProviderImpl struct {
// kubeletCreationTime is the time at which the summaryProvider was created.
kubeletCreationTime metav1.Time
// systemBootTime is the time at which the system was started
systemBootTime metav1.Time
provider Provider
}
var _ SummaryProvider = &summaryProviderImpl{}
// NewSummaryProvider returns a SummaryProvider using the stats provided by the
// specified statsProvider.
func NewSummaryProvider(statsProvider Provider) SummaryProvider {
kubeletCreationTime := metav1.Now()
bootTime, err := util.GetBootTime()
if err != nil {
// bootTime will be zero if we encounter an error getting the boot time.
klog.InfoS("Error getting system boot time. Node metrics will have an incorrect start time", "err", err)
}
return &summaryProviderImpl{
kubeletCreationTime: kubeletCreationTime,
systemBootTime: metav1.NewTime(bootTime),
provider: statsProvider,
}
}
func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) {
// TODO(timstclair): Consider returning a best-effort response if any of
// the following errors occur.
node, err := sp.provider.GetNode()
if err != nil {
return nil, fmt.Errorf("failed to get node info: %v", err)
}
nodeConfig := sp.provider.GetNodeConfig()
rootStats, networkStats, err := sp.provider.GetCgroupStats("/", updateStats)
if err != nil {
return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
}
rootFsStats, err := sp.provider.RootFsStats()
if err != nil {
return nil, fmt.Errorf("failed to get rootFs stats: %v", err)
}
imageFsStats, containerFsStats, err := sp.provider.ImageFsStats(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get imageFs stats: %v", err)
}
var podStats []statsapi.PodStats
if updateStats {
podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
} else {
podStats, err = sp.provider.ListPodStats(ctx)
}
if err != nil {
return nil, fmt.Errorf("failed to list pod stats: %v", err)
}
rlimit, err := sp.provider.RlimitStats()
if err != nil {
return nil, fmt.Errorf("failed to get rlimit stats: %v", err)
}
nodeStats := statsapi.NodeStats{
NodeName: node.Name,
CPU: rootStats.CPU,
Memory: rootStats.Memory,
Swap: rootStats.Swap,
Network: networkStats,
StartTime: sp.systemBootTime,
Fs: rootFsStats,
Runtime: &statsapi.RuntimeStats{ContainerFs: containerFsStats, ImageFs: imageFsStats},
Rlimit: rlimit,
SystemContainers: sp.GetSystemContainersStats(nodeConfig, podStats, updateStats),
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
nodeStats.IO = rootStats.IO
}
summary := statsapi.Summary{
Node: nodeStats,
Pods: podStats,
}
return &summary, nil
}
func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) {
// TODO(timstclair): Consider returning a best-effort response if any of
// the following errors occur.
node, err := sp.provider.GetNode()
if err != nil {
return nil, fmt.Errorf("failed to get node info: %v", err)
}
nodeConfig := sp.provider.GetNodeConfig()
rootStats, err := sp.provider.GetCgroupCPUAndMemoryStats("/", false)
if err != nil {
return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
}
podStats, err := sp.provider.ListPodCPUAndMemoryStats(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list pod stats: %v", err)
}
nodeStats := statsapi.NodeStats{
NodeName: node.Name,
CPU: rootStats.CPU,
Memory: rootStats.Memory,
Swap: rootStats.Swap,
StartTime: rootStats.StartTime,
SystemContainers: sp.GetSystemContainersCPUAndMemoryStats(nodeConfig, podStats, false),
}
summary := statsapi.Summary{
Node: nodeStats,
Pods: podStats,
}
return &summary, nil
}
//go:build !windows
// +build !windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"errors"
"k8s.io/klog/v2"
cadvisormemory "github.com/google/cadvisor/cache/memory"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
)
func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) (stats []statsapi.ContainerStats) {
systemContainers := map[string]struct {
name string
forceStatsUpdate bool
startTime metav1.Time
}{
statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
}
for sys, cont := range systemContainers {
// skip if cgroup name is undefined (not all system containers are required)
if cont.name == "" {
continue
}
s, _, err := sp.provider.GetCgroupStats(cont.name, cont.forceStatsUpdate)
if err != nil {
klog.ErrorS(err, "Failed to get system container stats", "containerName", cont.name)
continue
}
// System containers don't have a filesystem associated with them.
s.Logs, s.Rootfs = nil, nil
s.Name = sys
// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
if !cont.startTime.IsZero() {
s.StartTime = cont.startTime
}
stats = append(stats, *s)
}
return stats
}
func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) (stats []statsapi.ContainerStats) {
systemContainers := map[string]struct {
name string
forceStatsUpdate bool
startTime metav1.Time
}{
statsapi.SystemContainerKubelet: {name: nodeConfig.KubeletCgroupsName, forceStatsUpdate: false, startTime: sp.kubeletCreationTime},
statsapi.SystemContainerRuntime: {name: nodeConfig.RuntimeCgroupsName, forceStatsUpdate: false},
statsapi.SystemContainerMisc: {name: nodeConfig.SystemCgroupsName, forceStatsUpdate: false},
statsapi.SystemContainerPods: {name: sp.provider.GetPodCgroupRoot(), forceStatsUpdate: updateStats},
}
for sys, cont := range systemContainers {
// skip if cgroup name is undefined (not all system containers are required)
if cont.name == "" {
continue
}
s, err := sp.provider.GetCgroupCPUAndMemoryStats(cont.name, cont.forceStatsUpdate)
if err != nil {
if errors.Is(err, cadvisormemory.ErrDataNotFound) {
klog.V(4).InfoS("cgroup stats not found in memory cache", "containerName", cont.name)
} else {
klog.ErrorS(err, "Failed to get system container stats", "containerName", cont.name)
}
continue
}
s.Name = sys
// if we know the start time of a system container, use that instead of the start time provided by cAdvisor
if !cont.startTime.IsZero() {
s.StartTime = cont.startTime
}
stats = append(stats, *s)
}
return stats
}
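// Illustrative input (assumed values, not from the original source): with a
// NodeConfig such as
//
//	nodeConfig := cm.NodeConfig{
//		KubeletCgroupsName: "/kubelet.slice",
//		RuntimeCgroupsName: "/runtime.slice",
//	}
//
// the two functions above emit SystemContainerKubelet and SystemContainerRuntime
// entries and skip SystemContainerMisc, whose cgroup name is empty.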
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"fmt"
"sync"
"sync/atomic"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utiltrace "k8s.io/utils/trace"
)
// volumeStatCalculator calculates volume metrics for a given pod periodically in the background and caches the result
type volumeStatCalculator struct {
statsProvider Provider
jitterPeriod time.Duration
pod *v1.Pod
stopChannel chan struct{}
startO sync.Once
stopO sync.Once
latest atomic.Value
eventRecorder record.EventRecorder
}
// PodVolumeStats encapsulates the VolumeStats for a pod.
// It consists of two lists: one for local ephemeral volumes and one for persistent volumes.
type PodVolumeStats struct {
EphemeralVolumes []stats.VolumeStats
PersistentVolumes []stats.VolumeStats
}
// newVolumeStatCalculator creates a new VolumeStatCalculator
func newVolumeStatCalculator(statsProvider Provider, jitterPeriod time.Duration, pod *v1.Pod, eventRecorder record.EventRecorder) *volumeStatCalculator {
return &volumeStatCalculator{
statsProvider: statsProvider,
jitterPeriod: jitterPeriod,
pod: pod,
stopChannel: make(chan struct{}),
eventRecorder: eventRecorder,
}
}
// StartOnce starts the pod volume calculation that runs periodically in the background until s.StopOnce is called
func (s *volumeStatCalculator) StartOnce() *volumeStatCalculator {
s.startO.Do(func() {
go wait.JitterUntil(func() {
s.calcAndStoreStats()
}, s.jitterPeriod, 1.0, true, s.stopChannel)
})
return s
}
// StopOnce stops the background pod volume calculation. It will not interrupt a currently
// executing calculation, which completes its current iteration before stopping.
func (s *volumeStatCalculator) StopOnce() *volumeStatCalculator {
s.stopO.Do(func() {
close(s.stopChannel)
})
return s
}
// GetLatest returns the most recent PodVolumeStats from the cache
func (s *volumeStatCalculator) GetLatest() (PodVolumeStats, bool) {
result := s.latest.Load()
if result == nil {
return PodVolumeStats{}, false
}
return result.(PodVolumeStats), true
}
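// Lifecycle sketch (illustrative): StartOnce and StopOnce are idempotent, and
// GetLatest reports ok=false until the first background iteration has stored a
// result. statsProvider, pod, and recorder are assumed to exist in the caller.
//
//	calc := newVolumeStatCalculator(statsProvider, time.Minute, pod, recorder).StartOnce()
//	defer calc.StopOnce()
//	if podStats, ok := calc.GetLatest(); ok {
//		_ = podStats.EphemeralVolumes // cached stats, roughly one jitterPeriod old at most
//	}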
// calcAndStoreStats calculates PodVolumeStats for a given pod and writes the result to the s.latest cache.
// If the pod references PVCs, the prometheus metrics for those are updated with the result.
func (s *volumeStatCalculator) calcAndStoreStats() {
// Find all Volumes for the Pod
volumes, found := s.statsProvider.ListVolumesForPod(s.pod.UID)
blockVolumes, bvFound := s.statsProvider.ListBlockVolumesForPod(s.pod.UID)
if !found && !bvFound {
return
}
metricVolumes := make(map[string]volume.MetricsProvider)
if found {
for name, v := range volumes {
metricVolumes[name] = v
}
}
if bvFound {
for name, v := range blockVolumes {
// Only add the blockVolume if it implements the MetricsProvider interface
if _, ok := v.(volume.MetricsProvider); ok {
// Some drivers inherit the MetricsProvider interface from Filesystem
// mode volumes, but do not implement it for Block mode. Checking
// SupportsMetrics() will prevent panics in that case.
if v.SupportsMetrics() {
metricVolumes[name] = v
}
}
}
}
// Get volume specs for the pod, keyed by volume name.
volumesSpec := make(map[string]v1.Volume, len(s.pod.Spec.Volumes))
for _, v := range s.pod.Spec.Volumes {
volumesSpec[v.Name] = v
}
// Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats
var ephemeralStats []stats.VolumeStats
var persistentStats []stats.VolumeStats
for name, v := range metricVolumes {
metric, err := func() (*volume.Metrics, error) {
trace := utiltrace.New(fmt.Sprintf("Calculate volume metrics of %v for pod %v/%v", name, s.pod.Namespace, s.pod.Name))
defer trace.LogIfLong(1 * time.Second)
return v.GetMetrics()
}()
if err != nil {
// Expected for Volumes that don't support Metrics
if !volume.IsNotSupported(err) {
klog.V(4).InfoS("Failed to calculate volume metrics", "pod", klog.KObj(s.pod), "podUID", s.pod.UID, "volumeName", name, "err", err)
}
continue
}
// Lookup the volume spec and add a 'PVCReference' for volumes that reference a PVC
volSpec := volumesSpec[name]
var pvcRef *stats.PVCReference
if pvcSource := volSpec.PersistentVolumeClaim; pvcSource != nil {
pvcRef = &stats.PVCReference{
Name: pvcSource.ClaimName,
Namespace: s.pod.GetNamespace(),
}
} else if volSpec.Ephemeral != nil {
pvcRef = &stats.PVCReference{
Name: ephemeral.VolumeClaimName(s.pod, &volSpec),
Namespace: s.pod.GetNamespace(),
}
}
volumeStats := s.parsePodVolumeStats(name, pvcRef, metric, volSpec)
if util.IsLocalEphemeralVolume(volSpec) {
ephemeralStats = append(ephemeralStats, volumeStats)
} else {
persistentStats = append(persistentStats, volumeStats)
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIVolumeHealth) {
if metric.Abnormal != nil && metric.Message != nil && (*metric.Abnormal) {
s.eventRecorder.Event(s.pod, v1.EventTypeWarning, "VolumeConditionAbnormal", fmt.Sprintf("Volume %s: %s", name, *metric.Message))
}
}
}
// Store the new stats
s.latest.Store(PodVolumeStats{EphemeralVolumes: ephemeralStats,
PersistentVolumes: persistentStats})
}
// parsePodVolumeStats converts (internal) volume.Metrics to (external) stats.VolumeStats structures
func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats.PVCReference, metric *volume.Metrics, volSpec v1.Volume) stats.VolumeStats {
var (
available, capacity, used, inodes, inodesFree, inodesUsed uint64
)
if metric.Available != nil {
available = uint64(metric.Available.Value())
}
if metric.Capacity != nil {
capacity = uint64(metric.Capacity.Value())
}
if metric.Used != nil {
used = uint64(metric.Used.Value())
}
if metric.Inodes != nil {
inodes = uint64(metric.Inodes.Value())
}
if metric.InodesFree != nil {
inodesFree = uint64(metric.InodesFree.Value())
}
if metric.InodesUsed != nil {
inodesUsed = uint64(metric.InodesUsed.Value())
}
volumeStats := stats.VolumeStats{
Name: podName,
PVCRef: pvcRef,
FsStats: stats.FsStats{Time: metric.Time, AvailableBytes: &available, CapacityBytes: &capacity,
UsedBytes: &used, Inodes: &inodes, InodesFree: &inodesFree, InodesUsed: &inodesUsed},
}
if metric.Abnormal != nil {
volumeStats.VolumeHealthStats = &stats.VolumeHealthStats{
Abnormal: *metric.Abnormal,
}
}
return volumeStats
}
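// Conversion sketch (illustrative; resource is "k8s.io/apimachinery/pkg/api/resource"):
// a metric with only capacity reported still yields a fully populated FsStats,
// with the missing counters defaulting to zero rather than nil.
//
//	m := &volume.Metrics{Capacity: resource.NewQuantity(10<<30, resource.BinarySI)}
//	vs := s.parsePodVolumeStats("data", nil, m, v1.Volume{})
//	// *vs.FsStats.CapacityBytes == 10<<30; *vs.FsStats.UsedBytes == 0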
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"context"
"fmt"
"path/filepath"
"sort"
"strings"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
kubetypes "k8s.io/kubelet/pkg/types"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
)
// cadvisorStatsProvider implements the containerStatsProvider interface by
// getting the container stats from cAdvisor. This is needed by
// integrations which do not provide stats from CRI. See
// `pkg/kubelet/cadvisor/util.go#UsingLegacyCadvisorStats` for the logic for
// determining which integrations do not provide stats from CRI.
type cadvisorStatsProvider struct {
// cadvisor is used to get the stats of the cgroup for the containers that
// are managed by pods.
cadvisor cadvisor.Interface
// resourceAnalyzer is used to get the volume stats of the pods.
resourceAnalyzer stats.ResourceAnalyzer
// imageService is used to get the stats of the image filesystem.
imageService kubecontainer.ImageService
// statusProvider is used to get pod metadata
statusProvider status.PodStatusProvider
// hostStatsProvider is used to get pod host stat usage.
hostStatsProvider HostStatsProvider
// containerManager is used to generate the cgroup path for a pod.
containerManager cm.ContainerManager
}
// newCadvisorStatsProvider returns a containerStatsProvider that provides
// container stats from cAdvisor.
func newCadvisorStatsProvider(
cadvisor cadvisor.Interface,
resourceAnalyzer stats.ResourceAnalyzer,
imageService kubecontainer.ImageService,
statusProvider status.PodStatusProvider,
hostStatsProvider HostStatsProvider,
containerManager cm.ContainerManager,
) containerStatsProvider {
return &cadvisorStatsProvider{
cadvisor: cadvisor,
resourceAnalyzer: resourceAnalyzer,
imageService: imageService,
statusProvider: statusProvider,
hostStatsProvider: hostStatsProvider,
containerManager: containerManager,
}
}
// ListPodStats returns the stats of all the pod-managed containers.
func (p *cadvisorStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
// Gets node root filesystem information and image filesystem stats, which
// will be used to populate the available and capacity bytes/inodes in
// container stats.
rootFsInfo, err := p.cadvisor.RootFsInfo()
if err != nil {
return nil, fmt.Errorf("failed to get rootFs info: %v", err)
}
imageFsInfo, err := p.cadvisor.ImagesFsInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get imageFs info: %v", err)
}
logger := klog.FromContext(ctx)
infos, err := getCadvisorContainerInfo(logger, p.cadvisor)
if err != nil {
return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
}
filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
// Map each container to a pod and update the PodStats with container data.
podToStats := map[statsapi.PodReference]*statsapi.PodStats{}
for key, cinfo := range filteredInfos {
// On systemd using devicemapper each mount into the container has an
// associated cgroup. We ignore them to ensure we do not get duplicate
// entries in our summary. For details on .mount units:
// http://man7.org/linux/man-pages/man5/systemd.mount.5.html
if strings.HasSuffix(key, ".mount") {
continue
}
// Build the Pod key if this container is managed by a Pod
if !isPodManagedContainer(logger, &cinfo) {
continue
}
ref := buildPodRef(cinfo.Spec.Labels)
// Lookup the PodStats for the pod using the PodRef. If none exists,
// initialize a new entry.
podStats, found := podToStats[ref]
if !found {
podStats = &statsapi.PodStats{PodRef: ref}
podToStats[ref] = podStats
}
// Update the PodStats entry with the stats from the container by
// adding it to podStats.Containers.
containerName := kubetypes.GetContainerName(cinfo.Spec.Labels)
if containerName == kubetypes.PodInfraContainerName {
// Special case for infrastructure container which is hidden from
// the user and has network stats.
podStats.Network = cadvisorInfoToNetworkStats(&cinfo)
} else {
containerStat := cadvisorInfoToContainerStats(logger, containerName, &cinfo, &rootFsInfo, &imageFsInfo)
// NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. Containers
// using the old log path are populated by cadvisorInfoToContainerStats instead.
podUID := types.UID(podStats.PodRef.UID)
logs, err := p.hostStatsProvider.getPodContainerLogStats(podStats.PodRef.Namespace, podStats.PodRef.Name, podUID, containerName, &rootFsInfo)
if err != nil {
logger.Error(err, "Unable to fetch container log stats", "containerName", containerName)
} else {
containerStat.Logs = logs
}
podStats.Containers = append(podStats.Containers, *containerStat)
}
// Either way, collect process stats
podStats.ProcessStats = mergeProcessStats(podStats.ProcessStats, cadvisorInfoToProcessStats(&cinfo))
}
// Add each PodStats to the result.
result := make([]statsapi.PodStats, 0, len(podToStats))
for _, podStats := range podToStats {
makePodStorageStats(logger, podStats, &rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, false)
podUID := types.UID(podStats.PodRef.UID)
// Lookup the pod-level cgroup's CPU and memory stats
podInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podInfo != nil {
cpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo)
podStats.CPU = cpu
podStats.Memory = memory
podStats.Swap = cadvisorInfoToSwapStats(podInfo)
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
podStats.IO = cadvisorInfoToIOStats(podInfo)
}
// ProcessStats were accumulated as the containers were iterated.
}
status, found := p.statusProvider.GetPodStatus(podUID)
if found && status.StartTime != nil && !status.StartTime.IsZero() {
podStats.StartTime = *status.StartTime
// only append stats if we were able to get the start time of the pod
result = append(result, *podStats)
}
}
return result, nil
}
// ListPodStatsAndUpdateCPUNanoCoreUsage updates the cpu nano core usage for
// the containers and returns the stats for all the pod-managed containers.
// For cadvisor, cpu nano core usages are pre-computed and cached, so this
// function simply calls ListPodStats.
func (p *cadvisorStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
return p.ListPodStats(ctx)
}
func (p *cadvisorStatsProvider) PodCPUAndMemoryStats(ctx context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus) (*statsapi.PodStats, error) {
_, podName := p.containerManager.NewPodContainerManager().GetPodContainerName(pod)
infos, err := p.cadvisor.ContainerInfoV2(podName, cadvisorapiv2.RequestOptions{
IdType: cadvisorapiv2.TypeName,
Count: 2,
Recursive: true,
})
if err != nil {
return nil, err
}
podStats := &statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name,
Namespace: pod.Namespace,
UID: string(pod.UID),
},
}
for name, info := range infos {
if name == podName {
// Pod container
podStats.StartTime = metav1.NewTime(info.Spec.CreationTime)
cpu, memory := cadvisorInfoToCPUandMemoryStats(&info)
podStats.CPU = cpu
podStats.Memory = memory
} else {
containerName := kubetypes.GetContainerName(info.Spec.Labels)
if containerName != kubetypes.PodInfraContainerName && containerName != "" {
podStats.Containers = append(podStats.Containers, *cadvisorInfoToContainerCPUAndMemoryStats(containerName, &info))
}
}
}
return podStats, nil
}
// ListPodCPUAndMemoryStats returns the cpu and memory stats of all the pod-managed containers.
func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
logger := klog.FromContext(ctx)
infos, err := getCadvisorContainerInfo(logger, p.cadvisor)
if err != nil {
return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
}
filteredInfos, allInfos := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
// Map each container to a pod and update the PodStats with container data.
podToStats := map[statsapi.PodReference]*statsapi.PodStats{}
for key, cinfo := range filteredInfos {
// On systemd using devicemapper each mount into the container has an
// associated cgroup. We ignore them to ensure we do not get duplicate
// entries in our summary. For details on .mount units:
// http://man7.org/linux/man-pages/man5/systemd.mount.5.html
if strings.HasSuffix(key, ".mount") {
continue
}
// Build the Pod key if this container is managed by a Pod
if !isPodManagedContainer(logger, &cinfo) {
continue
}
ref := buildPodRef(cinfo.Spec.Labels)
// Lookup the PodStats for the pod using the PodRef. If none exists,
// initialize a new entry.
podStats, found := podToStats[ref]
if !found {
podStats = &statsapi.PodStats{PodRef: ref}
podToStats[ref] = podStats
}
// Update the PodStats entry with the stats from the container by
// adding it to podStats.Containers.
containerName := kubetypes.GetContainerName(cinfo.Spec.Labels)
if containerName == kubetypes.PodInfraContainerName {
// Special case for infrastructure container which is hidden from
// the user and has network stats.
podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime)
} else {
podStats.Containers = append(podStats.Containers, *cadvisorInfoToContainerCPUAndMemoryStats(containerName, &cinfo))
}
}
// Add each PodStats to the result.
result := make([]statsapi.PodStats, 0, len(podToStats))
for _, podStats := range podToStats {
podUID := types.UID(podStats.PodRef.UID)
// Lookup the pod-level cgroup's CPU and memory stats
podInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podInfo != nil {
cpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo)
podStats.CPU = cpu
podStats.Memory = memory
podStats.Swap = cadvisorInfoToSwapStats(podInfo)
}
result = append(result, *podStats)
}
return result, nil
}
// ImageFsStats returns the stats of the filesystem for storing images.
func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *statsapi.FsStats, containerFsRet *statsapi.FsStats, errCall error) {
imageFsInfo, err := p.cadvisor.ImagesFsInfo(ctx)
if err != nil {
return nil, nil, fmt.Errorf("failed to get imageFs info: %v", err)
}
if !utilfeature.DefaultFeatureGate.Enabled(features.KubeletSeparateDiskGC) {
imageStats, err := p.imageService.ImageStats(ctx)
if err != nil || imageStats == nil {
return nil, nil, fmt.Errorf("failed to get image stats: %v", err)
}
var imageFsInodesUsed *uint64
if imageFsInfo.Inodes != nil && imageFsInfo.InodesFree != nil {
imageFsIU := *imageFsInfo.Inodes - *imageFsInfo.InodesFree
imageFsInodesUsed = &imageFsIU
}
imageFs := &statsapi.FsStats{
Time: metav1.NewTime(imageFsInfo.Timestamp),
AvailableBytes: &imageFsInfo.Available,
CapacityBytes: &imageFsInfo.Capacity,
UsedBytes: &imageStats.TotalStorageBytes,
InodesFree: imageFsInfo.InodesFree,
Inodes: imageFsInfo.Inodes,
InodesUsed: imageFsInodesUsed,
}
return imageFs, imageFs, nil
}
imageStats, err := p.imageService.ImageFsInfo(ctx)
if err != nil {
return nil, nil, fmt.Errorf("failed to get image stats: %w", err)
}
if imageStats == nil || len(imageStats.ImageFilesystems) == 0 || len(imageStats.ContainerFilesystems) == 0 {
return nil, nil, fmt.Errorf("missing image stats: %+v", imageStats)
}
splitFileSystem := false
imageFs := imageStats.ImageFilesystems[0]
containerFs := imageStats.ContainerFilesystems[0]
if imageFs.FsId != nil && containerFs.FsId != nil && imageFs.FsId.Mountpoint != containerFs.FsId.Mountpoint {
splitFileSystem = true
}
var imageFsInodesUsed *uint64
if imageFsInfo.Inodes != nil && imageFsInfo.InodesFree != nil {
imageFsIU := *imageFsInfo.Inodes - *imageFsInfo.InodesFree
imageFsInodesUsed = &imageFsIU
}
var usedBytes uint64
if imageFs.GetUsedBytes() != nil {
usedBytes = imageFs.GetUsedBytes().GetValue()
}
fsStats := &statsapi.FsStats{
Time: metav1.NewTime(imageFsInfo.Timestamp),
AvailableBytes: &imageFsInfo.Available,
CapacityBytes: &imageFsInfo.Capacity,
UsedBytes: &usedBytes,
InodesFree: imageFsInfo.InodesFree,
Inodes: imageFsInfo.Inodes,
InodesUsed: imageFsInodesUsed,
}
// For the split filesystem case, we rely on cadvisor to have the crio-containers label.
// If the filesystem is not split, return early to avoid querying ContainerFsInfo.
if !splitFileSystem {
return fsStats, fsStats, nil
}
containerFsInfo, err := p.cadvisor.ContainerFsInfo(ctx)
if err != nil {
return nil, nil, fmt.Errorf("failed to get container fs info: %w", err)
}
// ImageFs and ContainerFs could be on different paths on the same device.
if containerFsInfo.Device == imageFsInfo.Device {
return fsStats, fsStats, nil
}
logger := klog.FromContext(ctx)
logger.Info("Detect Split Filesystem", "ImageFilesystems", imageStats.ImageFilesystems[0], "ContainerFilesystems", imageStats.ContainerFilesystems[0])
var containerFsInodesUsed *uint64
if containerFsInfo.Inodes != nil && containerFsInfo.InodesFree != nil {
containerFsIU := *containerFsInfo.Inodes - *containerFsInfo.InodesFree
containerFsInodesUsed = &containerFsIU
}
var usedContainerBytes uint64
if containerFs.GetUsedBytes() != nil {
usedContainerBytes = containerFs.GetUsedBytes().GetValue()
}
fsContainerStats := &statsapi.FsStats{
Time: metav1.NewTime(containerFsInfo.Timestamp),
AvailableBytes: &containerFsInfo.Available,
CapacityBytes: &containerFsInfo.Capacity,
UsedBytes: &usedContainerBytes,
InodesFree: containerFsInfo.InodesFree,
Inodes: containerFsInfo.Inodes,
InodesUsed: containerFsInodesUsed,
}
return fsStats, fsContainerStats, nil
}
// ImageFsDevice returns the name of the device where the image filesystem is located,
// e.g. /dev/sda1.
func (p *cadvisorStatsProvider) ImageFsDevice(ctx context.Context) (string, error) {
imageFsInfo, err := p.cadvisor.ImagesFsInfo(ctx)
if err != nil {
return "", err
}
return imageFsInfo.Device, nil
}
// buildPodRef returns a PodReference that identifies the Pod managing the container with the given labels
func buildPodRef(containerLabels map[string]string) statsapi.PodReference {
podName := kubetypes.GetPodName(containerLabels)
podNamespace := kubetypes.GetPodNamespace(containerLabels)
podUID := kubetypes.GetPodUID(containerLabels)
return statsapi.PodReference{Name: podName, Namespace: podNamespace, UID: podUID}
}
// isPodManagedContainer returns true if the cinfo container is managed by a Pod
func isPodManagedContainer(logger klog.Logger, cinfo *cadvisorapiv2.ContainerInfo) bool {
podName := kubetypes.GetPodName(cinfo.Spec.Labels)
podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels)
managed := podName != "" && podNamespace != ""
if !managed && podName != podNamespace {
logger.Info(
"Expect container to have either both podName and podNamespace labels, or neither",
"podNameLabel", podName, "podNamespaceLabel", podNamespace)
}
return managed
}
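// For reference, a hedged example of the kubelet's container label convention
// that the helpers above read back out (label keys per k8s.io/kubelet/pkg/types):
//
//	labels := map[string]string{
//		"io.kubernetes.pod.name":      "nginx",
//		"io.kubernetes.pod.namespace": "default",
//		"io.kubernetes.pod.uid":       "1234-abcd",
//	}
//	ref := buildPodRef(labels) // PodReference{Name: "nginx", Namespace: "default", UID: "1234-abcd"}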
// getCadvisorPodInfoFromPodUID returns the pod-level cgroup information, found by
// matching the podUID against the base name of the pod cgroup's CgroupName identifier
func getCadvisorPodInfoFromPodUID(podUID types.UID, infos map[string]cadvisorapiv2.ContainerInfo) *cadvisorapiv2.ContainerInfo {
if info, found := infos[cm.GetPodCgroupNameSuffix(podUID)]; found {
return &info
}
return nil
}
// filterTerminatedContainerInfoAndAssembleByPodCgroupKey returns the specified containerInfo but with
// the stats of the terminated containers removed, and all containerInfos assembled by pod cgroup key.
// The first return map is container cgroup name <-> ContainerInfo and
// the second return map is pod cgroup key <-> ContainerInfo.
// A ContainerInfo is considered to belong to a terminated container if it has an
// older CreationTime and zero instantaneous CPU and memory RSS usage.
func filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger klog.Logger, containerInfo map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
cinfoMap := make(map[containerID][]containerInfoWithCgroup)
cinfosByPodCgroupKey := make(map[string]cadvisorapiv2.ContainerInfo)
for key, cinfo := range containerInfo {
var podCgroupKey string
if cm.IsSystemdStyleName(key) {
// Convert to internal cgroup name and take the last component only.
internalCgroupName := cm.ParseSystemdToCgroupName(key)
podCgroupKey = internalCgroupName[len(internalCgroupName)-1]
} else {
// Take last component only.
podCgroupKey = filepath.Base(key)
}
cinfosByPodCgroupKey[podCgroupKey] = cinfo
if !isPodManagedContainer(logger, &cinfo) {
continue
}
cinfoID := containerID{
podRef: buildPodRef(cinfo.Spec.Labels),
containerName: kubetypes.GetContainerName(cinfo.Spec.Labels),
}
cinfoMap[cinfoID] = append(cinfoMap[cinfoID], containerInfoWithCgroup{
cinfo: cinfo,
cgroup: key,
})
}
result := make(map[string]cadvisorapiv2.ContainerInfo)
for _, refs := range cinfoMap {
if len(refs) == 1 {
// A ContainerInfo with no CPU/memory/network usage corresponds to the uncleaned
// cgroup of an already terminated container and should not be shown in the results.
if !isContainerTerminated(&refs[0].cinfo) {
result[refs[0].cgroup] = refs[0].cinfo
}
continue
}
sort.Sort(ByCreationTime(refs))
for i := len(refs) - 1; i >= 0; i-- {
if hasMemoryAndCPUInstUsage(&refs[i].cinfo) {
result[refs[i].cgroup] = refs[i].cinfo
break
}
}
}
return result, cinfosByPodCgroupKey
}
// ByCreationTime implements sort.Interface for []containerInfoWithCgroup based
// on the cinfo.Spec.CreationTime field.
type ByCreationTime []containerInfoWithCgroup
func (a ByCreationTime) Len() int { return len(a) }
func (a ByCreationTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByCreationTime) Less(i, j int) bool {
if a[i].cinfo.Spec.CreationTime.Equal(a[j].cinfo.Spec.CreationTime) {
// There shouldn't be two containers with the same name and/or the same
// creation time. However, to make the logic here robust, we break the
// tie by moving the one without CPU instantaneous or memory RSS usage
// to the beginning.
return hasMemoryAndCPUInstUsage(&a[j].cinfo)
}
return a[i].cinfo.Spec.CreationTime.Before(a[j].cinfo.Spec.CreationTime)
}
// containerID is the identity of a container in a pod.
type containerID struct {
podRef statsapi.PodReference
containerName string
}
// containerInfoWithCgroup contains the ContainerInfo and its cgroup name.
type containerInfoWithCgroup struct {
cinfo cadvisorapiv2.ContainerInfo
cgroup string
}
// hasMemoryAndCPUInstUsage returns true if the specified container info has
// both non-zero CPU instantaneous usage and non-zero memory RSS usage, and
// false otherwise.
func hasMemoryAndCPUInstUsage(info *cadvisorapiv2.ContainerInfo) bool {
if !info.Spec.HasCpu || !info.Spec.HasMemory {
return false
}
cstat, found := latestContainerStats(info)
if !found {
return false
}
if cstat.CpuInst == nil {
return false
}
return cstat.CpuInst.Usage.Total != 0 && cstat.Memory.RSS != 0
}
// isContainerTerminated returns true if the specified container meets one of
// the following conditions, and false otherwise:
//  1. info.Spec reports no CPU, memory, or network (HasCpu, HasMemory, and HasNetwork are all false)
//  2. no recent stats are available, or CpuInst or Memory is nil in the latest stats
//  3. network usage, instantaneous CPU usage, and memory RSS usage are all zero
func isContainerTerminated(info *cadvisorapiv2.ContainerInfo) bool {
if !info.Spec.HasCpu && !info.Spec.HasMemory && !info.Spec.HasNetwork {
return true
}
cstat, found := latestContainerStats(info)
if !found {
return true
}
if cstat.Network != nil {
iStats := cadvisorInfoToNetworkStats(info)
if iStats != nil {
for _, iStat := range iStats.Interfaces {
if *iStat.RxErrors != 0 || *iStat.TxErrors != 0 || *iStat.RxBytes != 0 || *iStat.TxBytes != 0 {
return false
}
}
}
}
if cstat.CpuInst == nil || cstat.Memory == nil {
return true
}
return cstat.CpuInst.Usage.Total == 0 && cstat.Memory.RSS == 0
}
func getCadvisorContainerInfo(logger klog.Logger, ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) {
infos, err := ca.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{
IdType: cadvisorapiv2.TypeName,
Count: 2, // 2 samples are needed to compute "instantaneous" CPU
Recursive: true,
})
if err != nil {
if _, ok := infos["/"]; ok {
// If the failure is partial, log it and return a best-effort
// response.
logger.Error(err, "Partial failure issuing cadvisor.ContainerInfoV2")
} else {
return nil, fmt.Errorf("failed to get root cgroup stats: %w", err)
}
}
return infos, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"context"
"errors"
"fmt"
"path/filepath"
"sort"
"strings"
"sync"
"time"
cadvisormemory "github.com/google/cadvisor/cache/memory"
cadvisorfs "github.com/google/cadvisor/fs"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
kubetypes "k8s.io/kubelet/pkg/types"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/utils/clock"
"k8s.io/utils/ptr"
)
var (
// defaultCachePeriod is the default cache period for each cpuUsage.
defaultCachePeriod = 10 * time.Minute
)
// cpuUsageRecord holds the cpu usage stats and the calculated usageNanoCores.
type cpuUsageRecord struct {
stats *runtimeapi.CpuUsage
usageNanoCores *uint64
}
// criStatsProvider implements the containerStatsProvider interface by getting
// the container stats from CRI.
type criStatsProvider struct {
// cadvisor is used to get the node root filesystem's stats (such as the
// capacity/available bytes/inodes) that will be populated in per container
// filesystem stats.
cadvisor cadvisor.Interface
// resourceAnalyzer is used to get the volume stats of the pods.
resourceAnalyzer stats.ResourceAnalyzer
// runtimeService is used to get the status and stats of the pods and their
// managed containers.
runtimeService internalapi.RuntimeService
// imageService is used to get the stats of the image filesystem.
imageService internalapi.ImageManagerService
// hostStatsProvider is used to get the status of the host filesystem consumed by pods.
hostStatsProvider HostStatsProvider
// windowsNetworkStatsProvider is used by kubelet to gather networking stats on Windows
windowsNetworkStatsProvider interface{} //nolint:unused // U1000 We can't import hcsshim due to Build constraints in hcsshim
// clock is used to report the current time
clock clock.Clock
// fallbackStatsProvider is used to fill in missing information in case the CRI
// provides insufficient data.
// TODO: A lot of the cadvisorStatsProvider logic is duplicated in this file, and should be read
// from the fallbackStatsProvider instead.
// Remove this once the CRI stats migration is complete.
fallbackStatsProvider containerStatsProvider
// cpuUsageCache caches the cpu usage for containers.
cpuUsageCache map[string]*cpuUsageRecord
mutex sync.RWMutex
podAndContainerStatsFromCRI bool
}
// newCRIStatsProvider returns a containerStatsProvider implementation that
// provides container stats using CRI.
func newCRIStatsProvider(
cadvisor cadvisor.Interface,
resourceAnalyzer stats.ResourceAnalyzer,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
hostStatsProvider HostStatsProvider,
podAndContainerStatsFromCRI bool,
fallbackStatsProvider containerStatsProvider,
) containerStatsProvider {
return &criStatsProvider{
cadvisor: cadvisor,
resourceAnalyzer: resourceAnalyzer,
runtimeService: runtimeService,
imageService: imageService,
hostStatsProvider: hostStatsProvider,
cpuUsageCache: make(map[string]*cpuUsageRecord),
podAndContainerStatsFromCRI: podAndContainerStatsFromCRI,
clock: clock.RealClock{},
fallbackStatsProvider: fallbackStatsProvider,
}
}
// ListPodStats returns the stats of all the pod-managed containers.
func (p *criStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
// Don't update CPU nano core usage.
return p.listPodStats(ctx, false)
}
// ListPodStatsAndUpdateCPUNanoCoreUsage updates the cpu nano core usage for
// the containers and returns the stats for all the pod-managed containers.
// This is a workaround because CRI runtimes do not supply nano core usages,
// so this function calculates the difference between the current and the last
// (cached) cpu stats to derive this metric. The implementation assumes a
// single caller periodically invokes this function to update the metric. If
// multiple callers exist, the period used to compute the cpu usage may
// vary and the usage could be incoherent (e.g., spiky). If no caller calls
// this function, the cpu usage will stay nil. Right now, the eviction manager is
// the only caller, and it calls this function every 10s.
func (p *criStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
// Update CPU nano core usage.
return p.listPodStats(ctx, true)
}
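// A sketch of the cached-delta computation (hedged: the actual helper lives
// elsewhere in this package; newStats is an illustrative name, fields follow
// runtimeapi.CpuUsage and the cpuUsageRecord cache above):
//
//	elapsedNs := newStats.Timestamp - cached.stats.Timestamp
//	deltaUsage := newStats.UsageCoreNanoSeconds.Value - cached.stats.UsageCoreNanoSeconds.Value
//	usageNanoCores := uint64(float64(deltaUsage) / float64(elapsedNs) * float64(time.Second/time.Nanosecond))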
func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUsage bool) ([]statsapi.PodStats, error) {
// Gets node root filesystem information, which will be used to populate
// the available and capacity bytes/inodes in container stats.
rootFsInfo, err := p.cadvisor.RootFsInfo()
if err != nil {
return nil, fmt.Errorf("failed to get rootFs info: %v", err)
}
containerMap, podSandboxMap, err := p.getPodAndContainerMaps(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get pod or container map: %v", err)
}
logger := klog.FromContext(ctx)
if p.podAndContainerStatsFromCRI {
result, err := p.listPodStatsStrictlyFromCRI(ctx, updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo)
if err == nil {
// Call succeeded
return result, nil
}
s, ok := status.FromError(err)
// A legitimate failure, rather than the CRI implementation not supporting ListPodSandboxStats.
if !ok || s.Code() != codes.Unimplemented {
return nil, err
}
// The CRI implementation doesn't support ListPodSandboxStats; warn and fall back.
logger.V(5).Error(err,
"CRI implementation must be updated to support ListPodSandboxStats if PodAndContainerStatsFromCRI feature gate is enabled. Falling back to populating with cAdvisor; this call will fail in the future.",
)
}
return p.listPodStatsPartiallyFromCRI(ctx, updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo)
}
func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) {
// fsIDtoInfo is a map from mountpoint to its stats. This will be used
// as a cache to avoid querying cAdvisor for the filesystem stats with the
// same filesystem id many times.
fsIDtoInfo := make(map[string]*cadvisorapiv2.FsInfo)
// sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats.
sandboxIDToPodStats := make(map[string]*statsapi.PodStats)
resp, err := p.runtimeService.ListContainerStats(ctx, &runtimeapi.ContainerStatsFilter{})
if err != nil {
return nil, fmt.Errorf("failed to list all container stats: %v", err)
}
logger := klog.FromContext(ctx)
allInfos, err := getCadvisorContainerInfo(logger, p.cadvisor)
if err != nil {
return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err)
}
caInfos, allInfos := getCRICadvisorStats(logger, allInfos)
// get network stats for containers.
// This is only used on Windows. For other platforms, (nil, nil) should be returned.
containerNetworkStats, err := p.listContainerNetworkStats(logger)
if err != nil {
return nil, fmt.Errorf("failed to list container network stats: %v", err)
}
for _, stats := range resp {
containerID := stats.Attributes.Id
container, found := containerMap[containerID]
if !found {
continue
}
podSandboxID := container.PodSandboxId
podSandbox, found := podSandboxMap[podSandboxID]
if !found {
continue
}
// Create the stats entry for the pod (if not created yet) to which the
// container belongs.
ps, found := sandboxIDToPodStats[podSandboxID]
if !found {
ps = buildPodStats(podSandbox)
sandboxIDToPodStats[podSandboxID] = ps
}
// Fill available stats for full set of required pod stats
cs, err := p.makeContainerStats(logger, stats, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage)
if err != nil {
return nil, fmt.Errorf("make container stats: %w", err)
}
p.addPodNetworkStats(logger, ps, podSandboxID, caInfos, cs, containerNetworkStats[podSandboxID])
p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
p.addSwapStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
p.addIOStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
// If cadvisor stats are available for the container, use them to populate
// container stats
caStats, caFound := caInfos[containerID]
if !caFound {
logger.V(5).Info("Unable to find cadvisor stats for container", "containerID", containerID)
} else {
p.addCadvisorContainerStats(logger, cs, &caStats)
p.addProcessStats(ps, &caStats)
}
ps.Containers = append(ps.Containers, *cs)
}
// Clean up outdated caches.
p.cleanupOutdatedCaches()
result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats))
for _, s := range sandboxIDToPodStats {
makePodStorageStats(logger, s, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
result = append(result, *s)
}
return result, nil
}
func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) {
criSandboxStats, err := p.runtimeService.ListPodSandboxStats(ctx, &runtimeapi.PodSandboxStatsFilter{})
if err != nil {
return nil, err
}
logger := klog.FromContext(ctx)
fsIDtoInfo := make(map[string]*cadvisorapiv2.FsInfo)
summarySandboxStats := make([]statsapi.PodStats, 0, len(podSandboxMap))
for _, criSandboxStat := range criSandboxStats {
if criSandboxStat == nil || criSandboxStat.Attributes == nil {
logger.V(5).Info("Unable to find CRI stats for sandbox")
continue
}
podSandbox, found := podSandboxMap[criSandboxStat.Attributes.Id]
if !found {
continue
}
ps := buildPodStats(podSandbox)
if err := p.addCRIPodContainerStats(logger, criSandboxStat, ps, fsIDtoInfo, containerMap, podSandbox, rootFsInfo, updateCPUNanoCoreUsage); err != nil {
return nil, fmt.Errorf("add CRI pod container stats: %w", err)
}
addCRIPodNetworkStats(ps, criSandboxStat)
addCRIPodCPUStats(ps, criSandboxStat)
addCRIPodMemoryStats(ps, criSandboxStat)
addCRIPodProcessStats(ps, criSandboxStat)
addCRIPodIOStats(ps, criSandboxStat)
makePodStorageStats(logger, ps, rootFsInfo, p.resourceAnalyzer, p.hostStatsProvider, true)
summarySandboxStats = append(summarySandboxStats, *ps)
}
return summarySandboxStats, nil
}
func (p *criStatsProvider) PodCPUAndMemoryStats(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) (*statsapi.PodStats, error) {
if len(podStatus.SandboxStatuses) == 0 {
return nil, fmt.Errorf("missing sandbox for pod %s", format.Pod(pod))
}
podSandbox := podStatus.SandboxStatuses[0]
ps := &statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: podSandbox.Metadata.Name,
UID: podSandbox.Metadata.Uid,
Namespace: podSandbox.Metadata.Namespace,
},
// The StartTime in the summary API is the pod creation time.
StartTime: metav1.NewTime(time.Unix(0, podSandbox.CreatedAt)),
}
if p.podAndContainerStatsFromCRI {
criSandboxStats, err := p.runtimeService.PodSandboxStats(ctx, podSandbox.Id)
if err != nil {
// The call failed; determine whether the method is simply unimplemented.
s, ok := status.FromError(err)
// A legitimate failure, rather than the CRI implementation not supporting PodSandboxStats.
if !ok || s.Code() != codes.Unimplemented {
return nil, err
}
// The CRI implementation doesn't support PodSandboxStats; warn and fall back.
klog.ErrorS(err,
"CRI implementation must be updated to support PodSandboxStats if PodAndContainerStatsFromCRI feature gate is enabled. Falling back to populating with cAdvisor; this call will fail in the future.",
)
} else {
addCRIPodCPUStats(ps, criSandboxStats)
addCRIPodMemoryStats(ps, criSandboxStats)
}
}
resp, err := p.runtimeService.ListContainerStats(ctx, &runtimeapi.ContainerStatsFilter{
PodSandboxId: podSandbox.Id,
})
if err != nil {
return nil, fmt.Errorf("failed to list container stats from pod %s (sandbox %s): %w", format.Pod(pod), podSandbox.Id, err)
}
// Fallback if ListContainerStats doesn't return any results.
useFallback := ps.CPU == nil || ps.Memory == nil || len(resp) == 0
for _, stats := range resp {
containerStatus := podStatus.FindContainerStatusByName(stats.Attributes.Metadata.Name)
if containerStatus == nil {
klog.V(4).InfoS("Received stats for unknown container", "pod", klog.KObj(pod), "container", stats.Attributes.Metadata)
continue
}
// Fill available CPU and memory stats for full set of required pod stats
cs := p.makeContainerCPUAndMemoryStats(stats, containerStatus.CreatedAt, false)
useFallback = useFallback || cs.CPU == nil || cs.Memory == nil
ps.Containers = append(ps.Containers, *cs)
}
if useFallback {
fallbackStats, err := p.fallbackStatsProvider.PodCPUAndMemoryStats(ctx, pod, podStatus)
if err != nil {
return nil, fmt.Errorf("failed to fetch stats for pod %s from fallback provider: %w", format.Pod(pod), err)
}
if ps.CPU == nil {
ps.CPU = fallbackStats.CPU
}
if ps.Memory == nil {
ps.Memory = fallbackStats.Memory
}
for _, fb := range fallbackStats.Containers {
var container *statsapi.ContainerStats
for i, cs := range ps.Containers {
if fb.Name == cs.Name {
container = &ps.Containers[i]
break
}
}
if container != nil {
if container.CPU == nil {
container.CPU = fb.CPU
}
if container.Memory == nil {
container.Memory = fb.Memory
}
} else {
ps.Containers = append(ps.Containers, fb)
}
}
}
return ps, nil
}
// ListPodCPUAndMemoryStats returns the CPU and Memory stats of all the pod-managed containers.
func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
// sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats.
sandboxIDToPodStats := make(map[string]*statsapi.PodStats)
containerMap, podSandboxMap, err := p.getPodAndContainerMaps(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get pod or container map: %v", err)
}
logger := klog.FromContext(ctx)
result := make([]statsapi.PodStats, 0, len(podSandboxMap))
if p.podAndContainerStatsFromCRI {
criSandboxStats, err := p.runtimeService.ListPodSandboxStats(ctx, &runtimeapi.PodSandboxStatsFilter{})
// Call succeeded
if err == nil {
for _, criSandboxStat := range criSandboxStats {
podSandbox, found := podSandboxMap[criSandboxStat.Attributes.Id]
if !found {
continue
}
ps := buildPodStats(podSandbox)
addCRIPodCPUStats(ps, criSandboxStat)
addCRIPodMemoryStats(ps, criSandboxStat)
result = append(result, *ps)
}
return result, err
}
// The call failed; determine whether the method is simply unimplemented.
s, ok := status.FromError(err)
// A legitimate failure, rather than the CRI implementation not supporting ListPodSandboxStats.
if !ok || s.Code() != codes.Unimplemented {
return nil, err
}
// The CRI implementation doesn't support ListPodSandboxStats; warn and fall back.
logger.Error(err,
"CRI implementation must be updated to support ListPodSandboxStats if PodAndContainerStatsFromCRI feature gate is enabled. Falling back to populating with cAdvisor; this call will fail in the future.",
)
}
resp, err := p.runtimeService.ListContainerStats(ctx, &runtimeapi.ContainerStatsFilter{})
if err != nil {
return nil, fmt.Errorf("failed to list all container stats: %v", err)
}
allInfos, err := getCadvisorContainerInfo(logger, p.cadvisor)
if err != nil {
return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err)
}
caInfos, allInfos := getCRICadvisorStats(logger, allInfos)
for _, stats := range resp {
containerID := stats.Attributes.Id
container, found := containerMap[containerID]
if !found {
continue
}
podSandboxID := container.PodSandboxId
podSandbox, found := podSandboxMap[podSandboxID]
if !found {
continue
}
// Create the stats entry for the pod (if not created yet) to which the
// container belongs.
ps, found := sandboxIDToPodStats[podSandboxID]
if !found {
ps = buildPodStats(podSandbox)
sandboxIDToPodStats[podSandboxID] = ps
}
// Fill available CPU and memory stats for full set of required pod stats
cs := p.makeContainerCPUAndMemoryStats(stats, time.Unix(0, container.CreatedAt), true)
p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
p.addSwapStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs)
// If cadvisor stats are available for the container, use them to populate
// container stats
caStats, caFound := caInfos[containerID]
if !caFound {
logger.V(4).Info("Unable to find cadvisor stats for container", "containerID", containerID)
} else {
p.addCadvisorContainerCPUAndMemoryStats(logger, cs, &caStats)
}
ps.Containers = append(ps.Containers, *cs)
}
// Clean up outdated caches.
p.cleanupOutdatedCaches()
for _, s := range sandboxIDToPodStats {
result = append(result, *s)
}
return result, nil
}
func (p *criStatsProvider) getPodAndContainerMaps(ctx context.Context) (map[string]*runtimeapi.Container, map[string]*runtimeapi.PodSandbox, error) {
containers, err := p.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{})
if err != nil {
return nil, nil, fmt.Errorf("failed to list all containers: %v", err)
}
// Create a map from pod sandbox ID to the PodSandbox object.
podSandboxMap := make(map[string]*runtimeapi.PodSandbox)
podSandboxes, err := p.runtimeService.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{})
if err != nil {
return nil, nil, fmt.Errorf("failed to list all pod sandboxes: %v", err)
}
podSandboxes = removeTerminatedPods(podSandboxes)
for _, s := range podSandboxes {
podSandboxMap[s.Id] = s
}
containers = removeTerminatedContainers(containers)
// Create a map from container ID to the Container object.
containerMap := make(map[string]*runtimeapi.Container)
for _, c := range containers {
containerMap[c.Id] = c
}
return containerMap, podSandboxMap, nil
}
// ImageFsStats returns the stats of the image filesystem.
func (p *criStatsProvider) ImageFsStats(ctx context.Context) (imageFsRet *statsapi.FsStats, containerFsRet *statsapi.FsStats, errRet error) {
resp, err := p.imageService.ImageFsInfo(ctx)
if err != nil {
return nil, nil, err
}
// CRI may return the stats of multiple image filesystems but we only
// return the first one.
//
// TODO(yguo0905): Support returning stats of multiple image filesystems.
if len(resp.GetImageFilesystems()) == 0 {
return nil, nil, fmt.Errorf("imageFs information is unavailable")
}
fs := resp.GetImageFilesystems()[0]
imageFsRet = &statsapi.FsStats{
Time: metav1.NewTime(time.Unix(0, fs.Timestamp)),
UsedBytes: &fs.UsedBytes.Value,
}
if fs.InodesUsed != nil {
imageFsRet.InodesUsed = &fs.InodesUsed.Value
}
imageFsInfo, err := p.getFsInfo(klog.FromContext(ctx), fs.GetFsId())
if err != nil {
return nil, nil, fmt.Errorf("get filesystem info: %w", err)
}
if imageFsInfo != nil {
// imageFsInfo is nil when the image filesystem id is unknown to the
// local node or there was an error retrieving the stats. In those cases,
// we omit these fields and return a best-effort partial result. See
// https://github.com/kubernetes/heapster/issues/1793.
imageFsRet.AvailableBytes = &imageFsInfo.Available
imageFsRet.CapacityBytes = &imageFsInfo.Capacity
imageFsRet.InodesFree = imageFsInfo.InodesFree
imageFsRet.Inodes = imageFsInfo.Inodes
}
// TODO: For CRI Stats Provider we don't support separate disks yet.
return imageFsRet, imageFsRet, nil
}
// ImageFsDevice returns the name of the device where the image filesystem is located,
// e.g. /dev/sda1.
func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) {
resp, err := p.imageService.ImageFsInfo(ctx)
if err != nil {
return "", err
}
for _, fs := range resp.GetImageFilesystems() {
fsInfo, err := p.getFsInfo(klog.FromContext(ctx), fs.GetFsId())
if err != nil {
return "", fmt.Errorf("get filesystem info: %w", err)
}
if fsInfo != nil {
return fsInfo.Device, nil
}
}
return "", errors.New("imagefs device is not found")
}
// getFsInfo returns the information of the filesystem with the specified
// fsID. If the fsID is nil or the filesystem is unknown to the node, it logs
// the condition and returns (nil, nil); unexpected errors are returned to the caller.
func (p *criStatsProvider) getFsInfo(logger klog.Logger, fsID *runtimeapi.FilesystemIdentifier) (*cadvisorapiv2.FsInfo, error) {
if fsID == nil {
logger.V(2).Info("Failed to get filesystem info: fsID is nil")
return nil, nil
}
mountpoint := fsID.GetMountpoint()
fsInfo, err := p.cadvisor.GetDirFsInfo(mountpoint)
if err != nil {
msg := "Failed to get the info of the filesystem with mountpoint"
if errors.Is(err, cadvisorfs.ErrNoSuchDevice) ||
errors.Is(err, cadvisorfs.ErrDeviceNotInPartitionsMap) ||
errors.Is(err, cadvisormemory.ErrDataNotFound) {
logger.V(2).Info(msg, "mountpoint", mountpoint, "err", err)
} else {
logger.Error(err, msg, "mountpoint", mountpoint)
return nil, fmt.Errorf("%s: %w", msg, err)
}
return nil, nil
}
return &fsInfo, nil
}
// buildPodStats returns a PodStats populated from the given pod sandbox
func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
return &statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: podSandbox.Metadata.Name,
UID: podSandbox.Metadata.Uid,
Namespace: podSandbox.Metadata.Namespace,
},
// The StartTime in the summary API is the pod creation time.
StartTime: metav1.NewTime(time.Unix(0, podSandbox.CreatedAt)),
}
}
func (p *criStatsProvider) addPodNetworkStats(
logger klog.Logger,
ps *statsapi.PodStats,
podSandboxID string,
caInfos map[string]cadvisorapiv2.ContainerInfo,
cs *statsapi.ContainerStats,
netStats *statsapi.NetworkStats,
) {
caPodSandbox, found := caInfos[podSandboxID]
// Try to get network stats from cadvisor first.
if found {
networkStats := cadvisorInfoToNetworkStats(&caPodSandbox)
if networkStats != nil {
ps.Network = networkStats
return
}
}
// Not found in cadvisor; fall back to the provided netStats.
if netStats != nil {
ps.Network = netStats
return
}
// TODO: sum Pod network stats from container stats.
logger.V(4).Info("Unable to find network stats for sandbox", "sandboxID", podSandboxID)
}
func (p *criStatsProvider) addPodCPUMemoryStats(
ps *statsapi.PodStats,
podUID types.UID,
allInfos map[string]cadvisorapiv2.ContainerInfo,
cs *statsapi.ContainerStats,
) {
// Try to get cpu and memory stats from cadvisor first.
podCgroupInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podCgroupInfo != nil {
cpu, memory := cadvisorInfoToCPUandMemoryStats(podCgroupInfo)
ps.CPU = cpu
ps.Memory = memory
return
}
// Sum Pod cpu and memory stats from container stats.
if cs.CPU != nil {
if ps.CPU == nil {
ps.CPU = &statsapi.CPUStats{}
}
ps.CPU.Time = cs.CPU.Time
usageCoreNanoSeconds := ptr.Deref(cs.CPU.UsageCoreNanoSeconds, 0) + ptr.Deref(ps.CPU.UsageCoreNanoSeconds, 0)
usageNanoCores := ptr.Deref(cs.CPU.UsageNanoCores, 0) + ptr.Deref(ps.CPU.UsageNanoCores, 0)
ps.CPU.UsageCoreNanoSeconds = &usageCoreNanoSeconds
ps.CPU.UsageNanoCores = &usageNanoCores
// Pod-level PSI stats cannot be calculated from container-level stats.
}
if cs.Memory != nil {
if ps.Memory == nil {
ps.Memory = &statsapi.MemoryStats{}
}
ps.Memory.Time = cs.Memory.Time
availableBytes := ptr.Deref(cs.Memory.AvailableBytes, 0) + ptr.Deref(ps.Memory.AvailableBytes, 0)
usageBytes := ptr.Deref(cs.Memory.UsageBytes, 0) + ptr.Deref(ps.Memory.UsageBytes, 0)
workingSetBytes := ptr.Deref(cs.Memory.WorkingSetBytes, 0) + ptr.Deref(ps.Memory.WorkingSetBytes, 0)
rSSBytes := ptr.Deref(cs.Memory.RSSBytes, 0) + ptr.Deref(ps.Memory.RSSBytes, 0)
pageFaults := ptr.Deref(cs.Memory.PageFaults, 0) + ptr.Deref(ps.Memory.PageFaults, 0)
majorPageFaults := ptr.Deref(cs.Memory.MajorPageFaults, 0) + ptr.Deref(ps.Memory.MajorPageFaults, 0)
ps.Memory.AvailableBytes = &availableBytes
ps.Memory.UsageBytes = &usageBytes
ps.Memory.WorkingSetBytes = &workingSetBytes
ps.Memory.RSSBytes = &rSSBytes
ps.Memory.PageFaults = &pageFaults
ps.Memory.MajorPageFaults = &majorPageFaults
// Pod-level PSI stats cannot be calculated from container-level stats.
}
}
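// Illustrative sketch, not part of the original file: ptr.Deref treats a nil
// pointer as the supplied default, which is what lets the summation above
// fold container values into possibly-unset pod totals. Values are hypothetical.
func exampleAccumulatePodUsage() {
	var podUsage *uint64                  // pod total not set yet
	containerUsage := ptr.To[uint64](250) // one container's reading
	sum := ptr.Deref(containerUsage, 0) + ptr.Deref(podUsage, 0)
	fmt.Println(sum) // 250
}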
func (p *criStatsProvider) addSwapStats(
ps *statsapi.PodStats,
podUID types.UID,
allInfos map[string]cadvisorapiv2.ContainerInfo,
cs *statsapi.ContainerStats,
) {
// Try to get swap stats from cadvisor first.
podCgroupInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podCgroupInfo != nil {
ps.Swap = cadvisorInfoToSwapStats(podCgroupInfo)
return
}
// Sum pod swap stats from container stats.
if cs.Swap != nil {
if ps.Swap == nil {
ps.Swap = &statsapi.SwapStats{Time: cs.Swap.Time}
}
swapAvailableBytes := ptr.Deref(cs.Swap.SwapAvailableBytes, 0) + ptr.Deref(ps.Swap.SwapAvailableBytes, 0)
swapUsageBytes := ptr.Deref(cs.Swap.SwapUsageBytes, 0) + ptr.Deref(ps.Swap.SwapUsageBytes, 0)
ps.Swap.SwapAvailableBytes = &swapAvailableBytes
ps.Swap.SwapUsageBytes = &swapUsageBytes
}
}
func (p *criStatsProvider) addIOStats(
ps *statsapi.PodStats,
podUID types.UID,
allInfos map[string]cadvisorapiv2.ContainerInfo,
cs *statsapi.ContainerStats,
) {
if !utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
return
}
// Try to get IO stats from cadvisor first.
podCgroupInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos)
if podCgroupInfo != nil {
ps.IO = cadvisorInfoToIOStats(podCgroupInfo)
return
}
if cs.IO != nil {
if ps.IO == nil {
ps.IO = &statsapi.IOStats{Time: cs.IO.Time}
}
// Pod-level PSI stats cannot be calculated from container-level stats.
}
}
func (p *criStatsProvider) addProcessStats(
ps *statsapi.PodStats,
container *cadvisorapiv2.ContainerInfo,
) {
processStats := cadvisorInfoToProcessStats(container)
// Sum up the process stats of all containers to obtain the cumulative pod-level process count
ps.ProcessStats = mergeProcessStats(ps.ProcessStats, processStats)
}
func (p *criStatsProvider) makeContainerStats(
logger klog.Logger,
stats *runtimeapi.ContainerStats,
container *runtimeapi.Container,
rootFsInfo *cadvisorapiv2.FsInfo,
fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
meta *runtimeapi.PodSandboxMetadata,
updateCPUNanoCoreUsage bool,
) (*statsapi.ContainerStats, error) {
result := &statsapi.ContainerStats{
Name: stats.Attributes.Metadata.Name,
// The StartTime in the summary API is the container creation time.
StartTime: metav1.NewTime(time.Unix(0, container.CreatedAt)),
CPU: &statsapi.CPUStats{},
Memory: &statsapi.MemoryStats{},
Rootfs: &statsapi.FsStats{},
Swap: &statsapi.SwapStats{},
// UserDefinedMetrics is not supported by CRI.
}
if stats.Cpu != nil {
result.CPU.Time = metav1.NewTime(time.Unix(0, stats.Cpu.Timestamp))
if stats.Cpu.UsageCoreNanoSeconds != nil {
result.CPU.UsageCoreNanoSeconds = &stats.Cpu.UsageCoreNanoSeconds.Value
}
var usageNanoCores *uint64
if updateCPUNanoCoreUsage {
usageNanoCores = p.getAndUpdateContainerUsageNanoCores(logger, stats)
} else {
usageNanoCores = p.getContainerUsageNanoCores(stats)
}
if usageNanoCores != nil {
result.CPU.UsageNanoCores = usageNanoCores
}
result.CPU.PSI = makePSIStats(stats.Cpu.Psi)
} else {
result.CPU.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.CPU.UsageCoreNanoSeconds = ptr.To[uint64](0)
result.CPU.UsageNanoCores = ptr.To[uint64](0)
}
if stats.Memory != nil {
result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp))
if stats.Memory.WorkingSetBytes != nil {
result.Memory.WorkingSetBytes = &stats.Memory.WorkingSetBytes.Value
}
if stats.Memory.UsageBytes != nil {
result.Memory.UsageBytes = &stats.Memory.UsageBytes.Value
}
if stats.Memory.RssBytes != nil {
result.Memory.RSSBytes = &stats.Memory.RssBytes.Value
}
result.Memory.PSI = makePSIStats(stats.Memory.Psi)
} else {
result.Memory.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.Memory.WorkingSetBytes = ptr.To[uint64](0)
result.Memory.UsageBytes = ptr.To[uint64](0)
result.Memory.RSSBytes = ptr.To[uint64](0)
}
if stats.Swap != nil {
result.Swap.Time = metav1.NewTime(time.Unix(0, stats.Swap.Timestamp))
if stats.Swap.SwapUsageBytes != nil {
result.Swap.SwapUsageBytes = &stats.Swap.SwapUsageBytes.Value
}
if stats.Swap.SwapAvailableBytes != nil {
result.Swap.SwapAvailableBytes = &stats.Swap.SwapAvailableBytes.Value
}
} else {
result.Swap.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
result.Swap.SwapUsageBytes = ptr.To[uint64](0)
result.Swap.SwapAvailableBytes = ptr.To[uint64](0)
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
result.IO = &statsapi.IOStats{}
if stats.Io != nil {
result.IO.Time = metav1.NewTime(time.Unix(0, stats.Io.Timestamp))
result.IO.PSI = makePSIStats(stats.Io.Psi)
} else {
result.IO.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano()))
}
}
if stats.WritableLayer != nil {
result.Rootfs.Time = metav1.NewTime(time.Unix(0, stats.WritableLayer.Timestamp))
if stats.WritableLayer.UsedBytes != nil {
result.Rootfs.UsedBytes = &stats.WritableLayer.UsedBytes.Value
}
if stats.WritableLayer.InodesUsed != nil {
result.Rootfs.InodesUsed = &stats.WritableLayer.InodesUsed.Value
}
}
fsID := stats.GetWritableLayer().GetFsId()
var err error
if fsID != nil {
imageFsInfo, found := fsIDtoInfo[fsID.Mountpoint]
if !found {
imageFsInfo, err = p.getFsInfo(logger, fsID)
if err != nil {
return nil, fmt.Errorf("get filesystem info: %w", err)
}
fsIDtoInfo[fsID.Mountpoint] = imageFsInfo
}
if imageFsInfo != nil {
// The image filesystem id is unknown to the local node or there's
// an error retrieving the stats. In these cases, we omit those stats
// and return a best-effort partial result. See
// https://github.com/kubernetes/heapster/issues/1793.
result.Rootfs.AvailableBytes = &imageFsInfo.Available
result.Rootfs.CapacityBytes = &imageFsInfo.Capacity
result.Rootfs.InodesFree = imageFsInfo.InodesFree
result.Rootfs.Inodes = imageFsInfo.Inodes
}
}
// NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers
// using the old log path, empty log stats are returned. This is fine, because we don't
// officially support in-place upgrades anyway.
result.Logs, err = p.hostStatsProvider.getPodContainerLogStats(meta.GetNamespace(), meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName(), rootFsInfo)
if err != nil {
logger.Error(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName())
}
return result, nil
}
func (p *criStatsProvider) makeContainerCPUAndMemoryStats(
stats *runtimeapi.ContainerStats,
startTime time.Time,
zeroMissingValues bool, // whether to write zeros to missing values
) *statsapi.ContainerStats {
result := &statsapi.ContainerStats{
Name: stats.Attributes.Metadata.Name,
// The StartTime in the summary API is the container creation time.
StartTime: metav1.NewTime(startTime),
// UserDefinedMetrics is not supported by CRI.
}
getUint64 := func(val *runtimeapi.UInt64Value) *uint64 {
if val != nil {
return &val.Value
} else if zeroMissingValues {
return ptr.To[uint64](0)
} else {
return nil
}
}
if stats.Cpu != nil {
result.CPU = &statsapi.CPUStats{
Time: metav1.NewTime(time.Unix(0, stats.Cpu.Timestamp)),
UsageNanoCores: p.getContainerUsageNanoCores(stats),
UsageCoreNanoSeconds: getUint64(stats.Cpu.UsageCoreNanoSeconds),
PSI: makePSIStats(stats.Cpu.Psi),
}
} else if zeroMissingValues {
result.CPU = &statsapi.CPUStats{
Time: metav1.NewTime(time.Unix(0, time.Now().UnixNano())),
UsageCoreNanoSeconds: ptr.To[uint64](0),
UsageNanoCores: ptr.To[uint64](0),
}
}
if stats.Memory != nil {
result.Memory = &statsapi.MemoryStats{
Time: metav1.NewTime(time.Unix(0, stats.Memory.Timestamp)),
AvailableBytes: getUint64(stats.Memory.AvailableBytes),
UsageBytes: getUint64(stats.Memory.UsageBytes),
WorkingSetBytes: getUint64(stats.Memory.WorkingSetBytes),
RSSBytes: getUint64(stats.Memory.RssBytes),
PageFaults: getUint64(stats.Memory.PageFaults),
MajorPageFaults: getUint64(stats.Memory.MajorPageFaults),
PSI: makePSIStats(stats.Memory.Psi),
}
} else if zeroMissingValues {
result.Memory = &statsapi.MemoryStats{
Time: metav1.NewTime(time.Unix(0, time.Now().UnixNano())),
AvailableBytes: ptr.To[uint64](0),
UsageBytes: ptr.To[uint64](0),
WorkingSetBytes: ptr.To[uint64](0),
RSSBytes: ptr.To[uint64](0),
PageFaults: ptr.To[uint64](0),
MajorPageFaults: ptr.To[uint64](0),
}
}
if stats.Swap != nil {
result.Swap = &statsapi.SwapStats{
Time: metav1.NewTime(time.Unix(0, stats.Swap.Timestamp)),
SwapUsageBytes: getUint64(stats.Swap.SwapUsageBytes),
SwapAvailableBytes: getUint64(stats.Swap.SwapAvailableBytes),
}
} else if zeroMissingValues {
result.Swap = &statsapi.SwapStats{
Time: metav1.NewTime(time.Unix(0, time.Now().UnixNano())),
SwapUsageBytes: ptr.To[uint64](0),
SwapAvailableBytes: ptr.To[uint64](0),
}
}
return result
}
func makePSIStats(stats *runtimeapi.PsiStats) *statsapi.PSIStats {
if !utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
return nil
}
if stats == nil {
return nil
}
result := &statsapi.PSIStats{}
if stats.Full != nil {
result.Full = statsapi.PSIData{
Total: stats.Full.Total,
Avg10: stats.Full.Avg10,
Avg60: stats.Full.Avg60,
Avg300: stats.Full.Avg300,
}
}
if stats.Some != nil {
result.Some = statsapi.PSIData{
Total: stats.Some.Total,
Avg10: stats.Some.Avg10,
Avg60: stats.Some.Avg60,
Avg300: stats.Some.Avg300,
}
}
return result
}
// getContainerUsageNanoCores first attempts to get the usage nano cores from the stats reported
// by the CRI. If it is unable to, it gets the information from the cache instead.
func (p *criStatsProvider) getContainerUsageNanoCores(stats *runtimeapi.ContainerStats) *uint64 {
if stats == nil || stats.Attributes == nil {
return nil
}
// Bypass the cache if the CRI implementation specified the UsageNanoCores.
if stats.Cpu != nil && stats.Cpu.UsageNanoCores != nil {
return &stats.Cpu.UsageNanoCores.Value
}
p.mutex.RLock()
defer p.mutex.RUnlock()
cached, ok := p.cpuUsageCache[stats.Attributes.Id]
if !ok || cached.usageNanoCores == nil {
return nil
}
// return a copy of the usage
latestUsage := *cached.usageNanoCores
return &latestUsage
}
// getAndUpdateContainerUsageNanoCores first attempts to get the usage nano cores from the stats reported
// by the CRI. If it is unable to, it computes usageNanoCores based on the given and the cached usageCoreNanoSeconds,
// updates the cache with the computed usageNanoCores, and returns the usageNanoCores.
func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(logger klog.Logger, stats *runtimeapi.ContainerStats) *uint64 {
if stats == nil || stats.Attributes == nil || stats.Cpu == nil {
return nil
}
// Bypass the cache if the CRI implementation specified the UsageNanoCores.
if stats.Cpu.UsageNanoCores != nil {
return &stats.Cpu.UsageNanoCores.Value
}
// If there is neither UsageNanoCores nor UsageCoreNanoSeconds, there is no information to use
if stats.Cpu.UsageCoreNanoSeconds == nil {
return nil
}
id := stats.Attributes.Id
usage, err := func() (*uint64, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
cached, ok := p.cpuUsageCache[id]
if !ok || cached.stats.UsageCoreNanoSeconds == nil || stats.Cpu.UsageCoreNanoSeconds.Value < cached.stats.UsageCoreNanoSeconds.Value {
// Cannot compute the usage now, but update the cached stats anyway
p.cpuUsageCache[id] = &cpuUsageRecord{stats: stats.Cpu, usageNanoCores: nil}
return nil, nil
}
newStats := stats.Cpu
cachedStats := cached.stats
nanoSeconds := newStats.Timestamp - cachedStats.Timestamp
if nanoSeconds <= 0 {
return nil, fmt.Errorf("zero or negative interval (%v - %v)", newStats.Timestamp, cachedStats.Timestamp)
}
usageNanoCores := uint64(float64(newStats.UsageCoreNanoSeconds.Value-cachedStats.UsageCoreNanoSeconds.Value) /
float64(nanoSeconds) * float64(time.Second/time.Nanosecond))
// Update cache with new value.
usageToUpdate := usageNanoCores
p.cpuUsageCache[id] = &cpuUsageRecord{stats: newStats, usageNanoCores: &usageToUpdate}
return &usageNanoCores, nil
}()
if err != nil {
// This should not happen. Log it to raise visibility.
logger.Error(err, "Failed updating cpu usage nano core")
}
return usage
}
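// Illustrative sketch, not part of the original file: the rate computation
// performed by getAndUpdateContainerUsageNanoCores, applied to hypothetical
// samples. Two cumulative UsageCoreNanoSeconds readings taken 10s apart give
// the instantaneous usage in nanocores (CPU-time delta over wall-clock delta).
func exampleUsageNanoCores() {
	prev := uint64(2_000_000_000) // cumulative CPU nanoseconds at t0
	cur := uint64(7_000_000_000)  // cumulative CPU nanoseconds at t0+10s
	elapsedNs := (10 * time.Second).Nanoseconds()
	usageNanoCores := uint64(float64(cur-prev) / float64(elapsedNs) * float64(time.Second/time.Nanosecond))
	fmt.Println(usageNanoCores) // 500000000 nanocores, i.e. 0.5 cores
}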
func (p *criStatsProvider) cleanupOutdatedCaches() {
p.mutex.Lock()
defer p.mutex.Unlock()
for k, v := range p.cpuUsageCache {
if v == nil {
delete(p.cpuUsageCache, k)
continue
}
if time.Since(time.Unix(0, v.stats.Timestamp)) > defaultCachePeriod {
delete(p.cpuUsageCache, k)
}
}
}
// removeTerminatedPods returns pods with terminated ones removed.
// It only removes a terminated pod when there is a running instance
// of the pod with the same name and namespace.
// This is needed because:
// 1) PodSandbox may be recreated;
// 2) Pod may be recreated with the same name and namespace.
func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbox {
podMap := make(map[statsapi.PodReference][]*runtimeapi.PodSandbox)
// Sort by creation time
sort.Slice(pods, func(i, j int) bool {
return pods[i].CreatedAt < pods[j].CreatedAt
})
for _, pod := range pods {
refID := statsapi.PodReference{
Name: pod.GetMetadata().GetName(),
Namespace: pod.GetMetadata().GetNamespace(),
// UID is intentionally left empty.
}
podMap[refID] = append(podMap[refID], pod)
}
result := make([]*runtimeapi.PodSandbox, 0)
for _, refs := range podMap {
if len(refs) == 1 {
result = append(result, refs[0])
continue
}
found := false
for i := 0; i < len(refs); i++ {
if refs[i].State == runtimeapi.PodSandboxState_SANDBOX_READY {
found = true
result = append(result, refs[i])
}
}
if !found {
result = append(result, refs[len(refs)-1])
}
}
return result
}
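// Illustrative sketch, not part of the original file: when two sandboxes
// share a name and namespace (e.g. a recreated pod), removeTerminatedPods
// keeps only the READY instance; if none is ready, the newest one is kept.
// The sandbox values below are hypothetical.
func exampleRemoveTerminatedPods() {
	terminated := &runtimeapi.PodSandbox{
		Metadata:  &runtimeapi.PodSandboxMetadata{Name: "web", Namespace: "default"},
		State:     runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
		CreatedAt: 100,
	}
	recreated := &runtimeapi.PodSandbox{
		Metadata:  &runtimeapi.PodSandboxMetadata{Name: "web", Namespace: "default"},
		State:     runtimeapi.PodSandboxState_SANDBOX_READY,
		CreatedAt: 200,
	}
	kept := removeTerminatedPods([]*runtimeapi.PodSandbox{terminated, recreated})
	fmt.Println(len(kept), kept[0].State) // 1 SANDBOX_READY
}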
// removeTerminatedContainers removes all terminated containers since they should
// not be used for usage calculations.
func removeTerminatedContainers(containers []*runtimeapi.Container) []*runtimeapi.Container {
containerMap := make(map[containerID][]*runtimeapi.Container)
// Sort by creation time
sort.Slice(containers, func(i, j int) bool {
return containers[i].CreatedAt < containers[j].CreatedAt
})
for _, container := range containers {
refID := containerID{
podRef: buildPodRef(container.Labels),
containerName: kubetypes.GetContainerName(container.Labels),
}
containerMap[refID] = append(containerMap[refID], container)
}
result := make([]*runtimeapi.Container, 0)
for _, refs := range containerMap {
for i := 0; i < len(refs); i++ {
if refs[i].State == runtimeapi.ContainerState_CONTAINER_RUNNING {
result = append(result, refs[i])
}
}
}
return result
}
func (p *criStatsProvider) addCadvisorContainerStats(
logger klog.Logger,
cs *statsapi.ContainerStats,
caPodStats *cadvisorapiv2.ContainerInfo,
) {
if caPodStats.Spec.HasCustomMetrics {
cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, caPodStats)
}
cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats)
if cpu != nil {
cs.CPU = cpu
}
if memory != nil {
cs.Memory = memory
}
swap := cadvisorInfoToSwapStats(caPodStats)
if swap != nil {
cs.Swap = swap
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
io := cadvisorInfoToIOStats(caPodStats)
if io != nil {
cs.IO = io
}
}
}
func (p *criStatsProvider) addCadvisorContainerCPUAndMemoryStats(
logger klog.Logger,
cs *statsapi.ContainerStats,
caPodStats *cadvisorapiv2.ContainerInfo,
) {
if caPodStats.Spec.HasCustomMetrics {
cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, caPodStats)
}
cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats)
if cpu != nil {
cs.CPU = cpu
}
if memory != nil {
cs.Memory = memory
}
}
func getCRICadvisorStats(logger klog.Logger, infos map[string]cadvisorapiv2.ContainerInfo) (map[string]cadvisorapiv2.ContainerInfo, map[string]cadvisorapiv2.ContainerInfo) {
stats := make(map[string]cadvisorapiv2.ContainerInfo)
filteredInfos, cinfosByPodCgroupKey := filterTerminatedContainerInfoAndAssembleByPodCgroupKey(logger, infos)
for key, info := range filteredInfos {
// On systemd using devicemapper each mount into the container has an
// associated cgroup. We ignore them to ensure we do not get duplicate
// entries in our summary. For details on .mount units:
// http://man7.org/linux/man-pages/man5/systemd.mount.5.html
if strings.HasSuffix(key, ".mount") {
continue
}
// Only include containers that are managed by a pod
if !isPodManagedContainer(logger, &info) {
continue
}
stats[extractIDFromCgroupPath(key)] = info
}
return stats, cinfosByPodCgroupKey
}
func extractIDFromCgroupPath(cgroupPath string) string {
// case0 == cgroupfs: "/kubepods/burstable/pod2fc932ce-fdcc-454b-97bd-aadfdeb4c340/9be25294016e2dc0340dd605ce1f57b492039b267a6a618a7ad2a7a58a740f32"
id := filepath.Base(cgroupPath)
// case1 == systemd: "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fc932ce_fdcc_454b_97bd_aadfdeb4c340.slice/cri-containerd-aaefb9d8feed2d453b543f6d928cede7a4dbefa6a0ae7c9b990dd234c56e93b9.scope"
// trim the .scope suffix, then everything up to the final '-'
systemdSuffix := ".scope"
if strings.HasSuffix(id, systemdSuffix) {
id = strings.TrimSuffix(id, systemdSuffix)
components := strings.Split(id, "-")
if len(components) > 1 {
id = components[len(components)-1]
}
}
return id
}
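// Illustrative sketch, not part of the original file: how the two cgroup
// driver layouts documented above resolve to the same kind of container ID.
// The paths reuse the examples from the comments in extractIDFromCgroupPath.
func exampleExtractIDFromCgroupPath() {
	// cgroupfs driver: the ID is simply the last path element.
	fmt.Println(extractIDFromCgroupPath("/kubepods/burstable/pod2fc932ce-fdcc-454b-97bd-aadfdeb4c340/9be25294016e2dc0340dd605ce1f57b492039b267a6a618a7ad2a7a58a740f32"))
	// systemd driver: ".scope" is trimmed, then everything up to the final
	// '-' (the "cri-containerd-" prefix) is dropped.
	fmt.Println(extractIDFromCgroupPath("/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fc932ce_fdcc_454b_97bd_aadfdeb4c340.slice/cri-containerd-aaefb9d8feed2d453b543f6d928cede7a4dbefa6a0ae7c9b990dd234c56e93b9.scope"))
}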
func criInterfaceToSummary(criIface *runtimeapi.NetworkInterfaceUsage) statsapi.InterfaceStats {
return statsapi.InterfaceStats{
Name: criIface.Name,
RxBytes: valueOfUInt64Value(criIface.RxBytes),
RxErrors: valueOfUInt64Value(criIface.RxErrors),
TxBytes: valueOfUInt64Value(criIface.TxBytes),
TxErrors: valueOfUInt64Value(criIface.TxErrors),
}
}
func valueOfUInt64Value(value *runtimeapi.UInt64Value) *uint64 {
if value == nil {
return nil
}
return &value.Value
}
//go:build linux
// +build linux
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"fmt"
"time"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
)
func (p *criStatsProvider) addCRIPodContainerStats(
logger klog.Logger,
criSandboxStat *runtimeapi.PodSandboxStats,
ps *statsapi.PodStats,
fsIDtoInfo map[string]*cadvisorapiv2.FsInfo,
containerMap map[string]*runtimeapi.Container,
podSandbox *runtimeapi.PodSandbox,
rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) error {
for _, criContainerStat := range criSandboxStat.Linux.Containers {
container, found := containerMap[criContainerStat.Attributes.Id]
if !found {
continue
}
// Fill available stats for the full set of required pod stats
cs, err := p.makeContainerStats(logger, criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(),
updateCPUNanoCoreUsage)
if err != nil {
return fmt.Errorf("make container stats: %w", err)
}
ps.Containers = append(ps.Containers, *cs)
}
return nil
}
func addCRIPodNetworkStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) {
if criPodStat == nil || criPodStat.Linux == nil || criPodStat.Linux.Network == nil {
return
}
criNetwork := criPodStat.Linux.Network
iStats := statsapi.NetworkStats{
Time: metav1.NewTime(time.Unix(0, criNetwork.Timestamp)),
InterfaceStats: criInterfaceToSummary(criNetwork.DefaultInterface),
Interfaces: make([]statsapi.InterfaceStats, 0, len(criNetwork.Interfaces)),
}
for _, iface := range criNetwork.Interfaces {
iStats.Interfaces = append(iStats.Interfaces, criInterfaceToSummary(iface))
}
ps.Network = &iStats
}
func addCRIPodMemoryStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) {
if criPodStat == nil || criPodStat.Linux == nil || criPodStat.Linux.Memory == nil {
return
}
criMemory := criPodStat.Linux.Memory
ps.Memory = &statsapi.MemoryStats{
Time: metav1.NewTime(time.Unix(0, criMemory.Timestamp)),
AvailableBytes: valueOfUInt64Value(criMemory.AvailableBytes),
UsageBytes: valueOfUInt64Value(criMemory.UsageBytes),
WorkingSetBytes: valueOfUInt64Value(criMemory.WorkingSetBytes),
RSSBytes: valueOfUInt64Value(criMemory.RssBytes),
PageFaults: valueOfUInt64Value(criMemory.PageFaults),
MajorPageFaults: valueOfUInt64Value(criMemory.MajorPageFaults),
PSI: makePSIStats(criMemory.Psi),
}
}
func addCRIPodCPUStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) {
if criPodStat == nil || criPodStat.Linux == nil || criPodStat.Linux.Cpu == nil {
return
}
criCPU := criPodStat.Linux.Cpu
ps.CPU = &statsapi.CPUStats{
Time: metav1.NewTime(time.Unix(0, criCPU.Timestamp)),
UsageNanoCores: valueOfUInt64Value(criCPU.UsageNanoCores),
UsageCoreNanoSeconds: valueOfUInt64Value(criCPU.UsageCoreNanoSeconds),
PSI: makePSIStats(criCPU.Psi),
}
}
func addCRIPodIOStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) {
if !utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
return
}
if criPodStat == nil || criPodStat.Linux == nil || criPodStat.Linux.Io == nil {
return
}
criIO := criPodStat.Linux.Io
ps.IO = &statsapi.IOStats{
Time: metav1.NewTime(time.Unix(0, criIO.Timestamp)),
PSI: makePSIStats(criIO.Psi),
}
}
func addCRIPodProcessStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) {
if criPodStat == nil || criPodStat.Linux == nil || criPodStat.Linux.Process == nil {
return
}
ps.ProcessStats = &statsapi.ProcessStats{
ProcessCount: valueOfUInt64Value(criPodStat.Linux.Process.ProcessCount),
}
}
// listContainerNetworkStats returns the network stats of all the running containers.
// It should return (nil, nil) for platforms other than Windows.
func (p *criStatsProvider) listContainerNetworkStats(klog.Logger) (map[string]*statsapi.NetworkStats, error) {
return nil, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"fmt"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/utils/ptr"
)
// defaultNetworkInterfaceName is used for collecting network stats.
// This logic relies on knowledge of the container runtime implementation and
// is not reliable.
const defaultNetworkInterfaceName = "eth0"
func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsapi.CPUStats, *statsapi.MemoryStats) {
cstat, found := latestContainerStats(info)
if !found {
return nil, nil
}
var cpuStats *statsapi.CPUStats
var memoryStats *statsapi.MemoryStats
cpuStats = &statsapi.CPUStats{
Time: metav1.NewTime(cstat.Timestamp),
UsageNanoCores: ptr.To[uint64](0),
UsageCoreNanoSeconds: ptr.To[uint64](0),
}
if info.Spec.HasCpu {
if cstat.CpuInst != nil {
cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
}
if cstat.Cpu != nil {
cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
cpuStats.PSI = cadvisorPSIToStatsPSI(&cstat.Cpu.PSI)
}
}
}
if info.Spec.HasMemory && cstat.Memory != nil {
pageFaults := cstat.Memory.ContainerData.Pgfault
majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
memoryStats = &statsapi.MemoryStats{
Time: metav1.NewTime(cstat.Timestamp),
UsageBytes: &cstat.Memory.Usage,
WorkingSetBytes: &cstat.Memory.WorkingSet,
RSSBytes: &cstat.Memory.RSS,
PageFaults: &pageFaults,
MajorPageFaults: &majorPageFaults,
}
// availableBytes = memory limit (if known) - workingset
if !isMemoryUnlimited(info.Spec.Memory.Limit) {
availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet
memoryStats.AvailableBytes = &availableBytes
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
memoryStats.PSI = cadvisorPSIToStatsPSI(&cstat.Memory.PSI)
}
} else {
memoryStats = &statsapi.MemoryStats{
Time: metav1.NewTime(cstat.Timestamp),
WorkingSetBytes: ptr.To[uint64](0),
}
}
return cpuStats, memoryStats
}
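// Illustrative sketch, not part of the original file: availableBytes is the
// headroom under the memory limit, computed from hypothetical numbers the
// same way as above, and only when the limit is a real bound.
func exampleAvailableBytes() {
	limit := uint64(2 * 1024 * 1024 * 1024) // 2GiB memory limit
	workingSet := uint64(512 * 1024 * 1024) // 512MiB working set
	if !isMemoryUnlimited(limit) {
		fmt.Println(limit - workingSet) // 1610612736 bytes available
	}
}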
// cadvisorInfoToContainerStats returns the statsapi.ContainerStats converted
// from the container and filesystem info.
func cadvisorInfoToContainerStats(logger klog.Logger, name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats {
result := &statsapi.ContainerStats{
StartTime: metav1.NewTime(info.Spec.CreationTime),
Name: name,
}
cstat, found := latestContainerStats(info)
if !found {
return result
}
cpu, memory := cadvisorInfoToCPUandMemoryStats(info)
result.CPU = cpu
result.Memory = memory
result.Swap = cadvisorInfoToSwapStats(info)
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
result.IO = cadvisorInfoToIOStats(info)
}
// NOTE: if they can be found, log stats will be overwritten
// by the caller, as it knows more information about the pod,
// which is needed to determine log size.
if rootFs != nil {
// The container logs live on the node rootfs device
result.Logs = buildLogsStats(cstat, rootFs)
}
if imageFs != nil {
// The container rootFs lives on the imageFs device (which may not be the node root fs)
result.Rootfs = buildRootfsStats(cstat, imageFs)
}
cfs := cstat.Filesystem
if cfs != nil {
if cfs.BaseUsageBytes != nil {
if result.Rootfs != nil {
rootfsUsage := *cfs.BaseUsageBytes
result.Rootfs.UsedBytes = &rootfsUsage
}
if cfs.TotalUsageBytes != nil && result.Logs != nil {
logsUsage := *cfs.TotalUsageBytes - *cfs.BaseUsageBytes
result.Logs.UsedBytes = &logsUsage
}
}
if cfs.InodeUsage != nil && result.Rootfs != nil {
rootInodes := *cfs.InodeUsage
result.Rootfs.InodesUsed = &rootInodes
}
}
for _, acc := range cstat.Accelerators {
result.Accelerators = append(result.Accelerators, statsapi.AcceleratorStats{
Make: acc.Make,
Model: acc.Model,
ID: acc.ID,
MemoryTotal: acc.MemoryTotal,
MemoryUsed: acc.MemoryUsed,
DutyCycle: acc.DutyCycle,
})
}
result.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(logger, info)
return result
}
// cadvisorInfoToContainerCPUAndMemoryStats returns the statsapi.ContainerStats converted
// from the container and filesystem info.
func cadvisorInfoToContainerCPUAndMemoryStats(name string, info *cadvisorapiv2.ContainerInfo) *statsapi.ContainerStats {
result := &statsapi.ContainerStats{
StartTime: metav1.NewTime(info.Spec.CreationTime),
Name: name,
}
cpu, memory := cadvisorInfoToCPUandMemoryStats(info)
result.CPU = cpu
result.Memory = memory
result.Swap = cadvisorInfoToSwapStats(info)
return result
}
func cadvisorInfoToProcessStats(info *cadvisorapiv2.ContainerInfo) *statsapi.ProcessStats {
cstat, found := latestContainerStats(info)
if !found || cstat.Processes == nil {
return nil
}
num := cstat.Processes.ProcessCount
return &statsapi.ProcessStats{ProcessCount: ptr.To[uint64](num)}
}
func mergeProcessStats(first *statsapi.ProcessStats, second *statsapi.ProcessStats) *statsapi.ProcessStats {
if first == nil && second == nil {
return nil
}
if first == nil {
return second
}
if second == nil {
return first
}
firstProcessCount := uint64(0)
if first.ProcessCount != nil {
firstProcessCount = *first.ProcessCount
}
secondProcessCount := uint64(0)
if second.ProcessCount != nil {
secondProcessCount = *second.ProcessCount
}
return &statsapi.ProcessStats{ProcessCount: ptr.To[uint64](firstProcessCount + secondProcessCount)}
}
// cadvisorInfoToNetworkStats returns the statsapi.NetworkStats converted from
// the container info from cadvisor.
func cadvisorInfoToNetworkStats(info *cadvisorapiv2.ContainerInfo) *statsapi.NetworkStats {
if !info.Spec.HasNetwork {
return nil
}
cstat, found := latestContainerStats(info)
if !found {
return nil
}
if cstat.Network == nil {
return nil
}
iStats := statsapi.NetworkStats{
Time: metav1.NewTime(cstat.Timestamp),
}
for i := range cstat.Network.Interfaces {
inter := cstat.Network.Interfaces[i]
iStat := statsapi.InterfaceStats{
Name: inter.Name,
RxBytes: &inter.RxBytes,
RxErrors: &inter.RxErrors,
TxBytes: &inter.TxBytes,
TxErrors: &inter.TxErrors,
}
if inter.Name == defaultNetworkInterfaceName {
iStats.InterfaceStats = iStat
}
iStats.Interfaces = append(iStats.Interfaces, iStat)
}
return &iStats
}
// cadvisorInfoToUserDefinedMetrics returns the statsapi.UserDefinedMetric
// converted from the container info from cadvisor.
func cadvisorInfoToUserDefinedMetrics(logger klog.Logger, info *cadvisorapiv2.ContainerInfo) []statsapi.UserDefinedMetric {
type specVal struct {
ref statsapi.UserDefinedMetricDescriptor
valType cadvisorapiv1.DataType
time time.Time
value float64
}
udmMap := map[string]*specVal{}
for _, spec := range info.Spec.CustomMetrics {
udmMap[spec.Name] = &specVal{
ref: statsapi.UserDefinedMetricDescriptor{
Name: spec.Name,
Type: statsapi.UserDefinedMetricType(spec.Type),
Units: spec.Units,
},
valType: spec.Format,
}
}
for _, stat := range info.Stats {
for name, values := range stat.CustomMetrics {
specVal, ok := udmMap[name]
if !ok {
logger.Info("Spec for custom metric is missing from cAdvisor output", "metric", name, "spec", info.Spec, "metrics", stat.CustomMetrics)
continue
}
for _, value := range values {
// Pick the most recent value
if value.Timestamp.Before(specVal.time) {
continue
}
specVal.time = value.Timestamp
specVal.value = value.FloatValue
if specVal.valType == cadvisorapiv1.IntType {
specVal.value = float64(value.IntValue)
}
}
}
}
var udm []statsapi.UserDefinedMetric
for _, specVal := range udmMap {
udm = append(udm, statsapi.UserDefinedMetric{
UserDefinedMetricDescriptor: specVal.ref,
Time: metav1.NewTime(specVal.time),
Value: specVal.value,
})
}
return udm
}
func cadvisorInfoToSwapStats(info *cadvisorapiv2.ContainerInfo) *statsapi.SwapStats {
cstat, found := latestContainerStats(info)
if !found {
return nil
}
var swapStats *statsapi.SwapStats
if info.Spec.HasMemory && cstat.Memory != nil {
swapStats = &statsapi.SwapStats{
Time: metav1.NewTime(cstat.Timestamp),
SwapUsageBytes: &cstat.Memory.Swap,
}
if !isMemoryUnlimited(info.Spec.Memory.SwapLimit) {
swapAvailableBytes := info.Spec.Memory.SwapLimit - cstat.Memory.Swap
swapStats.SwapAvailableBytes = &swapAvailableBytes
}
}
return swapStats
}
func cadvisorInfoToIOStats(info *cadvisorapiv2.ContainerInfo) *statsapi.IOStats {
cstat, found := latestContainerStats(info)
if !found {
return nil
}
var ioStats *statsapi.IOStats
if info.Spec.HasDiskIo && cstat.DiskIo != nil {
ioStats = &statsapi.IOStats{
Time: metav1.NewTime(cstat.Timestamp),
PSI: cadvisorPSIToStatsPSI(&cstat.DiskIo.PSI),
}
}
return ioStats
}
// latestContainerStats returns the latest container stats from cadvisor, or (nil, false) if none exist
func latestContainerStats(info *cadvisorapiv2.ContainerInfo) (*cadvisorapiv2.ContainerStats, bool) {
stats := info.Stats
if len(stats) < 1 {
return nil, false
}
latest := stats[len(stats)-1]
if latest == nil {
return nil, false
}
return latest, true
}
func isMemoryUnlimited(v uint64) bool {
// Size after which we consider memory to be "unlimited". This is not
// MaxInt64 due to rounding by the kernel.
// TODO: cadvisor should export this https://github.com/google/cadvisor/blob/master/metrics/prometheus.go#L596
const maxMemorySize = uint64(1 << 62)
return v > maxMemorySize
}
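// Illustrative sketch, not part of the original file: cgroups report an
// effectively-unlimited memory limit as a value near MaxInt64, so derived
// stats such as availableBytes are only computed for real bounds.
func exampleIsMemoryUnlimited() {
	fmt.Println(isMemoryUnlimited(8 * 1024 * 1024 * 1024)) // false: 8GiB is a real limit
	fmt.Println(isMemoryUnlimited(uint64(1) << 63))        // true: treated as "no limit"
}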
// getCgroupInfo returns the information of the container with the specified
// containerName from cadvisor.
func getCgroupInfo(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerInfo, error) {
var maxAge *time.Duration
if updateStats {
age := 0 * time.Second
maxAge = &age
}
infoMap, err := cadvisor.ContainerInfoV2(containerName, cadvisorapiv2.RequestOptions{
IdType: cadvisorapiv2.TypeName,
Count: 2, // 2 samples are needed to compute "instantaneous" CPU
Recursive: false,
MaxAge: maxAge,
})
if err != nil {
return nil, fmt.Errorf("failed to get container info for %q: %w", containerName, err)
}
if len(infoMap) != 1 {
return nil, fmt.Errorf("unexpected number of containers: %v", len(infoMap))
}
info := infoMap[containerName]
return &info, nil
}
// getCgroupStats returns the latest stats of the container having the
// specified containerName from cadvisor.
func getCgroupStats(cadvisor cadvisor.Interface, containerName string, updateStats bool) (*cadvisorapiv2.ContainerStats, error) {
info, err := getCgroupInfo(cadvisor, containerName, updateStats)
if err != nil {
return nil, err
}
stats, found := latestContainerStats(info)
if !found {
return nil, fmt.Errorf("failed to get latest stats from container info for %q", containerName)
}
return stats, nil
}
func buildLogsStats(cstat *cadvisorapiv2.ContainerStats, rootFs *cadvisorapiv2.FsInfo) *statsapi.FsStats {
fsStats := &statsapi.FsStats{
Time: metav1.NewTime(cstat.Timestamp),
AvailableBytes: &rootFs.Available,
CapacityBytes: &rootFs.Capacity,
InodesFree: rootFs.InodesFree,
Inodes: rootFs.Inodes,
}
if rootFs.Inodes != nil && rootFs.InodesFree != nil {
logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree
fsStats.InodesUsed = &logsInodesUsed
}
return fsStats
}
func buildRootfsStats(cstat *cadvisorapiv2.ContainerStats, imageFs *cadvisorapiv2.FsInfo) *statsapi.FsStats {
return &statsapi.FsStats{
Time: metav1.NewTime(cstat.Timestamp),
AvailableBytes: &imageFs.Available,
CapacityBytes: &imageFs.Capacity,
InodesFree: imageFs.InodesFree,
Inodes: imageFs.Inodes,
}
}
func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo,
podLogStats *statsapi.FsStats, etcHostsStats *statsapi.FsStats, isCRIStatsProvider bool) *statsapi.FsStats {
result := &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
for _, container := range containers {
addContainerUsage(result, &container, isCRIStatsProvider)
}
for _, volume := range volumes {
result.UsedBytes = addUsage(result.UsedBytes, volume.FsStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, volume.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &volume.FsStats.Time)
}
if podLogStats != nil {
result.UsedBytes = addUsage(result.UsedBytes, podLogStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, podLogStats.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &podLogStats.Time)
}
if etcHostsStats != nil {
result.UsedBytes = addUsage(result.UsedBytes, etcHostsStats.UsedBytes)
result.InodesUsed = addUsage(result.InodesUsed, etcHostsStats.InodesUsed)
result.Time = maxUpdateTime(&result.Time, &etcHostsStats.Time)
}
return result
}
func addContainerUsage(stat *statsapi.FsStats, container *statsapi.ContainerStats, isCRIStatsProvider bool) {
if rootFs := container.Rootfs; rootFs != nil {
stat.Time = maxUpdateTime(&stat.Time, &rootFs.Time)
stat.InodesUsed = addUsage(stat.InodesUsed, rootFs.InodesUsed)
stat.UsedBytes = addUsage(stat.UsedBytes, rootFs.UsedBytes)
if logs := container.Logs; logs != nil {
stat.UsedBytes = addUsage(stat.UsedBytes, logs.UsedBytes)
// We have accurate container log inode usage for the CRI stats provider.
if isCRIStatsProvider {
stat.InodesUsed = addUsage(stat.InodesUsed, logs.InodesUsed)
}
stat.Time = maxUpdateTime(&stat.Time, &logs.Time)
}
}
}
func maxUpdateTime(first, second *metav1.Time) metav1.Time {
if first.Before(second) {
return *second
}
return *first
}
func addUsage(first, second *uint64) *uint64 {
if first == nil {
return second
} else if second == nil {
return first
}
total := *first + *second
return &total
}
func makePodStorageStats(logger klog.Logger, s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo, resourceAnalyzer stats.ResourceAnalyzer, hostStatsProvider HostStatsProvider, isCRIStatsProvider bool) {
podNs := s.PodRef.Namespace
podName := s.PodRef.Name
podUID := types.UID(s.PodRef.UID)
var ephemeralStats []statsapi.VolumeStats
if vstats, found := resourceAnalyzer.GetPodVolumeStats(podUID); found {
ephemeralStats = make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes))
copy(ephemeralStats, vstats.EphemeralVolumes)
s.VolumeStats = append(append([]statsapi.VolumeStats{}, vstats.EphemeralVolumes...), vstats.PersistentVolumes...)
}
logStats, err := hostStatsProvider.getPodLogStats(podNs, podName, podUID, rootFsInfo)
if err != nil {
logger.V(6).Error(err, "Unable to fetch pod log stats", "pod", klog.KRef(podNs, podName))
// If people do an in-place upgrade, there might be pods still using
// the old log path. For those pods, no pod log stats are returned.
// We should continue generating other stats in that case.
// calcEphemeralStorage tolerates logStats == nil.
}
etcHostsStats, err := hostStatsProvider.getPodEtcHostsStats(podUID, rootFsInfo)
if err != nil {
logger.V(6).Error(err, "Unable to fetch pod etc hosts stats", "pod", klog.KRef(podNs, podName))
}
s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo, logStats, etcHostsStats, isCRIStatsProvider)
}
func cadvisorPSIToStatsPSI(psi *cadvisorapiv1.PSIStats) *statsapi.PSIStats {
if psi == nil {
return nil
}
return &statsapi.PSIStats{
Full: statsapi.PSIData{
Total: psi.Full.Total,
Avg10: psi.Full.Avg10,
Avg60: psi.Full.Avg60,
Avg300: psi.Full.Avg300,
},
Some: statsapi.PSIData{
Total: psi.Some.Total,
Avg10: psi.Some.Avg10,
Avg60: psi.Some.Avg60,
Avg300: psi.Some.Avg300,
},
}
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"fmt"
"os"
"path/filepath"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/volume"
)
// PodEtcHostsPathFunc is a function that fetches the etc-hosts path for a pod UID; whether an etc-hosts file exists depends on the runtime
type PodEtcHostsPathFunc func(podUID types.UID) string
// metricsProviderByPath maps a path to its metrics provider
type metricsProviderByPath map[string]volume.MetricsProvider
// HostStatsProvider defines an interface for providing host stats associated with pod.
type HostStatsProvider interface {
// getPodLogStats gets stats associated with pod log usage
getPodLogStats(podNamespace, podName string, podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error)
// getPodContainerLogStats gets stats associated with container log usage
getPodContainerLogStats(podNamespace, podName string, podUID types.UID, containerName string, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error)
// getPodEtcHostsStats gets stats associated with pod etc-hosts usage
getPodEtcHostsStats(podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error)
}
type hostStatsProvider struct {
// osInterface is the interface for syscalls.
osInterface kubecontainer.OSInterface
// podEtcHostsPathFunc fetches a pod etc hosts path by uid.
podEtcHostsPathFunc PodEtcHostsPathFunc
// podLogsDirectory is the root directory path for pod logs.
podLogsDirectory string
}
// NewHostStatsProvider returns a new HostStatsProvider type struct.
func NewHostStatsProvider(osInterface kubecontainer.OSInterface, podEtcHostsPathFunc PodEtcHostsPathFunc, podLogsDirectory string) HostStatsProvider {
return hostStatsProvider{
osInterface: osInterface,
podEtcHostsPathFunc: podEtcHostsPathFunc,
podLogsDirectory: podLogsDirectory,
}
}
func (h hostStatsProvider) getPodLogStats(podNamespace, podName string, podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
metricsByPath, err := h.podLogMetrics(podNamespace, podName, podUID)
if err != nil {
return nil, err
}
return metricsByPathToFsStats(metricsByPath, rootFsInfo)
}
// getPodContainerLogStats gets stats for a container's logs
func (h hostStatsProvider) getPodContainerLogStats(podNamespace, podName string, podUID types.UID, containerName string, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
metricsByPath, err := h.podContainerLogMetrics(podNamespace, podName, podUID, containerName)
if err != nil {
return nil, err
}
return metricsByPathToFsStats(metricsByPath, rootFsInfo)
}
// getPodEtcHostsStats gets stats for pod etc-hosts usage
func (h hostStatsProvider) getPodEtcHostsStats(podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
// Runtimes may not support an etc-hosts file (Windows with docker)
podEtcHostsPath := h.podEtcHostsPathFunc(podUID)
// Some pods have an explicit /etc/hosts mount and the Kubelet will not create an etc-hosts file for them
if _, err := os.Stat(podEtcHostsPath); os.IsNotExist(err) {
return nil, nil
}
metrics := volume.NewMetricsDu(podEtcHostsPath)
hostMetrics, err := metrics.GetMetrics()
if err != nil {
return nil, fmt.Errorf("failed to get stats %v", err)
}
result := rootFsInfoToFsStats(rootFsInfo)
usedBytes := uint64(hostMetrics.Used.Value())
inodesUsed := uint64(hostMetrics.InodesUsed.Value())
result.UsedBytes = addUsage(result.UsedBytes, &usedBytes)
result.InodesUsed = addUsage(result.InodesUsed, &inodesUsed)
result.Time = maxUpdateTime(&result.Time, &hostMetrics.Time)
return result, nil
}
func (h hostStatsProvider) podLogMetrics(podNamespace, podName string, podUID types.UID) (metricsProviderByPath, error) {
podLogsDirectoryPath := kuberuntime.BuildPodLogsDirectory(h.podLogsDirectory, podNamespace, podName, podUID)
return h.fileMetricsByDir(podLogsDirectoryPath)
}
func (h hostStatsProvider) podContainerLogMetrics(podNamespace, podName string, podUID types.UID, containerName string) (metricsProviderByPath, error) {
podContainerLogsDirectoryPath := kuberuntime.BuildContainerLogsDirectory(h.podLogsDirectory, podNamespace, podName, podUID, containerName)
return h.fileMetricsByDir(podContainerLogsDirectoryPath)
}
// fileMetricsByDir returns metrics by path for each file under the specified directory
func (h hostStatsProvider) fileMetricsByDir(dirname string) (metricsProviderByPath, error) {
files, err := h.osInterface.ReadDir(dirname)
if err != nil {
return nil, err
}
results := metricsProviderByPath{}
for _, f := range files {
if f.IsDir() {
continue
}
// Only include *files* under pod log directory.
fpath := filepath.Join(dirname, f.Name())
results[fpath] = volume.NewMetricsDu(fpath)
}
return results, nil
}
// metricsByPathToFsStats converts a metrics provider by path to fs stats
func metricsByPathToFsStats(metricsByPath metricsProviderByPath, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
result := rootFsInfoToFsStats(rootFsInfo)
for fpath, metrics := range metricsByPath {
hostMetrics, err := metrics.GetMetrics()
if err != nil {
return nil, fmt.Errorf("failed to get fsstats for %q: %v", fpath, err)
}
usedBytes := uint64(hostMetrics.Used.Value())
inodesUsed := uint64(hostMetrics.InodesUsed.Value())
result.UsedBytes = addUsage(result.UsedBytes, &usedBytes)
result.InodesUsed = addUsage(result.InodesUsed, &inodesUsed)
result.Time = maxUpdateTime(&result.Time, &hostMetrics.Time)
}
return result, nil
}
// rootFsInfoToFsStats is a utility to convert rootFsInfo into statsapi.FsStats
func rootFsInfoToFsStats(rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats {
return &statsapi.FsStats{
Time: metav1.NewTime(rootFsInfo.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"fmt"
"path/filepath"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/apimachinery/pkg/types"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/volume"
)
type fakeHostStatsProvider struct {
fakeStats map[string]*volume.Metrics
osInterface kubecontainer.OSInterface
}
// NewFakeHostStatsProvider provides a way to test with fake host statistics
func NewFakeHostStatsProvider(osInterface kubecontainer.OSInterface) HostStatsProvider {
return &fakeHostStatsProvider{
osInterface: osInterface,
}
}
// NewFakeHostStatsProviderWithData provides a way to test with fake host statistics
func NewFakeHostStatsProviderWithData(fakeStats map[string]*volume.Metrics, osInterface kubecontainer.OSInterface) HostStatsProvider {
return &fakeHostStatsProvider{
fakeStats: fakeStats,
osInterface: osInterface,
}
}
func (f *fakeHostStatsProvider) getPodLogStats(podNamespace, podName string, podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
path := kuberuntime.BuildPodLogsDirectory("/var/log/kube/pods/", podNamespace, podName, podUID)
files, err := f.osInterface.ReadDir(path)
if err != nil {
return nil, err
}
var results []volume.MetricsProvider
for _, file := range files {
if file.IsDir() {
continue
}
// Only include *files* under pod log directory.
fpath := filepath.Join(path, file.Name())
results = append(results, NewFakeMetricsDu(fpath, f.fakeStats[fpath]))
}
return fakeMetricsProvidersToStats(results, rootFsInfo)
}
func (f *fakeHostStatsProvider) getPodContainerLogStats(podNamespace, podName string, podUID types.UID, containerName string, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
path := kuberuntime.BuildContainerLogsDirectory("/var/log/kube/pods/", podNamespace, podName, podUID, containerName)
metricsProvider := NewFakeMetricsDu(path, f.fakeStats[path])
return fakeMetricsProvidersToStats([]volume.MetricsProvider{metricsProvider}, rootFsInfo)
}
func (f *fakeHostStatsProvider) getPodEtcHostsStats(podUID types.UID, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
return nil, fmt.Errorf("not implemented")
}
func fakeMetricsProvidersToStats(metricsProviders []volume.MetricsProvider, rootFsInfo *cadvisorapiv2.FsInfo) (*statsapi.FsStats, error) {
result := rootFsInfoToFsStats(rootFsInfo)
for i, metricsProvider := range metricsProviders {
hostMetrics, err := metricsProvider.GetMetrics()
if err != nil {
return nil, fmt.Errorf("failed to get stats for item %d: %v", i, err)
}
usedBytes := uint64(hostMetrics.Used.Value())
inodesUsed := uint64(hostMetrics.InodesUsed.Value())
result.UsedBytes = addUsage(result.UsedBytes, &usedBytes)
result.InodesUsed = addUsage(result.InodesUsed, &inodesUsed)
result.Time = maxUpdateTime(&result.Time, &hostMetrics.Time)
}
return result, nil
}
type fakeMetricsDu struct {
fakeStats *volume.Metrics
}
// NewFakeMetricsDu returns a MetricsProvider that serves the given fake statistics
func NewFakeMetricsDu(path string, stats *volume.Metrics) volume.MetricsProvider {
return &fakeMetricsDu{fakeStats: stats}
}
func (f *fakeMetricsDu) GetMetrics() (*volume.Metrics, error) {
if f.fakeStats == nil {
return nil, fmt.Errorf("no stats provided")
}
return f.fakeStats, nil
}
//go:build linux
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pidlimit
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// Stats provides basic information about max and current process count
func Stats() (*statsapi.RlimitStats, error) {
rlimit := &statsapi.RlimitStats{}
taskMax := int64(-1)
// Calculate the minimum of kernel.pid_max and kernel.threads-max as they both specify the
// system-wide limit on the number of tasks.
for _, file := range []string{"/proc/sys/kernel/pid_max", "/proc/sys/kernel/threads-max"} {
if content, err := os.ReadFile(file); err == nil {
if limit, err := strconv.ParseInt(string(content[:len(content)-1]), 10, 64); err == nil {
if taskMax == -1 || taskMax > limit {
taskMax = limit
}
}
}
}
// At least one of the reads succeeded.
if taskMax >= 0 {
rlimit.MaxPID = &taskMax
}
// Prefer to read "/proc/loadavg" when possible because sysinfo(2)
// returns a truncated number when it is greater than 65538. See
// https://github.com/kubernetes/kubernetes/issues/107107
if procs, err := runningTaskCount(); err == nil {
rlimit.NumOfRunningProcesses = &procs
} else {
var info syscall.Sysinfo_t
syscall.Sysinfo(&info)
procs := int64(info.Procs)
rlimit.NumOfRunningProcesses = &procs
}
rlimit.Time = v1.NewTime(time.Now())
return rlimit, nil
}
func runningTaskCount() (int64, error) {
// Example: 1.36 3.49 4.53 2/3518 3715089
bytes, err := os.ReadFile("/proc/loadavg")
if err != nil {
return 0, err
}
fields := strings.Fields(string(bytes))
if len(fields) < 5 {
return 0, fmt.Errorf("not enough fields in /proc/loadavg")
}
subfields := strings.Split(fields[3], "/")
if len(subfields) != 2 {
return 0, fmt.Errorf("error parsing fourth field of /proc/loadavg")
}
return strconv.ParseInt(subfields[1], 10, 64)
}
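// Illustrative sketch, not part of the original file: how the fourth field
// of /proc/loadavg ("running/total") is parsed into the system-wide task
// count, using the sample line from the comment in runningTaskCount.
func exampleParseLoadavg() {
	const sample = "1.36 3.49 4.53 2/3518 3715089"
	fields := strings.Fields(sample)
	subfields := strings.Split(fields[3], "/")
	total, _ := strconv.ParseInt(subfields[1], 10, 64)
	fmt.Println(total) // 3518: total tasks, the value Stats reports
}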
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"context"
"errors"
"fmt"
cadvisormemory "github.com/google/cadvisor/cache/memory"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/utils/ptr"
)
// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.
// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc.
type PodManager interface {
TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID
}
// NewCRIStatsProvider returns a Provider that provides the node stats
// from cAdvisor and the container stats from CRI.
func NewCRIStatsProvider(
cadvisor cadvisor.Interface,
resourceAnalyzer stats.ResourceAnalyzer,
podManager PodManager,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
hostStatsProvider HostStatsProvider,
podAndContainerStatsFromCRI bool,
fallbackStatsProvider containerStatsProvider,
) *Provider {
return newStatsProvider(cadvisor, podManager, newCRIStatsProvider(cadvisor, resourceAnalyzer,
runtimeService, imageService, hostStatsProvider, podAndContainerStatsFromCRI, fallbackStatsProvider))
}
// NewCadvisorStatsProvider returns a containerStatsProvider that provides both
// the node and the container stats from cAdvisor.
func NewCadvisorStatsProvider(
cadvisor cadvisor.Interface,
resourceAnalyzer stats.ResourceAnalyzer,
podManager PodManager,
imageService kubecontainer.ImageService,
statusProvider status.PodStatusProvider,
hostStatsProvider HostStatsProvider,
containerManager cm.ContainerManager,
) *Provider {
return newStatsProvider(cadvisor, podManager, newCadvisorStatsProvider(cadvisor, resourceAnalyzer, imageService, statusProvider, hostStatsProvider, containerManager))
}
// newStatsProvider returns a new Provider that provides node stats from
// cAdvisor and the container stats using the containerStatsProvider.
func newStatsProvider(
cadvisor cadvisor.Interface,
podManager PodManager,
containerStatsProvider containerStatsProvider,
) *Provider {
return &Provider{
cadvisor: cadvisor,
podManager: podManager,
containerStatsProvider: containerStatsProvider,
}
}
// Provider provides the stats of the node and the pod-managed containers.
type Provider struct {
cadvisor cadvisor.Interface
podManager PodManager
containerStatsProvider
}
// containerStatsProvider is an interface that provides the stats of the
// containers managed by pods.
type containerStatsProvider interface {
// PodCPUAndMemoryStats gets the latest CPU & Memory stats for the pod and all its running containers.
PodCPUAndMemoryStats(context.Context, *v1.Pod, *kubecontainer.PodStatus) (*statsapi.PodStats, error)
ListPodStats(ctx context.Context) ([]statsapi.PodStats, error)
ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error)
ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error)
ImageFsStats(ctx context.Context) (*statsapi.FsStats, *statsapi.FsStats, error)
ImageFsDevice(ctx context.Context) (string, error)
}
// RlimitStats returns basic information about the process count
func (p *Provider) RlimitStats() (*statsapi.RlimitStats, error) {
return pidlimit.Stats()
}
// GetCgroupStats returns the stats of the cgroup with the cgroupName. Note that
// this function doesn't generate filesystem stats.
func (p *Provider) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
info, err := getCgroupInfo(p.cadvisor, cgroupName, updateStats)
if err != nil {
if errors.Is(err, cadvisormemory.ErrDataNotFound) {
return nil, nil, fmt.Errorf("cgroup stats not found for %q: %w", cgroupName, cadvisormemory.ErrDataNotFound)
}
return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err)
}
// Use klog.TODO() because we currently do not have a proper logger to pass in.
// Replace this with an appropriate logger when refactoring this function to accept a context parameter.
logger := klog.TODO()
// Rootfs and imagefs don't make sense for a raw cgroup.
s := cadvisorInfoToContainerStats(logger, cgroupName, info, nil, nil)
n := cadvisorInfoToNetworkStats(info)
return s, n, nil
}
// GetCgroupCPUAndMemoryStats returns the CPU and memory stats of the cgroup with the cgroupName. Note that
// this function doesn't generate filesystem stats.
func (p *Provider) GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) {
info, err := getCgroupInfo(p.cadvisor, cgroupName, updateStats)
if err != nil {
if errors.Is(err, cadvisormemory.ErrDataNotFound) {
return nil, fmt.Errorf("cgroup stats not found for %q: %w", cgroupName, cadvisormemory.ErrDataNotFound)
}
return nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err)
}
// Rootfs and imagefs don't make sense for a raw cgroup.
s := cadvisorInfoToContainerCPUAndMemoryStats(cgroupName, info)
return s, nil
}
// RootFsStats returns the stats of the node root filesystem.
func (p *Provider) RootFsStats() (*statsapi.FsStats, error) {
rootFsInfo, err := p.cadvisor.RootFsInfo()
if err != nil {
return nil, fmt.Errorf("failed to get rootFs info: %v", err)
}
var nodeFsInodesUsed *uint64
if rootFsInfo.Inodes != nil && rootFsInfo.InodesFree != nil {
nodeFsIU := *rootFsInfo.Inodes - *rootFsInfo.InodesFree
nodeFsInodesUsed = &nodeFsIU
}
// Get the root container stats' timestamp, which will be used as the
// root filesystem stats timestamp. Don't force a stats update, as we only want the timestamp.
rootStats, err := getCgroupStats(p.cadvisor, "/", false)
if err != nil {
return nil, fmt.Errorf("failed to get root container stats: %v", err)
}
return &statsapi.FsStats{
Time: metav1.NewTime(rootStats.Timestamp),
AvailableBytes: &rootFsInfo.Available,
CapacityBytes: &rootFsInfo.Capacity,
UsedBytes: &rootFsInfo.Usage,
InodesFree: rootFsInfo.InodesFree,
Inodes: rootFsInfo.Inodes,
InodesUsed: nodeFsInodesUsed,
}, nil
}
// HasDedicatedImageFs returns true if a dedicated image filesystem exists for storing images.
// KEP Issue Number 4191: Enhanced this to allow for the containers to be separate from images.
func (p *Provider) HasDedicatedImageFs(ctx context.Context) (bool, error) {
device, err := p.containerStatsProvider.ImageFsDevice(ctx)
if err != nil {
return false, err
}
rootFsInfo, err := p.cadvisor.RootFsInfo()
if err != nil {
return false, err
}
// KEP enhancement: a dedicated image fs can mean that either the container or the image filesystem is separate from the root filesystem.
// cAdvisor reports this a bit differently than container runtimes do.
if device == rootFsInfo.Device {
imageFs, containerFs, err := p.ImageFsStats(ctx)
if err != nil {
return false, err
}
if !equalFileSystems(imageFs, containerFs) {
return true, nil
}
}
return device != rootFsInfo.Device, nil
}
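// Worked summary of the logic above (added for clarity): with rootDev the root
// filesystem device and imgDev the image filesystem device reported by the runtime,
//
//	imgDev != rootDev                          -> true  (classic dedicated imagefs)
//	imgDev == rootDev, imageFs != containerFs  -> true  (KEP-4191 split filesystems)
//	imgDev == rootDev, imageFs == containerFs  -> false (everything on the root fs)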
// HasDedicatedContainerFs returns true if a dedicated filesystem exists for storing container writable layers, separate from the image filesystem.
// KEP Issue Number 4191: Enhanced this to allow for the containers to be separate from images.
func (p *Provider) HasDedicatedContainerFs(ctx context.Context) (bool, error) {
imageFs, err := p.cadvisor.ImagesFsInfo(ctx)
if err != nil {
return false, err
}
containerFs, err := p.cadvisor.ContainerFsInfo(ctx)
if err != nil {
return false, err
}
return imageFs.Device != containerFs.Device, nil
}
func equalFileSystems(a, b *statsapi.FsStats) bool {
if a == nil || b == nil {
return false
}
if !ptr.Equal(a.AvailableBytes, b.AvailableBytes) {
return false
}
if !ptr.Equal(a.CapacityBytes, b.CapacityBytes) {
return false
}
if !ptr.Equal(a.InodesUsed, b.InodesUsed) {
return false
}
if !ptr.Equal(a.InodesFree, b.InodesFree) {
return false
}
if !ptr.Equal(a.Inodes, b.Inodes) {
return false
}
return true
}
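// Note (added for clarity): a nil argument on either side makes equalFileSystems
// return false, even though ptr.Equal itself treats two nil pointers as equal;
// callers therefore treat "stats unavailable" as "not the same filesystem".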
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
)
const (
// UnknownContainerStatuses says that all container statuses are unknown.
UnknownContainerStatuses = "UnknownContainerStatuses"
// PodCompleted says that all related containers have succeeded.
PodCompleted = "PodCompleted"
// PodFailed says that the pod has failed and as such the containers have failed.
PodFailed = "PodFailed"
// ContainersNotReady says that one or more containers are not ready.
ContainersNotReady = "ContainersNotReady"
// ContainersNotInitialized says that one or more init containers have not succeeded.
ContainersNotInitialized = "ContainersNotInitialized"
// ReadinessGatesNotReady says that one or more pod readiness gates are not ready.
ReadinessGatesNotReady = "ReadinessGatesNotReady"
)
// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
// The status of "ContainersReady" condition is true when all containers are ready.
func GenerateContainersReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// Determine whether all containers are ready.
if containerStatuses == nil {
return v1.PodCondition{
Type: v1.ContainersReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.ContainersReady),
Status: v1.ConditionFalse,
Reason: UnknownContainerStatuses,
}
}
unknownContainers := []string{}
unreadyContainers := []string{}
for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
if !containerStatus.Ready {
unreadyContainers = append(unreadyContainers, container.Name)
}
} else {
unknownContainers = append(unknownContainers, container.Name)
}
}
for _, container := range pod.Spec.Containers {
if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
if !containerStatus.Ready {
unreadyContainers = append(unreadyContainers, container.Name)
}
} else {
unknownContainers = append(unknownContainers, container.Name)
}
}
// If all containers are known and succeeded, just return PodCompleted.
if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
}
// If the pod phase is failed, explicitly set the ready condition to false for containers since they may be in the process of terminating.
if podPhase == v1.PodFailed {
return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
}
// Generate message for containers in unknown condition.
unreadyMessages := []string{}
if len(unknownContainers) > 0 {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
}
if len(unreadyContainers) > 0 {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unready status: %s", unreadyContainers))
}
unreadyMessage := strings.Join(unreadyMessages, ", ")
if unreadyMessage != "" {
return v1.PodCondition{
Type: v1.ContainersReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.ContainersReady),
Status: v1.ConditionFalse,
Reason: ContainersNotReady,
Message: unreadyMessage,
}
}
return v1.PodCondition{
Type: v1.ContainersReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.ContainersReady),
Status: v1.ConditionTrue,
}
}
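// Example of the resulting message (added for illustration): because the slices are
// formatted with %s, two unready containers named "app" and "sidecar" yield
//
//	Reason:  ContainersNotReady
//	Message: containers with unready status: [app sidecar]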
// GeneratePodReadyCondition returns "Ready" condition of a pod.
// The status of "Ready" condition is "True", if all containers in a pod are ready
// AND all matching conditions specified in the ReadinessGates have status equal to "True".
func GeneratePodReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
containersReady := GenerateContainersReadyCondition(pod, oldPodStatus, containerStatuses, podPhase)
// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
if containersReady.Status != v1.ConditionTrue {
return v1.PodCondition{
Type: v1.PodReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReady),
Status: containersReady.Status,
Reason: containersReady.Reason,
Message: containersReady.Message,
}
}
// Evaluate the corresponding conditions specified in the readiness gates.
// Generate a message if any readiness gate is not satisfied.
unreadyMessages := []string{}
for _, rg := range pod.Spec.ReadinessGates {
_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
if c == nil {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))
} else if c.Status != v1.ConditionTrue {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status))
}
}
// Set "Ready" condition to "False" if any readiness gate is not ready.
if len(unreadyMessages) != 0 {
unreadyMessage := strings.Join(unreadyMessages, ", ")
return v1.PodCondition{
Type: v1.PodReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReady),
Status: v1.ConditionFalse,
Reason: ReadinessGatesNotReady,
Message: unreadyMessage,
}
}
return v1.PodCondition{
Type: v1.PodReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReady),
Status: v1.ConditionTrue,
}
}
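// Illustrative sketch (hypothetical pod, not from the original source): a pod that
// declares a readiness gate stays unready until some controller sets the matching
// condition to "True" in its status:
//
//	pod := &v1.Pod{
//		Spec: v1.PodSpec{
//			ReadinessGates: []v1.PodReadinessGate{{ConditionType: "example.com/feature-ready"}},
//		},
//	}
//	// With no "example.com/feature-ready" condition in the supplied conditions slice,
//	// GeneratePodReadyCondition returns Status=False, Reason=ReadinessGatesNotReady.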
func isInitContainerInitialized(initContainer *v1.Container, containerStatus *v1.ContainerStatus) bool {
if podutil.IsRestartableInitContainer(initContainer) {
if containerStatus.Started == nil || !*containerStatus.Started {
return false
}
} else { // regular init container
if !containerStatus.Ready {
return false
}
}
return true
}
// GeneratePodInitializedCondition returns an Initialized condition with status True if all init
// containers in a pod have been initialized, else it returns the condition with status False.
func GeneratePodInitializedCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// Determine whether all init containers have been initialized.
if containerStatuses == nil && len(pod.Spec.InitContainers) > 0 {
return v1.PodCondition{
Type: v1.PodInitialized,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodInitialized),
Status: v1.ConditionFalse,
Reason: UnknownContainerStatuses,
}
}
unknownContainers := []string{}
incompleteContainers := []string{}
for _, container := range pod.Spec.InitContainers {
containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name)
if !ok {
unknownContainers = append(unknownContainers, container.Name)
continue
}
if !isInitContainerInitialized(&container, &containerStatus) {
incompleteContainers = append(incompleteContainers, container.Name)
}
}
// If all init containers are known and succeeded, just return PodCompleted.
if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
return v1.PodCondition{
Type: v1.PodInitialized,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodInitialized),
Status: v1.ConditionTrue,
Reason: PodCompleted,
}
}
// If there is any regular container that has started, then the pod has
// been initialized before.
// This is needed to handle the case where the pod has been initialized but
// the restartable init containers are restarting.
if kubecontainer.HasAnyRegularContainerStarted(&pod.Spec, containerStatuses) {
return v1.PodCondition{
Type: v1.PodInitialized,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodInitialized),
Status: v1.ConditionTrue,
}
}
unreadyMessages := make([]string, 0, len(unknownContainers)+len(incompleteContainers))
if len(unknownContainers) > 0 {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
}
if len(incompleteContainers) > 0 {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", incompleteContainers))
}
unreadyMessage := strings.Join(unreadyMessages, ", ")
if unreadyMessage != "" {
return v1.PodCondition{
Type: v1.PodInitialized,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodInitialized),
Status: v1.ConditionFalse,
Reason: ContainersNotInitialized,
Message: unreadyMessage,
}
}
return v1.PodCondition{
Type: v1.PodInitialized,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodInitialized),
Status: v1.ConditionTrue,
}
}
func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, podStatus *kubecontainer.PodStatus) v1.PodCondition {
newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
// if a new sandbox does not need to be created for a pod, it indicates that
// a sandbox for the pod with networking configured already exists.
// Otherwise, the kubelet needs to invoke the container runtime to create a
// fresh sandbox and configure networking for the sandbox.
if !newSandboxNeeded {
return v1.PodCondition{
Type: v1.PodReadyToStartContainers,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
Status: v1.ConditionTrue,
}
}
return v1.PodCondition{
Type: v1.PodReadyToStartContainers,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
Status: v1.ConditionFalse,
}
}
func generateContainersReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
condition := v1.PodCondition{
Type: v1.ContainersReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.ContainersReady),
Status: v1.ConditionFalse,
}
if podPhase == v1.PodFailed {
condition.Reason = PodFailed
} else if podPhase == v1.PodSucceeded {
condition.Reason = PodCompleted
}
return condition
}
func generatePodReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
condition := v1.PodCondition{
Type: v1.PodReady,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(oldPodStatus, pod.Generation, v1.PodReady),
Status: v1.ConditionFalse,
}
if podPhase == v1.PodFailed {
condition.Reason = PodFailed
} else if podPhase == v1.PodSucceeded {
condition.Reason = PodCompleted
}
return condition
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:generate mockery
package status
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"time"
clientset "k8s.io/client-go/kubernetes"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
statusutil "k8s.io/kubernetes/pkg/util/pod"
)
// A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are
// not sent to the API server.
type versionedPodStatus struct {
// version is a monotonically increasing version number (per pod).
version uint64
// Pod name & namespace, for sending updates to API server.
podName string
podNamespace string
// at is the time at which the most recent status update was detected
at time.Time
// True if the status is generated at the end of SyncTerminatedPod, or after it is completed.
podIsFinished bool
status v1.PodStatus
}
// Updates pod statuses in apiserver. Writes only when new status has changed.
// All methods are thread-safe.
type manager struct {
kubeClient clientset.Interface
podManager PodManager
// Map from pod UID to sync status of the corresponding pod.
podStatuses map[types.UID]versionedPodStatus
podResizeConditions map[types.UID]podResizeConditions
podStatusesLock sync.RWMutex
podStatusChannel chan struct{}
// Map from (mirror) pod UID to latest status version successfully sent to the API server.
// apiStatusVersions must only be accessed from the sync thread.
apiStatusVersions map[kubetypes.MirrorPodUID]uint64
podDeletionSafety PodDeletionSafetyProvider
podStartupLatencyHelper PodStartupLatencyStateHelper
}
type podResizeConditions struct {
PodResizePending *v1.PodCondition
PodResizeInProgress *v1.PodCondition
}
func (prc podResizeConditions) List() []*v1.PodCondition {
var conditions []*v1.PodCondition
if prc.PodResizePending != nil {
conditions = append(conditions, prc.PodResizePending)
}
if prc.PodResizeInProgress != nil {
conditions = append(conditions, prc.PodResizeInProgress)
}
return conditions
}
// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.
// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc.
type PodManager interface {
GetPodByUID(types.UID) (*v1.Pod, bool)
GetMirrorPodByPod(*v1.Pod) (*v1.Pod, bool)
TranslatePodUID(uid types.UID) kubetypes.ResolvedPodUID
GetUIDTranslations() (podToMirror map[kubetypes.ResolvedPodUID]kubetypes.MirrorPodUID, mirrorToPod map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID)
}
// PodStatusProvider knows how to provide status for a pod. It is intended to be used by other components
// that need to introspect the authoritative status of a pod. The PodStatusProvider represents the actual
// status of a running pod as the kubelet sees it.
type PodStatusProvider interface {
// GetPodStatus returns the cached status for the provided pod UID, as well as whether it
// was a cache hit.
GetPodStatus(uid types.UID) (v1.PodStatus, bool)
}
// PodDeletionSafetyProvider provides guarantees that a pod can be safely deleted.
type PodDeletionSafetyProvider interface {
// PodCouldHaveRunningContainers returns true if the pod could have running containers.
PodCouldHaveRunningContainers(pod *v1.Pod) bool
}
type PodStartupLatencyStateHelper interface {
RecordStatusUpdated(pod *v1.Pod)
DeletePodStartupState(podUID types.UID)
}
// Manager is the source of truth for kubelet pod status, and should be kept up-to-date with
// the latest v1.PodStatus. It also syncs updates back to the API server.
type Manager interface {
PodStatusProvider
// Start the API server status sync loop.
Start(ctx context.Context)
// SetPodStatus caches the status for the given pod, and triggers a status update.
SetPodStatus(logger klog.Logger, pod *v1.Pod, status v1.PodStatus)
// SetContainerReadiness updates the cached container status with the given readiness, and
// triggers a status update.
SetContainerReadiness(logger klog.Logger, podUID types.UID, containerID kubecontainer.ContainerID, ready bool)
// SetContainerStartup updates the cached container status with the given startup, and
// triggers a status update.
SetContainerStartup(logger klog.Logger, podUID types.UID, containerID kubecontainer.ContainerID, started bool)
// TerminatePod resets the container status for the provided pod to terminated and triggers
// a status update.
TerminatePod(logger klog.Logger, pod *v1.Pod)
// RemoveOrphanedStatuses scans the status cache and removes any entries for pods not included in
// the provided podUIDs.
RemoveOrphanedStatuses(logger klog.Logger, podUIDs map[types.UID]bool)
// GetPodResizeConditions returns cached PodStatus Resize conditions value
GetPodResizeConditions(podUID types.UID) []*v1.PodCondition
// SetPodResizePendingCondition caches the last PodResizePending condition for the pod.
SetPodResizePendingCondition(podUID types.UID, reason, message string, observedGeneration int64)
// SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod.
// This function does not update observedGeneration if the condition already exists, nor does
// it allow the reason or message to be cleared.
SetPodResizeInProgressCondition(podUID types.UID, reason, message string, observedGeneration int64)
// ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache.
ClearPodResizePendingCondition(podUID types.UID)
// ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache.
// Returns true if the condition was cleared, false if it was not set.
ClearPodResizeInProgressCondition(podUID types.UID) bool
// IsPodResizeDeferred returns true if the pod resize is currently deferred.
IsPodResizeDeferred(podUID types.UID) bool
// IsPodResizeInfeasible returns true if the pod resize is infeasible.
IsPodResizeInfeasible(podUID types.UID) bool
// BackfillPodResizeConditions backfills the status manager's resize conditions by reading them from the
// provided pods' statuses.
BackfillPodResizeConditions(pods []*v1.Pod)
}
const syncPeriod = 10 * time.Second
// NewManager returns a functional Manager.
func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper) Manager {
return &manager{
kubeClient: kubeClient,
podManager: podManager,
podStatuses: make(map[types.UID]versionedPodStatus),
podResizeConditions: make(map[types.UID]podResizeConditions),
podStatusChannel: make(chan struct{}, 1),
apiStatusVersions: make(map[kubetypes.MirrorPodUID]uint64),
podDeletionSafety: podDeletionSafety,
podStartupLatencyHelper: podStartupLatencyHelper,
}
}
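// Typical wiring (a hedged sketch; the concrete dependency values are assumptions):
//
//	// mgr := NewManager(kubeClient, podManager, deletionSafety, startupLatencyHelper)
//	// mgr.Start(ctx)                        // begins the apiserver sync loop
//	// mgr.SetPodStatus(logger, pod, status) // caches a status and triggers a sync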
// isPodStatusByKubeletEqual returns true if the given pod statuses are equal when non-kubelet-owned
// pod conditions are excluded.
// This method normalizes the statuses before comparing so that meaningless
// changes are ignored.
func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool {
oldCopy := oldStatus.DeepCopy()
newConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(status.Conditions))
oldConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(oldStatus.Conditions))
for _, c := range status.Conditions {
if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) {
newConditions[c.Type] = &c
}
}
for _, c := range oldStatus.Conditions {
if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) {
oldConditions[c.Type] = &c
}
}
if len(newConditions) != len(oldConditions) {
return false
}
for _, newCondition := range newConditions {
oldCondition := oldConditions[newCondition.Type]
if oldCondition == nil || oldCondition.Status != newCondition.Status || oldCondition.Message != newCondition.Message || oldCondition.Reason != newCondition.Reason {
return false
}
}
oldCopy.Conditions = status.Conditions
return apiequality.Semantic.DeepEqual(oldCopy, status)
}
func (m *manager) Start(ctx context.Context) {
logger := klog.FromContext(ctx)
// Don't start the status manager if we don't have a client. This will happen
// on the master, where the kubelet is responsible for bootstrapping the pods
// of the master components.
if m.kubeClient == nil {
logger.Info("Kubernetes client is nil, not starting status manager")
return
}
logger.Info("Starting to sync pod status with apiserver")
//nolint:staticcheck // SA1015 Ticker can leak since this is only called once and doesn't handle termination.
syncTicker := time.NewTicker(syncPeriod).C
// syncPod and syncBatch share the same go routine to avoid sync races.
go wait.Forever(func() {
for {
select {
case <-m.podStatusChannel:
logger.V(4).Info("Syncing updated statuses")
m.syncBatch(ctx, false)
case <-syncTicker:
logger.V(4).Info("Syncing all statuses")
m.syncBatch(ctx, true)
}
}
}, 0)
}
// GetPodResizeConditions returns the cached pod resize conditions for the given pod.
func (m *manager) GetPodResizeConditions(podUID types.UID) []*v1.PodCondition {
m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock()
return m.podResizeConditions[podUID].List()
}
// SetPodResizePendingCondition caches the last PodResizePending condition for the pod.
func (m *manager) SetPodResizePendingCondition(podUID types.UID, reason, message string, observedGeneration int64) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
alreadyPending := m.podResizeConditions[podUID].PodResizePending != nil
m.podResizeConditions[podUID] = podResizeConditions{
PodResizePending: updatedPodResizeCondition(v1.PodResizePending, m.podResizeConditions[podUID].PodResizePending, reason, message, observedGeneration),
PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress,
}
if !alreadyPending {
m.recordPendingResizeCount()
}
}
// SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod.
// This function does not update observedGeneration if the condition already exists, nor does
// it allow the reason or message to be cleared.
func (m *manager) SetPodResizeInProgressCondition(podUID types.UID, reason, message string, observedGeneration int64) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
alreadyInProgress := m.podResizeConditions[podUID].PodResizeInProgress != nil
if c := m.podResizeConditions[podUID].PodResizeInProgress; c != nil {
// Preserve the old reason, message if they exist.
if reason == "" && message == "" {
reason = c.Reason
message = c.Message
}
// The observedGeneration is always preserved. To update it, the caller must
// clear the existing condition first.
observedGeneration = c.ObservedGeneration
}
m.podResizeConditions[podUID] = podResizeConditions{
PodResizeInProgress: updatedPodResizeCondition(v1.PodResizeInProgress, m.podResizeConditions[podUID].PodResizeInProgress, reason, message, observedGeneration),
PodResizePending: m.podResizeConditions[podUID].PodResizePending,
}
if !alreadyInProgress {
m.recordInProgressResizeCount()
}
}
// ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache.
func (m *manager) ClearPodResizePendingCondition(podUID types.UID) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
if m.podResizeConditions[podUID].PodResizePending == nil {
return
}
m.podResizeConditions[podUID] = podResizeConditions{
PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress,
PodResizePending: nil,
}
m.recordPendingResizeCount()
}
// ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache.
// Returns true if the condition was cleared, false if it was not set.
func (m *manager) ClearPodResizeInProgressCondition(podUID types.UID) bool {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
if m.podResizeConditions[podUID].PodResizeInProgress == nil {
return false
}
m.podResizeConditions[podUID] = podResizeConditions{
PodResizePending: m.podResizeConditions[podUID].PodResizePending,
PodResizeInProgress: nil,
}
m.recordInProgressResizeCount()
return true
}
func (m *manager) BackfillPodResizeConditions(pods []*v1.Pod) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
for _, pod := range pods {
for _, c := range pod.Status.Conditions {
switch c.Type {
case v1.PodResizePending:
newCondition := updatedPodResizeCondition(v1.PodResizePending, nil, c.Reason, c.Message, c.ObservedGeneration)
oldConditions := m.podResizeConditions[pod.UID]
m.podResizeConditions[pod.UID] = podResizeConditions{
PodResizePending: newCondition,
PodResizeInProgress: oldConditions.PodResizeInProgress,
}
case v1.PodResizeInProgress:
newCondition := updatedPodResizeCondition(v1.PodResizeInProgress, nil, c.Reason, c.Message, c.ObservedGeneration)
oldConditions := m.podResizeConditions[pod.UID]
m.podResizeConditions[pod.UID] = podResizeConditions{
PodResizeInProgress: newCondition,
PodResizePending: oldConditions.PodResizePending,
}
}
}
}
m.recordPendingResizeCount()
m.recordInProgressResizeCount()
}
// IsPodResizeDeferred returns true if the pod resize is currently deferred.
func (m *manager) IsPodResizeDeferred(podUID types.UID) bool {
m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock()
return m.podResizeConditions[podUID].PodResizePending != nil && m.podResizeConditions[podUID].PodResizePending.Reason == v1.PodReasonDeferred
}
// IsPodResizeInfeasible returns true if the pod resize is currently infeasible.
func (m *manager) IsPodResizeInfeasible(podUID types.UID) bool {
m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock()
return m.podResizeConditions[podUID].PodResizePending != nil && m.podResizeConditions[podUID].PodResizePending.Reason == v1.PodReasonInfeasible
}
func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock()
status, ok := m.podStatuses[types.UID(m.podManager.TranslatePodUID(uid))]
return status.status, ok
}
func (m *manager) SetPodStatus(logger klog.Logger, pod *v1.Pod, status v1.PodStatus) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
// Make sure we're caching a deep copy.
status = *status.DeepCopy()
// Set the observedGeneration for this pod status.
status.ObservedGeneration = podutil.CalculatePodStatusObservedGeneration(pod)
// Force a status update if deletion timestamp is set. This is necessary
// because if the pod is in the non-running state, the pod worker still
// needs to be able to trigger an update and/or deletion.
m.updateStatusInternal(logger, pod, status, pod.DeletionTimestamp != nil, false)
}
func (m *manager) SetContainerReadiness(logger klog.Logger, podUID types.UID, containerID kubecontainer.ContainerID, ready bool) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
pod, ok := m.podManager.GetPodByUID(podUID)
if !ok {
logger.V(4).Info("Pod has been deleted, no need to update readiness", "podUID", podUID)
return
}
oldStatus, found := m.podStatuses[pod.UID]
if !found {
logger.Info("Container readiness changed before pod has synced",
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
// Find the container to update.
containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String())
if !ok {
logger.Info("Container readiness changed for unknown container",
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
if containerStatus.Ready == ready {
logger.V(4).Info("Container readiness unchanged",
"ready", ready,
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
// Make sure we're not updating the cached version.
status := *oldStatus.status.DeepCopy()
containerStatus, _, _ = findContainerStatus(&status, containerID.String())
containerStatus.Ready = ready
// updateConditionFunc updates the corresponding type of condition
updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) {
conditionIndex := -1
for i, condition := range status.Conditions {
if condition.Type == conditionType {
conditionIndex = i
break
}
}
if conditionIndex != -1 {
status.Conditions[conditionIndex] = condition
} else {
logger.Info("PodStatus missing condition type", "conditionType", conditionType, "status", status)
status.Conditions = append(status.Conditions, condition)
}
}
allContainerStatuses := append(status.InitContainerStatuses, status.ContainerStatuses...)
updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(pod, &oldStatus.status, status.Conditions, allContainerStatuses, status.Phase))
updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(pod, &oldStatus.status, allContainerStatuses, status.Phase))
m.updateStatusInternal(logger, pod, status, false, false)
}
func (m *manager) SetContainerStartup(logger klog.Logger, podUID types.UID, containerID kubecontainer.ContainerID, started bool) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
pod, ok := m.podManager.GetPodByUID(podUID)
if !ok {
logger.V(4).Info("Pod has been deleted, no need to update startup", "podUID", string(podUID))
return
}
oldStatus, found := m.podStatuses[pod.UID]
if !found {
logger.Info("Container startup changed before pod has synced",
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
// Find the container to update.
containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String())
if !ok {
logger.Info("Container startup changed for unknown container",
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
if containerStatus.Started != nil && *containerStatus.Started == started {
logger.V(4).Info("Container startup unchanged",
"pod", klog.KObj(pod),
"containerID", containerID.String())
return
}
// Make sure we're not updating the cached version.
status := *oldStatus.status.DeepCopy()
containerStatus, _, _ = findContainerStatus(&status, containerID.String())
containerStatus.Started = &started
m.updateStatusInternal(logger, pod, status, false, false)
}
func findContainerStatus(status *v1.PodStatus, containerID string) (containerStatus *v1.ContainerStatus, init bool, ok bool) {
// Find the container to update.
for i, c := range status.ContainerStatuses {
if c.ContainerID == containerID {
return &status.ContainerStatuses[i], false, true
}
}
for i, c := range status.InitContainerStatuses {
if c.ContainerID == containerID {
return &status.InitContainerStatuses[i], true, true
}
}
return nil, false, false
}
// TerminatePod ensures that the status of containers is properly defaulted at the end of the pod
// lifecycle. As the Kubelet must reconcile with the container runtime to observe container status
// there is always the possibility we are unable to retrieve one or more container statuses due to
// garbage collection, admin action, or loss of temporary data on a restart. This method ensures
// that any absent container status is treated as a failure so that we do not incorrectly describe
// the pod as successful. If we have not yet initialized the pod in the presence of init containers,
// the init container failure status is sufficient to describe the pod as failing, and we do not need
// to override waiting containers (unless there is evidence the pod previously started those containers).
// It also makes sure that pods are transitioned to a terminal phase (Failed or Succeeded) before
// their deletion.
func (m *manager) TerminatePod(logger klog.Logger, pod *v1.Pod) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
// ensure that all containers have a terminated state - because we do not know whether the container
// was successful, always report an error
oldStatus := &pod.Status
cachedStatus, isCached := m.podStatuses[pod.UID]
if isCached {
oldStatus = &cachedStatus.status
}
status := *oldStatus.DeepCopy()
// once a pod has initialized, any missing status is treated as a failure
if hasPodInitialized(logger, pod) {
for i := range status.ContainerStatuses {
if status.ContainerStatuses[i].State.Terminated != nil {
continue
}
status.ContainerStatuses[i].State = v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: kubecontainer.ContainerReasonStatusUnknown,
Message: "The container could not be located when the pod was terminated",
ExitCode: 137,
},
}
}
}
// Init containers, except for the trailing suffix that shows no evidence of a container start,
// are marked as failed if they have not already terminated.
for i := range initializedContainers(status.InitContainerStatuses) {
if status.InitContainerStatuses[i].State.Terminated != nil {
continue
}
status.InitContainerStatuses[i].State = v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
Reason: kubecontainer.ContainerReasonStatusUnknown,
Message: "The container could not be located when the pod was terminated",
ExitCode: 137,
},
}
}
// Make sure all pods are transitioned to a terminal phase.
// TODO(#116484): Also assign terminal phase to static pods.
if !kubetypes.IsStaticPod(pod) {
switch status.Phase {
case v1.PodSucceeded, v1.PodFailed:
// do nothing, already terminal
case v1.PodPending, v1.PodRunning:
if status.Phase == v1.PodRunning && isCached {
logger.Info("Terminal running pod should have already been marked as failed, programmer error", "pod", klog.KObj(pod), "podUID", pod.UID)
}
logger.V(3).Info("Marking terminal pod as failed", "oldPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID)
status.Phase = v1.PodFailed
default:
logger.Error(fmt.Errorf("unknown phase: %v", status.Phase), "Unknown phase, programmer error", "pod", klog.KObj(pod), "podUID", pod.UID)
status.Phase = v1.PodFailed
}
}
logger.V(5).Info("TerminatePod calling updateStatusInternal", "pod", klog.KObj(pod), "podUID", pod.UID)
m.updateStatusInternal(logger, pod, status, true, true)
}
// hasPodInitialized returns true if the pod has evidence of ever having been initialized: it has no
// init containers, a regular container has progressed beyond the waiting state, or its final init
// container has completed. When false, waiting containers should not be transitioned to terminated status.
func hasPodInitialized(logger klog.Logger, pod *v1.Pod) bool {
// a pod without init containers is always initialized
if len(pod.Spec.InitContainers) == 0 {
return true
}
// if any container has ever moved out of waiting state, the pod has initialized
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil || status.State.Waiting == nil {
return true
}
}
// if the last init container has ever completed with a zero exit code, the pod is initialized
if l := len(pod.Status.InitContainerStatuses); l > 0 {
container, ok := kubeutil.GetContainerByIndex(pod.Spec.InitContainers, pod.Status.InitContainerStatuses, l-1)
if !ok {
logger.V(4).Info("Mismatch between pod spec and status, likely programmer error", "pod", klog.KObj(pod), "containerName", container.Name)
return false
}
containerStatus := pod.Status.InitContainerStatuses[l-1]
if podutil.IsRestartableInitContainer(&container) {
if containerStatus.State.Running != nil &&
containerStatus.Started != nil && *containerStatus.Started {
return true
}
} else { // regular init container
if state := containerStatus.LastTerminationState; state.Terminated != nil && state.Terminated.ExitCode == 0 {
return true
}
if state := containerStatus.State; state.Terminated != nil && state.Terminated.ExitCode == 0 {
return true
}
}
}
// otherwise the pod has no record of being initialized
return false
}
// initializedContainers returns all statuses except for the trailing suffix of containers that are in the
// Waiting state with no record of a prior termination, i.e. the set of containers that have attempted to
// start at least once. If all containers are Waiting, the first container is always returned.
func initializedContainers(containers []v1.ContainerStatus) []v1.ContainerStatus {
for i := len(containers) - 1; i >= 0; i-- {
if containers[i].State.Waiting == nil || containers[i].LastTerminationState.Terminated != nil {
return containers[0 : i+1]
}
}
// always return at least one container
if len(containers) > 0 {
return containers[0:1]
}
return nil
}
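// Worked example (added for clarity): given statuses [A(terminated), B(waiting),
// C(waiting)] with no last-termination records on B and C, the loop trims the
// trailing Waiting suffix and returns [A]; if all of A, B, C were Waiting it would
// also return [A], since at least one container is always returned.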
// checkContainerStateTransition ensures that no container is trying to transition
// from a terminated to non-terminated state, which is illegal and indicates a
// logical error in the kubelet.
func checkContainerStateTransition(oldStatuses, newStatuses *v1.PodStatus, podSpec *v1.PodSpec) error {
// If we should always restart, containers are allowed to leave the terminated state
if podSpec.RestartPolicy == v1.RestartPolicyAlways {
return nil
}
for _, oldStatus := range oldStatuses.ContainerStatuses {
// Skip any container that wasn't terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == v1.RestartPolicyOnFailure {
continue
}
// Skip any container that is allowed to restart by container restart policy
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
restartable := false
for _, container := range podSpec.Containers {
if container.Name == oldStatus.Name && podutil.ContainerShouldRestart(container, *podSpec, oldStatus.State.Terminated.ExitCode) {
restartable = true
}
}
if restartable {
continue
}
}
for _, newStatus := range newStatuses.ContainerStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
return fmt.Errorf("terminated container %v attempted illegal transition to non-terminated state", newStatus.Name)
}
}
}
for i, oldStatus := range oldStatuses.InitContainerStatuses {
initContainer, ok := kubeutil.GetContainerByIndex(podSpec.InitContainers, oldStatuses.InitContainerStatuses, i)
if !ok {
return fmt.Errorf("found mismatch between pod spec and status, container: %v", oldStatus.Name)
}
// Skip any restartable init container as it always is allowed to restart
if podutil.IsRestartableInitContainer(&initContainer) {
continue
}
// Skip any container that wasn't terminated
if oldStatus.State.Terminated == nil {
continue
}
// Skip any container that failed but is allowed to restart
if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == v1.RestartPolicyOnFailure {
continue
}
// Skip any container that is allowed to restart by container restart policy
if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
restartable := false
for _, container := range podSpec.InitContainers {
if container.Name == oldStatus.Name && podutil.ContainerShouldRestart(container, *podSpec, oldStatus.State.Terminated.ExitCode) {
restartable = true
}
}
if restartable {
continue
}
}
for _, newStatus := range newStatuses.InitContainerStatuses {
if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
return fmt.Errorf("terminated init container %v attempted illegal transition to non-terminated state", newStatus.Name)
}
}
}
return nil
}
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(logger klog.Logger, pod *v1.Pod, status v1.PodStatus, forceUpdate, podIsFinished bool) {
var oldStatus v1.PodStatus
cachedStatus, isCached := m.podStatuses[pod.UID]
if isCached {
oldStatus = cachedStatus.status
// TODO(#116484): Also assign terminal phase to static pods.
if !kubetypes.IsStaticPod(pod) {
if cachedStatus.podIsFinished && !podIsFinished {
logger.Info("Got unexpected podIsFinished=false, while podIsFinished=true in status cache, programmer error", "pod", klog.KObj(pod))
podIsFinished = true
}
}
} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
oldStatus = mirrorPod.Status
} else {
oldStatus = pod.Status
}
// Check for illegal state transition in containers
if err := checkContainerStateTransition(&oldStatus, &status, &pod.Spec); err != nil {
logger.Error(err, "Status update on pod aborted", "pod", klog.KObj(pod))
return
}
// Set ContainersReadyCondition.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.ContainersReady)
// Set ReadyCondition.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.PodReady)
// Set InitializedCondition.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.PodInitialized)
// Set PodReadyToStartContainersCondition.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.PodReadyToStartContainers)
// Set PodScheduledCondition.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.PodScheduled)
// Set DisruptionTarget.LastTransitionTime.
updateLastTransitionTime(&status, &oldStatus, v1.DisruptionTarget)
// ensure that the start time does not change across updates.
if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
status.StartTime = oldStatus.StartTime
} else if status.StartTime.IsZero() {
// if the status has no start time, we need to set an initial time
now := metav1.Now()
status.StartTime = &now
}
// prevent sending unnecessary patches
if oldStatus.ObservedGeneration > status.ObservedGeneration {
status.ObservedGeneration = oldStatus.ObservedGeneration
}
normalizeStatus(pod, &status)
// Perform some more extensive logging of container termination state to assist in
// debugging production races (generally not needed).
if loggerV := logger.V(5); loggerV.Enabled() {
var containers []string
for _, s := range append(append([]v1.ContainerStatus(nil), status.InitContainerStatuses...), status.ContainerStatuses...) {
var current, previous string
switch {
case s.State.Running != nil:
current = "running"
case s.State.Waiting != nil:
current = "waiting"
case s.State.Terminated != nil:
current = fmt.Sprintf("terminated=%d", s.State.Terminated.ExitCode)
default:
current = "unknown"
}
switch {
case s.LastTerminationState.Running != nil:
previous = "running"
case s.LastTerminationState.Waiting != nil:
previous = "waiting"
case s.LastTerminationState.Terminated != nil:
previous = fmt.Sprintf("terminated=%d", s.LastTerminationState.Terminated.ExitCode)
default:
previous = "<none>"
}
containers = append(containers, fmt.Sprintf("(%s state=%s previous=%s)", s.Name, current, previous))
}
sort.Strings(containers)
loggerV.Info("updateStatusInternal", "version", cachedStatus.version+1, "podIsFinished", podIsFinished, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
}
// The intent here is to prevent concurrent updates to a pod's status from
// clobbering each other so the phase of a pod progresses monotonically.
if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate {
logger.V(3).Info("Ignoring same status for pod", "pod", klog.KObj(pod), "status", status)
return
}
newStatus := versionedPodStatus{
status: status,
version: cachedStatus.version + 1,
podName: pod.Name,
podNamespace: pod.Namespace,
podIsFinished: podIsFinished,
}
// Multiple status updates can be generated before we update the API server,
// so we track the time from the first status update until we retire it to
// the API.
if cachedStatus.at.IsZero() {
newStatus.at = time.Now()
} else {
newStatus.at = cachedStatus.at
}
m.podStatuses[pod.UID] = newStatus
select {
case m.podStatusChannel <- struct{}{}:
default:
// there's already a status update pending
}
}
// updateLastTransitionTime updates the LastTransitionTime of a pod condition.
func updateLastTransitionTime(status, oldStatus *v1.PodStatus, conditionType v1.PodConditionType) {
_, condition := podutil.GetPodCondition(status, conditionType)
if condition == nil {
return
}
// Need to set LastTransitionTime.
lastTransitionTime := metav1.Now()
_, oldCondition := podutil.GetPodCondition(oldStatus, conditionType)
if oldCondition != nil && condition.Status == oldCondition.Status {
lastTransitionTime = oldCondition.LastTransitionTime
}
condition.LastTransitionTime = lastTransitionTime
}
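// Behavioral note (added for clarity): the transition time only moves when the
// condition's Status actually flips; e.g. if the old and new Ready conditions are
// both "True", the old LastTransitionTime is carried forward unchanged.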
// deletePodStatus simply removes the given pod from the status cache.
func (m *manager) deletePodStatus(uid types.UID) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
delete(m.podStatuses, uid)
m.podStartupLatencyHelper.DeletePodStartupState(uid)
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
if _, exists := m.podResizeConditions[uid]; exists {
delete(m.podResizeConditions, uid)
m.recordInProgressResizeCount()
m.recordPendingResizeCount()
}
}
}
// TODO(filipg): It'd be cleaner if we can do this without signal from user.
func (m *manager) RemoveOrphanedStatuses(logger klog.Logger, podUIDs map[types.UID]bool) {
m.podStatusesLock.Lock()
defer m.podStatusesLock.Unlock()
for key := range m.podStatuses {
if _, ok := podUIDs[key]; !ok {
logger.V(5).Info("Removing pod from status map", "podUID", key)
delete(m.podStatuses, key)
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
if _, exists := m.podResizeConditions[key]; exists {
delete(m.podResizeConditions, key)
m.recordInProgressResizeCount()
m.recordPendingResizeCount()
}
}
}
}
}
// syncBatch syncs pod statuses with the apiserver. It returns the number of syncs
// attempted, which is used in tests.
func (m *manager) syncBatch(ctx context.Context, all bool) int {
logger := klog.FromContext(ctx)
type podSync struct {
podUID types.UID
statusUID kubetypes.MirrorPodUID
status versionedPodStatus
}
var updatedStatuses []podSync
podToMirror, mirrorToPod := m.podManager.GetUIDTranslations()
func() { // Critical section
m.podStatusesLock.RLock()
defer m.podStatusesLock.RUnlock()
// Clean up orphaned versions.
if all {
for uid := range m.apiStatusVersions {
_, hasPod := m.podStatuses[types.UID(uid)]
_, hasMirror := mirrorToPod[uid]
if !hasPod && !hasMirror {
delete(m.apiStatusVersions, uid)
}
}
}
// Decide which pods need status updates.
for uid, status := range m.podStatuses {
// translate the pod UID (source) to the status UID (API pod) -
// static pods are identified in source by pod UID but tracked in the
// API via the uid of the mirror pod
uidOfStatus := kubetypes.MirrorPodUID(uid)
if mirrorUID, ok := podToMirror[kubetypes.ResolvedPodUID(uid)]; ok {
if mirrorUID == "" {
logger.V(5).Info("Static pod does not have a corresponding mirror pod; skipping",
"podUID", uid,
"pod", klog.KRef(status.podNamespace, status.podName))
continue
}
uidOfStatus = mirrorUID
}
// if a new status update has been delivered, trigger an update, otherwise the
// pod can wait for the next bulk check (which performs reconciliation as well)
if !all {
if m.apiStatusVersions[uidOfStatus] >= status.version {
continue
}
updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status})
continue
}
// Ensure that any new status, or mismatched status, or pod that is ready for
// deletion gets updated. If a status update fails we retry the next time any
// other pod is updated.
if m.needsUpdate(logger, types.UID(uidOfStatus), status) {
updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status})
} else if m.needsReconcile(logger, uid, status.status) {
// Delete the apiStatusVersions entry here to force an update of the pod status.
// In most cases the deleted entry is re-filled soon afterwards by the following
// syncPod(), provided that syncPod() syncs the update successfully.
delete(m.apiStatusVersions, uidOfStatus)
updatedStatuses = append(updatedStatuses, podSync{uid, uidOfStatus, status})
}
}
}()
for _, update := range updatedStatuses {
logger.V(5).Info("Sync pod status", "podUID", update.podUID, "statusUID", update.statusUID, "version", update.status.version)
m.syncPod(ctx, update.podUID, update.status)
}
return len(updatedStatuses)
}
// syncPod syncs the given status with the API server. The caller must not hold the status lock.
func (m *manager) syncPod(ctx context.Context, uid types.UID, status versionedPodStatus) {
logger := klog.FromContext(ctx)
// TODO: make me easier to express from client code
pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(ctx, status.podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
logger.V(3).Info("Pod does not exist on the server",
"podUID", uid,
"pod", klog.KRef(status.podNamespace, status.podName))
// If the Pod is deleted the status will be cleared in
// RemoveOrphanedStatuses, so we just ignore the update here.
return
}
if err != nil {
logger.Error(err, "Failed to get status for pod",
"podUID", uid,
"pod", klog.KRef(status.podNamespace, status.podName))
return
}
translatedUID := m.podManager.TranslatePodUID(pod.UID)
// Type convert original uid just for the purpose of comparison.
if len(translatedUID) > 0 && translatedUID != kubetypes.ResolvedPodUID(uid) {
logger.V(2).Info("Pod was deleted and then recreated, skipping status update",
"pod", klog.KObj(pod),
"oldPodUID", uid,
"podUID", translatedUID)
m.deletePodStatus(uid)
return
}
mergedStatus := mergePodStatus(pod, pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod))
newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(ctx, m.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, mergedStatus)
logger.V(3).Info("Patch status for pod", "pod", klog.KObj(pod), "podUID", uid, "patch", string(patchBytes))
if err != nil {
logger.Error(err, "Failed to update status for pod", "pod", klog.KObj(pod))
return
}
if unchanged {
logger.V(3).Info("Status for pod is up-to-date", "pod", klog.KObj(pod), "statusVersion", status.version)
} else {
logger.V(3).Info("Status for pod updated successfully", "pod", klog.KObj(pod), "statusVersion", status.version, "status", mergedStatus)
pod = newPod
// We pass a new object (result of API call which contains updated ResourceVersion)
m.podStartupLatencyHelper.RecordStatusUpdated(pod)
}
// measure how long the status update took to propagate from generation to update on the server
if status.at.IsZero() {
logger.V(3).Info("Pod had no status time set", "pod", klog.KObj(pod), "podUID", uid, "version", status.version)
} else {
duration := time.Since(status.at).Truncate(time.Millisecond)
metrics.PodStatusSyncDuration.Observe(duration.Seconds())
}
m.apiStatusVersions[kubetypes.MirrorPodUID(pod.UID)] = status.version
// We don't handle graceful deletion of mirror pods.
if m.canBeDeleted(logger, pod, status.status, status.podIsFinished) {
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: new(int64),
// Use the pod UID as the precondition for deletion to prevent deleting a
// newly created pod with the same name and namespace.
Preconditions: metav1.NewUIDPreconditions(string(pod.UID)),
}
err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, deleteOptions)
if err != nil {
logger.Info("Failed to delete status for pod", "pod", klog.KObj(pod), "err", err)
return
}
logger.V(3).Info("Pod fully terminated and removed from etcd", "pod", klog.KObj(pod))
m.deletePodStatus(uid)
}
}
// needsUpdate returns whether the status is stale for the given pod UID.
// This method is not thread safe, and must only be accessed by the sync thread.
func (m *manager) needsUpdate(logger klog.Logger, uid types.UID, status versionedPodStatus) bool {
latest, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(uid)]
if !ok || latest < status.version {
return true
}
pod, ok := m.podManager.GetPodByUID(uid)
if !ok {
return false
}
return m.canBeDeleted(logger, pod, status.status, status.podIsFinished)
}
func (m *manager) canBeDeleted(logger klog.Logger, pod *v1.Pod, status v1.PodStatus, podIsFinished bool) bool {
if pod.DeletionTimestamp == nil || kubetypes.IsMirrorPod(pod) {
return false
}
// Delay deletion of pods until the phase is terminal, based on pod.Status
// which comes from pod manager.
if !podutil.IsPodPhaseTerminal(pod.Status.Phase) {
// For debugging purposes we also log the kubelet's local phase, when the deletion is delayed.
logger.V(3).Info("Delaying pod deletion as the phase is non-terminal", "phase", pod.Status.Phase, "localPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID)
return false
}
// If this is an update completing pod termination then we know the pod termination is finished.
if podIsFinished {
logger.V(3).Info("The pod termination is finished as SyncTerminatedPod completes its execution", "phase", pod.Status.Phase, "localPhase", status.Phase, "pod", klog.KObj(pod), "podUID", pod.UID)
return true
}
return false
}
// needsReconcile compares the given status with the status in the pod manager (which
// in fact comes from the apiserver) and returns whether the status needs to be reconciled with
// the apiserver. When the pod status is inconsistent between the apiserver and the kubelet,
// the kubelet should forcibly send an update to reconcile the inconsistency, because the kubelet
// should be the source of truth for pod status.
// NOTE(random-liu): It's simpler to pass in mirror pod uid and get mirror pod by uid, but
// now the pod manager only supports getting mirror pod by static pod, so we have to pass
// static pod uid here.
// TODO(random-liu): Simplify the logic when mirror pod manager is added.
func (m *manager) needsReconcile(logger klog.Logger, uid types.UID, status v1.PodStatus) bool {
// The pod could be a static pod, so we should translate first.
pod, ok := m.podManager.GetPodByUID(uid)
if !ok {
logger.V(4).Info("Pod has been deleted, no need to reconcile", "podUID", uid)
return false
}
// If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us.
if kubetypes.IsStaticPod(pod) {
mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod)
if !ok {
logger.V(4).Info("Static pod has no corresponding mirror pod, no need to reconcile", "pod", klog.KObj(pod))
return false
}
pod = mirrorPod
}
podStatus := pod.Status.DeepCopy()
normalizeStatus(pod, podStatus)
if isPodStatusByKubeletEqual(podStatus, &status) {
// If the status from the source is the same with the cached status,
// reconcile is not needed. Just return.
return false
}
logger.V(3).Info("Pod status is inconsistent with cached status for pod, a reconciliation should be triggered",
"pod", klog.KObj(pod),
"statusDiff", diff.Diff(podStatus, &status))
return true
}
// normalizeStatus normalizes nanosecond precision timestamps in podStatus
// down to second precision (*RFC3339NANO* -> *RFC3339*). This must be done
// before comparing podStatus to the status returned by apiserver because
// apiserver does not support RFC3339NANO.
// Related issue #15262/PR #15263 to move apiserver to RFC3339NANO is closed.
func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
bytesPerStatus := kubecontainer.MaxPodTerminationMessageLogLength
if containers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers) + len(pod.Spec.EphemeralContainers); containers > 0 {
bytesPerStatus = bytesPerStatus / containers
}
normalizeTimeStamp := func(t *metav1.Time) {
*t = t.Rfc3339Copy()
}
normalizeContainerState := func(c *v1.ContainerState) {
if c.Running != nil {
normalizeTimeStamp(&c.Running.StartedAt)
}
if c.Terminated != nil {
normalizeTimeStamp(&c.Terminated.StartedAt)
normalizeTimeStamp(&c.Terminated.FinishedAt)
if len(c.Terminated.Message) > bytesPerStatus {
c.Terminated.Message = c.Terminated.Message[:bytesPerStatus]
}
}
}
if status.StartTime != nil {
normalizeTimeStamp(status.StartTime)
}
for i := range status.Conditions {
condition := &status.Conditions[i]
normalizeTimeStamp(&condition.LastProbeTime)
normalizeTimeStamp(&condition.LastTransitionTime)
}
normalizeContainerStatuses := func(containerStatuses []v1.ContainerStatus) {
for i := range containerStatuses {
cstatus := &containerStatuses[i]
normalizeContainerState(&cstatus.State)
normalizeContainerState(&cstatus.LastTerminationState)
}
}
normalizeContainerStatuses(status.ContainerStatuses)
sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
normalizeContainerStatuses(status.InitContainerStatuses)
kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses)
normalizeContainerStatuses(status.EphemeralContainerStatuses)
sort.Sort(kubetypes.SortedContainerStatuses(status.EphemeralContainerStatuses))
return status
}
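// Illustrative sketch (not part of the original source): normalizeStatus drops
// sub-second precision so a kubelet-generated status compares equal to what the
// apiserver returns. Assuming a pod status with a nanosecond-precision start time:
//
//	st := metav1.NewTime(time.Date(2024, 1, 1, 12, 0, 0, 123456789, time.UTC))
//	status := &v1.PodStatus{StartTime: &st}
//	normalizeStatus(pod, status)
//	// status.StartTime is now 2024-01-01T12:00:00Z; the nanoseconds were truncated.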
// mergePodStatus merges oldPodStatus and newPodStatus, preserving pod conditions
// not owned by kubelet and ensuring that a terminal phase transition only happens
// after all running containers have terminated. This method does not modify the old status.
func mergePodStatus(pod *v1.Pod, oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus {
podConditions := make([]v1.PodCondition, 0, len(oldPodStatus.Conditions)+len(newPodStatus.Conditions))
for _, c := range oldPodStatus.Conditions {
if !kubetypes.PodConditionByKubelet(c.Type) {
podConditions = append(podConditions, c)
}
}
transitioningToTerminalPhase := !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase)
for _, c := range newPodStatus.Conditions {
if kubetypes.PodConditionByKubelet(c.Type) {
podConditions = append(podConditions, c)
} else if kubetypes.PodConditionSharedByKubelet(c.Type) {
// we replace or append all the "shared by kubelet" conditions
if c.Type == v1.DisruptionTarget {
// guard the update of the DisruptionTarget condition with a check to ensure
// it will only be sent once all containers have terminated and the phase
// is terminal. This avoids sending an unnecessary patch request to add
// the condition if the actual status phase transition is delayed.
if transitioningToTerminalPhase && !couldHaveRunningContainers {
// update the LastTransitionTime again here because the older transition
// time set in updateStatusInternal is likely stale as sending of
// the condition was delayed until all pod's containers have terminated.
updateLastTransitionTime(&newPodStatus, &oldPodStatus, c.Type)
if _, c := podutil.GetPodConditionFromList(newPodStatus.Conditions, c.Type); c != nil {
// for shared conditions we update or append in podConditions
podConditions = statusutil.ReplaceOrAppendPodCondition(podConditions, c)
}
}
}
}
}
newPodStatus.Conditions = podConditions
// ResourceClaimStatuses is not owned and not modified by kubelet.
newPodStatus.ResourceClaimStatuses = oldPodStatus.ResourceClaimStatuses
// ExtendedResourceClaimStatus is not owned and not modified by kubelet.
newPodStatus.ExtendedResourceClaimStatus = oldPodStatus.ExtendedResourceClaimStatus
// Delay transitioning a pod to a terminal status unless the pod is actually terminal.
// The Kubelet should never transition a pod to terminal status that could have running
// containers and thus actively be leveraging exclusive resources. Note that resources
// like volumes are reconciled by a subsystem in the Kubelet and will converge if a new
// pod reuses an exclusive resource (unmount -> free -> mount), which means we do not
// need to wait for those resources to be detached by the Kubelet. In general, resources
// the Kubelet exclusively owns must be released prior to a pod being reported terminal,
// while resources that have participating components above the API use the pod's
// transition to a terminal phase (or full deletion) to release those resources.
if transitioningToTerminalPhase {
if couldHaveRunningContainers {
newPodStatus.Phase = oldPodStatus.Phase
newPodStatus.Reason = oldPodStatus.Reason
newPodStatus.Message = oldPodStatus.Message
}
}
// If the new phase is terminal, explicitly set the ready condition to false for v1.PodReady and v1.ContainersReady.
// It may take some time for kubelet to reconcile the ready condition, so explicitly set ready conditions to false if the phase is terminal.
// This is done to ensure kubelet does not report a status update with terminal pod phase and ready=true.
// See https://issues.k8s.io/108594 for more details.
if podutil.IsPodPhaseTerminal(newPodStatus.Phase) {
if podutil.IsPodReadyConditionTrue(newPodStatus) || podutil.IsContainersReadyConditionTrue(newPodStatus) {
containersReadyCondition := generateContainersReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
podutil.UpdatePodCondition(&newPodStatus, &containersReadyCondition)
podReadyCondition := generatePodReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
podutil.UpdatePodCondition(&newPodStatus, &podReadyCondition)
}
}
return newPodStatus
}
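// Illustrative sketch (not part of the original source): conditions owned by
// other controllers survive the merge, while kubelet-owned conditions come from
// the new status. Assuming a hypothetical readiness-gate condition type:
//
//	oldStatus := v1.PodStatus{Conditions: []v1.PodCondition{{Type: "example.com/gate", Status: v1.ConditionTrue}}}
//	newStatus := v1.PodStatus{Phase: v1.PodRunning, Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}}
//	merged := mergePodStatus(pod, oldStatus, newStatus, true)
//	// merged.Conditions carries both "example.com/gate" (preserved from oldStatus)
//	// and PodReady (taken from newStatus).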
// NeedToReconcilePodReadiness returns whether the pod "Ready" condition needs to be reconciled
func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
if len(pod.Spec.ReadinessGates) == 0 {
return false
}
podReadyCondition := GeneratePodReadyCondition(pod, &pod.Status, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
i, curCondition := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady)
// Only reconcile if "Ready" condition is present and Status or Message is not expected
if i >= 0 && (curCondition.Status != podReadyCondition.Status || curCondition.Message != podReadyCondition.Message) {
return true
}
return false
}
func updatedPodResizeCondition(conditionType v1.PodConditionType, oldCondition *v1.PodCondition, reason, message string, observedGeneration int64) *v1.PodCondition {
now := metav1.NewTime(time.Now())
var lastTransitionTime metav1.Time
if oldCondition == nil || oldCondition.Reason != reason {
lastTransitionTime = now
} else {
lastTransitionTime = oldCondition.LastTransitionTime
}
return &v1.PodCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastProbeTime: now,
LastTransitionTime: lastTransitionTime,
ObservedGeneration: observedGeneration,
Reason: reason,
Message: message,
}
}
// recordPendingResizeCount sets the pending resize metric.
func (m *manager) recordPendingResizeCount() {
pendingResizeCount := make(map[string]int)
for _, conditions := range m.podResizeConditions {
if conditions.PodResizePending != nil {
pendingResizeCount[strings.ToLower(conditions.PodResizePending.Reason)]++
}
}
metrics.PodPendingResizes.Reset()
for reason, count := range pendingResizeCount {
metrics.PodPendingResizes.WithLabelValues(reason).Set(float64(count))
}
}
// recordInProgressResizeCount sets the in-progress resize metric.
func (m *manager) recordInProgressResizeCount() {
inProgressResizeCount := 0
for _, conditions := range m.podResizeConditions {
if conditions.PodResizeInProgress != nil {
inProgressResizeCount++
}
}
metrics.PodInProgressResizes.Set(float64(inProgressResizeCount))
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import v1 "k8s.io/api/core/v1"
// FakePodDeletionSafetyProvider is a fake PodDeletionSafetyProvider for test.
type FakePodDeletionSafetyProvider struct {
HasRunning bool
}
func (f *FakePodDeletionSafetyProvider) PodCouldHaveRunningContainers(pod *v1.Pod) bool {
return f.HasRunning
}
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by mockery; DO NOT EDIT.
// github.com/vektra/mockery
// template: testify
package testing
import (
mock "github.com/stretchr/testify/mock"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// NewMockPodStatusProvider creates a new instance of MockPodStatusProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockPodStatusProvider(t interface {
mock.TestingT
Cleanup(func())
}) *MockPodStatusProvider {
mock := &MockPodStatusProvider{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
// MockPodStatusProvider is an autogenerated mock type for the PodStatusProvider type
type MockPodStatusProvider struct {
mock.Mock
}
type MockPodStatusProvider_Expecter struct {
mock *mock.Mock
}
func (_m *MockPodStatusProvider) EXPECT() *MockPodStatusProvider_Expecter {
return &MockPodStatusProvider_Expecter{mock: &_m.Mock}
}
// GetPodStatus provides a mock function for the type MockPodStatusProvider
func (_mock *MockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
ret := _mock.Called(uid)
if len(ret) == 0 {
panic("no return value specified for GetPodStatus")
}
var r0 v1.PodStatus
var r1 bool
if returnFunc, ok := ret.Get(0).(func(types.UID) (v1.PodStatus, bool)); ok {
return returnFunc(uid)
}
if returnFunc, ok := ret.Get(0).(func(types.UID) v1.PodStatus); ok {
r0 = returnFunc(uid)
} else {
r0 = ret.Get(0).(v1.PodStatus)
}
if returnFunc, ok := ret.Get(1).(func(types.UID) bool); ok {
r1 = returnFunc(uid)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// MockPodStatusProvider_GetPodStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPodStatus'
type MockPodStatusProvider_GetPodStatus_Call struct {
*mock.Call
}
// GetPodStatus is a helper method to define mock.On call
// - uid types.UID
func (_e *MockPodStatusProvider_Expecter) GetPodStatus(uid interface{}) *MockPodStatusProvider_GetPodStatus_Call {
return &MockPodStatusProvider_GetPodStatus_Call{Call: _e.mock.On("GetPodStatus", uid)}
}
func (_c *MockPodStatusProvider_GetPodStatus_Call) Run(run func(uid types.UID)) *MockPodStatusProvider_GetPodStatus_Call {
_c.Call.Run(func(args mock.Arguments) {
var arg0 types.UID
if args[0] != nil {
arg0 = args[0].(types.UID)
}
run(
arg0,
)
})
return _c
}
func (_c *MockPodStatusProvider_GetPodStatus_Call) Return(podStatus v1.PodStatus, b bool) *MockPodStatusProvider_GetPodStatus_Call {
_c.Call.Return(podStatus, b)
return _c
}
func (_c *MockPodStatusProvider_GetPodStatus_Call) RunAndReturn(run func(uid types.UID) (v1.PodStatus, bool)) *MockPodStatusProvider_GetPodStatus_Call {
_c.Call.Return(run)
return _c
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sysctl
import (
"fmt"
"strings"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/kubernetes/pkg/apis/core/validation"
policyvalidation "k8s.io/kubernetes/pkg/apis/policy/validation"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)
const (
ForbiddenReason = "SysctlForbidden"
)
// patternAllowlist takes a list of sysctls or sysctl patterns (ending in *) and
// checks validity via a sysctl and prefix map, rejecting those which are not known
// to be namespaced.
type patternAllowlist struct {
sysctls map[string]utilsysctl.Namespace
prefixes map[string]utilsysctl.Namespace
}
var _ lifecycle.PodAdmitHandler = &patternAllowlist{}
// NewAllowlist creates a new Allowlist from a list of sysctls and sysctl patterns (ending in *).
func NewAllowlist(patterns []string) (*patternAllowlist, error) {
w := &patternAllowlist{
sysctls: map[string]utilsysctl.Namespace{},
prefixes: map[string]utilsysctl.Namespace{},
}
for _, s := range patterns {
if !policyvalidation.IsValidSysctlPattern(s) {
return nil, fmt.Errorf("sysctl %q must have at most %d characters and match regex %s",
s,
validation.SysctlMaxLength,
policyvalidation.SysctlContainSlashPatternFmt,
)
}
ns, sysctlOrPrefix, prefixed := utilsysctl.GetNamespace(s)
if ns == utilsysctl.UnknownNamespace {
return nil, fmt.Errorf("the sysctls %q are not known to be namespaced", sysctlOrPrefix)
}
if prefixed {
w.prefixes[sysctlOrPrefix] = ns
} else {
w.sysctls[sysctlOrPrefix] = ns
}
}
return w, nil
}
// validateSysctl checks that a sysctl is allowlisted because it is known
// to be namespaced by the Linux kernel. Note that being allowlisted is required, but not
// sufficient: the container runtime might have a stricter check and refuse to launch a pod.
//
// The parameters hostNet and hostIPC are used to forbid sysctls for pods sharing the
// respective namespaces with the host. This check is only possible for sysctls on
// the static default allowlist, not those on the custom allowlist provided by the admin.
func (w *patternAllowlist) validateSysctl(sysctl string, hostNet, hostIPC bool) error {
sysctl = utilsysctl.NormalizeName(sysctl)
nsErrorFmt := "%q not allowed with host %s enabled"
if ns, found := w.sysctls[sysctl]; found {
if ns == utilsysctl.IPCNamespace && hostIPC {
return fmt.Errorf(nsErrorFmt, sysctl, ns)
}
if ns == utilsysctl.NetNamespace && hostNet {
return fmt.Errorf(nsErrorFmt, sysctl, ns)
}
return nil
}
for p, ns := range w.prefixes {
if strings.HasPrefix(sysctl, p) {
if ns == utilsysctl.IPCNamespace && hostIPC {
return fmt.Errorf(nsErrorFmt, sysctl, ns)
}
if ns == utilsysctl.NetNamespace && hostNet {
return fmt.Errorf(nsErrorFmt, sysctl, ns)
}
return nil
}
}
return fmt.Errorf("%q not allowlisted", sysctl)
}
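// Illustrative sketch (not part of the original source): both exact names and
// prefix patterns admit a sysctl, but namespaces shared with the host reject it:
//
//	w, _ := NewAllowlist([]string{"kernel.shm_rmid_forced", "net.ipv4.*"})
//	_ = w.validateSysctl("net.ipv4.tcp_syncookies", false, false) // nil: matches the net.ipv4.* prefix
//	_ = w.validateSysctl("net.ipv4.tcp_syncookies", true, false)  // error: net namespace is shared with the host
//	_ = w.validateSysctl("kernel.msgmax", false, false)           // error: not allowlisted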
// Admit checks that all sysctls given in pod's security context
// are valid according to the allowlist.
func (w *patternAllowlist) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
pod := attrs.Pod
if pod.Spec.SecurityContext == nil || len(pod.Spec.SecurityContext.Sysctls) == 0 {
return lifecycle.PodAdmitResult{
Admit: true,
}
}
for _, s := range pod.Spec.SecurityContext.Sysctls {
if err := w.validateSysctl(s.Name, pod.Spec.HostNetwork, pod.Spec.HostIPC); err != nil {
return lifecycle.PodAdmitResult{
Admit: false,
Reason: ForbiddenReason,
Message: fmt.Sprintf("forbidden sysctl: %v", err),
}
}
}
return lifecycle.PodAdmitResult{
Admit: true,
}
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sysctl
import (
"context"
goruntime "runtime"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/klog/v2"
utilkernel "k8s.io/kubernetes/pkg/util/kernel"
)
type sysctl struct {
// the name of sysctl
name string
// the minimum kernel version where the sysctl is available
kernel string
}
var safeSysctls = []sysctl{
{
name: "kernel.shm_rmid_forced",
}, {
name: "net.ipv4.ip_local_port_range",
}, {
name: "net.ipv4.tcp_syncookies",
}, {
name: "net.ipv4.ping_group_range",
}, {
name: "net.ipv4.ip_unprivileged_port_start",
}, {
name: "net.ipv4.ip_local_reserved_ports",
kernel: utilkernel.IPLocalReservedPortsNamespacedKernelVersion,
}, {
name: "net.ipv4.tcp_keepalive_time",
kernel: utilkernel.TCPKeepAliveTimeNamespacedKernelVersion,
}, {
name: "net.ipv4.tcp_fin_timeout",
kernel: utilkernel.TCPFinTimeoutNamespacedKernelVersion,
},
{
name: "net.ipv4.tcp_keepalive_intvl",
kernel: utilkernel.TCPKeepAliveIntervalNamespacedKernelVersion,
},
{
name: "net.ipv4.tcp_keepalive_probes",
kernel: utilkernel.TCPKeepAliveProbesNamespacedKernelVersion,
},
{
name: "net.ipv4.tcp_rmem",
kernel: utilkernel.TCPReceiveMemoryNamespacedKernelVersion,
},
{
name: "net.ipv4.tcp_wmem",
kernel: utilkernel.TCPTransmitMemoryNamespacedKernelVersion,
},
}
// SafeSysctlAllowlist returns the allowlist of safe sysctls and safe sysctl patterns (ending in *).
//
// A sysctl is called safe iff
// - it is namespaced in the container or the pod
// - it is isolated, i.e. has no influence on any other pod on the same node.
func SafeSysctlAllowlist(ctx context.Context) []string {
if goruntime.GOOS != "linux" {
return nil
}
return getSafeSysctlAllowlist(ctx, utilkernel.GetVersion)
}
func getSafeSysctlAllowlist(ctx context.Context, getVersion func() (*version.Version, error)) []string {
logger := klog.FromContext(ctx)
kernelVersion, err := getVersion()
if err != nil {
logger.Error(err, "failed to get kernel version, unable to determine which sysctls are available")
}
var safeSysctlAllowlist []string
for _, sc := range safeSysctls {
if sc.kernel == "" {
safeSysctlAllowlist = append(safeSysctlAllowlist, sc.name)
continue
}
if kernelVersion != nil && kernelVersion.AtLeast(version.MustParseGeneric(sc.kernel)) {
safeSysctlAllowlist = append(safeSysctlAllowlist, sc.name)
} else {
logger.Info("kernel version is too old, dropping the sysctl from safe sysctl list", "kernelVersion", kernelVersion, "sysctl", sc.name)
}
}
return safeSysctlAllowlist
}
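// Illustrative sketch (not part of the original source): injecting a stubbed
// kernel version shows how version-gated sysctls are filtered. Assuming, for
// example, that net.ipv4.tcp_rmem requires a newer kernel than the stub:
//
//	list := getSafeSysctlAllowlist(ctx, func() (*version.Version, error) {
//		return version.MustParseGeneric("4.4"), nil
//	})
//	// list keeps ungated entries such as kernel.shm_rmid_forced, and drops any
//	// entry whose minimum kernel version is newer than 4.4.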
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sysctl
import (
v1 "k8s.io/api/core/v1"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
)
// ConvertPodSysctlsVariableToDotsSeparator converts the sysctl variable names in the
// Pod.Spec.SecurityContext.Sysctls slice to use dots as separators, according to the
// Linux sysctl conversion rules.
// See https://man7.org/linux/man-pages/man5/sysctl.d.5.html for more details.
func ConvertPodSysctlsVariableToDotsSeparator(securityContext *v1.PodSecurityContext) {
if securityContext == nil {
return
}
for i, sysctl := range securityContext.Sysctls {
securityContext.Sysctls[i].Name = utilsysctl.NormalizeName(sysctl.Name)
}
}
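// Illustrative sketch (not part of the original source): slash-separated sysctl
// names are rewritten with dots, per the sysctl.d conversion rules:
//
//	sc := &v1.PodSecurityContext{Sysctls: []v1.Sysctl{{Name: "net/ipv4/ip_unprivileged_port_start", Value: "1024"}}}
//	ConvertPodSysctlsVariableToDotsSeparator(sc)
//	// sc.Sysctls[0].Name is now "net.ipv4.ip_unprivileged_port_start"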
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
const (
maxTTL = 24 * time.Hour
gcPeriod = time.Minute
maxJitter = 10 * time.Second
)
// NewManager returns a new token manager.
func NewManager(c clientset.Interface) *Manager {
// check whether the server supports token requests so we can give a more helpful error message
supported := false
once := &sync.Once{}
tokenRequestsSupported := func() bool {
once.Do(func() {
resources, err := c.Discovery().ServerResourcesForGroupVersion("v1")
if err != nil {
return
}
for _, resource := range resources.APIResources {
if resource.Name == "serviceaccounts/token" {
supported = true
return
}
}
})
return supported
}
m := &Manager{
getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
if c == nil {
return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode")
}
tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
if apierrors.IsNotFound(err) && !tokenRequestsSupported() {
return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled")
}
return tokenRequest, err
},
cache: make(map[string]*authenticationv1.TokenRequest),
clock: clock.RealClock{},
}
go wait.Forever(m.cleanup, gcPeriod)
return m
}
// Manager manages service account tokens for pods.
type Manager struct {
// cacheMutex guards the cache
cacheMutex sync.RWMutex
cache map[string]*authenticationv1.TokenRequest
// mocked for testing
getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
// * Check the cache for the current token request.
// * If the token exists and does not require a refresh, return the current token.
// * Attempt to refresh the token.
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error
func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
// TODO: pass ctx to GetServiceAccountToken after switching pkg/volume to contextual logging
ctx := context.TODO()
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctx, ctr) {
return ctr, nil
}
tr, err := m.getToken(name, namespace, tr)
if err != nil {
switch {
case !ok:
return nil, fmt.Errorf("failed to fetch token: %v", err)
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
logger := klog.FromContext(ctx)
logger.Error(err, "Couldn't update token", "cacheKey", key)
return ctr, nil
}
}
m.set(key, tr)
return tr, nil
}
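// Illustrative sketch (not part of the original source): a caller binds the
// requested token to a pod so it is invalidated when the pod goes away. The
// names below (mgr, podName, podUID) are hypothetical:
//
//	expSeconds := int64(3600)
//	tr := &authenticationv1.TokenRequest{Spec: authenticationv1.TokenRequestSpec{
//		Audiences:         []string{"https://kubernetes.default.svc"},
//		ExpirationSeconds: &expSeconds,
//		BoundObjectRef:    &authenticationv1.BoundObjectReference{Kind: "Pod", Name: podName, UID: podUID},
//	}}
//	token, err := mgr.GetServiceAccountToken("default", "my-serviceaccount", tr)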
// DeleteServiceAccountToken should be invoked when a pod is deleted. It simply
// cleans the token manager cache.
func (m *Manager) DeleteServiceAccountToken(podUID types.UID) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if tr.Spec.BoundObjectRef.UID == podUID {
delete(m.cache, k)
}
}
}
func (m *Manager) cleanup() {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if m.expired(tr) {
delete(m.cache, k)
}
}
}
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
m.cacheMutex.RLock()
defer m.cacheMutex.RUnlock()
ctr, ok := m.cache[key]
return ctr, ok
}
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
m.cache[key] = tr
}
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// ttl, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(ctx context.Context, tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
cpy := tr.DeepCopy()
cpy.Status.Token = ""
logger := klog.FromContext(ctx)
logger.Error(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
return false
}
now := m.clock.Now()
exp := tr.Status.ExpirationTimestamp.Time
iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
jitter := time.Duration(rand.Float64()*maxJitter.Seconds()) * time.Second
if now.After(iat.Add(maxTTL - jitter)) {
return true
}
// Require a refresh if within 20% of the TTL plus a jitter from the expiration time.
if now.After(exp.Add(-1*time.Duration((*tr.Spec.ExpirationSeconds*20)/100)*time.Second - jitter)) {
return true
}
return false
}
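// Worked example (illustrative, not part of the original source): for a token
// with ExpirationSeconds=3600, the issue time iat is exp minus one hour, and
// the refresh threshold is exp - 720s (20% of the TTL) minus jitter. The token
// is therefore served from cache for roughly the first 48 minutes and refreshed
// on any request after that, or in any case once it is ~24h old.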
// keys should be nonconfidential and safe to log
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
var exp int64
if tr.Spec.ExpirationSeconds != nil {
exp = *tr.Spec.ExpirationSeconds
}
var ref authenticationv1.BoundObjectReference
if tr.Spec.BoundObjectRef != nil {
ref = *tr.Spec.BoundObjectRef
}
var uid types.UID
if len(tr.UID) > 0 {
// If UID is set in the token request it is used as a precondition
// to ensure that the token request is for the same service account.
// This is useful to prevent stale tokens from being returned after a service account
// is deleted and recreated with the same name.
uid = tr.UID
}
return fmt.Sprintf("%q/%q/%#v/%#v/%#v/%q", name, namespace, tr.Spec.Audiences, exp, ref, uid)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
// PodConditionsByKubelet is the list of pod conditions owned by kubelet
var PodConditionsByKubelet = []v1.PodConditionType{
v1.PodScheduled,
v1.PodReady,
v1.PodInitialized,
v1.ContainersReady,
v1.PodResizeInProgress,
v1.PodResizePending,
}
// PodConditionByKubelet returns if the pod condition type is owned by kubelet
func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
for _, c := range PodConditionsByKubelet {
if c == conditionType {
return true
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
if conditionType == v1.PodReadyToStartContainers {
return true
}
}
return false
}
// PodConditionSharedByKubelet returns if the pod condition type is shared by kubelet
func PodConditionSharedByKubelet(conditionType v1.PodConditionType) bool {
return conditionType == v1.DisruptionTarget
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/scheduling"
)
// Annotation keys for annotations used in this package.
const (
ConfigSourceAnnotationKey = "kubernetes.io/config.source"
ConfigMirrorAnnotationKey = v1.MirrorPodAnnotationKey
ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
ConfigHashAnnotationKey = "kubernetes.io/config.hash"
)
// PodOperation defines what changes will be made on a pod configuration.
type PodOperation int
// These constants identify the PodOperations that can be made on a pod configuration.
const (
// SET is the current pod configuration.
SET PodOperation = iota
// ADD signifies pods that are new to this source.
ADD
// DELETE signifies pods that are gracefully deleted from this source.
DELETE
// REMOVE signifies pods that have been removed from this source.
REMOVE
// UPDATE signifies pods have been updated in this source.
UPDATE
// RECONCILE signifies pods that have unexpected status in this source;
// kubelet should reconcile status with this source.
RECONCILE
)
// These constants identify the sources of pods.
const (
// FileSource identifies updates from a file.
FileSource = "file"
// HTTPSource identifies updates from querying a web page.
HTTPSource = "http"
// ApiserverSource identifies updates from Kubernetes API Server.
ApiserverSource = "api"
// AllSource identifies updates from all sources.
AllSource = "*"
)
// NamespaceDefault is a string representing the default namespace.
const NamespaceDefault = metav1.NamespaceDefault
// PodUpdate defines an operation sent on the channel. You can add or remove single pods by
// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required).
// For setting the state of the system to a given state for this source configuration, set
// Pods as desired and Op to SET, which will reset the system state to that specified in this
// operation for this source channel. To remove all pods, set Pods to an empty slice and Op to SET.
//
// Additionally, Pods should never be nil - it should always point to an empty slice. While
// functionally similar, this helps our unit tests properly check that the correct PodUpdates
// are generated.
type PodUpdate struct {
Pods []*v1.Pod
Op PodOperation
Source string
}
// GetValidatedSources gets all validated sources from the specified sources.
func GetValidatedSources(sources []string) ([]string, error) {
validated := make([]string, 0, len(sources))
for _, source := range sources {
switch source {
case AllSource:
return []string{FileSource, HTTPSource, ApiserverSource}, nil
case FileSource, HTTPSource, ApiserverSource:
validated = append(validated, source)
case "":
// Skip
default:
return []string{}, fmt.Errorf("unknown pod source %q", source)
}
}
return validated, nil
}
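// Illustrative sketch (not part of the original source):
//
//	srcs, _ := GetValidatedSources([]string{"file", "api"}) // ["file", "api"]
//	all, _ := GetValidatedSources([]string{"*"})            // ["file", "http", "api"]
//	_, err := GetValidatedSources([]string{"ftp"})          // error: unknown pod source "ftp"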
// GetPodSource returns the source of the pod based on the annotation.
func GetPodSource(pod *v1.Pod) (string, error) {
if pod.Annotations != nil {
if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok {
return source, nil
}
}
return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
}
// SyncPodType classifies pod updates, eg: create, update.
type SyncPodType int
const (
// SyncPodSync is when the pod is synced to ensure desired state
SyncPodSync SyncPodType = iota
// SyncPodUpdate is when the pod is updated from source
SyncPodUpdate
// SyncPodCreate is when the pod is created from source
SyncPodCreate
// SyncPodKill is when the pod should have no running containers. A pod stopped in this way could be
// restarted in the future due to config changes.
SyncPodKill
)
func (sp SyncPodType) String() string {
switch sp {
case SyncPodCreate:
return "create"
case SyncPodUpdate:
return "update"
case SyncPodSync:
return "sync"
case SyncPodKill:
return "kill"
default:
return "unknown"
}
}
// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
func IsMirrorPod(pod *v1.Pod) bool {
if pod.Annotations == nil {
return false
}
_, ok := pod.Annotations[ConfigMirrorAnnotationKey]
return ok
}
// IsStaticPod returns true if the pod is a static pod.
func IsStaticPod(pod *v1.Pod) bool {
source, err := GetPodSource(pod)
return err == nil && source != ApiserverSource
}
// IsCriticalPod returns true if the pod is a static pod, a mirror pod, or its priority is greater than or equal to SystemCriticalPriority.
func IsCriticalPod(pod *v1.Pod) bool {
if IsStaticPod(pod) {
return true
}
if IsMirrorPod(pod) {
return true
}
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
return true
}
return false
}
// Preemptable returns true if the preemptor pod can preempt the preemptee pod:
// that is, if the preemptor is critical and the preemptee is not, or if the
// preemptor's priority is greater than the preemptee's priority.
func Preemptable(preemptor, preemptee *v1.Pod) bool {
if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
return true
}
if (preemptor != nil && preemptor.Spec.Priority != nil) &&
(preemptee != nil && preemptee.Spec.Priority != nil) {
return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
}
return false
}
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
func IsCriticalPodBasedOnPriority(priority int32) bool {
return priority >= scheduling.SystemCriticalPriority
}
// IsNodeCriticalPod checks if the given pod is a system-node-critical pod.
func IsNodeCriticalPod(pod *v1.Pod) bool {
return IsCriticalPod(pod) && (pod.Spec.PriorityClassName == scheduling.SystemNodeCritical)
}
// HasRestartableInitContainer returns true if the pod has any restartable init
// container
func HasRestartableInitContainer(pod *v1.Pod) bool {
for _, container := range pod.Spec.InitContainers {
if podutil.IsRestartableInitContainer(&container) {
return true
}
}
return false
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"net/http"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cri-client/pkg/logs"
)
// TODO: Reconcile custom types in kubelet/types and this subpackage
// HTTPDoer encapsulates http.Do functionality
type HTTPDoer interface {
Do(req *http.Request) (*http.Response, error)
}
// Timestamp wraps around time.Time and offers utilities to format and parse
// the time using RFC3339Nano
type Timestamp struct {
time time.Time
}
// NewTimestamp returns a Timestamp object using the current time.
func NewTimestamp() *Timestamp {
return &Timestamp{time.Now()}
}
// ConvertToTimestamp takes a string, parses it using the RFC3339NanoLenient layout,
// and converts it to a Timestamp object.
func ConvertToTimestamp(timeString string) *Timestamp {
parsed, _ := time.Parse(logs.RFC3339NanoLenient, timeString)
return &Timestamp{parsed}
}
// Get returns the time as time.Time.
func (t *Timestamp) Get() time.Time {
return t.time
}
// GetString returns the time in the string format using the RFC3339NanoFixed
// layout.
func (t *Timestamp) GetString() string {
return t.time.Format(logs.RFC3339NanoFixed)
}
// SortedContainerStatuses is a type to help sort container statuses based on container names.
type SortedContainerStatuses []v1.ContainerStatus
func (s SortedContainerStatuses) Len() int { return len(s) }
func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortedContainerStatuses) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
// SortInitContainerStatuses ensures that statuses are in the order that their
// init container appears in the pod spec. The function assumes there are no
// duplicate names in the statuses.
func SortInitContainerStatuses(p *v1.Pod, statuses []v1.ContainerStatus) {
containers := p.Spec.InitContainers
current := 0
for _, container := range containers {
for j := current; j < len(statuses); j++ {
if container.Name == statuses[j].Name {
statuses[current], statuses[j] = statuses[j], statuses[current]
current++
break
}
}
}
}
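// Illustrative sketch (not part of the original source): with init containers
// declared as ["init-a", "init-b"] in the spec and statuses arriving as
// ["init-b", "init-a"], the in-place sort restores spec order:
//
//	SortInitContainerStatuses(pod, statuses)
//	// statuses is now ordered ["init-a", "init-b"]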
// SortStatusesOfInitContainers returns the statuses of InitContainers of pod p,
// in the order that they appear in its spec.
func SortStatusesOfInitContainers(p *v1.Pod, statusMap map[string]*v1.ContainerStatus) []v1.ContainerStatus {
containers := p.Spec.InitContainers
statuses := []v1.ContainerStatus{}
for _, container := range containers {
if status, found := statusMap[container.Name]; found {
statuses = append(statuses, *status)
}
}
return statuses
}
// Reservation represents reserved resources for non-pod components.
type Reservation struct {
// System represents resources reserved for non-kubernetes components.
System v1.ResourceList
// Kubernetes represents resources reserved for kubernetes system components.
Kubernetes v1.ResourceList
}
// ResolvedPodUID is a pod UID which has been translated/resolved to the representation known to kubelets.
type ResolvedPodUID types.UID
// MirrorPodUID is a pod UID for a mirror pod.
type MirrorPodUID types.UID
//go:build !windows
// +build !windows
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package userns
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilstore "k8s.io/kubernetes/pkg/kubelet/util/store"
"k8s.io/kubernetes/pkg/registry/core/service/allocator"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
const (
// Create a new map once we have removed enough pods, to avoid memory leaks
// since Go maps never free memory.
mapReInitializeThreshold = 1000
// userNsUnitLength is the unit length (number of IDs) of a user namespace range
userNsUnitLength = 65536
)
type UsernsManager struct {
used *allocator.AllocationBitmap
usedBy map[types.UID]uint32 // Map pod.UID to range used
removed int
off int
len int
userNsLength uint32
kl userNsPodsManager
// This protects all members except for kl
lock sync.Mutex
}
// UserNamespace holds the configuration for the user namespace.
type userNamespace struct {
// UIDs mappings for the user namespace.
UIDMappings []idMapping `json:"uidMappings"`
// GIDs mappings for the user namespace.
GIDMappings []idMapping `json:"gidMappings"`
}
// Pod user namespace mapping
type idMapping struct {
// Required.
HostId uint32 `json:"hostId"`
// Required.
ContainerId uint32 `json:"containerId"`
// Required.
Length uint32 `json:"length"`
}
// mappingsFile is the file where the user namespace mappings are persisted.
const mappingsFile = "userns"
// writeMappingsToFile writes the specified user namespace configuration to the pod
// directory.
func (m *UsernsManager) writeMappingsToFile(pod types.UID, userNs userNamespace) error {
dir := m.kl.GetPodDir(pod)
data, err := json.Marshal(userNs)
if err != nil {
return err
}
fstore, err := utilstore.NewFileStore(dir, &utilfs.DefaultFs{})
if err != nil {
return fmt.Errorf("create user namespace store: %w", err)
}
if err := fstore.Write(mappingsFile, data); err != nil {
return err
}
// We need to fsync the parent dir so the file is guaranteed to be there.
// fstore guarantees an atomic write, we need durability too.
parentDir, err := os.Open(dir)
if err != nil {
return err
}
if err = parentDir.Sync(); err != nil {
// Ignore return here, there is already an error reported.
parentDir.Close()
return err
}
return parentDir.Close()
}
// readMappingsFromFile reads the user namespace configuration from the pod directory.
func (m *UsernsManager) readMappingsFromFile(pod types.UID) ([]byte, error) {
dir := m.kl.GetPodDir(pod)
fstore, err := utilstore.NewFileStore(dir, &utilfs.DefaultFs{})
if err != nil {
return nil, fmt.Errorf("create user namespace store: %w", err)
}
return fstore.Read(mappingsFile)
}
func MakeUserNsManager(logger klog.Logger, kl userNsPodsManager) (*UsernsManager, error) {
kubeletMappingID, kubeletMappingLen, err := kl.GetKubeletMappings()
if err != nil {
return nil, fmt.Errorf("kubelet mappings: %w", err)
}
userNsLength := kl.GetUserNamespacesIDsPerPod()
if userNsLength%userNsUnitLength != 0 {
return nil, fmt.Errorf("kubelet user namespace length %v is not a multiple of %d", userNsLength, userNsUnitLength)
}
if kubeletMappingID%userNsLength != 0 {
return nil, fmt.Errorf("kubelet user assigned ID %v is not a multiple of %v", kubeletMappingID, userNsLength)
}
if kubeletMappingID < userNsLength {
// We don't allow to map 0, as security is circumvented.
return nil, fmt.Errorf("kubelet user assigned ID %v must be greater or equal to %v", kubeletMappingID, userNsLength)
}
if kubeletMappingLen%userNsLength != 0 {
return nil, fmt.Errorf("kubelet user assigned IDs length %v is not a multiple of %v", kubeletMappingLen, userNsLength)
}
if kubeletMappingLen/userNsLength < uint32(kl.GetMaxPods()) {
return nil, fmt.Errorf("kubelet user assigned IDs are not enough to support %v pods", kl.GetMaxPods())
}
off := int(kubeletMappingID / userNsLength)
len := int(kubeletMappingLen / userNsLength)
logger.V(5).Info("User namespace manager mapping", "offset", off, "length", len, "idsPerPod", userNsLength)
m := UsernsManager{
used: allocator.NewAllocationMap(len, "user namespaces"),
usedBy: make(map[types.UID]uint32),
kl: kl,
off: off,
len: len,
userNsLength: userNsLength,
}
// do not bother reading the list of pods if user namespaces are not enabled.
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return &m, nil
}
found, err := kl.ListPodsFromDisk()
if err != nil {
if os.IsNotExist(err) {
return &m, nil
}
return nil, fmt.Errorf("read pods from disk: %w", err)
}
for _, podUID := range found {
logger.V(5).Info("reading pod from disk for user namespace", "podUID", podUID)
if err := m.recordPodMappings(logger, podUID); err != nil {
return nil, fmt.Errorf("record pod mappings: %w", err)
}
}
return &m, nil
}
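// Worked example (illustrative, not part of the original source): with
// GetKubeletMappings returning ID 65536 and length 110*65536, and 65536 IDs
// per pod, off = 65536/65536 = 1 and len = 110: the manager hands out 110
// pod-sized slots starting just above the host's first 65536 IDs.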
// recordPodMappings registers the range used for the user namespace if the
// usernsConfFile exists in the pod directory.
func (m *UsernsManager) recordPodMappings(logger klog.Logger, pod types.UID) error {
content, err := m.readMappingsFromFile(pod)
if err != nil && err != utilstore.ErrKeyNotFound {
return err
}
// If no content, it means the pod doesn't have userns. Nothing else to do
if len(content) == 0 {
return nil
}
_, err = m.parseUserNsFileAndRecord(logger, pod, content)
return err
}
// isSet checks whether the user namespace range containing the given ID is already allocated.
func (m *UsernsManager) isSet(v uint32) bool {
index := int(v/m.userNsLength) - m.off
if index < 0 || index >= m.len {
return true
}
return m.used.Has(index)
}
// allocateOne finds a free user namespace and allocates it to the specified pod.
// The first return value is the first ID in the user namespace, the second is
// the length of the user namespace range.
func (m *UsernsManager) allocateOne(logger klog.Logger, pod types.UID) (firstID uint32, length uint32, err error) {
firstZero, found, err := m.used.AllocateNext()
if err != nil {
return 0, 0, err
}
if !found {
return 0, 0, fmt.Errorf("could not find an empty slot to allocate a user namespace")
}
logger.V(5).Info("new pod user namespace allocation", "podUID", pod)
firstID = uint32((firstZero + m.off)) * m.userNsLength
m.usedBy[pod] = firstID
return firstID, m.userNsLength, nil
}
// record stores the user namespace [from; from+length) for the specified pod.
func (m *UsernsManager) record(logger klog.Logger, pod types.UID, from, length uint32) (err error) {
if length != m.userNsLength {
return fmt.Errorf("wrong user namespace length %v", length)
}
if from%m.userNsLength != 0 {
return fmt.Errorf("wrong user namespace offset specified %v", from)
}
prevFrom, found := m.usedBy[pod]
if found && prevFrom != from {
return fmt.Errorf("different user namespace range already used by pod %q", pod)
}
index := int(from/m.userNsLength) - m.off
if index < 0 || index >= m.len {
return fmt.Errorf("id %v is out of range", from)
}
// if the pod wasn't found then verify the range is free.
if !found && m.used.Has(index) {
return fmt.Errorf("range picked for pod %q already taken", pod)
}
// The pod is already registered, nothing to do.
if found && prevFrom == from {
return nil
}
logger.V(5).Info("new pod user namespace allocation", "podUID", pod)
// "from" is a ID (UID/GID), set the corresponding userns of size
// userNsLength in the bit-array.
m.used.Allocate(index)
m.usedBy[pod] = from
return nil
}
// Release releases the user namespace allocated to the specified pod.
func (m *UsernsManager) Release(logger klog.Logger, podUID types.UID) {
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return
}
m.lock.Lock()
defer m.lock.Unlock()
m.releaseWithLock(logger, podUID)
}
// podAllocated returns true if the pod is allocated, false otherwise.
func (m *UsernsManager) podAllocated(podUID types.UID) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return false
}
m.lock.Lock()
defer m.lock.Unlock()
_, ok := m.usedBy[podUID]
return ok
}
func (m *UsernsManager) releaseWithLock(logger klog.Logger, pod types.UID) {
v, ok := m.usedBy[pod]
if !ok {
logger.V(5).Info("pod user namespace allocation not present", "podUID", pod)
return
}
delete(m.usedBy, pod)
logger.V(5).Info("releasing pod user namespace allocation", "podUID", pod)
m.removed++
_ = os.Remove(filepath.Join(m.kl.GetPodDir(pod), mappingsFile))
if m.removed%mapReInitializeThreshold == 0 {
n := make(map[types.UID]uint32)
for k, v := range m.usedBy {
n[k] = v
}
m.usedBy = n
m.removed = 0
}
_ = m.used.Release(int(v/m.userNsLength) - m.off)
}
func (m *UsernsManager) parseUserNsFileAndRecord(logger klog.Logger, pod types.UID, content []byte) (userNs userNamespace, err error) {
if err = json.Unmarshal([]byte(content), &userNs); err != nil {
err = fmt.Errorf("invalid user namespace mappings file: %w", err)
return
}
if len(userNs.UIDMappings) != 1 {
err = fmt.Errorf("invalid user namespace configuration: exactly one UID mapping is required")
return
}
if len(userNs.UIDMappings) != len(userNs.GIDMappings) {
err = fmt.Errorf("invalid user namespace configuration: GID and UID mappings should be identical")
return
}
if userNs.UIDMappings[0] != userNs.GIDMappings[0] {
err = fmt.Errorf("invalid user namespace configuration: GID and UID mapping should be identical")
return
}
// We don't produce configs without root mapped and some runtimes assume it is mapped.
// Validate the file has something we produced and can digest.
if userNs.UIDMappings[0].ContainerId != 0 {
err = fmt.Errorf("invalid user namespace configuration: UID 0 must be mapped")
return
}
if userNs.GIDMappings[0].ContainerId != 0 {
err = fmt.Errorf("invalid user namespace configuration: GID 0 must be mapped")
return
}
hostId := userNs.UIDMappings[0].HostId
length := userNs.UIDMappings[0].Length
err = m.record(logger, pod, hostId, length)
return
}
func (m *UsernsManager) createUserNs(logger klog.Logger, pod *v1.Pod) (userNs userNamespace, err error) {
firstID, length, err := m.allocateOne(logger, pod.UID)
if err != nil {
return
}
defer func() {
if err != nil {
m.releaseWithLock(logger, pod.UID)
}
}()
userNs = userNamespace{
UIDMappings: []idMapping{
{
ContainerId: 0,
HostId: firstID,
Length: length,
},
},
GIDMappings: []idMapping{
{
ContainerId: 0,
HostId: firstID,
Length: length,
},
},
}
return userNs, m.writeMappingsToFile(pod.UID, userNs)
}
// GetOrCreateUserNamespaceMappings returns the configuration for the sandbox user namespace
func (m *UsernsManager) GetOrCreateUserNamespaceMappings(ctx context.Context, pod *v1.Pod, runtimeHandler string) (*runtimeapi.UserNamespace, error) {
logger := klog.FromContext(ctx)
featureEnabled := utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport)
// TODO: If the default value for hostUsers ever changes, change the default value of
// userNamespacesEnabled as well
if pod == nil || pod.Spec.HostUsers == nil {
// if the feature is enabled, specify to use the node mode...
if featureEnabled {
return &runtimeapi.UserNamespace{
Mode: runtimeapi.NamespaceMode_NODE,
}, nil
}
// ...otherwise don't even specify it
return nil, nil
}
// pod.Spec.HostUsers is set to true/false
if !featureEnabled {
return nil, fmt.Errorf("the feature gate %q is disabled: can't set spec.HostUsers", features.UserNamespacesSupport)
}
if *pod.Spec.HostUsers {
return &runtimeapi.UserNamespace{
Mode: runtimeapi.NamespaceMode_NODE,
}, nil
}
// From here onwards, hostUsers=false and the feature gate is enabled.
// if the pod requested a user namespace and the runtime doesn't support user namespaces then return an error.
if handlerSupportsUserns, err := m.kl.HandlerSupportsUserNamespaces(runtimeHandler); err != nil || !handlerSupportsUserns {
msg := "can't set `spec.hostUsers: false`, runtime does not support user namespaces"
if runtimeHandler != "" {
msg = fmt.Sprintf("can't set `spec.hostUsers: false`, RuntimeClass handler %q does not support user namespaces", runtimeHandler)
}
if err != nil {
return nil, fmt.Errorf("%v: %w", msg, err)
}
return nil, fmt.Errorf("%v", msg)
}
m.lock.Lock()
defer m.lock.Unlock()
content, err := m.readMappingsFromFile(pod.UID)
if err != nil && err != utilstore.ErrKeyNotFound {
return nil, err
}
var userNs userNamespace
if string(content) != "" {
userNs, err = m.parseUserNsFileAndRecord(logger, pod.UID, content)
if err != nil {
return nil, fmt.Errorf("user namespace: %w", err)
}
} else {
userNs, err = m.createUserNs(logger, pod)
if err != nil {
return nil, fmt.Errorf("create user namespace: %w", err)
}
}
var uids []*runtimeapi.IDMapping
var gids []*runtimeapi.IDMapping
for _, u := range userNs.UIDMappings {
uids = append(uids, &runtimeapi.IDMapping{
HostId: u.HostId,
ContainerId: u.ContainerId,
Length: u.Length,
})
}
for _, g := range userNs.GIDMappings {
gids = append(gids, &runtimeapi.IDMapping{
HostId: g.HostId,
ContainerId: g.ContainerId,
Length: g.Length,
})
}
return &runtimeapi.UserNamespace{
Mode: runtimeapi.NamespaceMode_POD,
Uids: uids,
Gids: gids,
}, nil
}
// CleanupOrphanedPodUsernsAllocations reconciles the state of user namespace
// allocations with the pods actually running. It frees any user namespace
// allocation for orphaned pods.
func (m *UsernsManager) CleanupOrphanedPodUsernsAllocations(ctx context.Context, pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) {
return nil
}
logger := klog.FromContext(ctx)
m.lock.Lock()
defer m.lock.Unlock()
allPods := sets.New[string]()
for _, pod := range pods {
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
allPods.Insert(string(pod.ID))
}
allFound := sets.New[string]()
found, err := m.kl.ListPodsFromDisk()
if err != nil {
return fmt.Errorf("user namespace: read pods from disk: %w", err)
}
for _, podUID := range found {
allFound.Insert(string(podUID))
}
// Let's remove all the pods "found" that are not known.
for _, podUID := range found {
if allPods.Has(string(podUID)) {
continue
}
logger.V(5).Info("Clean up orphaned pod user namespace possible allocation", "podUID", podUID)
m.releaseWithLock(logger, podUID)
}
// Let's remove any existing allocation for a pod that is not "found".
for podUID := range m.usedBy {
if allFound.Has(string(podUID)) {
continue
}
logger.V(5).Info("Clean up orphaned pod user namespace possible allocation", "podUID", podUID)
m.releaseWithLock(logger, podUID)
}
return nil
}
func EnabledUserNamespacesSupport() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport)
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"time"
"golang.org/x/sys/unix"
"k8s.io/klog/v2"
)
// GetBootTime returns the time at which the machine was started, truncated to the nearest second.
// It uses /proc/stat first, which is more accurate, and falls back to the less accurate
// unix.Sysinfo if /proc/stat fails.
func GetBootTime() (time.Time, error) {
bootTime, err := getBootTimeWithProcStat()
if err != nil {
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
logger.Info("Failed to get boot time from /proc/uptime. Will retry with unix.Sysinfo.", "error", err)
return getBootTimeWithSysinfo()
}
return bootTime, nil
}
func getBootTimeWithProcStat() (time.Time, error) {
raw, err := os.ReadFile("/proc/stat")
if err != nil {
return time.Time{}, fmt.Errorf("error getting boot time: %w", err)
}
rawFields := strings.Fields(string(raw))
for i, v := range rawFields {
if v == "btime" {
if len(rawFields) > i+1 {
sec, err := strconv.ParseInt(rawFields[i+1], 10, 64)
if err != nil {
return time.Time{}, fmt.Errorf("error parsing boot time %s: %w", rawFields[i+1], err)
}
return time.Unix(sec, 0), nil
}
break
}
}
return time.Time{}, fmt.Errorf("can not find btime from /proc/stat: %s", raw)
}
func getBootTimeWithSysinfo() (time.Time, error) {
currentTime := time.Now()
var info unix.Sysinfo_t
if err := unix.Sysinfo(&info); err != nil {
return time.Time{}, fmt.Errorf("error getting system uptime: %w", err)
}
return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil
}
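// Illustrative sketch (not part of the original source): callers typically use
// the boot time to derive uptime or to detect reboots across restarts:
//
//	bootTime, err := GetBootTime()
//	if err == nil {
//		uptime := time.Since(bootTime)
//		_ = uptime
//	}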
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"time"
expirationcache "k8s.io/client-go/tools/cache"
)
// ObjectCache is a simple wrapper around an expiration cache that
// 1. uses a string-typed key
// 2. has an updater to fetch the value directly when the cached entry has expired
// 3. then updates the cache
type ObjectCache struct {
cache expirationcache.Store
updater func() (interface{}, error)
}
// objectEntry is an object with string type key.
type objectEntry struct {
key string
obj interface{}
}
// NewObjectCache creates ObjectCache with an updater.
// updater returns an object to cache.
func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache {
return &ObjectCache{
updater: f,
cache: expirationcache.NewTTLStore(stringKeyFunc, ttl),
}
}
// stringKeyFunc extracts the string key from a cached objectEntry.
func stringKeyFunc(obj interface{}) (string, error) {
key := obj.(objectEntry).key
return key, nil
}
// Get gets cached objectEntry by using a unique string as the key.
func (c *ObjectCache) Get(key string) (interface{}, error) {
value, ok, err := c.cache.Get(objectEntry{key: key})
if err != nil {
return nil, err
}
if !ok {
obj, err := c.updater()
if err != nil {
return nil, err
}
err = c.cache.Add(objectEntry{
key: key,
obj: obj,
})
if err != nil {
return nil, err
}
return obj, nil
}
return value.(objectEntry).obj, nil
}
// Add adds objectEntry by using a unique string as the key.
func (c *ObjectCache) Add(key string, obj interface{}) error {
return c.cache.Add(objectEntry{key: key, obj: obj})
}
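// Example usage (illustrative sketch, not part of the original sources):
// caching the result of a hypothetical expensive lookup for one minute;
// the import path is assumed.
package main

import (
	"fmt"
	"time"

	kubeletcache "k8s.io/kubernetes/pkg/kubelet/util/cache"
)

func main() {
	c := kubeletcache.NewObjectCache(func() (interface{}, error) {
		// Hypothetical expensive computation, re-run only after the TTL expires.
		return "expensive result", nil
	}, time.Minute)

	v, err := c.Get("result")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // fetched via the updater once, then served from cache until expiry
}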
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package env
import (
"bufio"
"fmt"
"os"
"strings"
)
// ParseEnv implements a strict parser for .env environment files,
// adhering to the format defined in the RFC documentation at https://smartmob-rfc.readthedocs.io/en/latest/2-dotenv.html,
// which is similar to the requirements in the OCI and Docker env file RFCs:
// - Leading whitespace is ignored for all lines.
// - Blank lines (including those with only whitespace) are ignored.
// - Lines starting with '#' are treated as comments and ignored.
// - Each variable must be declared as VAR=VAL. Whitespace around '=' and at the end of the line is ignored.
// - A backslash ('\') at the end of a variable declaration line indicates the value continues on the next line. The lines are joined with a single space, and the backslash is not included.
// - If a continuation line is interrupted by a blank line or comment, it is considered invalid and an error is returned.
// ParseEnv returns the value of the requested key, or the empty string if the key is not present.
func ParseEnv(envFilePath, key string) (string, error) {
file, err := os.Open(envFilePath)
if err != nil {
return "", fmt.Errorf("failed to open environment variable file %q: %w", envFilePath, err)
}
defer func() { _ = file.Close() }()
scanner := bufio.NewScanner(file)
var (
currentLine string
inContinuation bool
lineNum int
)
for scanner.Scan() {
lineNum++
line := scanner.Text()
line = strings.TrimLeft(line, " \t")
if line == "" {
if inContinuation {
return "", fmt.Errorf("invalid environment variable format at line %d: blank line in continuation", lineNum)
}
continue
}
if strings.HasPrefix(line, "#") {
if inContinuation {
return "", fmt.Errorf("invalid environment variable format at line %d: comment in continuation", lineNum)
}
continue
}
if inContinuation {
trimmed := strings.TrimRight(line, " \t")
if strings.HasSuffix(trimmed, "\\") {
currentLine += " " + strings.TrimRight(trimmed[:len(trimmed)-1], " \t")
continue
} else {
currentLine += " " + trimmed
line = currentLine
inContinuation = false
currentLine = ""
}
} else {
trimmed := strings.TrimRight(line, " \t")
if strings.HasSuffix(trimmed, "\\") {
currentLine = strings.TrimRight(trimmed[:len(trimmed)-1], " \t")
inContinuation = true
continue
}
}
eqIdx := strings.Index(line, "=")
if eqIdx == -1 {
return "", fmt.Errorf("invalid environment variable format at line %d", lineNum)
}
varName := strings.TrimSpace(line[:eqIdx])
varValue := strings.TrimRight(strings.TrimSpace(line[eqIdx+1:]), " \t")
if varName == "" {
return "", fmt.Errorf("invalid environment variable format at line %d", lineNum)
}
if varName == key {
return varValue, nil
}
}
if inContinuation {
return "", fmt.Errorf("unexpected end of file: unfinished line continuation")
}
if err := scanner.Err(); err != nil {
return "", fmt.Errorf("error reading environment variable file %q: %w", envFilePath, err)
}
return "", nil
}
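// Example usage (illustrative sketch, not part of the original sources; the
// import path is assumed): a continuation line is joined with a single space
// and the backslash is dropped, so ParseEnv returns "--retry --timeout=5"
// for DB_ARGS below.
package main

import (
	"fmt"
	"os"

	kubeletenv "k8s.io/kubernetes/pkg/kubelet/util/env"
)

func main() {
	f, err := os.CreateTemp("", "demo-*.env")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	_, _ = f.WriteString("# database settings\nDB_ARGS = --retry \\\n  --timeout=5\n")
	_ = f.Close()

	v, err := kubeletenv.ParseEnv(f.Name(), "DB_ARGS")
	fmt.Println(v, err) // "--retry --timeout=5" <nil>
}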
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package format
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// Pod returns a string representing a pod in a consistent human readable format,
// with pod UID as part of the string.
func Pod(pod *v1.Pod) string {
if pod == nil {
return "<nil>"
}
return PodDesc(pod.Name, pod.Namespace, pod.UID)
}
// PodDesc returns a string representing a pod in a consistent human readable format,
// with pod UID as part of the string.
func PodDesc(podName, podNamespace string, podUID types.UID) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format.
return fmt.Sprintf("%s_%s(%s)", podName, podNamespace, podUID)
}
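// Example usage (illustrative sketch, not part of the original sources;
// import path assumed):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/format"
)

func main() {
	fmt.Println(format.PodDesc("nginx", "default", "1234-uid"))
	// Output: nginx_default(1234-uid)
}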
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ioutils
import "io"
// LimitWriter is an analogue of the standard library's io.LimitReader,
// applied to the writer interface.
// LimitWriter returns a Writer that writes to w
// but stops with io.ErrShortWrite after n bytes.
// The underlying implementation is a *LimitedWriter.
func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} }
// A LimitedWriter writes to W but limits the amount of
// data written to just N bytes. Each call to Write
// updates N to reflect the new amount remaining.
// Write returns io.ErrShortWrite when N <= 0 or when the write was truncated.
type LimitedWriter struct {
W io.Writer // underlying writer
N int64 // max bytes remaining
}
func (l *LimitedWriter) Write(p []byte) (n int, err error) {
if l.N <= 0 {
return 0, io.ErrShortWrite
}
truncated := false
if int64(len(p)) > l.N {
p = p[0:l.N]
truncated = true
}
n, err = l.W.Write(p)
l.N -= int64(n)
if err == nil && truncated {
err = io.ErrShortWrite
}
return
}
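// Example usage (illustrative sketch, not part of the original sources;
// import path assumed): the write is truncated at the limit and
// io.ErrShortWrite is returned.
package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
)

func main() {
	var buf bytes.Buffer
	w := ioutils.LimitWriter(&buf, 5)
	n, err := w.Write([]byte("hello world"))
	fmt.Println(n, err == io.ErrShortWrite, buf.String()) // 5 true hello
}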
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manager
import (
"fmt"
"strconv"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/storage"
"k8s.io/kubernetes/pkg/kubelet/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
)
// GetObjectTTLFunc defines a function to get the TTL value.
type GetObjectTTLFunc func() (time.Duration, bool)
// GetObjectFunc defines a function to get object with a given namespace and name.
type GetObjectFunc func(string, string, metav1.GetOptions) (runtime.Object, error)
type objectKey struct {
namespace string
name string
uid types.UID
}
// objectStoreItem is a single item stored in objectStore.
type objectStoreItem struct {
refCount int
data *objectData
}
type objectData struct {
sync.Mutex
object runtime.Object
err error
lastUpdateTime time.Time
}
// objectStore is a local cache of objects.
type objectStore struct {
getObject GetObjectFunc
clock clock.Clock
lock sync.Mutex
items map[objectKey]*objectStoreItem
defaultTTL time.Duration
getTTL GetObjectTTLFunc
}
// NewObjectStore returns a new ttl-based instance of Store interface.
func NewObjectStore(getObject GetObjectFunc, clock clock.Clock, getTTL GetObjectTTLFunc, ttl time.Duration) Store {
return &objectStore{
getObject: getObject,
clock: clock,
items: make(map[objectKey]*objectStoreItem),
defaultTTL: ttl,
getTTL: getTTL,
}
}
func isObjectOlder(newObject, oldObject runtime.Object) bool {
if newObject == nil || oldObject == nil {
return false
}
newVersion, _ := storage.APIObjectVersioner{}.ObjectResourceVersion(newObject)
oldVersion, _ := storage.APIObjectVersioner{}.ObjectResourceVersion(oldObject)
return newVersion < oldVersion
}
func (s *objectStore) AddReference(namespace, name string, _ types.UID) {
key := objectKey{namespace: namespace, name: name}
// AddReference is called from RegisterPod, so it needs to be efficient.
// Thus AddReference() only increases the refCount and invalidates the cached
// data of a given object; Get() is then responsible for fetching it if needed.
s.lock.Lock()
defer s.lock.Unlock()
item, exists := s.items[key]
if !exists {
item = &objectStoreItem{
refCount: 0,
data: &objectData{},
}
s.items[key] = item
}
item.refCount++
// This will trigger fetch on the next Get() operation.
item.data = nil
}
func (s *objectStore) DeleteReference(namespace, name string, _ types.UID) {
key := objectKey{namespace: namespace, name: name}
s.lock.Lock()
defer s.lock.Unlock()
if item, ok := s.items[key]; ok {
item.refCount--
if item.refCount == 0 {
delete(s.items, key)
}
}
}
// GetObjectTTLFromNodeFunc returns a function that returns TTL value
// from a given Node object.
func GetObjectTTLFromNodeFunc(getNode func() (*v1.Node, error)) GetObjectTTLFunc {
return func() (time.Duration, bool) {
node, err := getNode()
if err != nil {
return time.Duration(0), false
}
if node != nil && node.Annotations != nil {
if value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]; ok {
if intValue, err := strconv.Atoi(value); err == nil {
return time.Duration(intValue) * time.Second, true
}
}
}
return time.Duration(0), false
}
}
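// Example (illustrative, not part of the original sources): if getNode
// returns a node annotated with v1.ObjectTTLAnnotationKey
// ("node.alpha.kubernetes.io/ttl") set to "30", the returned function yields
// (30*time.Second, true); without the annotation it yields (0, false) and
// callers fall back to the default TTL.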
func (s *objectStore) isObjectFresh(data *objectData) bool {
objectTTL := s.defaultTTL
if ttl, ok := s.getTTL(); ok {
objectTTL = ttl
}
return s.clock.Now().Before(data.lastUpdateTime.Add(objectTTL))
}
func (s *objectStore) Get(namespace, name string) (runtime.Object, error) {
key := objectKey{namespace: namespace, name: name}
data := func() *objectData {
s.lock.Lock()
defer s.lock.Unlock()
item, exists := s.items[key]
if !exists {
return nil
}
if item.data == nil {
item.data = &objectData{}
}
return item.data
}()
if data == nil {
return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
}
// After updating data in objectStore, lock the data, fetch object if
// needed and return data.
data.Lock()
defer data.Unlock()
if data.err != nil || !s.isObjectFresh(data) {
opts := metav1.GetOptions{}
if data.object != nil && data.err == nil {
// This is just a periodic refresh of an object we successfully fetched previously.
// In this case, serve data from the apiserver cache to reduce the load on both
// etcd and apiserver (the cache is eventually consistent).
util.FromApiserverCache(&opts)
}
object, err := s.getObject(namespace, name, opts)
if err != nil && !apierrors.IsNotFound(err) && data.object == nil && data.err == nil {
// Couldn't fetch the latest object, but there is no cached data to return.
// Return the fetch result instead.
return object, err
}
if (err == nil && !isObjectOlder(object, data.object)) || apierrors.IsNotFound(err) {
// If the fetch succeeded with a newer version of the object, or if the
// object could not be found in the apiserver, update the cached data to
// reflect the current status.
data.object = object
data.err = err
data.lastUpdateTime = s.clock.Now()
}
}
return data.object, data.err
}
// cacheBasedManager keeps a store with objects necessary
// for registered pods. Different implementations of the store
// may result in different semantics for freshness of objects
// (e.g. ttl-based implementation vs watch-based implementation).
type cacheBasedManager struct {
objectStore Store
getReferencedObjects func(*v1.Pod) sets.Set[string]
lock sync.Mutex
registeredPods map[objectKey]*v1.Pod
}
func (c *cacheBasedManager) GetObject(namespace, name string) (runtime.Object, error) {
return c.objectStore.Get(namespace, name)
}
func (c *cacheBasedManager) RegisterPod(pod *v1.Pod) {
names := c.getReferencedObjects(pod)
c.lock.Lock()
defer c.lock.Unlock()
var prev *v1.Pod
key := objectKey{namespace: pod.Namespace, name: pod.Name, uid: pod.UID}
prev = c.registeredPods[key]
c.registeredPods[key] = pod
// To minimize unnecessary API requests to the API server for the configmap/secret get API
// only invoke AddReference the first time RegisterPod is called for a pod.
if prev == nil {
for name := range names {
c.objectStore.AddReference(pod.Namespace, name, pod.UID)
}
} else {
prevNames := c.getReferencedObjects(prev)
// Add new references
for name := range names {
if !prevNames.Has(name) {
c.objectStore.AddReference(pod.Namespace, name, pod.UID)
}
}
// Remove dropped references
for prevName := range prevNames {
if !names.Has(prevName) {
c.objectStore.DeleteReference(pod.Namespace, prevName, pod.UID)
}
}
}
}
func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) {
var prev *v1.Pod
key := objectKey{namespace: pod.Namespace, name: pod.Name, uid: pod.UID}
c.lock.Lock()
defer c.lock.Unlock()
prev = c.registeredPods[key]
delete(c.registeredPods, key)
if prev != nil {
for name := range c.getReferencedObjects(prev) {
c.objectStore.DeleteReference(prev.Namespace, name, prev.UID)
}
}
}
// NewCacheBasedManager creates a manager that keeps a cache of all objects
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, the cached versions of all objects
//   it is referencing are invalidated
// - every GetObject() call tries to fetch the value from the local cache; if it is
//   not there, invalidated, or too old, we fetch it from the apiserver and refresh the
//   value in the cache; otherwise it is just served from the cache
func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager {
return &cacheBasedManager{
objectStore: objectStore,
getReferencedObjects: getReferencedObjects,
registeredPods: make(map[objectKey]*v1.Pod),
}
}
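// Illustrative sketch (not part of the original sources): wiring the
// primitives above into a TTL-based manager for Secrets. getSecret, getNode,
// and the reference-collection closure are hypothetical inputs.
package manager

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/utils/clock"
)

func newTTLSecretManagerSketch(getSecret GetObjectFunc, getNode func() (*v1.Node, error)) Manager {
	store := NewObjectStore(getSecret, clock.RealClock{}, GetObjectTTLFromNodeFunc(getNode), time.Minute)
	return NewCacheBasedManager(store, func(pod *v1.Pod) sets.Set[string] {
		// Collect the names of all secrets referenced by the pod's volumes.
		names := sets.New[string]()
		for _, v := range pod.Spec.Volumes {
			if v.Secret != nil {
				names.Insert(v.Secret.SecretName)
			}
		}
		return names
	})
}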
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manager
import (
"context"
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)
type watchObjectFunc func(string, metav1.ListOptions) (watch.Interface, error)
type newObjectFunc func() runtime.Object
type isImmutableFunc func(runtime.Object) bool
// objectCacheItem is a single item stored in objectCache.
type objectCacheItem struct {
refMap map[types.UID]int
store *cacheStore
reflector *cache.Reflector
hasSynced func() (bool, error)
// waitGroup is used to ensure that there won't be two concurrent calls to reflector.Run
waitGroup sync.WaitGroup
// lock guards access to and modification of lastAccessTime, stopped, and immutable,
// and protects stopCh from being closed multiple times.
lock sync.Mutex
lastAccessTime time.Time
stopped bool
immutable bool
stopCh chan struct{}
}
func (i *objectCacheItem) stop() bool {
i.lock.Lock()
defer i.lock.Unlock()
return i.stopThreadUnsafe()
}
func (i *objectCacheItem) stopThreadUnsafe() bool {
if i.stopped {
return false
}
i.stopped = true
close(i.stopCh)
if !i.immutable {
i.store.unsetInitialized()
}
return true
}
func (i *objectCacheItem) setLastAccessTime(time time.Time) {
i.lock.Lock()
defer i.lock.Unlock()
i.lastAccessTime = time
}
func (i *objectCacheItem) setImmutable() {
i.lock.Lock()
defer i.lock.Unlock()
i.immutable = true
}
func (i *objectCacheItem) stopIfIdle(now time.Time, maxIdleTime time.Duration) bool {
i.lock.Lock()
defer i.lock.Unlock()
// Ensure that we don't try to stop a reflector that is not yet initialized.
// With an overloaded kube-apiserver, if a list request is already being
// processed, all of that work would be lost and would have to be retried.
if !i.stopped && i.store.hasSynced() && now.After(i.lastAccessTime.Add(maxIdleTime)) {
return i.stopThreadUnsafe()
}
return false
}
func (i *objectCacheItem) restartReflectorIfNeeded() {
i.lock.Lock()
defer i.lock.Unlock()
if i.immutable || !i.stopped {
return
}
i.stopCh = make(chan struct{})
i.stopped = false
go i.startReflector()
}
func (i *objectCacheItem) startReflector() {
i.waitGroup.Wait()
i.waitGroup.Add(1)
defer i.waitGroup.Done()
i.reflector.Run(i.stopCh)
}
// cacheStore wraps cache.Store and overrides the Replace function so it can mark the store as initialized.
type cacheStore struct {
cache.Store
lock sync.Mutex
initialized bool
}
func (c *cacheStore) Replace(list []interface{}, resourceVersion string) error {
c.lock.Lock()
defer c.lock.Unlock()
err := c.Store.Replace(list, resourceVersion)
if err != nil {
return err
}
c.initialized = true
return nil
}
func (c *cacheStore) hasSynced() bool {
c.lock.Lock()
defer c.lock.Unlock()
return c.initialized
}
func (c *cacheStore) unsetInitialized() {
c.lock.Lock()
defer c.lock.Unlock()
c.initialized = false
}
// objectCache is a local cache of objects propagated via
// individual watches.
type objectCache struct {
listObject listObjectFunc
watchObject watchObjectFunc
newObject newObjectFunc
isImmutable isImmutableFunc
groupResource schema.GroupResource
clock clock.Clock
maxIdleTime time.Duration
lock sync.RWMutex
items map[objectKey]*objectCacheItem
stopped bool
}
const minIdleTime = 1 * time.Minute
// NewObjectCache returns a new watch-based instance of Store interface.
func NewObjectCache(
listObject listObjectFunc,
watchObject watchObjectFunc,
newObject newObjectFunc,
isImmutable isImmutableFunc,
groupResource schema.GroupResource,
clock clock.Clock,
maxIdleTime time.Duration,
stopCh <-chan struct{}) Store {
if maxIdleTime < minIdleTime {
maxIdleTime = minIdleTime
}
store := &objectCache{
listObject: listObject,
watchObject: watchObject,
newObject: newObject,
isImmutable: isImmutable,
groupResource: groupResource,
clock: clock,
maxIdleTime: maxIdleTime,
items: make(map[objectKey]*objectCacheItem),
}
go wait.Until(store.startRecycleIdleWatch, time.Minute, stopCh)
go store.shutdownWhenStopped(stopCh)
return store
}
func (c *objectCache) newStore() *cacheStore {
// TODO: We may consider creating a dedicated store keeping just a single
// item, instead of using a generic store implementation for this purpose.
// However, simple benchmarks show that the memory overhead would only
// decrease from ~600B to ~300B per object, so we are not optimizing this
// until we see a good reason to.
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
return &cacheStore{store, sync.Mutex{}, false}
}
func (c *objectCache) newReflectorLocked(namespace, name string) *objectCacheItem {
fieldSelector := fields.Set{"metadata.name": name}.AsSelector().String()
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
return c.listObject(namespace, options)
}
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
return c.watchObject(namespace, options)
}
store := c.newStore()
reflector := cache.NewReflectorWithOptions(
&cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc},
c.newObject(),
store,
cache.ReflectorOptions{
Name: fmt.Sprintf("object-%q/%q", namespace, name),
// Bump default 5m MinWatchTimeout to avoid recreating
// watches too often.
MinWatchTimeout: 30 * time.Minute,
},
)
item := &objectCacheItem{
refMap: make(map[types.UID]int),
store: store,
reflector: reflector,
hasSynced: func() (bool, error) { return store.hasSynced(), nil },
stopCh: make(chan struct{}),
}
// Don't start reflector if Kubelet is already shutting down.
if !c.stopped {
go item.startReflector()
}
return item
}
func (c *objectCache) AddReference(namespace, name string, referencedFrom types.UID) {
key := objectKey{namespace: namespace, name: name}
// AddReference is called from RegisterPod, so it needs to be efficient.
// It therefore only increases the refCount and, on the first registration
// of a given object, starts the corresponding reflector.
// It is the responsibility of the first Get operation to wait until the
// reflector has propagated the store.
c.lock.Lock()
defer c.lock.Unlock()
item, exists := c.items[key]
if !exists {
item = c.newReflectorLocked(namespace, name)
c.items[key] = item
}
item.refMap[referencedFrom]++
}
func (c *objectCache) DeleteReference(namespace, name string, referencedFrom types.UID) {
key := objectKey{namespace: namespace, name: name}
c.lock.Lock()
defer c.lock.Unlock()
if item, ok := c.items[key]; ok {
item.refMap[referencedFrom]--
if item.refMap[referencedFrom] == 0 {
delete(item.refMap, referencedFrom)
}
if len(item.refMap) == 0 {
// Stop the underlying reflector.
item.stop()
delete(c.items, key)
}
}
}
// key returns the key of an object with a given name and namespace.
// This has to be in-sync with cache.MetaNamespaceKeyFunc.
func (c *objectCache) key(namespace, name string) string {
if len(namespace) > 0 {
return namespace + "/" + name
}
return name
}
func (c *objectCache) isStopped() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.stopped
}
func (c *objectCache) Get(namespace, name string) (runtime.Object, error) {
key := objectKey{namespace: namespace, name: name}
c.lock.RLock()
item, exists := c.items[key]
c.lock.RUnlock()
if !exists {
return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
}
// Record the last access time regardless of whether the Get succeeds.
// This protects against premature (racy) reflector closure.
item.setLastAccessTime(c.clock.Now())
// Don't restart reflector if Kubelet is already shutting down.
if !c.isStopped() {
item.restartReflectorIfNeeded()
}
if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil {
return nil, fmt.Errorf("failed to sync %s cache: %v", c.groupResource.String(), err)
}
obj, exists, err := item.store.GetByKey(c.key(namespace, name))
if err != nil {
return nil, err
}
if !exists {
return nil, apierrors.NewNotFound(c.groupResource, name)
}
if object, ok := obj.(runtime.Object); ok {
// If the returned object is immutable, stop the reflector.
//
// NOTE: we may potentially not even start the reflector if the object is
// already immutable. However, given that:
// - we want to also handle the case when an object is marked as immutable later
// - Secrets and ConfigMaps are periodically fetched by the volumemanager anyway
// - doing that wouldn't provide a visible scalability/performance gain - we
//   already get it from stopping here
// - doing that would require significant refactoring of the reflector
// we limit ourselves to just quickly stopping the reflector here.
if c.isImmutable(object) {
item.setImmutable()
if item.stop() {
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
logger.V(4).Info("Stopped watching for changes - object is immutable", "obj", klog.KRef(namespace, name))
}
}
return object, nil
}
return nil, fmt.Errorf("unexpected object type: %v", obj)
}
func (c *objectCache) startRecycleIdleWatch() {
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
c.lock.Lock()
defer c.lock.Unlock()
for key, item := range c.items {
if item.stopIfIdle(c.clock.Now(), c.maxIdleTime) {
logger.V(4).Info("Not acquired for long time, Stopped watching for changes", "objectKey", key, "maxIdleTime", c.maxIdleTime)
}
}
}
func (c *objectCache) shutdownWhenStopped(stopCh <-chan struct{}) {
<-stopCh
c.lock.Lock()
defer c.lock.Unlock()
c.stopped = true
for _, item := range c.items {
item.stop()
}
}
// NewWatchBasedManager creates a manager that keeps a cache of all objects
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, we start individual watches for all
// referenced objects that aren't referenced from other registered pods
// - every GetObject() returns a value from local cache propagated via watches
func NewWatchBasedManager(
listObject listObjectFunc,
watchObject watchObjectFunc,
newObject newObjectFunc,
isImmutable isImmutableFunc,
groupResource schema.GroupResource,
resyncInterval time.Duration,
getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager {
// If a configmap/secret is used as a volume, the volumeManager will visit the
// objectCacheItem every resyncInterval cycle; we only want to stop the
// objectCacheItems that are referenced solely by environment variables.
// So maxIdleTime is set to an integer multiple of resyncInterval,
// currently 5 times.
maxIdleTime := resyncInterval * 5
// TODO propagate stopCh from the higher level.
objectStore := NewObjectCache(listObject, watchObject, newObject, isImmutable, groupResource, clock.RealClock{}, maxIdleTime, wait.NeverStop)
return NewCacheBasedManager(objectStore, getReferencedObjects)
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"sync"
"time"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
)
type NodeStartupLatencyTracker interface {
// This function may be called across Kubelet restart.
RecordAttemptRegisterNode()
// This function should not be called across Kubelet restart.
RecordRegisteredNewNode()
// This function may be called across Kubelet restart.
RecordNodeReady()
}
type basicNodeStartupLatencyTracker struct {
lock sync.Mutex
bootTime time.Time
kubeletStartTime time.Time
firstRegistrationAttemptTime time.Time
firstRegisteredNewNodeTime time.Time
firstNodeReadyTime time.Time
// For testability
clock clock.Clock
}
func NewNodeStartupLatencyTracker() NodeStartupLatencyTracker {
bootTime, err := GetBootTime()
if err != nil {
bootTime = time.Time{}
}
return &basicNodeStartupLatencyTracker{
bootTime: bootTime,
kubeletStartTime: time.Now(),
clock: clock.RealClock{},
}
}
func (n *basicNodeStartupLatencyTracker) RecordAttemptRegisterNode() {
n.lock.Lock()
defer n.lock.Unlock()
if !n.firstRegistrationAttemptTime.IsZero() {
return
}
n.firstRegistrationAttemptTime = n.clock.Now()
}
func (n *basicNodeStartupLatencyTracker) RecordRegisteredNewNode() {
n.lock.Lock()
defer n.lock.Unlock()
if n.firstRegistrationAttemptTime.IsZero() || !n.firstRegisteredNewNodeTime.IsZero() {
return
}
n.firstRegisteredNewNodeTime = n.clock.Now()
if !n.bootTime.IsZero() {
metrics.NodeStartupPreKubeletDuration.Set(n.kubeletStartTime.Sub(n.bootTime).Seconds())
}
metrics.NodeStartupPreRegistrationDuration.Set(n.firstRegistrationAttemptTime.Sub(n.kubeletStartTime).Seconds())
metrics.NodeStartupRegistrationDuration.Set(n.firstRegisteredNewNodeTime.Sub(n.firstRegistrationAttemptTime).Seconds())
}
func (n *basicNodeStartupLatencyTracker) RecordNodeReady() {
n.lock.Lock()
defer n.lock.Unlock()
if n.firstRegisteredNewNodeTime.IsZero() || !n.firstNodeReadyTime.IsZero() {
return
}
n.firstNodeReadyTime = n.clock.Now()
metrics.NodeStartupPostRegistrationDuration.Set(n.firstNodeReadyTime.Sub(n.firstRegisteredNewNodeTime).Seconds())
if !n.bootTime.IsZero() {
metrics.NodeStartupDuration.Set(n.firstNodeReadyTime.Sub(n.bootTime).Seconds())
}
}
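// Worked example (illustrative, not part of the original sources): with
// bootTime=t0, kubeletStartTime=t0+20s, firstRegistrationAttemptTime=t0+22s,
// firstRegisteredNewNodeTime=t0+25s, and firstNodeReadyTime=t0+30s, the
// tracker reports pre-kubelet=20s, pre-registration=2s, registration=3s,
// post-registration=5s, and a total node startup duration of 30s.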
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// SetNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
// a Node OwnerReference on the given lease object
func SetNodeOwnerFunc(ctx context.Context, c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
return func(lease *coordinationv1.Lease) error {
// Setting owner reference needs node's UID. Note that it is different from
// kubelet.nodeRef.UID. When lease is initially created, it is possible that
// the connection between master and node is not ready yet. So try to set
// owner reference every time when renewing the lease, until successful.
if len(lease.OwnerReferences) == 0 {
if node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}); err == nil {
lease.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind,
Name: nodeName,
UID: node.UID,
},
}
} else {
logger := klog.FromContext(ctx)
logger.Error(err, "Failed to get node when trying to set owner ref to the node lease", "node", klog.KRef("", nodeName))
return err
}
}
return nil
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
)
// PodStartupLatencyTracker records key moments for startup latency calculation,
// e.g. image pulling or pod observed running on watch.
type PodStartupLatencyTracker interface {
ObservedPodOnWatch(pod *v1.Pod, when time.Time)
RecordImageStartedPulling(podUID types.UID)
RecordImageFinishedPulling(podUID types.UID)
RecordStatusUpdated(pod *v1.Pod)
DeletePodStartupState(podUID types.UID)
}
type basicPodStartupLatencyTracker struct {
// protect against concurrent read and write on pods map
lock sync.Mutex
pods map[types.UID]*perPodState
// metrics for the first network pod only
firstNetworkPodSeen bool
// For testability
clock clock.Clock
}
type perPodState struct {
firstStartedPulling time.Time
lastFinishedPulling time.Time
// first time the pod status changed to Running
observedRunningTime time.Time
// whether the pod's startup latency has already been recorded
metricRecorded bool
}
// NewPodStartupLatencyTracker creates an instance of PodStartupLatencyTracker
func NewPodStartupLatencyTracker() PodStartupLatencyTracker {
return &basicPodStartupLatencyTracker{
pods: map[types.UID]*perPodState{},
clock: clock.RealClock{},
}
}
func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when time.Time) {
p.lock.Lock()
defer p.lock.Unlock()
// if the pod is terminal, we do not have to track it anymore for startup
if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
delete(p.pods, pod.UID)
return
}
state := p.pods[pod.UID]
if state == nil {
// Create a new record for the pod only if it has not yet been acknowledged by the Kubelet.
// This is required because we want to record the metric only for pods that were scheduled
// after the Kubelet started.
if pod.Status.StartTime.IsZero() {
p.pods[pod.UID] = &perPodState{}
}
return
}
if state.observedRunningTime.IsZero() {
// skip, pod didn't start yet
return
}
if state.metricRecorded {
// skip, pod's latency already recorded
return
}
if hasPodStartedSLO(pod) {
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
podStartingDuration := when.Sub(pod.CreationTimestamp.Time)
imagePullingDuration := state.lastFinishedPulling.Sub(state.firstStartedPulling)
podStartSLOduration := (podStartingDuration - imagePullingDuration).Seconds()
logger.Info("Observed pod startup duration",
"pod", klog.KObj(pod),
"podStartSLOduration", podStartSLOduration,
"podStartE2EDuration", podStartingDuration,
"podCreationTimestamp", pod.CreationTimestamp.Time,
"firstStartedPulling", state.firstStartedPulling,
"lastFinishedPulling", state.lastFinishedPulling,
"observedRunningTime", state.observedRunningTime,
"watchObservedRunningTime", when)
metrics.PodStartSLIDuration.WithLabelValues().Observe(podStartSLOduration)
metrics.PodStartTotalDuration.WithLabelValues().Observe(podStartingDuration.Seconds())
state.metricRecorded = true
// If this is the first pod with a network, track the start values;
// these metrics help identify problems with the CNI plugin.
if !pod.Spec.HostNetwork && !p.firstNetworkPodSeen {
metrics.FirstNetworkPodStartSLIDuration.Set(podStartSLOduration)
p.firstNetworkPodSeen = true
}
}
}
func (p *basicPodStartupLatencyTracker) RecordImageStartedPulling(podUID types.UID) {
p.lock.Lock()
defer p.lock.Unlock()
state := p.pods[podUID]
if state == nil {
return
}
if state.firstStartedPulling.IsZero() {
state.firstStartedPulling = p.clock.Now()
}
}
func (p *basicPodStartupLatencyTracker) RecordImageFinishedPulling(podUID types.UID) {
p.lock.Lock()
defer p.lock.Unlock()
state := p.pods[podUID]
if state == nil {
return
}
state.lastFinishedPulling = p.clock.Now() // Now() is always greater than values recorded in the past.
}
func (p *basicPodStartupLatencyTracker) RecordStatusUpdated(pod *v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
state := p.pods[pod.UID]
if state == nil {
return
}
if state.metricRecorded {
// skip, pod latency already recorded
return
}
if !state.observedRunningTime.IsZero() {
// skip, pod already started
return
}
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
if hasPodStartedSLO(pod) {
logger.V(3).Info("Mark when the pod was running for the first time", "pod", klog.KObj(pod), "rv", pod.ResourceVersion)
state.observedRunningTime = p.clock.Now()
}
}
// hasPodStartedSLO checks whether each container of the given pod has been started at least once.
//
// This should reflect "Pod startup latency SLI" definition
// ref: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md
func hasPodStartedSLO(pod *v1.Pod) bool {
for _, cs := range pod.Status.ContainerStatuses {
if cs.State.Running == nil || cs.State.Running.StartedAt.IsZero() {
return false
}
}
return true
}
func (p *basicPodStartupLatencyTracker) DeletePodStartupState(podUID types.UID) {
p.lock.Lock()
defer p.lock.Unlock()
delete(p.pods, podUID)
}
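// Worked example (illustrative, not part of the original sources): a pod
// created at t=0 is observed running on the watch at t=10s, having spent
// t=2s..6s pulling images. Then podStartE2EDuration = 10s and
// podStartSLOduration = 10s - 4s = 6s; only the image-pull time is excluded,
// matching the pod startup latency SLI.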
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
)
// WorkQueue allows queuing items with a timestamp. An item is
// considered ready to process if the timestamp has expired.
type WorkQueue interface {
// GetWork dequeues and returns all ready items.
GetWork() []types.UID
// Enqueue inserts a new item or overwrites an existing item.
Enqueue(item types.UID, delay time.Duration)
}
type basicWorkQueue struct {
clock clock.Clock
lock sync.Mutex
queue map[types.UID]time.Time
}
var _ WorkQueue = &basicWorkQueue{}
// NewBasicWorkQueue returns a new basic WorkQueue with the provided clock
func NewBasicWorkQueue(clock clock.Clock) WorkQueue {
queue := make(map[types.UID]time.Time)
return &basicWorkQueue{queue: queue, clock: clock}
}
func (q *basicWorkQueue) GetWork() []types.UID {
q.lock.Lock()
defer q.lock.Unlock()
now := q.clock.Now()
var items []types.UID
for k, v := range q.queue {
if v.Before(now) {
items = append(items, k)
delete(q.queue, k)
}
}
return items
}
func (q *basicWorkQueue) Enqueue(item types.UID, delay time.Duration) {
q.lock.Lock()
defer q.lock.Unlock()
q.queue[item] = q.clock.Now().Add(delay)
}
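// Example usage (illustrative sketch, not part of the original sources;
// import paths assumed): items become visible to GetWork only once their
// delay has expired. A fake clock makes the expiry deterministic.
package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := queue.NewBasicWorkQueue(fakeClock)
	q.Enqueue("pod-1", 10*time.Second)
	fmt.Println(q.GetWork()) // [] - not ready yet
	fakeClock.Step(11 * time.Second)
	fmt.Println(q.GetWork()) // [pod-1]
}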
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sliceutils
import (
v1 "k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// PodsByCreationTime makes an array of pods sortable by their creation
// timestamps in ascending order.
type PodsByCreationTime []*v1.Pod
func (s PodsByCreationTime) Len() int {
return len(s)
}
func (s PodsByCreationTime) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s PodsByCreationTime) Less(i, j int) bool {
return s[i].CreationTimestamp.Before(&s[j].CreationTimestamp)
}
// ByImageSize makes an array of images sortable by their size in descending
// order.
type ByImageSize []kubecontainer.Image
func (a ByImageSize) Less(i, j int) bool {
if a[i].Size == a[j].Size {
return a[i].ID > a[j].ID
}
return a[i].Size > a[j].Size
}
func (a ByImageSize) Len() int { return len(a) }
func (a ByImageSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
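// Example usage (illustrative sketch, not part of the original sources;
// import path assumed): both types implement sort.Interface, so they plug
// directly into the standard sort package.
package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
)

func main() {
	pods := []*v1.Pod{ /* ... populated elsewhere ... */ }
	sort.Sort(sliceutils.PodsByCreationTime(pods)) // oldest first
	fmt.Println(len(pods), "pods sorted by creation time")
}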
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"fmt"
"os"
"path/filepath"
"strings"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)
const (
// Name prefix for the temporary files.
tmpPrefix = "."
)
// FileStore is an implementation of the Store interface which stores data in files.
type FileStore struct {
// Absolute path to the base directory for storing data files.
directoryPath string
// filesystem to use.
filesystem utilfs.Filesystem
}
// NewFileStore returns an instance of FileStore.
func NewFileStore(path string, fs utilfs.Filesystem) (Store, error) {
if err := fs.MkdirAll(path, 0755); err != nil {
return nil, err
}
return &FileStore{directoryPath: path, filesystem: fs}, nil
}
// Write writes the given data to a file named key.
func (f *FileStore) Write(key string, data []byte) error {
if err := ValidateKey(key); err != nil {
return err
}
if err := f.filesystem.MkdirAll(f.directoryPath, 0755); err != nil {
return err
}
return writeFile(f.filesystem, f.getPathByKey(key), data)
}
// Read reads the data from the file named key.
func (f *FileStore) Read(key string) ([]byte, error) {
if err := ValidateKey(key); err != nil {
return nil, err
}
bytes, err := f.filesystem.ReadFile(f.getPathByKey(key))
if os.IsNotExist(err) {
return bytes, ErrKeyNotFound
}
return bytes, err
}
// Delete deletes the key file.
func (f *FileStore) Delete(key string) error {
if err := ValidateKey(key); err != nil {
return err
}
return removePath(f.filesystem, f.getPathByKey(key))
}
// List returns all keys in the store.
func (f *FileStore) List() ([]string, error) {
keys := make([]string, 0)
files, err := f.filesystem.ReadDir(f.directoryPath)
if err != nil {
return keys, err
}
for _, f := range files {
if !strings.HasPrefix(f.Name(), tmpPrefix) {
keys = append(keys, f.Name())
}
}
return keys, nil
}
// getPathByKey returns the full path of the file for the key.
func (f *FileStore) getPathByKey(key string) string {
return filepath.Join(f.directoryPath, key)
}
// writeFile atomically writes data to path by writing to a temporary file and renaming it into place.
func writeFile(fs utilfs.Filesystem, path string, data []byte) (retErr error) {
// Create a temporary file in the base directory of `path` with a prefix.
tmpFile, err := fs.TempFile(filepath.Dir(path), tmpPrefix)
if err != nil {
return err
}
tmpPath := tmpFile.Name()
shouldClose := true
defer func() {
// Close the file.
if shouldClose {
if err := tmpFile.Close(); err != nil {
if retErr == nil {
retErr = fmt.Errorf("close error: %v", err)
} else {
retErr = fmt.Errorf("failed to close temp file after error %v; close error: %v", retErr, err)
}
}
}
// Clean up the temp file on error.
if retErr != nil && tmpPath != "" {
if err := removePath(fs, tmpPath); err != nil {
retErr = fmt.Errorf("failed to remove the temporary file (%q) after error %v; remove error: %v", tmpPath, retErr, err)
}
}
}()
// Write data.
if _, err := tmpFile.Write(data); err != nil {
return err
}
// Sync file.
if err := tmpFile.Sync(); err != nil {
return err
}
// Close the file before renaming.
err = tmpFile.Close()
shouldClose = false
if err != nil {
return err
}
return fs.Rename(tmpPath, path)
}
func removePath(fs utilfs.Filesystem, path string) error {
if err := fs.Remove(path); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
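// Example usage (illustrative sketch, not part of the original sources;
// import paths assumed): a file-backed store over the real filesystem.
// Write is atomic via the temp-file-and-rename pattern above.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/store"
	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)

func main() {
	s, err := store.NewFileStore("/tmp/kubelet-demo", &utilfs.DefaultFs{})
	if err != nil {
		panic(err)
	}
	if err := s.Write("checkpoint-1", []byte("data")); err != nil {
		panic(err)
	}
	b, _ := s.Read("checkpoint-1")
	fmt.Println(string(b)) // data
}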
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"fmt"
"regexp"
)
const (
keyMaxLength = 250
keyCharFmt string = "[A-Za-z0-9]"
keyExtCharFmt string = "[-A-Za-z0-9_.]"
qualifiedKeyFmt string = "(" + keyCharFmt + keyExtCharFmt + "*)?" + keyCharFmt
)
var (
// Key must consist of alphanumeric characters, '-', '_' or '.', and must start
// and end with an alphanumeric character.
keyRegex = regexp.MustCompile("^" + qualifiedKeyFmt + "$")
// ErrKeyNotFound is the error returned if key is not found in Store.
ErrKeyNotFound = fmt.Errorf("key is not found")
)
// Store provides the interface for storing keyed data.
// Store must be thread-safe
type Store interface {
// key must contain one or more characters in [A-Za-z0-9]
// Write writes data with key.
Write(key string, data []byte) error
// Read retrieves data with key
// Read must return ErrKeyNotFound if key is not found.
Read(key string) ([]byte, error)
// Delete deletes data by key
// Delete must not return error if key does not exist
Delete(key string) error
// List lists all existing keys.
List() ([]string, error)
}
// ValidateKey returns an error if the given key does not meet the requirement
// of the key format and length.
func ValidateKey(key string) error {
if len(key) <= keyMaxLength && keyRegex.MatchString(key) {
return nil
}
return fmt.Errorf("invalid key: %q", key)
}
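// Example (illustrative, not part of the original sources):
// "pod-uid_container.log" is a valid key, while ".hidden" and "a/b" are
// rejected: keys must start and end with an alphanumeric character, may
// contain only [-A-Za-z0-9_.], and must be at most 250 characters long.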
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package swap
import (
"bytes"
"context"
"errors"
"os"
sysruntime "runtime"
"strings"
"sync"
inuserns "github.com/moby/sys/userns"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/klog/v2"
utilkernel "k8s.io/kubernetes/pkg/util/kernel"
"k8s.io/mount-utils"
)
var (
tmpfsNoswapOptionSupported bool
tmpfsNoswapOptionAvailabilityOnce sync.Once
swapOn bool
swapOnErr error
swapOnOnce sync.Once
)
const TmpfsNoswapOption = "noswap"
func IsTmpfsNoswapOptionSupported(mounter mount.Interface, mountPath string) bool {
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
isTmpfsNoswapOptionSupportedHelper := func() bool {
if sysruntime.GOOS == "windows" {
return false
}
if inuserns.RunningInUserNS() {
// Turning off swap in unprivileged tmpfs mounts is unsupported
// https://github.com/torvalds/linux/blob/v6.8/mm/shmem.c#L4004-L4011
// https://github.com/kubernetes/kubernetes/issues/125137
logger.Info("Running under a user namespace - tmpfs noswap is not supported")
return false
}
kernelVersion, err := utilkernel.GetVersion()
if err != nil {
logger.Error(err, "cannot determine kernel version, unable to determine is tmpfs noswap is supported")
return false
}
if kernelVersion.AtLeast(version.MustParseGeneric(utilkernel.TmpfsNoswapSupportKernelVersion)) {
return true
}
if mountPath == "" {
logger.Error(errors.New("mount path is empty, falling back to /tmp"), "")
}
mountPath, err = os.MkdirTemp(mountPath, "tmpfs-noswap-test-")
if err != nil {
logger.Info("error creating dir to test if tmpfs noswap is enabled. Assuming not supported", "mount path", mountPath, "error", err)
return false
}
defer func() {
err = os.RemoveAll(mountPath)
if err != nil {
logger.Error(err, "error removing test tmpfs dir", "mount path", mountPath)
}
}()
err = mounter.MountSensitiveWithoutSystemd("tmpfs", mountPath, "tmpfs", []string{TmpfsNoswapOption}, nil)
if err != nil {
logger.Info("error mounting tmpfs with the noswap option. Assuming not supported", "error", err)
return false
}
err = mounter.Unmount(mountPath)
if err != nil {
logger.Error(err, "error unmounting test tmpfs dir", "mount path", mountPath)
}
return true
}
tmpfsNoswapOptionAvailabilityOnce.Do(func() {
tmpfsNoswapOptionSupported = isTmpfsNoswapOptionSupportedHelper()
})
return tmpfsNoswapOptionSupported
}
// isSwapOnAccordingToProcSwaps takes the content of /proc/swaps as input and returns true if swap is enabled.
func isSwapOnAccordingToProcSwaps(logger klog.Logger, procSwapsContent []byte) bool {
procSwapsContent = bytes.TrimSpace(procSwapsContent) // extra trailing \n
procSwapsStr := string(procSwapsContent)
procSwapsLines := strings.Split(procSwapsStr, "\n")
// If there is more than one line (table headers) in /proc/swaps then swap is enabled
isSwapOn := len(procSwapsLines) > 1
if isSwapOn {
logger.Info("Swap is on", "/proc/swaps contents", procSwapsStr)
}
return isSwapOn
}
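// Example (illustrative, not part of the original sources): a /proc/swaps
// containing only the header line "Filename Type Size Used Priority" reports
// swap off, while an additional line such as
// "/dev/dm-1 partition 8388604 0 -2" reports swap on.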
// IsSwapOn detects whether swap is enabled on the system by inspecting
// /proc/swaps. If the file does not exist, swap is assumed to be disabled.
// On Windows, swap is always reported as disabled.
func IsSwapOn() (bool, error) {
isSwapOnHelper := func() (bool, error) {
if sysruntime.GOOS == "windows" {
return false, nil
}
// TODO: it needs to be replaced by a proper context in the future
ctx := context.TODO()
logger := klog.FromContext(ctx)
const swapFilePath = "/proc/swaps"
procSwapsContent, err := os.ReadFile(swapFilePath)
if err != nil {
if os.IsNotExist(err) {
logger.Info("File does not exist, assuming that swap is disabled", "path", swapFilePath)
return false, nil
}
return false, err
}
return isSwapOnAccordingToProcSwaps(logger, procSwapsContent), nil
}
swapOnOnce.Do(func() {
swapOn, swapOnErr = isSwapOnHelper()
})
return swapOn, swapOnErr
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/filesystem"
)
// FromApiserverCache modifies <opts> so that the GET request will
// be served from apiserver cache instead of from etcd.
func FromApiserverCache(opts *metav1.GetOptions) {
opts.ResourceVersion = "0"
}
var IsUnixDomainSocket = filesystem.IsUnixDomainSocket
// GetNodenameForKernel gets hostname value to set in the hostname field (the nodename field of struct utsname) of the pod.
func GetNodenameForKernel(hostname string, hostDomainName string, setHostnameAsFQDN *bool) (string, error) {
kernelHostname := hostname
// The FQDN must be at most 64 characters to fit in the Linux kernel nodename field (65 bytes: 64 characters plus the terminating null).
const fqdnMaxLen = 64
if len(hostDomainName) > 0 && setHostnameAsFQDN != nil && *setHostnameAsFQDN {
fqdn := fmt.Sprintf("%s.%s", hostname, hostDomainName)
// The FQDN must not exceed fqdnMaxLen characters.
if len(fqdn) > fqdnMaxLen {
return "", fmt.Errorf("failed to construct FQDN from pod hostname and cluster domain, FQDN %s is too long (%d characters is the max, %d characters requested)", fqdn, fqdnMaxLen, len(fqdn))
}
kernelHostname = fqdn
}
return kernelHostname, nil
}
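// Example (illustrative, not part of the original sources): hostname "web-0"
// with hostDomainName "default.svc.cluster.local" and setHostnameAsFQDN=true
// yields the kernel nodename "web-0.default.svc.cluster.local" (31
// characters, within the 64-character limit).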
// GetContainerByIndex validates and extracts the container at index "idx" from
// "containers" with respect to "statuses".
// It returns true if the container is valid, else returns false.
func GetContainerByIndex(containers []v1.Container, statuses []v1.ContainerStatus, idx int) (v1.Container, bool) {
if idx < 0 || idx >= len(containers) || idx >= len(statuses) {
return v1.Container{}, false
}
if statuses[idx].Name != containers[idx].Name {
return v1.Container{}, false
}
return containers[idx], true
}
//go:build linux
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
libcontainercgroups "github.com/opencontainers/cgroups"
)
// IsCgroup2UnifiedMode returns true if the cgroup v2 unified mode is enabled
func IsCgroup2UnifiedMode() bool {
return libcontainercgroups.IsCgroup2UnifiedMode()
}
//go:build freebsd || linux || darwin
// +build freebsd linux darwin
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net/url"
"path/filepath"
)
const (
// unixProtocol is the network protocol of unix socket.
unixProtocol = "unix"
)
// LocalEndpoint returns the full path to a unix socket at the given endpoint
func LocalEndpoint(path, file string) (string, error) {
u := url.URL{
Scheme: unixProtocol,
Path: path,
}
return filepath.Join(u.String(), file+".sock"), nil
}
// NormalizePath is a no-op for Linux for now
func NormalizePath(path string) string {
return path
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"fmt"
"runtime"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
"k8s.io/kubernetes/pkg/kubelet/configmap"
"k8s.io/kubernetes/pkg/kubelet/podcertificate"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/token"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
)
// NewInitializedVolumePluginMgr returns a new instance of
// volume.VolumePluginMgr initialized with the kubelet's implementation of the
// volume.VolumeHost interface.
//
// kubelet - used by VolumeHost methods to expose kubelet-specific parameters
// plugins - used to initialize volumePluginMgr
func NewInitializedVolumePluginMgr(
kubelet *Kubelet,
secretManager secret.Manager,
configMapManager configmap.Manager,
tokenManager *token.Manager,
clusterTrustBundleManager clustertrustbundle.Manager,
plugins []volume.VolumePlugin,
prober volume.DynamicPluginProber) (*volume.VolumePluginMgr, error) {
// Initialize csiDriverLister before calling InitPlugins
var informerFactory informers.SharedInformerFactory
var csiDriverLister storagelisters.CSIDriverLister
var csiDriversSynced cache.InformerSynced
const resyncPeriod = 0
// Don't initialize if kubeClient is nil
if kubelet.kubeClient != nil {
informerFactory = informers.NewSharedInformerFactory(kubelet.kubeClient, resyncPeriod)
csiDriverInformer := informerFactory.Storage().V1().CSIDrivers()
csiDriverLister = csiDriverInformer.Lister()
csiDriversSynced = csiDriverInformer.Informer().HasSynced
} else {
klog.InfoS("KubeClient is nil. Skip initialization of CSIDriverLister")
}
kvh := &kubeletVolumeHost{
kubelet: kubelet,
volumePluginMgr: volume.VolumePluginMgr{},
secretManager: secretManager,
configMapManager: configMapManager,
tokenManager: tokenManager,
clusterTrustBundleManager: clusterTrustBundleManager,
podCertificateManager: kubelet.podCertificateManager,
informerFactory: informerFactory,
csiDriverLister: csiDriverLister,
csiDriversSynced: csiDriversSynced,
}
if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil {
return nil, fmt.Errorf(
"could not initialize volume plugins for KubeletVolumePluginMgr: %v",
err)
}
return &kvh.volumePluginMgr, nil
}
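// Illustrative wiring (hypothetical; in practice this is called once during
// kubelet startup with managers constructed elsewhere):
//
//	mgr, err := NewInitializedVolumePluginMgr(
//	    kl, secretManager, configMapManager, tokenManager, ctbManager,
//	    volumePlugins, dynamicProber)
//	if err != nil {
//	    klog.ErrorS(err, "Failed to initialize volume plugin manager")
//	}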
// Compile-time check to ensure kubeletVolumeHost implements the VolumeHost interface
var _ volume.VolumeHost = &kubeletVolumeHost{}
var _ volume.KubeletVolumeHost = &kubeletVolumeHost{}
func (kvh *kubeletVolumeHost) GetPluginDir(pluginName string) string {
return kvh.kubelet.getPluginDir(pluginName)
}
type kubeletVolumeHost struct {
kubelet *Kubelet
volumePluginMgr volume.VolumePluginMgr
secretManager secret.Manager
tokenManager *token.Manager
configMapManager configmap.Manager
clusterTrustBundleManager clustertrustbundle.Manager
podCertificateManager podcertificate.Manager
informerFactory informers.SharedInformerFactory
csiDriverLister storagelisters.CSIDriverLister
csiDriversSynced cache.InformerSynced
}
func (kvh *kubeletVolumeHost) SetKubeletError(err error) {
kvh.kubelet.runtimeState.setStorageState(err)
}
func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return kvh.kubelet.getVolumeDevicePluginDir(pluginName)
}
func (kvh *kubeletVolumeHost) GetPodsDir() string {
return kvh.kubelet.getPodsDir()
}
func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
if runtime.GOOS == "windows" {
dir = util.GetWindowsPath(dir)
}
return dir
}
func (kvh *kubeletVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return kvh.kubelet.getPodVolumeDeviceDir(podUID, pluginName)
}
func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
return kvh.kubelet.getPodPluginDir(podUID, pluginName)
}
func (kvh *kubeletVolumeHost) GetKubeClient() clientset.Interface {
return kvh.kubelet.kubeClient
}
func (kvh *kubeletVolumeHost) GetSubpather() subpath.Interface {
return kvh.kubelet.subpather
}
func (kvh *kubeletVolumeHost) GetHostUtil() hostutil.HostUtils {
return kvh.kubelet.hostutil
}
func (kvh *kubeletVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
return kvh.informerFactory
}
func (kvh *kubeletVolumeHost) CSIDriverLister() storagelisters.CSIDriverLister {
return kvh.csiDriverLister
}
func (kvh *kubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
return kvh.csiDriversSynced
}
// WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
func (kvh *kubeletVolumeHost) WaitForCacheSync() error {
if kvh.csiDriversSynced == nil {
klog.ErrorS(nil, "CsiDriversSynced not found on KubeletVolumeHost")
return fmt.Errorf("csiDriversSynced not found on KubeletVolumeHost")
}
synced := []cache.InformerSynced{kvh.csiDriversSynced}
if !cache.WaitForCacheSync(wait.NeverStop, synced...) {
klog.InfoS("Failed to wait for cache sync for CSIDriverLister")
return fmt.Errorf("failed to wait for cache sync for CSIDriverLister")
}
return nil
}
func (kvh *kubeletVolumeHost) NewWrapperMounter(
volName string,
spec volume.Spec,
pod *v1.Pod) (volume.Mounter, error) {
// The name of the wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
return kvh.kubelet.newVolumeMounterFromPlugins(&spec, pod)
}
func (kvh *kubeletVolumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
// The name of the wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plugin, err := kvh.kubelet.volumePluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plugin.NewUnmounter(spec.Name(), podUID)
}
func (kvh *kubeletVolumeHost) GetMounter() mount.Interface {
return kvh.kubelet.mounter
}
func (kvh *kubeletVolumeHost) GetHostName() string {
return kvh.kubelet.hostname
}
func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
node, err := kvh.kubelet.getNodeAnyWay()
if err != nil {
return nil, fmt.Errorf("error retrieving node: %v", err)
}
return node.Status.Allocatable, nil
}
func (kvh *kubeletVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
if kvh.secretManager != nil {
return kvh.secretManager.GetSecret
}
return func(namespace, name string) (*v1.Secret, error) {
return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
}
}
func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
if kvh.configMapManager != nil {
return kvh.configMapManager.GetConfigMap
}
return func(namespace, name string) (*v1.ConfigMap, error) {
return nil, fmt.Errorf("not supported due to running kubelet in standalone mode")
}
}
func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return kvh.tokenManager.GetServiceAccountToken
}
func (kvh *kubeletVolumeHost) DeleteServiceAccountTokenFunc() func(podUID types.UID) {
return kvh.tokenManager.DeleteServiceAccountToken
}
func (kvh *kubeletVolumeHost) GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) {
return kvh.clusterTrustBundleManager.GetTrustAnchorsByName(name, allowMissing)
}
func (kvh *kubeletVolumeHost) GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) {
return kvh.clusterTrustBundleManager.GetTrustAnchorsBySigner(signerName, labelSelector, allowMissing)
}
func (kvh *kubeletVolumeHost) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
return kvh.podCertificateManager.GetPodCertificateCredentialBundle(ctx, namespace, podName, podUID, volumeName, sourceIndex)
}
func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
node, err := kvh.kubelet.GetNode()
if err != nil {
return nil, fmt.Errorf("error retrieving node: %v", err)
}
return node.Labels, nil
}
func (kvh *kubeletVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
node, err := kvh.kubelet.GetNode()
if err != nil {
return nil, fmt.Errorf("error retrieving node: %v", err)
}
attachedVolumes := node.Status.VolumesAttached
result := map[v1.UniqueVolumeName]string{}
for i := range attachedVolumes {
attachedVolume := attachedVolumes[i]
result[attachedVolume.Name] = attachedVolume.DevicePath
}
return result, nil
}
func (kvh *kubeletVolumeHost) GetNodeName() types.NodeName {
return kvh.kubelet.nodeName
}
func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder {
return kvh.kubelet.recorder
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache
import (
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's actual state of the world cache.
// This cache contains volumes->pods i.e. a set of all volumes attached to this
// node and the pods that the manager believes have successfully mounted the
// volume.
// Note: This is distinct from the ActualStateOfWorld implemented by the
// attach/detach controller. They each keep track of different objects. This
// contains kubelet volume manager specific state.
type ActualStateOfWorld interface {
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldMounterUpdater
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldAttacherUpdater
// AddPodToVolume adds the given pod to the given volume in the cache
// indicating the specified volume has been successfully mounted to the
// specified pod.
// If a pod with the same unique name already exists under the specified
// volume, reset the pod's remountRequired value.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
AddPodToVolume(operationexecutor.MarkVolumeOpts) error
// MarkRemountRequired marks each volume that is successfully attached and
// mounted for the specified pod as requiring remount (if the plugin for the
// volume indicates it requires remounting on pod updates). Atomically
// updating volumes depend on this to update the contents of the volume on
// pod update.
MarkRemountRequired(logger klog.Logger, podName volumetypes.UniquePodName)
// SetDeviceMountState sets the device mount state for the given volume. When
// deviceMountState is DeviceGloballyMounted, the device is mounted at a global
// mount point. When it is DeviceMountUncertain, the volume MAY be mounted at a
// global mount point. In both cases the volume must be unmounted from the
// global mount point prior to detach.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
SetDeviceMountState(volumeName v1.UniqueVolumeName, deviceMountState operationexecutor.DeviceMountState, devicePath, deviceMountPath, seLinuxMountContext string) error
// DeletePodFromVolume removes the given pod from the given volume in the
// cache indicating the volume has been successfully unmounted from the pod.
// If a pod with the same unique name does not exist under the specified
// volume, this is a no-op.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, an error is returned.
DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
// DeleteVolume removes the given volume from the list of attached volumes
// in the cache indicating the volume has been successfully detached from
// this node.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, this is a no-op.
// If a volume with the name volumeName exists and its list of mountedPods
// is not empty, an error is returned.
DeleteVolume(volumeName v1.UniqueVolumeName) error
// PodExistsInVolume returns true if the given pod exists in the list of
// mountedPods for the given volume in the cache, indicating that the volume
// is attached to this node and the pod has successfully mounted it.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, a volumeNotAttachedError is returned indicating the
// given volume is not yet attached.
// If the given volumeName/podName combo exists but the value of
// remountRequired is true, a remountRequiredError is returned indicating
// the given volume has been successfully mounted to this pod but should be
// remounted to reflect changes in the referencing pod. Atomically updating
// volumes depend on this to update the contents of the volume.
// All volume mounting calls should be idempotent so a second mount call for
// volumes that do not need to update contents should not fail.
PodExistsInVolume(logger klog.Logger, podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName, desiredVolumeSize resource.Quantity, seLinuxLabel string) (bool, string, error)
// PodRemovedFromVolume returns true if the given pod does not exist in the list of
// mountedPods for the given volume in the cache, indicating that the pod has
// fully unmounted it or never mounted it.
// If the volume is fully mounted or is in uncertain mount state for the pod, it is
// considered that the pod still exists in volume manager's actual state of the world
// and false is returned.
PodRemovedFromVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) bool
// PodHasMountedVolumes returns true if any volume is mounted on the given pod
PodHasMountedVolumes(podName volumetypes.UniquePodName) bool
// VolumeExistsWithSpecName returns true if the given volume specified with the
// volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of
// volumes that should be attached to this node.
// If a pod with the same name does not exist under the specified
// volume, false is returned.
VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool
// VolumeExists returns true if the given volume exists in the list of
// attached volumes in the cache, indicating the volume is attached to this
// node.
VolumeExists(volumeName v1.UniqueVolumeName) bool
// GetMountedVolumes generates and returns a list of volumes and the pods
// they are successfully attached and mounted for based on the current
// actual state of the world.
GetMountedVolumes() []MountedVolume
// GetAllMountedVolumes returns a list of all possibly mounted volumes, including
// those that are in VolumeMounted state and VolumeMountUncertain state.
GetAllMountedVolumes() []MountedVolume
// GetMountedVolumesForPod generates and returns a list of volumes that are
// successfully attached and mounted for the specified pod based on the
// current actual state of the world.
GetMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume
// GetMountedVolumeForPod returns the volume and true if
// the volume with the given name is mounted for the given pod.
GetMountedVolumeForPod(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (MountedVolume, bool)
// GetPossiblyMountedVolumesForPod generates and returns a list of volumes for
// the specified pod that either are attached and mounted or are "uncertain",
// i.e. a volume plugin may be mounting the volume right now.
GetPossiblyMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume
// GetGloballyMountedVolumes generates and returns a list of all attached
// volumes that are globally mounted. This list can be used to determine
// which volumes should be reported as "in use" in the node's VolumesInUse
// status field. Globally mounted here refers to the shared plugin mount
// point for the attachable volume from which the pod specific mount points
// are created (via bind mount).
GetGloballyMountedVolumes() []AttachedVolume
// GetUnmountedVolumes generates and returns a list of attached volumes that
// have no mountedPods. This list can be used to determine which volumes are
// no longer referenced and may be globally unmounted and detached.
GetUnmountedVolumes() []AttachedVolume
// GetAttachedVolumes returns a list of volumes that is known to be attached
// to the node. This list can be used to determine volumes that are either in-use
// or have a mount/unmount operation pending.
GetAttachedVolumes() []AttachedVolume
// GetAttachedVolume returns the volume that is known to be attached to the node
// with the given volume name. If the volume is not found, the second return value
// is false.
GetAttachedVolume(volumeName v1.UniqueVolumeName) (AttachedVolume, bool)
// Add the specified volume to ASW as uncertainly attached.
AddAttachUncertainReconstructedVolume(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// UpdateReconstructedDevicePath updates devicePath of a reconstructed volume
// from Node.Status.VolumesAttached. The ASW is updated only when the volume is still
// uncertain. If the volume got mounted in the meantime, its devicePath must have
// been fixed by such an update.
UpdateReconstructedDevicePath(volumeName v1.UniqueVolumeName, devicePath string)
// UpdateReconstructedVolumeAttachability updates volume attachability from the API server.
UpdateReconstructedVolumeAttachability(volumeName v1.UniqueVolumeName, volumeAttachable bool)
}
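// A simplified, illustrative lifecycle against this cache (the real reconciler
// drives these transitions):
//
//	asw.MarkVolumeAsAttached(...)   // volume attached to this node
//	asw.MarkDeviceAsMounted(...)    // device mounted at the global mount point
//	asw.AddPodToVolume(...)         // pod mount recorded
//	asw.DeletePodFromVolume(...)    // pod unmount recorded
//	asw.DeleteVolume(...)           // volume detached from this node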
// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
operationexecutor.MountedVolume
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
operationexecutor.AttachedVolume
// DeviceMountState indicates whether the device has been globally mounted.
DeviceMountState operationexecutor.DeviceMountState
// SELinuxMountContext is the context with which the volume is globally mounted
// (via -o context=XYZ mount option). If empty, the volume is not mounted with
// "-o context=".
SELinuxMountContext string
}
// DeviceMayBeMounted returns true if device is mounted in global path or is in
// uncertain state.
func (av AttachedVolume) DeviceMayBeMounted() bool {
return av.DeviceMountState == operationexecutor.DeviceGloballyMounted ||
av.DeviceMountState == operationexecutor.DeviceMountUncertain
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(
nodeName types.NodeName,
volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
nodeName: nodeName,
attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
foundDuringReconstruction: make(map[v1.UniqueVolumeName]map[volumetypes.UniquePodName]types.UID),
volumePluginMgr: volumePluginMgr,
volumesWithFinalExpansionErrors: sets.New[v1.UniqueVolumeName](),
}
}
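// Illustrative construction (hypothetical node name):
//
//	asw := NewActualStateOfWorld(types.NodeName("node-1"), volumePluginMgr)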
// IsVolumeNotAttachedError returns true if the specified error is a
// volumeNotAttachedError.
func IsVolumeNotAttachedError(err error) bool {
_, ok := err.(volumeNotAttachedError)
return ok
}
// IsRemountRequiredError returns true if the specified error is a
// remountRequiredError.
func IsRemountRequiredError(err error) bool {
_, ok := err.(remountRequiredError)
return ok
}
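// Callers typically branch on these helpers after PodExistsInVolume
// (illustrative sketch):
//
//	if _, _, err := asw.PodExistsInVolume(logger, podName, volName, size, label); err != nil {
//	    switch {
//	    case IsVolumeNotAttachedError(err):
//	        // volume must be attached/mounted first
//	    case IsRemountRequiredError(err):
//	        // volume is mounted but must be remounted
//	    }
//	}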
type actualStateOfWorld struct {
// nodeName is the name of this node. This value is passed to Attach/Detach
nodeName types.NodeName
// attachedVolumes is a map containing the set of volumes the kubelet volume
// manager believes to be successfully attached to this node. Volume types
// that do not implement an attacher interface are assumed to be in this
// state by default.
// The key in this map is the name of the volume and the value is an object
// containing more information about the attached volume.
attachedVolumes map[v1.UniqueVolumeName]attachedVolume
// foundDuringReconstruction is a map of volumes which were discovered
// from kubelet root directory when kubelet was restarted.
foundDuringReconstruction map[v1.UniqueVolumeName]map[volumetypes.UniquePodName]types.UID
volumesWithFinalExpansionErrors sets.Set[v1.UniqueVolumeName]
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
type volumeAttachability string
const (
volumeAttachabilityTrue volumeAttachability = "True"
volumeAttachabilityFalse volumeAttachability = "False"
volumeAttachabilityUncertain volumeAttachability = "Uncertain"
)
// attachedVolume represents a volume the kubelet volume manager believes to be
// successfully attached to a node it is managing. Volume types that do not
// implement an attacher are assumed to be in this state.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// mountedPods is a map containing the set of pods that this volume has been
// successfully mounted to. The key in this map is the name of the pod and
// the value is a mountedPod object containing more information about the
// pod.
mountedPods map[volumetypes.UniquePodName]mountedPod
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to plugin methods.
// In particular, the Unmount method uses spec.Name() as the volumeSpecName
// in the mount path:
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
spec *volume.Spec
// pluginName is the Unescaped Qualified name of the volume plugin used to
// attach and mount this volume. It is stored separately in case the full
// volume spec (everything except the name) can not be reconstructed for a
// volume that should be unmounted (which would be the case for a mount path
// read from disk without a full volume spec).
pluginName string
// pluginIsAttachable indicates the volume plugin used to attach and mount
// this volume implements the volume.Attacher interface
pluginIsAttachable volumeAttachability
// deviceMountState stores information that tells us whether the device is
// mounted globally or not
deviceMountState operationexecutor.DeviceMountState
// devicePath contains the path on the node where the volume is attached for
// attachable volumes
devicePath string
// deviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
deviceMountPath string
// volumeInUseErrorForExpansion indicates the volume driver has previously returned a volume-in-use error
// for this volume, and volume expansion on this node should not be retried
volumeInUseErrorForExpansion bool
// persistentVolumeSize records size of the volume when pod was started or
// size after successful completion of volume expansion operation.
persistentVolumeSize resource.Quantity
// seLinuxMountContext is the context with which the volume is mounted to the global directory
// (via -o context=XYZ mount option). If nil, the volume is not mounted. If "", the volume is
// mounted without "-o context=".
seLinuxMountContext *string
}
// The mountedPod object represents a pod for which the kubelet volume manager
// believes the underlying volume has been successfully mounted.
type mountedPod struct {
// the name of the pod
podName volumetypes.UniquePodName
// the UID of the pod
podUID types.UID
// mounter used to mount
mounter volume.Mounter
// mapper used to block volumes support
blockVolumeMapper volume.BlockVolumeMapper
// volumeSpec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to plugin methods.
// In particular, the Unmount method uses spec.Name() as the volumeSpecName
// in the mount path:
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
volumeSpec *volume.Spec
// remountRequired indicates the underlying volume has been successfully
// mounted to this pod but it should be remounted to reflect changes in the
// referencing pod.
// Atomically updating volumes depend on this to update the contents of the
// volume. All volume mounting calls should be idempotent so a second mount
// call for volumes that do not need to update contents should not fail.
remountRequired bool
// volumeGIDValue contains the value of the GID annotation, if present.
volumeGIDValue string
// volumeMountStateForPod stores the state of the volume mount for the pod:
// - VolumeMounted: the volume for the pod has been successfully mounted
// - VolumeMountUncertain: the volume for the pod may not be mounted, but it must be unmounted
volumeMountStateForPod operationexecutor.VolumeMountState
// seLinuxMountContext is the context with which the volume is mounted to the Pod directory
// (via -o context=XYZ mount option). If nil, the volume is not mounted. If "", the volume is
// mounted without "-o context=".
seLinuxMountContext string
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
pluginIsAttachable := volumeAttachabilityFalse
if attachablePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec); err == nil && attachablePlugin != nil {
pluginIsAttachable = volumeAttachabilityTrue
}
return asw.addVolume(logger, volumeName, volumeSpec, devicePath, pluginIsAttachable)
}
func (asw *actualStateOfWorld) AddAttachUncertainReconstructedVolume(
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
return asw.addVolume(logger, volumeName, volumeSpec, devicePath, volumeAttachabilityUncertain)
}
func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error {
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolume(volumeName)
}
func (asw *actualStateOfWorld) MarkVolumeExpansionFailedWithFinalError(volumeName v1.UniqueVolumeName) {
asw.Lock()
defer asw.Unlock()
asw.volumesWithFinalExpansionErrors.Insert(volumeName)
}
func (asw *actualStateOfWorld) RemoveVolumeFromFailedWithFinalErrors(volumeName v1.UniqueVolumeName) {
asw.Lock()
defer asw.Unlock()
asw.volumesWithFinalExpansionErrors.Delete(volumeName)
}
func (asw *actualStateOfWorld) CheckVolumeInFailedExpansionWithFinalErrors(volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
return asw.volumesWithFinalExpansionErrors.Has(volumeName)
}
func (asw *actualStateOfWorld) IsVolumeReconstructed(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
volumeState := asw.GetVolumeMountState(volumeName, podName)
// only uncertain volumes are reconstructed
if volumeState != operationexecutor.VolumeMountUncertain {
return false
}
asw.RLock()
defer asw.RUnlock()
podMap, ok := asw.foundDuringReconstruction[volumeName]
if !ok {
return false
}
_, foundPod := podMap[podName]
return foundPod
}
func (asw *actualStateOfWorld) IsVolumeDeviceReconstructed(volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
_, ok := asw.foundDuringReconstruction[volumeName]
return ok
}
func (asw *actualStateOfWorld) CheckAndMarkVolumeAsUncertainViaReconstruction(opts operationexecutor.MarkVolumeOpts) (bool, error) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[opts.VolumeName]
if !volumeExists {
return false, nil
}
podObj, podExists := volumeObj.mountedPods[opts.PodName]
if podExists {
// if volume mount was uncertain we should keep trying to unmount the volume
if podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
return false, nil
}
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
return false, nil
}
}
podName := opts.PodName
podUID := opts.PodUID
volumeName := opts.VolumeName
mounter := opts.Mounter
blockVolumeMapper := opts.BlockVolumeMapper
volumeGIDValue := opts.VolumeGIDVolume
volumeSpec := opts.VolumeSpec
podObj = mountedPod{
podName: podName,
podUID: podUID,
mounter: mounter,
blockVolumeMapper: blockVolumeMapper,
volumeGIDValue: volumeGIDValue,
volumeSpec: volumeSpec,
remountRequired: false,
volumeMountStateForPod: operationexecutor.VolumeMountUncertain,
}
if mounter != nil {
// The mounter stored in the object may have old information,
// use the newest one.
podObj.mounter = mounter
}
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
podMap, ok := asw.foundDuringReconstruction[opts.VolumeName]
if !ok {
podMap = map[volumetypes.UniquePodName]types.UID{}
}
podMap[opts.PodName] = opts.PodUID
asw.foundDuringReconstruction[opts.VolumeName] = podMap
return true, nil
}
func (asw *actualStateOfWorld) CheckAndMarkDeviceUncertainViaReconstruction(volumeName v1.UniqueVolumeName, deviceMountPath string) bool {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
// CheckAndMarkDeviceUncertainViaReconstruction requires volume to be marked as attached, so if
// volume does not exist in ASOW or is in any state other than DeviceNotMounted we should return
if !volumeExists || volumeObj.deviceMountState != operationexecutor.DeviceNotMounted {
return false
}
volumeObj.deviceMountState = operationexecutor.DeviceMountUncertain
// we are only changing deviceMountPath because devicePath at this stage is
// determined from the node object.
volumeObj.deviceMountPath = deviceMountPath
asw.attachedVolumes[volumeName] = volumeObj
return true
}
func (asw *actualStateOfWorld) MarkVolumeAsMounted(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
return asw.AddPodToVolume(markVolumeOpts)
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// no operation for kubelet side
}
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
// no operation for kubelet side
return nil
}
func (asw *actualStateOfWorld) MarkVolumeAsUnmounted(
podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
return asw.DeletePodFromVolume(podName, volumeName)
}
func (asw *actualStateOfWorld) MarkDeviceAsMounted(
volumeName v1.UniqueVolumeName, devicePath, deviceMountPath, seLinuxMountContext string) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceGloballyMounted, devicePath, deviceMountPath, seLinuxMountContext)
}
func (asw *actualStateOfWorld) MarkDeviceAsUncertain(
volumeName v1.UniqueVolumeName, devicePath, deviceMountPath, seLinuxMountContext string) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceMountUncertain, devicePath, deviceMountPath, seLinuxMountContext)
}
func (asw *actualStateOfWorld) MarkVolumeMountAsUncertain(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
markVolumeOpts.VolumeMountState = operationexecutor.VolumeMountUncertain
return asw.AddPodToVolume(markVolumeOpts)
}
func (asw *actualStateOfWorld) MarkDeviceAsUnmounted(
volumeName v1.UniqueVolumeName) error {
return asw.SetDeviceMountState(volumeName, operationexecutor.DeviceNotMounted, "", "", "")
}
func (asw *actualStateOfWorld) UpdateReconstructedDevicePath(volumeName v1.UniqueVolumeName, devicePath string) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return
}
if volumeObj.deviceMountState != operationexecutor.DeviceMountUncertain {
// Reconciler must have updated the volume state, i.e. when a pod uses the volume
// and succeeded mounting it. Such an update has fixed the device path.
return
}
volumeObj.devicePath = devicePath
asw.attachedVolumes[volumeName] = volumeObj
}
func (asw *actualStateOfWorld) UpdateReconstructedVolumeAttachability(volumeName v1.UniqueVolumeName, attachable bool) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return
}
if volumeObj.pluginIsAttachable != volumeAttachabilityUncertain {
// Reconciler must have updated the volume state, i.e. when a pod uses the volume
// and succeeded mounting it. Such an update has fixed the volume attachability.
return
}
if attachable {
volumeObj.pluginIsAttachable = volumeAttachabilityTrue
} else {
volumeObj.pluginIsAttachable = volumeAttachabilityFalse
}
asw.attachedVolumes[volumeName] = volumeObj
}
func (asw *actualStateOfWorld) GetDeviceMountState(volumeName v1.UniqueVolumeName) operationexecutor.DeviceMountState {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return operationexecutor.DeviceNotMounted
}
return volumeObj.deviceMountState
}
func (asw *actualStateOfWorld) MarkForInUseExpansionError(volumeName v1.UniqueVolumeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
if ok {
volumeObj.volumeInUseErrorForExpansion = true
asw.attachedVolumes[volumeName] = volumeObj
}
}
func (asw *actualStateOfWorld) GetVolumeMountState(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) operationexecutor.VolumeMountState {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return operationexecutor.VolumeNotMounted
}
podObj, podExists := volumeObj.mountedPods[podName]
if !podExists {
return operationexecutor.VolumeNotMounted
}
return podObj.volumeMountStateForPod
}
func (asw *actualStateOfWorld) IsVolumeMountedElsewhere(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return false
}
for _, podObj := range volumeObj.mountedPods {
if podName != podObj.podName {
// Treat uncertain mount state as mounted until certain.
if podObj.volumeMountStateForPod != operationexecutor.VolumeNotMounted {
return true
}
}
}
return false
}
// addVolume adds the given volume to the cache indicating the specified
// volume is attached to this node. If no volume name is supplied, a unique
// volume name is generated from the volumeSpec and used. If a
// volume with the same generated name already exists, this is a noop. If no
// volume plugin can support the given volumeSpec or more than one plugin can
// support it, an error is returned.
func (asw *actualStateOfWorld) addVolume(logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, devicePath string, attachability volumeAttachability) error {
asw.Lock()
defer asw.Unlock()
volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
return fmt.Errorf(
"failed to get Plugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
if len(volumeName) == 0 {
volumeName, err = util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
volumeSpec.Name(),
volumePlugin.GetPluginName(),
err)
}
}
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
volumeObj = attachedVolume{
volumeName: volumeName,
spec: volumeSpec,
mountedPods: make(map[volumetypes.UniquePodName]mountedPod),
pluginName: volumePlugin.GetPluginName(),
pluginIsAttachable: attachability,
deviceMountState: operationexecutor.DeviceNotMounted,
devicePath: devicePath,
}
} else {
// If volume object already exists, update the fields such as device path
volumeObj.devicePath = devicePath
logger.V(2).Info("Volume is already added to attachedVolume list, update device path", "volumeName", volumeName, "path", devicePath)
}
asw.attachedVolumes[volumeName] = volumeObj
return nil
}
func (asw *actualStateOfWorld) AddPodToVolume(markVolumeOpts operationexecutor.MarkVolumeOpts) error {
podName := markVolumeOpts.PodName
podUID := markVolumeOpts.PodUID
volumeName := markVolumeOpts.VolumeName
mounter := markVolumeOpts.Mounter
blockVolumeMapper := markVolumeOpts.BlockVolumeMapper
volumeGIDValue := markVolumeOpts.VolumeGIDVolume
volumeSpec := markVolumeOpts.VolumeSpec
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
podObj, podExists := volumeObj.mountedPods[podName]
updateUncertainVolume := false
if podExists {
// Update uncertain volumes - the new markVolumeOpts may have updated information.
// Especially reconstructed volumes (marked as uncertain during reconstruction) need
// an update.
updateUncertainVolume = podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain
}
if !podExists || updateUncertainVolume {
// Add new mountedPod or update existing one.
podObj = mountedPod{
podName: podName,
podUID: podUID,
mounter: mounter,
blockVolumeMapper: blockVolumeMapper,
volumeGIDValue: volumeGIDValue,
volumeSpec: volumeSpec,
volumeMountStateForPod: markVolumeOpts.VolumeMountState,
seLinuxMountContext: markVolumeOpts.SELinuxMountContext,
}
}
// If pod exists, reset remountRequired value
podObj.remountRequired = false
podObj.volumeMountStateForPod = markVolumeOpts.VolumeMountState
// if volume is mounted successfully, then it should be removed from foundDuringReconstruction map
if markVolumeOpts.VolumeMountState == operationexecutor.VolumeMounted {
delete(asw.foundDuringReconstruction[volumeName], podName)
}
if mounter != nil {
// The mounter stored in the object may have old information,
// use the newest one.
podObj.mounter = mounter
}
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
// Store the mount context also in the AttachedVolume to have a global volume context
// for a quick comparison in PodExistsInVolume.
if volumeObj.seLinuxMountContext == nil {
volumeObj.seLinuxMountContext = &markVolumeOpts.SELinuxMountContext
asw.attachedVolumes[volumeName] = volumeObj
}
}
return nil
}
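// Illustrative call (hypothetical values; field names follow the
// operationexecutor.MarkVolumeOpts usage above):
//
//	err := asw.AddPodToVolume(operationexecutor.MarkVolumeOpts{
//	    PodName:          podName,
//	    PodUID:           podUID,
//	    VolumeName:       volumeName,
//	    VolumeSpec:       volumeSpec,
//	    VolumeMountState: operationexecutor.VolumeMounted,
//	})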
func (asw *actualStateOfWorld) MarkVolumeAsResized(volumeName v1.UniqueVolumeName, claimSize resource.Quantity) bool {
asw.Lock()
defer asw.Unlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
if ok {
volumeObj.persistentVolumeSize = claimSize
asw.attachedVolumes[volumeName] = volumeObj
return true
}
return false
}
func (asw *actualStateOfWorld) MarkRemountRequired(
logger klog.Logger,
podName volumetypes.UniquePodName) {
asw.Lock()
defer asw.Unlock()
for volumeName, volumeObj := range asw.attachedVolumes {
if podObj, podExists := volumeObj.mountedPods[podName]; podExists {
volumePlugin, err :=
asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec)
if err != nil || volumePlugin == nil {
// Log and continue processing
logger.Error(nil, "MarkRemountRequired failed to FindPluginBySpec for volume", "uniquePodName", podObj.podName, "podUID", podObj.podUID, "volumeName", volumeName, "volumeSpecName", podObj.volumeSpec.Name())
continue
}
if volumePlugin.RequiresRemount(podObj.volumeSpec) {
podObj.remountRequired = true
asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
}
}
}
}
func (asw *actualStateOfWorld) SetDeviceMountState(
volumeName v1.UniqueVolumeName, deviceMountState operationexecutor.DeviceMountState, devicePath, deviceMountPath, seLinuxMountContext string) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
volumeObj.deviceMountState = deviceMountState
volumeObj.deviceMountPath = deviceMountPath
if devicePath != "" {
volumeObj.devicePath = devicePath
}
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
if seLinuxMountContext != "" {
volumeObj.seLinuxMountContext = &seLinuxMountContext
}
}
asw.attachedVolumes[volumeName] = volumeObj
return nil
}
func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize resource.Quantity) {
asw.Lock()
defer asw.Unlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
// only set the claim size if the recorded persistentVolumeSize is zero;
// this can happen when the volume was reconstructed after kubelet restart
if ok && volumeObj.persistentVolumeSize.IsZero() {
volumeObj.persistentVolumeSize = claimSize
asw.attachedVolumes[volumeName] = volumeObj
}
}
func (asw *actualStateOfWorld) GetClaimSize(volumeName v1.UniqueVolumeName) resource.Quantity {
asw.RLock()
defer asw.RUnlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
if ok {
return volumeObj.persistentVolumeSize.DeepCopy()
}
return resource.Quantity{}
}
func (asw *actualStateOfWorld) DeletePodFromVolume(
podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return fmt.Errorf(
"no volume with the name %q exists in the list of attached volumes",
volumeName)
}
_, podExists := volumeObj.mountedPods[podName]
if podExists {
delete(asw.attachedVolumes[volumeName].mountedPods, podName)
}
// if there were reconstructed volumes, we should remove them
_, podExists = asw.foundDuringReconstruction[volumeName]
if podExists {
delete(asw.foundDuringReconstruction[volumeName], podName)
}
return nil
}
func (asw *actualStateOfWorld) DeleteVolume(volumeName v1.UniqueVolumeName) error {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return nil
}
if len(volumeObj.mountedPods) != 0 {
return fmt.Errorf(
"failed to DeleteVolume %q, it still has %v mountedPods",
volumeName,
len(volumeObj.mountedPods))
}
delete(asw.attachedVolumes, volumeName)
delete(asw.foundDuringReconstruction, volumeName)
return nil
}
func (asw *actualStateOfWorld) PodExistsInVolume(logger klog.Logger, podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName, desiredVolumeSize resource.Quantity, seLinuxLabel string) (bool, string, error) {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return false, "", newVolumeNotAttachedError(volumeName)
}
// The volume exists, check its SELinux context mount option
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
if volumeObj.seLinuxMountContext != nil && *volumeObj.seLinuxMountContext != seLinuxLabel {
fullErr := newSELinuxMountMismatchError(volumeName)
return false, volumeObj.devicePath, fullErr
}
}
podObj, podExists := volumeObj.mountedPods[podName]
if podExists {
// if volume mount was uncertain we should keep trying to mount the volume
if podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
return false, volumeObj.devicePath, nil
}
if podObj.remountRequired {
return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
}
if currentSize, expandVolume := asw.volumeNeedsExpansion(logger, volumeObj, desiredVolumeSize); expandVolume {
return true, volumeObj.devicePath, newFsResizeRequiredError(volumeObj.volumeName, podObj.podName, currentSize)
}
}
return podExists, volumeObj.devicePath, nil
}
func (asw *actualStateOfWorld) PodHasMountedVolumes(podName volumetypes.UniquePodName) bool {
asw.RLock()
defer asw.RUnlock()
for _, volumeObj := range asw.attachedVolumes {
if podObj, hasPod := volumeObj.mountedPods[podName]; hasPod {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
return true
}
}
}
return false
}
func (asw *actualStateOfWorld) volumeNeedsExpansion(logger klog.Logger, volumeObj attachedVolume, desiredVolumeSize resource.Quantity) (resource.Quantity, bool) {
currentSize := volumeObj.persistentVolumeSize.DeepCopy()
if volumeObj.volumeInUseErrorForExpansion {
return currentSize, false
}
if volumeObj.persistentVolumeSize.IsZero() || desiredVolumeSize.IsZero() {
return currentSize, false
}
logger.V(5).Info("NodeExpandVolume checking size", "actualSize", volumeObj.persistentVolumeSize.String(), "desiredSize", desiredVolumeSize.String(), "volume", volumeObj.volumeName)
if desiredVolumeSize.Cmp(volumeObj.persistentVolumeSize) > 0 {
volumePlugin, err := asw.volumePluginMgr.FindNodeExpandablePluginBySpec(volumeObj.spec)
if err != nil || volumePlugin == nil {
// Log and continue processing
logger.Info("PodExistsInVolume failed to find expandable plugin",
"volume", volumeObj.volumeName,
"volumeSpecName", volumeObj.spec.Name())
return currentSize, false
}
if volumePlugin.RequiresFSResize() {
return currentSize, true
}
}
return currentSize, false
}
func (asw *actualStateOfWorld) PodRemovedFromVolume(
podName volumetypes.UniquePodName,
volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return true
}
podObj, podExists := volumeObj.mountedPods[podName]
if podExists {
// if volume mount was uncertain we should keep trying to unmount the volume
if podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
return false
}
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
return false
}
}
return true
}
func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool {
asw.RLock()
defer asw.RUnlock()
for _, volumeObj := range asw.attachedVolumes {
if podObj, podExists := volumeObj.mountedPods[podName]; podExists {
if podObj.volumeSpec.Name() == volumeSpecName {
return true
}
}
}
return false
}
func (asw *actualStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName) bool {
asw.RLock()
defer asw.RUnlock()
_, volumeExists := asw.attachedVolumes[volumeName]
return volumeExists
}
func (asw *actualStateOfWorld) GetMountedVolumes() []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, podObj := range volumeObj.mountedPods {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
// GetAllMountedVolumes returns all volumes which could be locally mounted for a pod.
func (asw *actualStateOfWorld) GetAllMountedVolumes() []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, podObj := range volumeObj.mountedPods {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted ||
podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetMountedVolumesForPod(
podName volumetypes.UniquePodName) []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */)
for _, volumeObj := range asw.attachedVolumes {
for mountedPodName, podObj := range volumeObj.mountedPods {
if mountedPodName == podName && podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetMountedVolumeForPod(
podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (MountedVolume, bool) {
asw.RLock()
defer asw.RUnlock()
volumeObj := asw.attachedVolumes[volumeName]
if podObj, hasPod := volumeObj.mountedPods[podName]; hasPod {
if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted {
return getMountedVolume(&podObj, &volumeObj), true
}
}
return MountedVolume{}, false
}
func (asw *actualStateOfWorld) GetPossiblyMountedVolumesForPod(
podName volumetypes.UniquePodName) []MountedVolume {
asw.RLock()
defer asw.RUnlock()
mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for mountedPodName, podObj := range volumeObj.mountedPods {
if mountedPodName == podName &&
(podObj.volumeMountStateForPod == operationexecutor.VolumeMounted ||
podObj.volumeMountStateForPod == operationexecutor.VolumeMountUncertain) {
mountedVolume = append(
mountedVolume,
getMountedVolume(&podObj, &volumeObj))
}
}
}
return mountedVolume
}
func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
globallyMountedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
if volumeObj.deviceMountState == operationexecutor.DeviceGloballyMounted {
globallyMountedVolumes = append(
globallyMountedVolumes,
asw.newAttachedVolume(&volumeObj))
}
}
return globallyMountedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
allAttachedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
allAttachedVolumes = append(
allAttachedVolumes,
asw.newAttachedVolume(&volumeObj))
}
return allAttachedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolume(volumeName v1.UniqueVolumeName) (AttachedVolume, bool) {
asw.RLock()
defer asw.RUnlock()
volumeObj, ok := asw.attachedVolumes[volumeName]
if !ok {
return AttachedVolume{}, false
}
return asw.newAttachedVolume(&volumeObj), true
}
func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
unmountedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
if len(volumeObj.mountedPods) == 0 {
unmountedVolumes = append(
unmountedVolumes,
asw.newAttachedVolume(&volumeObj))
}
}
return unmountedVolumes
}
func (asw *actualStateOfWorld) newAttachedVolume(
attachedVolume *attachedVolume) AttachedVolume {
seLinuxMountContext := ""
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
if attachedVolume.seLinuxMountContext != nil {
seLinuxMountContext = *attachedVolume.seLinuxMountContext
}
}
return AttachedVolume{
AttachedVolume: operationexecutor.AttachedVolume{
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: asw.nodeName,
PluginIsAttachable: attachedVolume.pluginIsAttachable == volumeAttachabilityTrue,
DevicePath: attachedVolume.devicePath,
DeviceMountPath: attachedVolume.deviceMountPath,
PluginName: attachedVolume.pluginName,
SELinuxMountContext: seLinuxMountContext},
DeviceMountState: attachedVolume.deviceMountState,
SELinuxMountContext: seLinuxMountContext,
}
}
// Compile-time check to ensure volumeNotAttachedError implements the error interface
var _ error = volumeNotAttachedError{}
// volumeNotAttachedError is an error returned when PodExistsInVolume() fails to
// find specified volume in the list of attached volumes.
type volumeNotAttachedError struct {
volumeName v1.UniqueVolumeName
}
func (err volumeNotAttachedError) Error() string {
return fmt.Sprintf(
"volumeName %q does not exist in the list of attached volumes",
err.volumeName)
}
func newVolumeNotAttachedError(volumeName v1.UniqueVolumeName) error {
return volumeNotAttachedError{
volumeName: volumeName,
}
}
// Compile-time check to ensure remountRequiredError implements the error interface
var _ error = remountRequiredError{}
// remountRequiredError is an error returned when PodExistsInVolume() found
// volume/pod attached/mounted but remountRequired was true, indicating the
// given volume should be remounted to the pod to reflect changes in the
// referencing pod.
type remountRequiredError struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
}
func (err remountRequiredError) Error() string {
return fmt.Sprintf(
"volumeName %q is mounted to %q but should be remounted",
err.volumeName, err.podName)
}
func newRemountRequiredError(
volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error {
return remountRequiredError{
volumeName: volumeName,
podName: podName,
}
}
// FsResizeRequiredError is an error returned when PodExistsInVolume() found
// the volume/pod attached/mounted but fsResizeRequired was true, indicating the
// given volume received a resize request after being attached/mounted.
type FsResizeRequiredError struct {
CurrentSize resource.Quantity
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
}
func (err FsResizeRequiredError) Error() string {
return fmt.Sprintf(
"volumeName %q mounted to %q needs to resize file system",
err.volumeName, err.podName)
}
func newFsResizeRequiredError(
volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, currentSize resource.Quantity) error {
return FsResizeRequiredError{
CurrentSize: currentSize,
volumeName: volumeName,
podName: podName,
}
}
// IsFSResizeRequiredError returns true if the specified error is a
// FsResizeRequiredError.
func IsFSResizeRequiredError(err error) bool {
_, ok := err.(FsResizeRequiredError)
return ok
}
// getMountedVolume constructs and returns a MountedVolume object from the given
// mountedPod and attachedVolume objects.
func getMountedVolume(
mountedPod *mountedPod, attachedVolume *attachedVolume) MountedVolume {
seLinuxMountContext := ""
if attachedVolume.seLinuxMountContext != nil {
seLinuxMountContext = *attachedVolume.seLinuxMountContext
}
return MountedVolume{
MountedVolume: operationexecutor.MountedVolume{
PodName: mountedPod.podName,
VolumeName: attachedVolume.volumeName,
InnerVolumeSpecName: mountedPod.volumeSpec.Name(),
PluginName: attachedVolume.pluginName,
PodUID: mountedPod.podUID,
Mounter: mountedPod.mounter,
BlockVolumeMapper: mountedPod.blockVolumeMapper,
VolumeGIDValue: mountedPod.volumeGIDValue,
VolumeSpec: mountedPod.volumeSpec,
DeviceMountPath: attachedVolume.deviceMountPath,
SELinuxMountContext: seLinuxMountContext}}
}
// seLinuxMountMismatchError is an error returned when PodExistsInVolume() found
// a volume mounted with a different SELinux label than expected.
type seLinuxMountMismatchError struct {
volumeName v1.UniqueVolumeName
}
func (err seLinuxMountMismatchError) Error() string {
return fmt.Sprintf(
"waiting for unmount of volume %q, because it is already mounted to a different pod with a different SELinux label",
err.volumeName)
}
func newSELinuxMountMismatchError(volumeName v1.UniqueVolumeName) error {
return seLinuxMountMismatchError{
volumeName: volumeName,
}
}
// IsSELinuxMountMismatchError returns true if the specified error is a
// seLinuxMountMismatchError.
func IsSELinuxMountMismatchError(err error) bool {
_, ok := err.(seLinuxMountMismatchError)
return ok
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
var (
// TODO: add plugin name + access mode labels to all these metrics
seLinuxContainerContextErrors = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_container_errors_total",
Help: "Number of errors when kubelet cannot compute SELinux context for a container. Kubelet can't start such a Pod then and it will retry, therefore value of this metric may not represent the actual nr. of containers.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"access_mode"},
)
seLinuxContainerContextWarnings = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_container_warnings_total",
StabilityLevel: compbasemetrics.ALPHA,
Help: "Number of errors when kubelet cannot compute SELinux context for a container that are ignored. They will become real errors when SELinuxMountReadWriteOncePod feature is expanded to all volume access modes.",
},
[]string{"access_mode"},
)
seLinuxPodContextMismatchErrors = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_pod_context_mismatch_errors_total",
Help: "Number of errors when a Pod defines different SELinux contexts for its containers that use the same volume. Kubelet cannot start such a Pod and will retry, therefore the value of this metric may not represent the actual number of Pods.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"access_mode"},
)
seLinuxPodContextMismatchWarnings = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_pod_context_mismatch_warnings_total",
Help: "Number of errors when a Pod defines different SELinux contexts for its containers that use the same volume. They are not errors yet, but they will become real errors when the SELinuxMountReadWriteOncePod feature is expanded to all volume access modes.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"access_mode"},
)
seLinuxVolumeContextMismatchErrors = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_volume_context_mismatch_errors_total",
Help: "Number of errors when a Pod uses a volume that is already mounted with a different SELinux context than the Pod needs. Kubelet cannot start such a Pod and will retry, therefore the value of this metric may not represent the actual number of Pods.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"volume_plugin", "access_mode"},
)
seLinuxVolumeContextMismatchWarnings = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_volume_context_mismatch_warnings_total",
Help: "Number of errors when a Pod uses a volume that is already mounted with a different SELinux context than the Pod needs. They are not errors yet, but they will become real errors when the SELinuxMountReadWriteOncePod feature is expanded to all volume access modes.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"volume_plugin", "access_mode"},
)
seLinuxVolumesAdmitted = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Name: "volume_manager_selinux_volumes_admitted_total",
Help: "Number of volumes whose SELinux context was fine and that will be mounted with the mount -o context option.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"volume_plugin", "access_mode"},
)
registerMetrics sync.Once
)
func registerSELinuxMetrics() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(seLinuxContainerContextErrors)
legacyregistry.MustRegister(seLinuxContainerContextWarnings)
legacyregistry.MustRegister(seLinuxPodContextMismatchErrors)
legacyregistry.MustRegister(seLinuxPodContextMismatchWarnings)
legacyregistry.MustRegister(seLinuxVolumeContextMismatchErrors)
legacyregistry.MustRegister(seLinuxVolumeContextMismatchWarnings)
legacyregistry.MustRegister(seLinuxVolumesAdmitted)
})
}
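// Illustrative sketch (hypothetical helper, not in the original file):
// bumping one of the gauges above. WithLabelValues must receive values in
// the same order as the labels declared in the corresponding NewGaugeVec.
func exampleRecordAdmittedVolume(pluginName, accessMode string) {
seLinuxVolumesAdmitted.WithLabelValues(pluginName, accessMode).Add(1.0)
}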
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache
import (
"fmt"
"slices"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/metrics"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume/csi"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
)
// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's desired state of the world cache.
// This cache contains a volumes->pods mapping, i.e. the set of all volumes
// that should be attached to this node and, for each volume, the pods that
// reference it and should mount it.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// attach/detach controller; each tracks different objects. This one contains
// kubelet volume manager specific state.
type DesiredStateOfWorld interface {
// AddPodToVolume adds the given pod to the given volume in the cache
// indicating the specified pod should mount the specified volume.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If no volume plugin can support the given volumeSpec or more than one
// plugin can support it, an error is returned.
// If a volume with the name volumeName does not exist in the list of
// volumes that should be attached to this node, the volume is implicitly
// added.
// If a pod with the same unique name already exists under the specified
// volume, this is a no-op.
AddPodToVolume(logger klog.Logger, podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGIDValue string, seLinuxContainerContexts []*v1.SELinuxOptions) (v1.UniqueVolumeName, error)
// MarkVolumesReportedInUse sets the ReportedInUse value to true for the
// reportedVolumes. For volumes not in the reportedVolumes list, the
// ReportedInUse value is reset to false. The default ReportedInUse value
// for a newly created volume is false.
// When set to true this value indicates that the volume was successfully
// added to the VolumesInUse field in the node's status. Mount operation needs
// to check this value before issuing the operation.
// If a volume in the reportedVolumes list does not exist in the list of
// volumes that should be attached to this node, it is skipped without error.
MarkVolumesReportedInUse(reportedVolumes []v1.UniqueVolumeName)
// DeletePodFromVolume removes the given pod from the given volume in the
// cache indicating the specified pod no longer requires the specified
// volume.
// If a pod with the same unique name does not exist under the specified
// volume, this is a no-op.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePodFromVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName)
// VolumeExists returns true if the given volume exists in the list of
// volumes that should be attached to this node.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
VolumeExists(volumeName v1.UniqueVolumeName, seLinuxMountContext string) bool
// PodExistsInVolume returns true if the given pod exists in the list of
// podsToMount for the given volume in the cache.
// If a pod with the same unique name does not exist under the specified
// volume, false is returned.
// If a volume with the name volumeName does not exist in the list of
// attached volumes, false is returned.
PodExistsInVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName, seLinuxMountContext string) bool
// GetVolumeNamesForPod returns a map of UniqueVolumeNames for the given pod, indexed by outerVolumeSpecName.
GetVolumeNamesForPod(podName types.UniquePodName) map[string]v1.UniqueVolumeName
// GetVolumesToMount generates and returns a list of volumes that should be
// attached to this node and the pods they should be mounted to based on the
// current desired state of the world.
GetVolumesToMount() []VolumeToMount
// GetPods generates and returns a map of pods, indexed by the pod's unique
// name. The map can be used to determine which pods are currently in the
// desired state of the world.
GetPods() map[types.UniquePodName]bool
// VolumeExistsWithSpecName returns true if the given volume specified with the
// volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of
// volumes that should be attached to this node.
// If a pod with the same name does not exist under the specified
// volume, false is returned.
VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool
// AddErrorToPod adds the given error to the given pod in the cache.
// It will be returned by subsequent GetPodErrors().
// Each error string is stored only once.
AddErrorToPod(podName types.UniquePodName, err string)
// PopPodErrors returns accumulated errors on a given pod and clears
// them.
PopPodErrors(podName types.UniquePodName) []string
// GetPodsWithErrors returns names of pods that have stored errors.
GetPodsWithErrors() []types.UniquePodName
// MarkVolumeAttachability updates the volume's attachability for a given volume
MarkVolumeAttachability(volumeName v1.UniqueVolumeName, attachable bool)
// UpdatePersistentVolumeSize updates persistentVolumeSize in the desired
// state of the world, so that it can be compared against the actual size and
// volume expansion performed if necessary.
UpdatePersistentVolumeSize(volumeName v1.UniqueVolumeName, size resource.Quantity)
}
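// Illustrative usage sketch (hypothetical function, not in the original
// file): the typical populator-side call sequence against the interface
// above. The outerVolumeSpecName "data" and the empty GID value are
// placeholders supplied for illustration only.
func exampleDesiredStateUsage(logger klog.Logger, dsw DesiredStateOfWorld, podName types.UniquePodName, pod *v1.Pod, spec *volume.Spec) {
volName, err := dsw.AddPodToVolume(logger, podName, pod, spec, "data", "", nil)
if err != nil {
return
}
for _, vtm := range dsw.GetVolumesToMount() {
_ = vtm // one entry per (volume, pod) pair in the desired state
}
dsw.DeletePodFromVolume(podName, volName)
}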
// VolumeToMount represents a volume that is attached to this node and needs to
// be mounted to PodName.
type VolumeToMount struct {
operationexecutor.VolumeToMount
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr, seLinuxTranslator util.SELinuxLabelTranslator) DesiredStateOfWorld {
if feature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
registerSELinuxMetrics()
}
return &desiredStateOfWorld{
volumesToMount: make(map[v1.UniqueVolumeName]volumeToMount),
volumePluginMgr: volumePluginMgr,
podErrors: make(map[types.UniquePodName]sets.Set[string]),
seLinuxTranslator: seLinuxTranslator,
}
}
type desiredStateOfWorld struct {
// volumesToMount is a map containing the set of volumes that should be
// attached to this node and mounted to the pods referencing it. The key in
// the map is the name of the volume and the value is a volume object
// containing more information about the volume.
volumesToMount map[v1.UniqueVolumeName]volumeToMount
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
// podErrors are errors caught by desiredStateOfWorldPopulator about volumes for a given pod.
podErrors map[types.UniquePodName]sets.Set[string]
// seLinuxTranslator translates v1.SELinuxOptions to a file SELinux label.
seLinuxTranslator util.SELinuxLabelTranslator
sync.RWMutex
}
// The volume object represents a volume that should be attached to this node,
// and mounted to podsToMount.
type volumeToMount struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// podsToMount is a map containing the set of pods that reference this
// volume and should mount it once it is attached. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
podsToMount map[types.UniquePodName]podToMount
// pluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
pluginIsAttachable bool
// pluginIsDeviceMountable indicates that the plugin for this volume implements
// the volume.DeviceMounter interface
pluginIsDeviceMountable bool
// volumeGIDValue contains the value of the GID annotation, if present.
volumeGIDValue string
// reportedInUse indicates that the volume was successfully added to the
// VolumesInUse field in the node's status.
reportedInUse bool
// desiredSizeLimit indicates the desired upper bound on the size of the volume
// (if so implemented)
desiredSizeLimit *resource.Quantity
// persistentVolumeSize records desired size of a persistent volume.
// Usually this value reflects size recorded in pv.Spec.Capacity
persistentVolumeSize resource.Quantity
// effectiveSELinuxMountFileLabel is the SELinux label that will be applied to the volume using mount options.
// If empty, then:
// - either the context+label is unknown (assigned randomly by the container runtime)
// - or the volume plugin responsible for this volume does not support mounting with -o context
// - or the volume is not ReadWriteOncePod
// - or the OS does not support SELinux
// In all cases, the SELinux context does not matter when mounting the volume.
effectiveSELinuxMountFileLabel string
// originalSELinuxLabel is the SELinux label that would be used if SELinux mount was supported for all access modes.
// For RWOP volumes it's the same as effectiveSELinuxMountFileLabel.
// It is used only to report potential SELinux mismatch metrics.
// If empty, then:
// - either the context+label is unknown (assigned randomly by the container runtime)
// - or the volume plugin responsible for this volume does not support mounting with -o context
// - or the OS does not support SELinux
originalSELinuxLabel string
}
// The pod object represents a pod that references the underlying volume and
// should mount it once it is attached.
type podToMount struct {
// podName contains the name of this pod.
podName types.UniquePodName
// Pod to mount the volume to. Used to create NewMounter.
pod *v1.Pod
// volume spec containing the specification for this volume. Used to
// generate the volume plugin object, and passed to plugin methods.
// For non-PVC volumes this is the same as defined in the pod object. For
// PVC volumes it is from the dereferenced PV object.
volumeSpec *volume.Spec
// outerVolumeSpecNames are the names (podSpec.Volume[x].Name) under which the pod references this volume.
outerVolumeSpecNames []string
// mountRequestTime stores time at which mount was requested
mountRequestTime time.Time
}
const (
// Maximum errors to be stored per pod in desiredStateOfWorld.podErrors to
// prevent unbounded growth.
maxPodErrors = 10
)
func (dsw *desiredStateOfWorld) AddPodToVolume(
logger klog.Logger,
podName types.UniquePodName,
pod *v1.Pod,
volumeSpec *volume.Spec,
outerVolumeSpecName string,
volumeGIDValue string,
seLinuxContainerContexts []*v1.SELinuxOptions) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
volumePlugin, err := dsw.volumePluginMgr.FindPluginBySpec(volumeSpec)
if err != nil || volumePlugin == nil {
return "", fmt.Errorf(
"failed to get Plugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumePluginName := getVolumePluginNameWithDriver(logger, volumePlugin, volumeSpec)
accessMode := getVolumeAccessMode(volumeSpec)
var volumeName v1.UniqueVolumeName
// The unique volume name used depends on whether the volume is attachable/device-mountable
// or not.
attachable := util.IsAttachableVolume(volumeSpec, dsw.volumePluginMgr)
deviceMountable := util.IsDeviceMountableVolume(volumeSpec, dsw.volumePluginMgr)
if attachable || deviceMountable {
// For attachable/device-mountable volumes, use the unique volume name as reported by
// the plugin.
volumeName, err =
util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
volumeSpec.Name(),
volumePlugin.GetPluginName(),
err)
}
} else {
// For non-attachable and non-device-mountable volumes, generate a unique name based on the unique pod
// name (refer to podUID) and the name of the volume within the pod.
volumeName = util.GetUniqueVolumeNameFromSpecWithPod(podName, volumePlugin, volumeSpec)
}
seLinuxFileLabel, pluginSupportsSELinuxContextMount, err := dsw.getSELinuxLabel(logger, volumeSpec, seLinuxContainerContexts, pod.Spec.SecurityContext)
if err != nil {
return "", err
}
logger.V(4).Info("expected volume SELinux label context", "volume", volumeSpec.Name(), "label", seLinuxFileLabel)
if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
var sizeLimit *resource.Quantity
if volumeSpec.Volume != nil {
if util.IsLocalEphemeralVolume(*volumeSpec.Volume) {
podLimits := resourcehelper.PodLimits(pod, resourcehelper.PodResourcesOptions{})
ephemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
sizeLimit = resource.NewQuantity(ephemeralStorageLimit.Value(), resource.BinarySI)
if volumeSpec.Volume.EmptyDir != nil &&
volumeSpec.Volume.EmptyDir.SizeLimit != nil &&
volumeSpec.Volume.EmptyDir.SizeLimit.Value() > 0 &&
(sizeLimit.Value() == 0 || volumeSpec.Volume.EmptyDir.SizeLimit.Value() < sizeLimit.Value()) {
sizeLimit = resource.NewQuantity(volumeSpec.Volume.EmptyDir.SizeLimit.Value(), resource.BinarySI)
}
}
}
effectiveSELinuxMountLabel := seLinuxFileLabel
if !util.VolumeSupportsSELinuxMount(volumeSpec) {
// Clear SELinux label for the volume with unsupported access modes.
logger.V(4).Info("volume does not support SELinux context mount, clearing the expected label", "volume", volumeSpec.Name())
effectiveSELinuxMountLabel = ""
}
if seLinuxFileLabel != "" {
seLinuxVolumesAdmitted.WithLabelValues(volumePluginName, accessMode).Add(1.0)
}
vmt := volumeToMount{
volumeName: volumeName,
podsToMount: make(map[types.UniquePodName]podToMount),
pluginIsAttachable: attachable,
pluginIsDeviceMountable: deviceMountable,
volumeGIDValue: volumeGIDValue,
reportedInUse: false,
desiredSizeLimit: sizeLimit,
effectiveSELinuxMountFileLabel: effectiveSELinuxMountLabel,
originalSELinuxLabel: seLinuxFileLabel,
}
// record desired size of the volume
if volumeSpec.PersistentVolume != nil {
pvCap := volumeSpec.PersistentVolume.Spec.Capacity.Storage()
if pvCap != nil {
pvCapCopy := pvCap.DeepCopy()
vmt.persistentVolumeSize = pvCapCopy
}
}
dsw.volumesToMount[volumeName] = vmt
}
oldPodMount, ok := dsw.volumesToMount[volumeName].podsToMount[podName]
mountRequestTime := time.Now()
var outerVolumeSpecNames []string
if ok {
if !volumePlugin.RequiresRemount(volumeSpec) {
mountRequestTime = oldPodMount.mountRequestTime
}
outerVolumeSpecNames = oldPodMount.outerVolumeSpecNames
}
if !slices.Contains(outerVolumeSpecNames, outerVolumeSpecName) {
outerVolumeSpecNames = append(outerVolumeSpecNames, outerVolumeSpecName)
}
if !ok {
// The volume exists, but not with this pod.
// It will be added below as podToMount, now just report SELinux metric.
if pluginSupportsSELinuxContextMount {
existingVolume := dsw.volumesToMount[volumeName]
if seLinuxFileLabel != existingVolume.originalSELinuxLabel {
fullErr := fmt.Errorf("conflicting SELinux labels of volume %s: %q and %q", volumeSpec.Name(), existingVolume.originalSELinuxLabel, seLinuxFileLabel)
supported := util.VolumeSupportsSELinuxMount(volumeSpec)
err := handleSELinuxMetricError(
logger,
fullErr,
supported,
seLinuxVolumeContextMismatchWarnings.WithLabelValues(volumePluginName, accessMode),
seLinuxVolumeContextMismatchErrors.WithLabelValues(volumePluginName, accessMode))
if err != nil {
return "", err
}
}
}
}
// Create new podToMount object. If it already exists, it is refreshed with
// updated values (this is required for volumes that require remounting on
// pod update, like Downward API volumes).
dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{
podName: podName,
pod: pod,
volumeSpec: volumeSpec,
outerVolumeSpecNames: outerVolumeSpecNames,
mountRequestTime: mountRequestTime,
}
return volumeName, nil
}
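// Illustrative sketch (hypothetical function, not in the original file) of
// the two naming schemes used above: attachable/device-mountable volumes get
// a plugin-scoped unique name shared across pods, while all other volumes
// get a pod-scoped name, so two pods using identical specs still end up with
// distinct unique volume names.
func exampleUniqueVolumeName(plugin volume.VolumePlugin, spec *volume.Spec, podName types.UniquePodName, attachable bool) (v1.UniqueVolumeName, error) {
if attachable {
// Plugin-scoped: the same PV yields the same name for every pod.
return util.GetUniqueVolumeNameFromSpec(plugin, spec)
}
// Pod-scoped: the pod's unique name is part of the volume name.
return util.GetUniqueVolumeNameFromSpecWithPod(podName, plugin, spec), nil
}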
// getSELinuxLabel returns the SELinux label for a given volume and
// combination of SELinux container contexts, plus a bool indicating whether
// the plugin supports mounting the volume with an SELinux context.
// It returns an error if the SELinux label cannot be constructed or when the
// volume is used with multiple SELinux labels.
func (dsw *desiredStateOfWorld) getSELinuxLabel(logger klog.Logger, volumeSpec *volume.Spec, seLinuxContainerContexts []*v1.SELinuxOptions, podSecurityContext *v1.PodSecurityContext) (seLinuxFileLabel string, pluginSupportsSELinuxContextMount bool, err error) {
labelInfo, err := util.GetMountSELinuxLabel(volumeSpec, seLinuxContainerContexts, podSecurityContext, dsw.volumePluginMgr, dsw.seLinuxTranslator)
if err != nil {
accessMode := getVolumeAccessMode(volumeSpec)
seLinuxSupported := util.VolumeSupportsSELinuxMount(volumeSpec)
if util.IsSELinuxLabelTranslationError(err) {
err := handleSELinuxMetricError(
logger,
err,
seLinuxSupported,
seLinuxContainerContextWarnings.WithLabelValues(accessMode),
seLinuxContainerContextErrors.WithLabelValues(accessMode))
return "", labelInfo.PluginSupportsSELinuxContextMount, err
}
if util.IsMultipleSELinuxLabelsError(err) {
err := handleSELinuxMetricError(
logger,
err,
seLinuxSupported,
seLinuxPodContextMismatchWarnings.WithLabelValues(accessMode),
seLinuxPodContextMismatchErrors.WithLabelValues(accessMode))
return "", false, err
}
return "", labelInfo.PluginSupportsSELinuxContextMount, err
}
return labelInfo.SELinuxMountLabel, labelInfo.PluginSupportsSELinuxContextMount, nil
}
func (dsw *desiredStateOfWorld) MarkVolumesReportedInUse(
reportedVolumes []v1.UniqueVolumeName) {
dsw.Lock()
defer dsw.Unlock()
reportedVolumesMap := make(
map[v1.UniqueVolumeName]bool, len(reportedVolumes) /* capacity */)
for _, reportedVolume := range reportedVolumes {
reportedVolumesMap[reportedVolume] = true
}
for volumeName, volumeObj := range dsw.volumesToMount {
_, volumeReported := reportedVolumesMap[volumeName]
volumeObj.reportedInUse = volumeReported
dsw.volumesToMount[volumeName] = volumeObj
}
}
func (dsw *desiredStateOfWorld) DeletePodFromVolume(
podName types.UniquePodName, volumeName v1.UniqueVolumeName) {
dsw.Lock()
defer dsw.Unlock()
delete(dsw.podErrors, podName)
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.podsToMount[podName]; !podExists {
return
}
// Delete pod if it exists
delete(dsw.volumesToMount[volumeName].podsToMount, podName)
if len(dsw.volumesToMount[volumeName].podsToMount) == 0 {
// Delete volume if no child pods left
delete(dsw.volumesToMount, volumeName)
}
}
// UpdatePersistentVolumeSize updates last known PV size. This is used for volume expansion and
// should be only used for persistent volumes.
func (dsw *desiredStateOfWorld) UpdatePersistentVolumeSize(volumeName v1.UniqueVolumeName, size resource.Quantity) {
dsw.Lock()
defer dsw.Unlock()
vol, volExists := dsw.volumesToMount[volumeName]
if volExists {
vol.persistentVolumeSize = size
dsw.volumesToMount[volumeName] = vol
}
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, seLinuxMountContext string) bool {
dsw.RLock()
defer dsw.RUnlock()
vol, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return false
}
if feature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
// Handle two volumes with the same name but different SELinux contexts
// as two *different* volumes here, because if a volume is mounted with
// an old SELinux context, it must be unmounted first and then mounted
// again with the new context.
//
// This will happen when a pod A with context alpha_t runs and is being
// terminated by kubelet and its volumes are being torn down, while a
// pod B with context beta_t is already scheduled on the same node,
// using the same volumes.
// The volumes from pod A must be fully unmounted (incl. UnmountDevice)
// and mounted with the new SELinux mount options for pod B.
// Without SELinux, kubelet can (and often does) reuse the device
// mounted for pod A.
return vol.effectiveSELinuxMountFileLabel == seLinuxMountContext
}
return true
}
func (dsw *desiredStateOfWorld) PodExistsInVolume(
podName types.UniquePodName, volumeName v1.UniqueVolumeName, seLinuxMountOption string) bool {
dsw.RLock()
defer dsw.RUnlock()
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return false
}
if feature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
if volumeObj.effectiveSELinuxMountFileLabel != seLinuxMountOption {
// The volume is in DSW, but with a different SELinux mount option.
// Report it as unused, so the volume is unmounted and mounted back
// with the right SELinux option.
return false
}
}
_, podExists := volumeObj.podsToMount[podName]
return podExists
}
func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool {
dsw.RLock()
defer dsw.RUnlock()
for _, volumeObj := range dsw.volumesToMount {
if podObj, podExists := volumeObj.podsToMount[podName]; podExists {
if podObj.volumeSpec.Name() == volumeSpecName {
return true
}
}
}
return false
}
func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool {
dsw.RLock()
defer dsw.RUnlock()
podList := make(map[types.UniquePodName]bool)
for _, volumeObj := range dsw.volumesToMount {
for podName := range volumeObj.podsToMount {
podList[podName] = true
}
}
return podList
}
func (dsw *desiredStateOfWorld) GetVolumeNamesForPod(podName types.UniquePodName) map[string]v1.UniqueVolumeName {
dsw.RLock()
defer dsw.RUnlock()
volumeNames := make(map[string]v1.UniqueVolumeName)
for volumeName, volumeObj := range dsw.volumesToMount {
for _, outerVolumeSpecName := range volumeObj.podsToMount[podName].outerVolumeSpecNames {
volumeNames[outerVolumeSpecName] = volumeName
}
}
return volumeNames
}
func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
dsw.RLock()
defer dsw.RUnlock()
volumesToMount := make([]VolumeToMount, 0 /* len */, len(dsw.volumesToMount) /* cap */)
for volumeName, volumeObj := range dsw.volumesToMount {
for podName, podObj := range volumeObj.podsToMount {
vmt := VolumeToMount{
VolumeToMount: operationexecutor.VolumeToMount{
VolumeName: volumeName,
PodName: podName,
Pod: podObj.pod,
VolumeSpec: podObj.volumeSpec,
PluginIsAttachable: volumeObj.pluginIsAttachable,
PluginIsDeviceMountable: volumeObj.pluginIsDeviceMountable,
OuterVolumeSpecNames: podObj.outerVolumeSpecNames,
VolumeGIDValue: volumeObj.volumeGIDValue,
ReportedInUse: volumeObj.reportedInUse,
MountRequestTime: podObj.mountRequestTime,
DesiredSizeLimit: volumeObj.desiredSizeLimit,
SELinuxLabel: volumeObj.effectiveSELinuxMountFileLabel,
},
}
if !volumeObj.persistentVolumeSize.IsZero() {
vmt.DesiredPersistentVolumeSize = volumeObj.persistentVolumeSize.DeepCopy()
}
volumesToMount = append(volumesToMount, vmt)
}
}
return volumesToMount
}
func (dsw *desiredStateOfWorld) AddErrorToPod(podName types.UniquePodName, err string) {
dsw.Lock()
defer dsw.Unlock()
if errs, found := dsw.podErrors[podName]; found {
if errs.Len() <= maxPodErrors {
errs.Insert(err)
}
return
}
dsw.podErrors[podName] = sets.New[string](err)
}
func (dsw *desiredStateOfWorld) PopPodErrors(podName types.UniquePodName) []string {
dsw.Lock()
defer dsw.Unlock()
if errs, found := dsw.podErrors[podName]; found {
delete(dsw.podErrors, podName)
return sets.List(errs)
}
return []string{}
}
func (dsw *desiredStateOfWorld) GetPodsWithErrors() []types.UniquePodName {
dsw.RLock()
defer dsw.RUnlock()
pods := make([]types.UniquePodName, 0, len(dsw.podErrors))
for podName := range dsw.podErrors {
pods = append(pods, podName)
}
return pods
}
func (dsw *desiredStateOfWorld) MarkVolumeAttachability(volumeName v1.UniqueVolumeName, attachable bool) {
dsw.Lock()
defer dsw.Unlock()
volumeObj, volumeExists := dsw.volumesToMount[volumeName]
if !volumeExists {
return
}
volumeObj.pluginIsAttachable = attachable
dsw.volumesToMount[volumeName] = volumeObj
}
// Based on seLinuxSupported, bump the right warning/error metric and either consume the error or return it.
func handleSELinuxMetricError(logger klog.Logger, err error, seLinuxSupported bool, warningMetric, errorMetric metrics.GaugeMetric) error {
if seLinuxSupported {
errorMetric.Add(1.0)
return err
}
// This is not an error yet, but it will be when support for other access modes is added.
warningMetric.Add(1.0)
logger.V(4).Error(err, "Please report this error in https://github.com/kubernetes/enhancements/issues/1710, together with full Pod yaml file")
return nil
}
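// Illustrative sketch (hypothetical call, not in the original file): when
// SELinux mount enforcement does not yet apply to the volume's access mode,
// a mismatch only bumps the warning gauge and the error is swallowed (nil is
// returned). The plugin and access-mode label values are placeholders.
func exampleHandleMismatch(logger klog.Logger, mismatch error) error {
return handleSELinuxMetricError(
logger,
mismatch,
false, // not enforced for this access mode => warning only, nil returned
seLinuxVolumeContextMismatchWarnings.WithLabelValues("kubernetes.io/csi/example.driver", "RWX"),
seLinuxVolumeContextMismatchErrors.WithLabelValues("kubernetes.io/csi/example.driver", "RWX"),
)
}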
// Return the volume plugin name, together with the CSI driver name if it's a CSI volume.
func getVolumePluginNameWithDriver(logger klog.Logger, plugin volume.VolumePlugin, spec *volume.Spec) string {
pluginName := plugin.GetPluginName()
if pluginName != csi.CSIPluginName {
return pluginName
}
// It's a CSI volume
driverName, err := csi.GetCSIDriverName(spec)
if err != nil {
// In theory this is unreachable - such a volume would not pass validation.
logger.V(4).Error(err, "failed to get CSI driver name from volume spec")
driverName = "unknown"
}
// `/` is used to separate plugin + CSI driver in util.GetUniqueVolumeName() too
return pluginName + "/" + driverName
}
func getVolumeAccessMode(spec *volume.Spec) string {
if spec.PersistentVolume == nil {
// In-line volumes in a pod do not have a specific access mode; report "inline".
return "inline"
}
// For the purpose of this metric, report only the "highest" access mode in this order: RWX (highest priority), ROX, RWO, RWOP (lowest priority).
pv := spec.PersistentVolume
if util.ContainsAccessMode(pv.Spec.AccessModes, v1.ReadWriteMany) {
return "RWX"
}
if util.ContainsAccessMode(pv.Spec.AccessModes, v1.ReadOnlyMany) {
return "ROX"
}
if util.ContainsAccessMode(pv.Spec.AccessModes, v1.ReadWriteOnce) {
return "RWO"
}
if util.ContainsAccessMode(pv.Spec.AccessModes, v1.ReadWriteOncePod) {
return "RWOP"
}
// This should not happen; validation does not allow empty or unknown AccessModes.
return ""
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
pluginNameNotAvailable = "N/A"
// Metric keys for Volume Manager.
volumeManagerTotalVolumes = "volume_manager_total_volumes"
reconstructVolumeOperationsTotal = "reconstruct_volume_operations_total"
reconstructVolumeOperationsErrorsTotal = "reconstruct_volume_operations_errors_total"
forceCleanedFailedVolumeOperationsTotal = "force_cleaned_failed_volume_operations_total"
forceCleanedFailedVolumeOperationsErrorsTotal = "force_cleaned_failed_volume_operation_errors_total"
)
var (
registerMetrics sync.Once
totalVolumesDesc = metrics.NewDesc(
volumeManagerTotalVolumes,
"Number of volumes in Volume Manager",
[]string{"plugin_name", "state"},
nil,
metrics.ALPHA, "",
)
ReconstructVolumeOperationsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Name: reconstructVolumeOperationsTotal,
Help: "The number of volumes that were attempted to be reconstructed from the operating system during kubelet startup. This includes both successful and failed reconstruction.",
StabilityLevel: metrics.ALPHA,
},
)
ReconstructVolumeOperationsErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Name: reconstructVolumeOperationsErrorsTotal,
Help: "The number of volumes that failed reconstruction from the operating system during kubelet startup.",
StabilityLevel: metrics.ALPHA,
},
)
ForceCleanedFailedVolumeOperationsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Name: forceCleanedFailedVolumeOperationsTotal,
Help: "The number of volumes that were force cleaned after their reconstruction failed during kubelet startup. This includes both successful and failed cleanups.",
StabilityLevel: metrics.ALPHA,
},
)
ForceCleanedFailedVolumeOperationsErrorsTotal = metrics.NewCounter(
&metrics.CounterOpts{
Name: forceCleanedFailedVolumeOperationsErrorsTotal,
Help: "The number of volumes that failed force cleanup after their reconstruction failed during kubelet startup.",
StabilityLevel: metrics.ALPHA,
},
)
)
// volumeCount is a map of maps used as a counter.
type volumeCount map[string]map[string]int64
func (v volumeCount) add(state, plugin string) {
count, ok := v[state]
if !ok {
count = map[string]int64{}
}
count[plugin]++
v[state] = count
}
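// Illustrative sketch (hypothetical function, not in the original file):
// how the two-level counter accumulates per-state, per-plugin totals. The
// CSI driver name is a placeholder.
func exampleVolumeCount() volumeCount {
c := make(volumeCount)
c.add("actual_state_of_world", "kubernetes.io/csi/example.driver")
c.add("actual_state_of_world", "kubernetes.io/csi/example.driver")
c.add("desired_state_of_world", pluginNameNotAvailable)
// c["actual_state_of_world"]["kubernetes.io/csi/example.driver"] == 2
return c
}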
// Register registers Volume Manager metrics.
func Register(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, pluginMgr *volume.VolumePluginMgr) {
registerMetrics.Do(func() {
legacyregistry.CustomMustRegister(&totalVolumesCollector{asw: asw, dsw: dsw, pluginMgr: pluginMgr})
legacyregistry.MustRegister(ReconstructVolumeOperationsTotal)
legacyregistry.MustRegister(ReconstructVolumeOperationsErrorsTotal)
legacyregistry.MustRegister(ForceCleanedFailedVolumeOperationsTotal)
legacyregistry.MustRegister(ForceCleanedFailedVolumeOperationsErrorsTotal)
})
}
type totalVolumesCollector struct {
metrics.BaseStableCollector
asw cache.ActualStateOfWorld
dsw cache.DesiredStateOfWorld
pluginMgr *volume.VolumePluginMgr
}
var _ metrics.StableCollector = &totalVolumesCollector{}
// DescribeWithStability implements the metrics.StableCollector interface.
func (c *totalVolumesCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
ch <- totalVolumesDesc
}
// CollectWithStability implements the metrics.StableCollector interface.
func (c *totalVolumesCollector) CollectWithStability(ch chan<- metrics.Metric) {
for stateName, pluginCount := range c.getVolumeCount() {
for pluginName, count := range pluginCount {
ch <- metrics.NewLazyConstMetric(totalVolumesDesc,
metrics.GaugeValue,
float64(count),
pluginName,
stateName)
}
}
}
func (c *totalVolumesCollector) getVolumeCount() volumeCount {
counter := make(volumeCount)
for _, mountedVolume := range c.asw.GetMountedVolumes() {
pluginName := volumeutil.GetFullQualifiedPluginNameForVolume(mountedVolume.PluginName, mountedVolume.VolumeSpec)
if pluginName == "" {
pluginName = pluginNameNotAvailable
}
counter.add("actual_state_of_world", pluginName)
}
for _, volumeToMount := range c.dsw.GetVolumesToMount() {
pluginName := pluginNameNotAvailable
if plugin, err := c.pluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec); err == nil {
pluginName = volumeutil.GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), volumeToMount.VolumeSpec)
}
counter.add("desired_state_of_world", pluginName)
}
return counter
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package populator implements interfaces that monitor and keep the states of the
caches in sync with the "ground truth".
*/
package populator
import (
"context"
"errors"
"fmt"
"slices"
"sync"
"time"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// DesiredStateOfWorldPopulator periodically loops through the list of active
// pods and ensures that each one exists in the desired state of the world cache
// if it has volumes. It also verifies that the pods in the desired state of the
// world cache still exist; if not, it removes them.
type DesiredStateOfWorldPopulator interface {
Run(ctx context.Context, sourcesReady config.SourcesReady)
// ReprocessPod sets value for the specified pod in processedPods
// to false, forcing it to be reprocessed. This is required to enable
// remounting volumes on pod updates (volumes like Downward API volumes
// depend on this behavior to ensure volume content is updated).
ReprocessPod(podName volumetypes.UniquePodName)
// HasAddedPods returns whether the populator has looped through the list
// of active pods and added them to the desired state of the world cache,
// at a time after sources are all ready, at least once. It does not
// return true before sources are all ready because before then, there is
// a chance many or all pods are missing from the list of active pods and
// so few to none will have been added.
HasAddedPods() bool
}
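// Illustrative wiring sketch (hypothetical function, not in the original
// file): the volume manager runs the populator in its own goroutine and can
// gate reconciliation on HasAddedPods().
func examplePopulatorWiring(ctx context.Context, p DesiredStateOfWorldPopulator, sourcesReady config.SourcesReady) {
go p.Run(ctx, sourcesReady)
// HasAddedPods stays false until one populator loop has completed after
// all pod sources were ready.
_ = p.HasAddedPods()
}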
// PodStateProvider can determine if a pod is going to be terminated.
type PodStateProvider interface {
ShouldPodContainersBeTerminating(types.UID) bool
ShouldPodRuntimeBeRemoved(types.UID) bool
}
// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.
// See k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc.
type PodManager interface {
GetPodByUID(types.UID) (*v1.Pod, bool)
GetPods() []*v1.Pod
}
// NewDesiredStateOfWorldPopulator returns a new instance of
// DesiredStateOfWorldPopulator.
//
// kubeClient - used to fetch PV and PVC objects from the API server
// loopSleepDuration - the amount of time the populator loop sleeps between
// successive executions
//
// podManager - the kubelet podManager that is the source of truth for the pods
// that exist on this host
//
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
kubeClient clientset.Interface,
loopSleepDuration time.Duration,
podManager PodManager,
podStateProvider PodStateProvider,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
csiMigratedPluginManager csimigration.PluginManager,
intreeToCSITranslator csimigration.InTreeToCSITranslator,
volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
kubeClient: kubeClient,
loopSleepDuration: loopSleepDuration,
podManager: podManager,
podStateProvider: podStateProvider,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
pods: processedPods{
processedPods: make(map[volumetypes.UniquePodName]bool)},
hasAddedPods: false,
hasAddedPodsLock: sync.RWMutex{},
csiMigratedPluginManager: csiMigratedPluginManager,
intreeToCSITranslator: intreeToCSITranslator,
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorldPopulator struct {
kubeClient clientset.Interface
loopSleepDuration time.Duration
podManager PodManager
podStateProvider PodStateProvider
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
pods processedPods
hasAddedPods bool
hasAddedPodsLock sync.RWMutex
csiMigratedPluginManager csimigration.PluginManager
intreeToCSITranslator csimigration.InTreeToCSITranslator
volumePluginMgr *volume.VolumePluginMgr
}
type processedPods struct {
processedPods map[volumetypes.UniquePodName]bool
sync.RWMutex
}
func (dswp *desiredStateOfWorldPopulator) Run(ctx context.Context, sourcesReady config.SourcesReady) {
// Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly
logger := klog.FromContext(ctx)
logger.Info("Desired state populator starts to run")
_ = wait.PollUntilContextCancel(ctx, dswp.loopSleepDuration, false, func(ctx context.Context) (bool, error) {
done := sourcesReady.AllReady()
dswp.populatorLoop(ctx)
return done, nil
})
dswp.hasAddedPodsLock.Lock()
if !dswp.hasAddedPods {
logger.Info("Finished populating initial desired state of world")
dswp.hasAddedPods = true
}
dswp.hasAddedPodsLock.Unlock()
wait.UntilWithContext(ctx, dswp.populatorLoop, dswp.loopSleepDuration)
}
func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
podName volumetypes.UniquePodName) {
dswp.markPodProcessingFailed(podName)
}
func (dswp *desiredStateOfWorldPopulator) HasAddedPods() bool {
dswp.hasAddedPodsLock.RLock()
defer dswp.hasAddedPodsLock.RUnlock()
return dswp.hasAddedPods
}
func (dswp *desiredStateOfWorldPopulator) populatorLoop(ctx context.Context) {
logger := klog.FromContext(ctx)
dswp.findAndAddNewPods(ctx)
dswp.findAndRemoveDeletedPods(logger)
}
// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods(ctx context.Context) {
for _, pod := range dswp.podManager.GetPods() {
// Keep consistency of adding pod during reconstruction
if dswp.hasAddedPods && dswp.podStateProvider.ShouldPodContainersBeTerminating(pod.UID) {
// Do not (re)add volumes for pods that can't also be starting containers
continue
}
if !dswp.hasAddedPods && dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) {
// When kubelet restarts, we need to add pods to dsw if there is a possibility
// that the container may still be running
continue
}
dswp.processPodVolumes(ctx, pod)
}
}
// Iterate through all pods in desired state of world, and remove if they no
// longer exist
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods(logger klog.Logger) {
podsFromCache := make(map[volumetypes.UniquePodName]struct{})
for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
podsFromCache[volumetypes.UniquePodName(volumeToMount.Pod.UID)] = struct{}{}
pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID)
if podExists {
// check if the attachability has changed for this volume
if volumeToMount.PluginIsAttachable {
attachableVolumePlugin, err := dswp.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
// Only this combination (no error and a nil plugin) means the plugin is truly non-attachable.
if err == nil && attachableVolumePlugin == nil {
// It is not possible right now for a CSI plugin to be both attachable and non-deviceMountable
// So the uniqueVolumeName should remain the same after the attachability change
dswp.desiredStateOfWorld.MarkVolumeAttachability(volumeToMount.VolumeName, false)
logger.Info("Volume changes from attachable to non-attachable", "volumeName", volumeToMount.VolumeName)
continue
}
}
// Exclude known pods that we expect to be running
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(pod.UID) {
continue
}
}
// Once a pod has been deleted from kubelet pod manager, do not delete
// it immediately from volume manager. Instead, check the kubelet
// pod state provider to verify that all containers in the pod have been
// terminated.
if !dswp.podStateProvider.ShouldPodRuntimeBeRemoved(volumeToMount.Pod.UID) {
logger.V(4).Info("Pod still has one or more containers in the non-exited state and will not be removed from desired state", "pod", klog.KObj(volumeToMount.Pod))
continue
}
var volumeToMountSpecName string
if volumeToMount.VolumeSpec != nil {
volumeToMountSpecName = volumeToMount.VolumeSpec.Name()
}
removed := dswp.actualStateOfWorld.PodRemovedFromVolume(volumeToMount.PodName, volumeToMount.VolumeName)
if removed && podExists {
logger.V(4).Info("Actual state does not yet have volume mount information and pod still exists in pod manager, skip removing volume from desired state", "pod", klog.KObj(volumeToMount.Pod), "podUID", volumeToMount.Pod.UID, "volumeName", volumeToMountSpecName)
continue
}
logger.V(4).Info("Removing volume from desired state", "pod", klog.KObj(volumeToMount.Pod), "podUID", volumeToMount.Pod.UID, "volumeName", volumeToMountSpecName)
dswp.desiredStateOfWorld.DeletePodFromVolume(
volumeToMount.PodName, volumeToMount.VolumeName)
dswp.deleteProcessedPod(volumeToMount.PodName)
}
// Clean up orphaned entries from processedPods
dswp.pods.Lock()
orphanedPods := make([]volumetypes.UniquePodName, 0, len(dswp.pods.processedPods))
for k := range dswp.pods.processedPods {
if _, ok := podsFromCache[k]; !ok {
orphanedPods = append(orphanedPods, k)
}
}
dswp.pods.Unlock()
for _, orphanedPod := range orphanedPods {
uid := types.UID(orphanedPod)
_, podExists := dswp.podManager.GetPodByUID(uid)
if !podExists && dswp.podStateProvider.ShouldPodRuntimeBeRemoved(uid) {
dswp.deleteProcessedPod(orphanedPod)
}
}
podsWithError := dswp.desiredStateOfWorld.GetPodsWithErrors()
for _, podName := range podsWithError {
if _, podExists := dswp.podManager.GetPodByUID(types.UID(podName)); !podExists {
dswp.desiredStateOfWorld.PopPodErrors(podName)
}
}
}
// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(ctx context.Context, pod *v1.Pod) {
if pod == nil {
return
}
logger := klog.FromContext(ctx)
uniquePodName := util.GetUniquePodName(pod)
if dswp.podPreviouslyProcessed(uniquePodName) {
return
}
allVolumesAdded := true
collectSELinuxOptions := utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod)
mounts, devices, seLinuxContainerContexts := util.GetPodVolumeNames(pod, collectSELinuxOptions)
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
if !mounts.Has(podVolume.Name) && !devices.Has(podVolume.Name) {
// Volume is not used in the pod, ignore it.
logger.V(4).Info("Skipping unused volume", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
continue
}
pvc, volumeSpec, volumeGIDValue, err :=
dswp.createVolumeSpec(ctx, podVolume, pod, mounts, devices)
if err != nil {
logger.Error(err, "Error processing volume", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
dswp.desiredStateOfWorld.AddErrorToPod(uniquePodName, err.Error())
allVolumesAdded = false
continue
}
// Add volume to desired state of world
uniqueVolumeName, err := dswp.desiredStateOfWorld.AddPodToVolume(
logger, uniquePodName, pod, volumeSpec, podVolume.Name, volumeGIDValue, seLinuxContainerContexts[podVolume.Name])
if err != nil {
logger.Error(err, "Failed to add volume to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
dswp.desiredStateOfWorld.AddErrorToPod(uniquePodName, err.Error())
allVolumesAdded = false
continue
}
logger.V(4).Info("Added volume to desired state", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
dswp.checkVolumeFSResize(logger, pod, podVolume, pvc, volumeSpec, uniqueVolumeName)
}
// Some of the volume additions may have failed; do not mark this pod as fully processed.
if allVolumesAdded {
dswp.markPodProcessed(uniquePodName)
// New pod has been synced. Re-mount all volumes that need it
// (e.g. DownwardAPI)
dswp.actualStateOfWorld.MarkRemountRequired(logger, uniquePodName)
// Remove any stored errors for the pod; everything went well in this processPodVolumes call.
dswp.desiredStateOfWorld.PopPodErrors(uniquePodName)
} else if dswp.podHasBeenSeenOnce(uniquePodName) {
// For a Pod that has been processed at least once, even though some volumes
// may not have been reprocessed successfully this round, we still mark it as
// processed to avoid processing it at a very high frequency. The pod will be
// reprocessed when the volume manager calls ReprocessPod(), which is
// triggered by SyncPod.
dswp.markPodProcessed(uniquePodName)
}
}
// checkVolumeFSResize records the desired PVC size for a volume mounted by
// the pod. It is used for comparison with the actual size (coming from
// pvc.Status.Capacity) and for calling volume expansion on the node if
// needed.
func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
logger klog.Logger,
pod *v1.Pod,
podVolume v1.Volume,
pvc *v1.PersistentVolumeClaim,
volumeSpec *volume.Spec,
uniqueVolumeName v1.UniqueVolumeName) {
// If a volumeSpec does not have a PV, has InlineVolumeSpecForCSIMigration
// set, or pvc is nil, we can't resize the volume and hence resizing should
// be skipped.
if volumeSpec.PersistentVolume == nil || volumeSpec.InlineVolumeSpecForCSIMigration || pvc == nil {
// Only PVC supports resize operation.
return
}
// volumeSpec.ReadOnly is the value that determines if the volume could be formatted when being mounted.
// This is the same flag that determines filesystem resizing behaviour for offline resizing and hence
// we should use it here. This value comes from Pod.spec.volumes.persistentVolumeClaim.readOnly.
if volumeSpec.ReadOnly {
// This volume is used as read only by this pod, we don't perform resize for read only volumes.
logger.V(5).Info("Skip file system resize check for the volume, as the volume is mounted as readonly", "pod", klog.KObj(pod), "volumeName", podVolume.Name)
return
}
pvCap := volumeSpec.PersistentVolume.Spec.Capacity.Storage().DeepCopy()
pvcStatusCap := pvc.Status.Capacity.Storage().DeepCopy()
dswp.desiredStateOfWorld.UpdatePersistentVolumeSize(uniqueVolumeName, pvCap)
logger.V(5).Info("NodeExpandVolume updating size", "actualSize", pvcStatusCap.String(), "desiredSize", pvCap.String(), "volumeName", uniqueVolumeName)
// In case the actualStateOfWorld was rebuilt after a kubelet restart, ensure that claimSize is set to an accurate value.
dswp.actualStateOfWorld.InitializeClaimSize(logger, uniqueVolumeName, pvcStatusCap)
}
// podPreviouslyProcessed returns true if the volumes for this pod have already
// been processed/reprocessed by the populator. Otherwise, the volumes for this pod need to
// be reprocessed.
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
podName volumetypes.UniquePodName) bool {
dswp.pods.RLock()
defer dswp.pods.RUnlock()
return dswp.pods.processedPods[podName]
}
// markPodProcessingFailed marks the specified pod from processedPods as false to indicate that it failed processing
func (dswp *desiredStateOfWorldPopulator) markPodProcessingFailed(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
dswp.pods.processedPods[podName] = false
dswp.pods.Unlock()
}
// podHasBeenSeenOnce returns true if the pod has been seen by the populator
// at least once.
func (dswp *desiredStateOfWorldPopulator) podHasBeenSeenOnce(
podName volumetypes.UniquePodName) bool {
dswp.pods.RLock()
_, exist := dswp.pods.processedPods[podName]
dswp.pods.RUnlock()
return exist
}
// markPodProcessed records that the volumes for the specified pod have been
// processed by the populator
func (dswp *desiredStateOfWorldPopulator) markPodProcessed(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
dswp.pods.processedPods[podName] = true
}
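// Illustrative sketch (hypothetical method, not in the original file):
// processedPods is effectively a tri-state record. A missing key means
// "never seen", false means "seen but must be reprocessed", and true means
// "fully processed".
func (dswp *desiredStateOfWorldPopulator) examplePodState(podName volumetypes.UniquePodName) string {
dswp.pods.RLock()
defer dswp.pods.RUnlock()
processed, seen := dswp.pods.processedPods[podName]
switch {
case !seen:
return "never seen"
case !processed:
return "seen, needs reprocessing"
default:
return "fully processed"
}
}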
// deleteProcessedPod removes the specified pod from processedPods
func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
podName volumetypes.UniquePodName) {
dswp.pods.Lock()
defer dswp.pods.Unlock()
delete(dswp.pods.processedPods, podName)
}
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
// Returns an error if unable to obtain the volume at this time.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
ctx context.Context, podVolume v1.Volume, pod *v1.Pod, mounts, devices sets.Set[string]) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
logger := klog.FromContext(ctx)
pvcSource := podVolume.VolumeSource.PersistentVolumeClaim
isEphemeral := pvcSource == nil && podVolume.VolumeSource.Ephemeral != nil
if isEphemeral {
// Generic ephemeral inline volumes are handled the
// same way as a PVC reference. The only additional
// constraint (checked below) is that the PVC must be
// owned by the pod.
pvcSource = &v1.PersistentVolumeClaimVolumeSource{
ClaimName: ephemeral.VolumeClaimName(pod, &podVolume),
}
}
if pvcSource != nil {
logger.V(5).Info("Found PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName))
// If podVolume is a PVC, fetch the real PV behind the claim
pvc, err := dswp.getPVCExtractPV(
ctx, pod.Namespace, pvcSource.ClaimName)
if err != nil {
return nil, nil, "", fmt.Errorf(
"error processing PVC %s/%s: %v",
pod.Namespace,
pvcSource.ClaimName,
err)
}
if isEphemeral {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return nil, nil, "", err
}
}
pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
logger.V(5).Info("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName), "PVCUID", pvcUID, "PVName", pvName)
// Fetch actual PV object
volumeSpec, volumeGIDValue, err :=
dswp.getPVSpec(ctx, pvName, pvcSource.ReadOnly, pvcUID)
if err != nil {
return nil, nil, "", fmt.Errorf(
"error processing PVC %s/%s: %v",
pod.Namespace,
pvcSource.ClaimName,
err)
}
logger.V(5).Info("Extracted volumeSpec from bound PV and PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName), "PVCUID", pvcUID, "PVName", pvName, "volumeSpecName", volumeSpec.Name())
migratable, err := dswp.csiMigratedPluginManager.IsMigratable(volumeSpec)
if err != nil {
return nil, nil, "", err
}
if migratable {
volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(logger, volumeSpec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil {
return nil, nil, "", err
}
}
volumeMode, err := util.GetVolumeMode(volumeSpec)
if err != nil {
return nil, nil, "", err
}
// Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem.
if mounts.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeFilesystem {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeMounts",
podVolume.Name,
volumeMode)
}
// Error if a container has volumeDevices but the volumeMode of PVC isn't Block
if devices.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeBlock {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeDevices",
podVolume.Name,
volumeMode)
}
return pvc, volumeSpec, volumeGIDValue, nil
}
// Do not return the original volume object, since the source could mutate it
clonedPodVolume := podVolume.DeepCopy()
spec := volume.NewSpecFromVolume(clonedPodVolume)
migratable, err := dswp.csiMigratedPluginManager.IsMigratable(spec)
if err != nil {
return nil, nil, "", err
}
if migratable {
spec, err = csimigration.TranslateInTreeSpecToCSI(logger, spec, pod.Namespace, dswp.intreeToCSITranslator)
if err != nil {
return nil, nil, "", err
}
}
return nil, spec, "", nil
}
// getPVCExtractPV fetches the PVC object with the given namespace and name from
// the API server, checks whether PVC is being deleted, extracts the name of the PV
// it is pointing to and returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
ctx context.Context, namespace string, claimName string) (*v1.PersistentVolumeClaim, error) {
pvc, err :=
dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{})
if err != nil || pvc == nil {
return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err)
}
// Pods that use a PVC that is being deleted and is not protected by
// kubernetes.io/pvc-protection must not be started.
//
// 1) In case an old kubelet is running without this check, the worst
// that can happen is that such a pod is scheduled. This was the default
// behavior in 1.8 and earlier and users should not be that surprised.
// It should happen only in the very rare case when the scheduler schedules
// a pod and the user deletes a PVC that's used by it at the same time.
//
// 2) Adding a check for kubernetes.io/pvc-protection here to prevent
// the existing running pods from being affected during the rebuild of
// the desired state of the world cache when the kubelet is restarted.
// It is safe for kubelet to add this check here because the PVC will
// be stuck in Terminating state until the pod is deleted.
if pvc.ObjectMeta.DeletionTimestamp != nil && !slices.Contains(pvc.Finalizers, util.PVCProtectionFinalizer) {
return nil, errors.New("PVC is being deleted")
}
if pvc.Status.Phase != v1.ClaimBound {
return nil, errors.New("PVC is not bound")
}
if pvc.Spec.VolumeName == "" {
return nil, errors.New("PVC has empty pvc.Spec.VolumeName")
}
return pvc, nil
}
// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
ctx context.Context,
name string,
pvcReadOnly bool,
expectedClaimUID types.UID) (*volume.Spec, string, error) {
pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(ctx, name, metav1.GetOptions{})
if err != nil || pv == nil {
return nil, "", fmt.Errorf(
"failed to fetch PV %s from API server: %v", name, err)
}
if pv.Spec.ClaimRef == nil {
return nil, "", fmt.Errorf(
"found PV object %s but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
name)
}
if pv.Spec.ClaimRef.UID != expectedClaimUID {
return nil, "", fmt.Errorf(
"found PV object %s but its pv.Spec.ClaimRef.UID %s does not point to claim.UID %s",
name,
pv.Spec.ClaimRef.UID,
expectedClaimUID)
}
volumeGIDValue := getPVVolumeGidAnnotationValue(pv)
return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGIDValue, nil
}
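// getPVVolumeGidAnnotationValue returns the value of the PV's GID annotation, or
// "" when the annotation is absent. Illustrative PV fragment (assuming the beta
// annotation key "pv.beta.kubernetes.io/gid" behind util.VolumeGidAnnotationKey):
//
//	apiVersion: v1
//	kind: PersistentVolume
//	metadata:
//	  annotations:
//	    pv.beta.kubernetes.io/gid: "1234"
//
// The value later surfaces as an extra supplemental group for pods using the
// volume (see GetExtraSupplementalGroupsForPod in the volume manager).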
func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok {
return volumeGid
}
return ""
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"context"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
func (rc *reconciler) Run(ctx context.Context, stopCh <-chan struct{}) {
logger := klog.FromContext(ctx)
rc.reconstructVolumes(logger)
logger.Info("Reconciler: start to sync state")
wait.Until(func() { rc.reconcile(ctx) }, rc.loopSleepDuration, stopCh)
}
func (rc *reconciler) reconcile(ctx context.Context) {
logger := klog.FromContext(ctx)
readyToUnmount := rc.readyToUnmount()
if readyToUnmount {
// Unmounts are triggered before mounts so that a volume that was
// referenced by a pod that was deleted and is now referenced by another
// pod is unmounted from the first pod before being mounted to the new
// pod.
rc.unmountVolumes(logger)
}
// Next we mount required volumes. This function could also trigger
// attach if kubelet is responsible for attaching volumes.
// If the underlying PVC was resized while in use, this function also
// handles volume resizing.
rc.mountOrAttachVolumes(logger)
// Unmount volumes only when DSW and ASW are fully populated, to prevent unmounting a volume
// that is still needed but has not reached the DSW yet.
if readyToUnmount {
// Ensure devices that should be detached/unmounted are detached/unmounted.
rc.unmountDetachDevices(logger)
// Clean up any orphan volumes that failed reconstruction.
rc.cleanOrphanVolumes(logger)
}
if len(rc.volumesNeedUpdateFromNodeStatus) != 0 {
rc.updateReconstructedFromNodeStatus(ctx)
}
if len(rc.volumesNeedUpdateFromNodeStatus) == 0 {
// ASW is fully populated only after both devicePaths and uncertain volume attachability
// have been reconstructed from the API server.
// This will start reconciliation of node.status.volumesInUse.
rc.updateLastSyncTime()
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"context"
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
volumepkg "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/mount-utils"
)
// Reconciler runs a periodic loop to reconcile the desired state of the world
// with the actual state of the world by triggering attach, detach, mount, and
// unmount operations.
// Note: This is distinct from the Reconciler implemented by the attach/detach
// controller. This reconciles state for the kubelet volume manager. That
// reconciles state for the attach/detach controller.
type Reconciler interface {
// Run starts the reconciliation loop, which executes periodically and checks
// whether volumes that should be mounted are mounted and volumes that should
// be unmounted are unmounted. If not, it triggers mount/unmount operations
// to rectify.
// If attach/detach management is enabled, the manager will also check if
// volumes that should be attached are attached and volumes that should
// be detached are detached and trigger attach/detach operations as needed.
Run(ctx context.Context, stopCh <-chan struct{})
// StatesHasBeenSynced returns true only after the syncStates process has
// synced states at least once after kubelet starts
StatesHasBeenSynced() bool
}
// NewReconciler returns a new instance of Reconciler.
//
// controllerAttachDetachEnabled - if true, indicates that the attach/detach
// controller is responsible for managing the attach/detach operations for
// this node, and therefore the volume manager should not trigger them itself
//
// loopSleepDuration - the amount of time the reconciler loop sleeps between
// successive executions
//
// waitForAttachTimeout - the amount of time the Mount function will wait for
// the volume to be attached
//
// nodeName - the Name for this node, used by Attach and Detach methods
//
// desiredStateOfWorld - cache containing the desired state of the world
//
// actualStateOfWorld - cache containing the actual state of the world
//
// populatorHasAddedPods - checker for whether the populator has finished
// adding pods to the desiredStateOfWorld cache at least once after sources
// are all ready (before sources are ready, pods are probably missing)
//
// operationExecutor - used to trigger attach/detach/mount/unmount operations
// safely (prevents more than one operation from being triggered on the same
// volume)
//
// mounter - mounter passed in from kubelet, passed down unmount path
//
// hostutil - hostutil passed in from kubelet
//
// volumePluginMgr - volume plugin manager passed from kubelet
func NewReconciler(
kubeClient clientset.Interface,
controllerAttachDetachEnabled bool,
loopSleepDuration time.Duration,
waitForAttachTimeout time.Duration,
nodeName types.NodeName,
desiredStateOfWorld cache.DesiredStateOfWorld,
actualStateOfWorld cache.ActualStateOfWorld,
populatorHasAddedPods func() bool,
operationExecutor operationexecutor.OperationExecutor,
mounter mount.Interface,
hostutil hostutil.HostUtils,
volumePluginMgr *volumepkg.VolumePluginMgr,
kubeletPodsDir string) Reconciler {
return &reconciler{
kubeClient: kubeClient,
controllerAttachDetachEnabled: controllerAttachDetachEnabled,
loopSleepDuration: loopSleepDuration,
waitForAttachTimeout: waitForAttachTimeout,
nodeName: nodeName,
desiredStateOfWorld: desiredStateOfWorld,
actualStateOfWorld: actualStateOfWorld,
populatorHasAddedPods: populatorHasAddedPods,
operationExecutor: operationExecutor,
mounter: mounter,
hostutil: hostutil,
skippedDuringReconstruction: map[v1.UniqueVolumeName]*globalVolumeInfo{},
volumePluginMgr: volumePluginMgr,
kubeletPodsDir: kubeletPodsDir,
timeOfLastSync: time.Time{},
volumesFailedReconstruction: make([]podVolume, 0),
volumesNeedUpdateFromNodeStatus: make([]v1.UniqueVolumeName, 0),
}
}
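// Illustrative wiring sketch, not part of this file's API surface: this is
// roughly how the volume manager constructs and starts the reconciler (the
// variable names are assumptions for the example):
//
//	rc := NewReconciler(kubeClient, controllerAttachDetachEnabled,
//		100*time.Millisecond /* loopSleepDuration */,
//		10*time.Minute /* waitForAttachTimeout */,
//		nodeName, dsw, asw, populator.HasAddedPods, operationExecutor,
//		mounter, hostutil, volumePluginMgr, kubeletPodsDir)
//	go rc.Run(ctx, ctx.Done())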
type reconciler struct {
kubeClient clientset.Interface
controllerAttachDetachEnabled bool
loopSleepDuration time.Duration
waitForAttachTimeout time.Duration
nodeName types.NodeName
desiredStateOfWorld cache.DesiredStateOfWorld
actualStateOfWorld cache.ActualStateOfWorld
populatorHasAddedPods func() bool
operationExecutor operationexecutor.OperationExecutor
mounter mount.Interface
hostutil hostutil.HostUtils
volumePluginMgr *volumepkg.VolumePluginMgr
skippedDuringReconstruction map[v1.UniqueVolumeName]*globalVolumeInfo
kubeletPodsDir string
// lock protects timeOfLastSync for updating and checking
timeOfLastSyncLock sync.Mutex
timeOfLastSync time.Time
volumesFailedReconstruction []podVolume
volumesNeedUpdateFromNodeStatus []v1.UniqueVolumeName
}
func (rc *reconciler) unmountVolumes(logger klog.Logger) {
// Ensure volumes that should be unmounted are unmounted.
for _, mountedVolume := range rc.actualStateOfWorld.GetAllMountedVolumes() {
if rc.operationExecutor.IsOperationPending(mountedVolume.VolumeName, mountedVolume.PodName, nestedpendingoperations.EmptyNodeName) {
continue
}
if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName, mountedVolume.SELinuxMountContext) {
// Volume is mounted, unmount it
logger.V(5).Info(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", ""))
err := rc.operationExecutor.UnmountVolume(
mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
if err != nil && !isExpectedError(err) {
logger.Error(err, mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
}
if err == nil {
logger.Info(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", ""))
}
}
}
}
func (rc *reconciler) mountOrAttachVolumes(logger klog.Logger) {
// Ensure volumes that should be attached/mounted are attached/mounted.
for _, volumeToMount := range rc.desiredStateOfWorld.GetVolumesToMount() {
if rc.operationExecutor.IsOperationPending(volumeToMount.VolumeName, nestedpendingoperations.EmptyUniquePodName, nestedpendingoperations.EmptyNodeName) {
continue
}
volMounted, devicePath, err := rc.actualStateOfWorld.PodExistsInVolume(logger, volumeToMount.PodName, volumeToMount.VolumeName, volumeToMount.DesiredPersistentVolumeSize, volumeToMount.SELinuxLabel)
volumeToMount.DevicePath = devicePath
if cache.IsSELinuxMountMismatchError(err) {
// The volume is mounted, but with an unexpected SELinux context.
// It will get unmounted in unmountVolumes / unmountDetachDevices and
// then removed from actualStateOfWorld.
rc.desiredStateOfWorld.AddErrorToPod(volumeToMount.PodName, err.Error())
continue
} else if cache.IsVolumeNotAttachedError(err) {
rc.waitForVolumeAttach(logger, volumeToMount)
} else if !volMounted || cache.IsRemountRequiredError(err) {
rc.mountAttachedVolumes(logger, volumeToMount, err)
} else if cache.IsFSResizeRequiredError(err) {
fsResizeRequiredErr, _ := err.(cache.FsResizeRequiredError)
rc.expandVolume(logger, volumeToMount, fsResizeRequiredErr.CurrentSize)
}
}
}
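// Summary of the dispatch above on the PodExistsInVolume result:
//
//	SELinux mount mismatch            -> record pod error; volume is unmounted later
//	volume not attached               -> waitForVolumeAttach
//	not mounted, or remount required  -> mountAttachedVolumes
//	filesystem resize required        -> expandVolume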
func (rc *reconciler) expandVolume(logger klog.Logger, volumeToMount cache.VolumeToMount, currentSize resource.Quantity) {
logger.V(4).Info(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandInUseVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.ExpandInUseVolume(volumeToMount.VolumeToMount, rc.actualStateOfWorld, currentSize)
if err != nil && !isExpectedError(err) {
logger.Error(err, volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandInUseVolume failed", err).Error(), "pod", klog.KObj(volumeToMount.Pod))
}
if err == nil {
logger.V(4).Info(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandInUseVolume started", ""), "pod", klog.KObj(volumeToMount.Pod))
}
}
func (rc *reconciler) mountAttachedVolumes(logger klog.Logger, volumeToMount cache.VolumeToMount, podExistError error) {
// Volume is not mounted, or is already mounted, but requires remounting
remountingLogStr := ""
isRemount := cache.IsRemountRequiredError(podExistError)
if isRemount {
remountingLogStr = "Volume is already mounted to pod, but remount was requested."
}
logger.V(4).Info(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.MountVolume(
rc.waitForAttachTimeout,
volumeToMount.VolumeToMount,
rc.actualStateOfWorld,
isRemount)
if err != nil && !isExpectedError(err) {
logger.Error(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod))
}
if err == nil {
if remountingLogStr == "" {
logger.V(1).Info(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr), "pod", klog.KObj(volumeToMount.Pod))
} else {
logger.V(5).Info(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr), "pod", klog.KObj(volumeToMount.Pod))
}
}
}
func (rc *reconciler) waitForVolumeAttach(logger klog.Logger, volumeToMount cache.VolumeToMount) {
if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
// let's not spin up a goroutine and unnecessarily trigger exponential backoff if this happens
if volumeToMount.PluginIsAttachable && !volumeToMount.ReportedInUse {
logger.V(5).Info(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume failed", " volume not marked in-use"), "pod", klog.KObj(volumeToMount.Pod))
return
}
// Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait
// for controller to finish attaching volume.
logger.V(5).Info(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.VerifyControllerAttachedVolume(
logger,
volumeToMount.VolumeToMount,
rc.nodeName,
rc.actualStateOfWorld)
if err != nil && !isExpectedError(err) {
logger.Error(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod))
}
if err == nil {
logger.Info(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", ""), "pod", klog.KObj(volumeToMount.Pod))
}
} else {
// Volume is not attached to node, kubelet attach is enabled, volume implements an attacher,
// so attach it
volumeToAttach := operationexecutor.VolumeToAttach{
VolumeName: volumeToMount.VolumeName,
VolumeSpec: volumeToMount.VolumeSpec,
NodeName: rc.nodeName,
ScheduledPods: []*v1.Pod{volumeToMount.Pod},
}
logger.V(5).Info(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.AttachVolume(logger, volumeToAttach, rc.actualStateOfWorld)
if err != nil && !isExpectedError(err) {
logger.Error(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod))
}
if err == nil {
logger.Info(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", ""), "pod", klog.KObj(volumeToMount.Pod))
}
}
}
func (rc *reconciler) unmountDetachDevices(logger klog.Logger) {
for _, attachedVolume := range rc.actualStateOfWorld.GetUnmountedVolumes() {
// Check IsOperationPending to avoid marking a volume as detached if it's in the process of mounting.
if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName, attachedVolume.SELinuxMountContext) &&
!rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName, nestedpendingoperations.EmptyNodeName) {
// Re-read the actual state of the world, maybe the volume got mounted in the meantime.
// This is safe, because there is no pending operation (checked above) and no new operation
// could start in the meantime. The only goroutine that adds new operations is this reconciler.
attachedVolume, _ = rc.actualStateOfWorld.GetAttachedVolume(attachedVolume.VolumeName)
if attachedVolume.DeviceMayBeMounted() {
// Volume is globally mounted to device, unmount it
logger.V(5).Info(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", ""))
err := rc.operationExecutor.UnmountDevice(
attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.hostutil)
if err != nil && !isExpectedError(err) {
logger.Error(err, attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
}
if err == nil {
logger.Info(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", ""))
}
} else {
// Volume is attached to node, detach it
// Kubelet not responsible for detaching or this volume has a non-attachable volume plugin.
if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable {
rc.actualStateOfWorld.MarkVolumeAsDetached(attachedVolume.VolumeName, attachedVolume.NodeName)
logger.Info(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath)))
} else {
// Only detach if kubelet detach is enabled
logger.V(5).Info(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
err := rc.operationExecutor.DetachVolume(
logger, attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
if err != nil && !isExpectedError(err) {
logger.Error(err, attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
}
if err == nil {
logger.Info(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", ""))
}
}
}
}
}
}
// isExpectedError reports whether err is expected during normal operation:
// nestedpendingoperations.IsAlreadyExists, exponentialbackoff.IsExponentialBackoff,
// and operationexecutor.IsMountFailedPreconditionError errors are all expected.
func isExpectedError(err error) bool {
return nestedpendingoperations.IsAlreadyExists(err) || exponentialbackoff.IsExponentialBackoff(err) || operationexecutor.IsMountFailedPreconditionError(err)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// readyToUnmount returns true when the reconciler can start unmounting volumes.
func (rc *reconciler) readyToUnmount() bool {
// During kubelet startup, all volumes present on disk are added as uncertain to ASW.
// Allow unmount only when DSW is fully populated to prevent unmounting volumes that
// did not reach DSW yet.
if !rc.populatorHasAddedPods() {
return false
}
// Allow unmount only when ASW device paths were corrected from node.status to prevent
// calling unmount with a wrong devicePath.
if len(rc.volumesNeedUpdateFromNodeStatus) != 0 {
return false
}
return true
}
// reconstructVolumes tries to reconstruct the actual state of the world by scanning all pods' volume
// directories on disk. Volumes that do not support reconstruction, or for which it fails, are
// put into volumesFailedReconstruction to be cleaned up later, once the DesiredStateOfWorld
// is populated.
func (rc *reconciler) reconstructVolumes(logger klog.Logger) {
// Get volumes information by reading the pod's directory
podVolumes, err := getVolumesFromPodDir(logger, rc.kubeletPodsDir)
if err != nil {
logger.Error(err, "Cannot get volumes from disk, skip sync states for volume reconstruction")
return
}
reconstructedVolumes := make(map[v1.UniqueVolumeName]*globalVolumeInfo)
reconstructedVolumeNames := []v1.UniqueVolumeName{}
for _, volume := range podVolumes {
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
logger.V(4).Info("Volume exists in actual state, skip cleaning up mounts", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
// There is nothing to reconstruct
continue
}
reconstructedVolume, err := rc.reconstructVolume(volume)
if err != nil {
logger.Info("Could not construct volume information", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName, "err", err)
// We can't reconstruct the volume. Remember to check DSW after it's fully populated and force unmount the volume when it's orphaned.
rc.volumesFailedReconstruction = append(rc.volumesFailedReconstruction, volume)
continue
}
logger.V(4).Info("Adding reconstructed volume to actual state and node status", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
gvl := &globalVolumeInfo{
volumeName: reconstructedVolume.volumeName,
volumeSpec: reconstructedVolume.volumeSpec,
devicePath: reconstructedVolume.devicePath,
deviceMounter: reconstructedVolume.deviceMounter,
blockVolumeMapper: reconstructedVolume.blockVolumeMapper,
mounter: reconstructedVolume.mounter,
}
if cachedInfo, ok := reconstructedVolumes[reconstructedVolume.volumeName]; ok {
gvl = cachedInfo
}
gvl.addPodVolume(reconstructedVolume)
reconstructedVolumeNames = append(reconstructedVolumeNames, reconstructedVolume.volumeName)
reconstructedVolumes[reconstructedVolume.volumeName] = gvl
}
if len(reconstructedVolumes) > 0 {
// Add the volumes to ASW
rc.updateStates(logger, reconstructedVolumes)
// Remember to update devicePath from node.status.volumesAttached
rc.volumesNeedUpdateFromNodeStatus = reconstructedVolumeNames
}
logger.V(2).Info("Volume reconstruction finished")
}
func (rc *reconciler) updateStates(logger klog.Logger, reconstructedVolumes map[v1.UniqueVolumeName]*globalVolumeInfo) {
for _, gvl := range reconstructedVolumes {
err := rc.actualStateOfWorld.AddAttachUncertainReconstructedVolume(
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
logger, gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
if err != nil {
logger.Error(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
continue
}
var seLinuxMountContext string
for _, volume := range gvl.podVolumes {
markVolumeOpts := operationexecutor.MarkVolumeOpts{
PodName: volume.podName,
PodUID: types.UID(volume.podName),
VolumeName: volume.volumeName,
Mounter: volume.mounter,
BlockVolumeMapper: volume.blockVolumeMapper,
VolumeGIDVolume: volume.volumeGIDValue,
VolumeSpec: volume.volumeSpec,
VolumeMountState: operationexecutor.VolumeMountUncertain,
SELinuxMountContext: volume.seLinuxMountContext,
}
_, err = rc.actualStateOfWorld.CheckAndMarkVolumeAsUncertainViaReconstruction(markVolumeOpts)
if err != nil {
logger.Error(err, "Could not add pod to volume information to actual state of world", "pod", klog.KObj(volume.pod))
continue
}
seLinuxMountContext = volume.seLinuxMountContext
logger.V(2).Info("Volume is marked as uncertain and added into the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName, "seLinuxMountContext", volume.seLinuxMountContext)
}
// If the volume has a device to mount, we mark its device as uncertain.
if gvl.deviceMounter != nil || gvl.blockVolumeMapper != nil {
deviceMountPath, err := getDeviceMountPath(gvl)
if err != nil {
logger.Error(err, "Could not find device mount path for volume", "volumeName", gvl.volumeName)
continue
}
err = rc.actualStateOfWorld.MarkDeviceAsUncertain(gvl.volumeName, gvl.devicePath, deviceMountPath, seLinuxMountContext)
if err != nil {
logger.Error(err, "Could not mark device is uncertain to actual state of world", "volumeName", gvl.volumeName, "deviceMountPath", deviceMountPath)
continue
}
logger.V(2).Info("Volume is marked device as uncertain and added into the actual state", "volumeName", gvl.volumeName, "deviceMountPath", deviceMountPath)
}
}
}
// cleanOrphanVolumes tries to clean up all volumes that failed reconstruction.
func (rc *reconciler) cleanOrphanVolumes(logger klog.Logger) {
if len(rc.volumesFailedReconstruction) == 0 {
return
}
for _, volume := range rc.volumesFailedReconstruction {
if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
// Some pod needs the volume, don't clean it up and hope that
// reconcile() calls SetUp and reconstructs the volume in ASW.
logger.V(4).Info("Volume exists in desired state, skip cleaning up mounts", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
continue
}
logger.Info("Cleaning up mounts for volume that could not be reconstructed", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
rc.cleanupMounts(logger, volume)
}
logger.V(2).Info("Orphan volume cleanup finished")
// Clear the cache; cleanup is a one-shot operation.
rc.volumesFailedReconstruction = make([]podVolume, 0)
}
// updateReconstructedFromNodeStatus tries to fill devicePaths of reconstructed volumes from
// node.Status.VolumesAttached. This can be done only after a connection to the API
// server is established, i.e. it can't be part of reconstructVolumes().
func (rc *reconciler) updateReconstructedFromNodeStatus(ctx context.Context) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Updating reconstructed devicePaths")
if rc.kubeClient == nil {
// Skip reconstructing devicePath from node objects if kubelet is in standalone mode.
// Such kubelet is not expected to mount any attachable volume or Secrets / ConfigMap.
logger.V(2).Info("Skipped reconstruction of DevicePaths from node.status in standalone mode")
rc.volumesNeedUpdateFromNodeStatus = nil
return
}
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(ctx, string(rc.nodeName), metav1.GetOptions{})
if fetchErr != nil {
// This may repeat a few times per second until kubelet is able to read its own status for the first time.
logger.V(4).Error(fetchErr, "Failed to get Node status to reconstruct device paths")
return
}
for _, volumeID := range rc.volumesNeedUpdateFromNodeStatus {
attachable := false
for _, attachedVolume := range node.Status.VolumesAttached {
if volumeID != attachedVolume.Name {
continue
}
rc.actualStateOfWorld.UpdateReconstructedDevicePath(volumeID, attachedVolume.DevicePath)
attachable = true
logger.V(4).Info("Updated devicePath from node status for volume", "volumeName", attachedVolume.Name, "path", attachedVolume.DevicePath)
}
rc.actualStateOfWorld.UpdateReconstructedVolumeAttachability(volumeID, attachable)
}
logger.V(2).Info("DevicePaths of reconstructed volumes updated")
rc.volumesNeedUpdateFromNodeStatus = nil
}
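// For reference, the node.status data consumed above has this shape
// (illustrative volume name and device path):
//
//	status:
//	  volumesAttached:
//	  - name: kubernetes.io/csi/ebs.csi.aws.com^vol-0abc12de
//	    devicePath: /dev/xvdba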
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"time"
"github.com/go-logr/logr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics"
volumepkg "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
utilpath "k8s.io/utils/path"
utilstrings "k8s.io/utils/strings"
)
// These logr.Marshaler implementations make it possible to keep the structures
// private and at the same time log them correctly in structured logs.
var _ logr.Marshaler = podVolume{}
var _ logr.Marshaler = reconstructedVolume{}
var _ logr.Marshaler = globalVolumeInfo{}
type podVolume struct {
podName volumetypes.UniquePodName
volumeSpecName string
volumePath string
pluginName string
volumeMode v1.PersistentVolumeMode
}
func (p podVolume) MarshalLog() interface{} {
return struct {
PodName string `json:"podName"`
VolumeSpecName string `json:"volumeSpecName"`
VolumePath string `json:"volumePath"`
PluginName string `json:"pluginName"`
VolumeMode string `json:"volumeMode"`
}{
PodName: string(p.podName),
VolumeSpecName: p.volumeSpecName,
VolumePath: p.volumePath,
PluginName: p.pluginName,
VolumeMode: string(p.volumeMode),
}
}
type reconstructedVolume struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
volumeSpec *volumepkg.Spec
pod *v1.Pod
volumeGIDValue string
devicePath string
mounter volumepkg.Mounter
deviceMounter volumepkg.DeviceMounter
blockVolumeMapper volumepkg.BlockVolumeMapper
seLinuxMountContext string
}
func (rv reconstructedVolume) MarshalLog() interface{} {
return struct {
VolumeName string `json:"volumeName"`
PodName string `json:"podName"`
VolumeSpecName string `json:"volumeSpecName"`
PodUID string `json:"podUID"`
VolumeGIDValue string `json:"volumeGIDValue"`
DevicePath string `json:"devicePath"`
SeLinuxMountContext string `json:"seLinuxMountContext"`
}{
VolumeName: string(rv.volumeName),
PodName: string(rv.podName),
VolumeSpecName: rv.volumeSpec.Name(),
PodUID: string(rv.pod.UID),
VolumeGIDValue: rv.volumeGIDValue,
DevicePath: rv.devicePath,
SeLinuxMountContext: rv.seLinuxMountContext,
}
}
// globalVolumeInfo stores reconstructed volume information
// for each pod that was using that volume.
type globalVolumeInfo struct {
volumeName v1.UniqueVolumeName
volumeSpec *volumepkg.Spec
devicePath string
mounter volumepkg.Mounter
deviceMounter volumepkg.DeviceMounter
blockVolumeMapper volumepkg.BlockVolumeMapper
podVolumes map[volumetypes.UniquePodName]*reconstructedVolume
}
func (gvi globalVolumeInfo) MarshalLog() interface{} {
podVolumes := make(map[volumetypes.UniquePodName]v1.UniqueVolumeName)
for podName, volume := range gvi.podVolumes {
podVolumes[podName] = volume.volumeName
}
return struct {
VolumeName string `json:"volumeName"`
VolumeSpecName string `json:"volumeSpecName"`
DevicePath string `json:"devicePath"`
PodVolumes map[volumetypes.UniquePodName]v1.UniqueVolumeName `json:"podVolumes"`
}{
VolumeName: string(gvi.volumeName),
VolumeSpecName: gvi.volumeSpec.Name(),
DevicePath: gvi.devicePath,
PodVolumes: podVolumes,
}
}
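// NOTE: with the MarshalLog implementations above, a structured log call such as
//
//	logger.V(4).Info("Get volume from pod directory", "volume", volume)
//
// renders the otherwise-private struct through logr. Illustrative output:
//
//	"volume"={"podName":"f1e2d3c4","volumeSpecName":"pvc-0b8a2c1d","volumePath":"/var/lib/kubelet/pods/f1e2d3c4/volumes/kubernetes.io~csi/pvc-0b8a2c1d","pluginName":"kubernetes.io/csi","volumeMode":"Filesystem"}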
func (rc *reconciler) updateLastSyncTime() {
rc.timeOfLastSyncLock.Lock()
defer rc.timeOfLastSyncLock.Unlock()
rc.timeOfLastSync = time.Now()
}
func (rc *reconciler) StatesHasBeenSynced() bool {
rc.timeOfLastSyncLock.Lock()
defer rc.timeOfLastSyncLock.Unlock()
return !rc.timeOfLastSync.IsZero()
}
func (gvi *globalVolumeInfo) addPodVolume(rcv *reconstructedVolume) {
if gvi.podVolumes == nil {
gvi.podVolumes = map[volumetypes.UniquePodName]*reconstructedVolume{}
}
gvi.podVolumes[rcv.podName] = rcv
}
func (rc *reconciler) cleanupMounts(logger klog.Logger, volume podVolume) {
logger.V(2).Info("Reconciler sync states: could not find volume information in desired state, clean up the mount points", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
mountedVolume := operationexecutor.MountedVolume{
PodName: volume.podName,
// VolumeName would normally be generated by `GetUniqueVolumeNameFromSpec` or `GetUniqueVolumeNameFromSpecWithPod`.
// However, since we don't have the volume information in the ASW when cleaning up mounts, it doesn't matter what we put here.
VolumeName: v1.UniqueVolumeName(volume.volumeSpecName),
InnerVolumeSpecName: volume.volumeSpecName,
PluginName: volume.pluginName,
PodUID: types.UID(volume.podName),
}
metrics.ForceCleanedFailedVolumeOperationsTotal.Inc()
// TODO: Currently cleanupMounts only includes the UnmountVolume operation. In the next PR, we will add
// support for unmounting both the volume and the device in the same routine.
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir)
if err != nil {
metrics.ForceCleanedFailedVolumeOperationsErrorsTotal.Inc()
logger.Error(err, mountedVolume.GenerateErrorDetailed("volumeHandler.UnmountVolumeHandler for UnmountVolume failed", err).Error())
return
}
}
// getDeviceMountPath returns the device mount path for a block volume (which
// implements BlockVolumeMapper) or a filesystem volume (which implements
// DeviceMounter).
func getDeviceMountPath(gvi *globalVolumeInfo) (string, error) {
if gvi.blockVolumeMapper != nil {
// for block gvi, we return its global map path
return gvi.blockVolumeMapper.GetGlobalMapPath(gvi.volumeSpec)
} else if gvi.deviceMounter != nil {
// for filesystem gvi, we return its device mount path if the plugin implements DeviceMounter
return gvi.deviceMounter.GetDeviceMountPath(gvi.volumeSpec)
} else {
return "", fmt.Errorf("blockVolumeMapper or deviceMounter required")
}
}
// getVolumesFromPodDir scans the volume directories under the given pod directory.
// It returns a list of pod volume information including the pod's UID, the volume's
// plugin name, the mount path, and the volume spec name.
func getVolumesFromPodDir(logger klog.Logger, podDir string) ([]podVolume, error) {
podsDirInfo, err := os.ReadDir(podDir)
if err != nil {
return nil, err
}
volumes := []podVolume{}
for i := range podsDirInfo {
if !podsDirInfo[i].IsDir() {
continue
}
podName := podsDirInfo[i].Name()
podDir := filepath.Join(podDir, podName)
// Find filesystem volume information
// ex. filesystem volume: /pods/{podUid}/volumes/{escapeQualifiedPluginName}/{volumeName}
volumesDirs := map[v1.PersistentVolumeMode]string{
v1.PersistentVolumeFilesystem: filepath.Join(podDir, config.DefaultKubeletVolumesDirName),
}
// Find block volume information
// ex. block volume: /pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName}
volumesDirs[v1.PersistentVolumeBlock] = filepath.Join(podDir, config.DefaultKubeletVolumeDevicesDirName)
for volumeMode, volumesDir := range volumesDirs {
var volumesDirInfo []fs.DirEntry
if volumesDirInfo, err = os.ReadDir(volumesDir); err != nil {
// Just skip this iteration: the given volumesDir may not exist, depending on the volumeMode
continue
}
for _, volumeDir := range volumesDirInfo {
pluginName := volumeDir.Name()
volumePluginPath := filepath.Join(volumesDir, pluginName)
volumePluginDirs, err := utilpath.ReadDirNoStat(volumePluginPath)
if err != nil {
logger.Error(err, "Could not read volume plugin directory", "volumePluginPath", volumePluginPath)
continue
}
unescapePluginName := utilstrings.UnescapeQualifiedName(pluginName)
for _, volumeName := range volumePluginDirs {
volumePath := filepath.Join(volumePluginPath, volumeName)
logger.V(5).Info("Volume path from volume plugin directory", "podName", podName, "volumePath", volumePath)
volumes = append(volumes, podVolume{
podName: volumetypes.UniquePodName(podName),
volumeSpecName: volumeName,
volumePath: volumePath,
pluginName: unescapePluginName,
volumeMode: volumeMode,
})
}
}
}
}
for _, volume := range volumes {
logger.V(4).Info("Get volume from pod directory", "path", podDir, "volume", volume)
}
return volumes, nil
}
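// Illustrative on-disk layout scanned above for a CSI filesystem volume. The
// "~" in the plugin directory is the escaped form of "/", which
// UnescapeQualifiedName reverses to the plugin name "kubernetes.io/csi":
//
//	/var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~csi/<volumeName>/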
// reconstructVolume rebuilds a volume data structure by reading the pod's volume directories
func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructedVolume, rerr error) {
metrics.ReconstructVolumeOperationsTotal.Inc()
defer func() {
if rerr != nil {
metrics.ReconstructVolumeOperationsErrorsTotal.Inc()
}
}()
// plugin initializations
plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName)
if err != nil {
return nil, err
}
// Create pod object
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(volume.podName),
},
}
mapperPlugin, err := rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName)
if err != nil {
return nil, err
}
if volume.volumeMode == v1.PersistentVolumeBlock && mapperPlugin == nil {
return nil, fmt.Errorf("could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
}
reconstructed, err := rc.operationExecutor.ReconstructVolumeOperation(
volume.volumeMode,
plugin,
mapperPlugin,
pod.UID,
volume.podName,
volume.volumeSpecName,
volume.volumePath,
volume.pluginName)
if err != nil {
return nil, err
}
volumeSpec := reconstructed.Spec
if volumeSpec == nil {
return nil, fmt.Errorf("failed to reconstruct volume for plugin %q (spec.Name: %q) pod %q (UID: %q): got nil", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
}
// We have to find the plugins by volume spec (NOT by plugin name) here
// in order to correctly reconstruct ephemeral volume types.
// Searching by spec checks whether the volume is actually attachable
// (i.e. has a PV) whereas searching by plugin name can only tell whether
// the plugin supports attachable volumes.
deviceMountablePlugin, err := rc.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeSpec)
if err != nil {
return nil, err
}
// The unique volume name used depends on whether the volume is attachable/device-mountable
// (needsNameFromSpec = true) or not.
needsNameFromSpec := deviceMountablePlugin != nil
if !needsNameFromSpec {
// Check attach-ability of a volume only as a fallback to avoid calling
// FindAttachablePluginBySpec for CSI volumes - it needs a connection to the API server,
// but it may not be available at this stage of kubelet startup.
// All CSI volumes are device-mountable, so they won't reach this code.
attachablePlugin, err := rc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil {
return nil, err
}
needsNameFromSpec = attachablePlugin != nil
}
var uniqueVolumeName v1.UniqueVolumeName
if needsNameFromSpec {
uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
return nil, err
}
} else {
uniqueVolumeName = util.GetUniqueVolumeNameFromSpecWithPod(volume.podName, plugin, volumeSpec)
}
var volumeMapper volumepkg.BlockVolumeMapper
var volumeMounter volumepkg.Mounter
var deviceMounter volumepkg.DeviceMounter
if volume.volumeMode == v1.PersistentVolumeBlock {
var newMapperErr error
volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper(
volumeSpec,
pod)
if newMapperErr != nil {
return nil, fmt.Errorf(
"reconstructVolume.NewBlockVolumeMapper failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
uniqueVolumeName,
volumeSpec.Name(),
volume.podName,
pod.UID,
newMapperErr)
}
} else {
var err error
volumeMounter, err = plugin.NewMounter(volumeSpec, pod)
if err != nil {
return nil, fmt.Errorf(
"reconstructVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
uniqueVolumeName,
volumeSpec.Name(),
volume.podName,
pod.UID,
err)
}
if deviceMountablePlugin != nil {
deviceMounter, err = deviceMountablePlugin.NewDeviceMounter()
if err != nil {
return nil, fmt.Errorf("reconstructVolume.NewDeviceMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
uniqueVolumeName,
volumeSpec.Name(),
volume.podName,
pod.UID,
err)
}
}
}
reconstructedVolume := &reconstructedVolume{
volumeName: uniqueVolumeName,
podName: volume.podName,
volumeSpec: volumeSpec,
pod: pod,
deviceMounter: deviceMounter,
volumeGIDValue: "",
// devicePath is updated during updateStates() by checking node status's VolumesAttached data.
// TODO: get device path directly from the volume mount path.
devicePath: "",
mounter: volumeMounter,
blockVolumeMapper: volumeMapper,
seLinuxMountContext: reconstructed.SELinuxMountContext,
}
return reconstructedVolume, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumemanager
import (
"context"
"errors"
"fmt"
"slices"
"strconv"
"strings"
"sync"
"time"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
const (
// reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
// between successive executions
reconcilerLoopSleepPeriod = 100 * time.Millisecond
// desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
// DesiredStateOfWorldPopulator loop waits between successive executions
desiredStateOfWorldPopulatorLoopSleepPeriod = 100 * time.Millisecond
// podAttachAndMountTimeout is the maximum amount of time the
// WaitForAttachAndMount call will wait for all volumes in the specified pod
// to be attached and mounted. Even though cloud operations can take several
// minutes to complete, we set the timeout to 2 minutes because kubelet
// will retry in the next sync iteration. This frees the associated
// goroutine of the pod to process newer updates if needed (e.g., a delete
// request to the pod).
// Value is slightly offset from 2 minutes to make timeouts due to this
// constant recognizable.
podAttachAndMountTimeout = 2*time.Minute + 3*time.Second
// podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
// call waits before retrying
podAttachAndMountRetryInterval = 300 * time.Millisecond
// waitForAttachTimeout is the maximum amount of time a
// operationexecutor.Mount call will wait for a volume to be attached.
// Set to 10 minutes because we've seen attach operations take several
// minutes to complete for some volume plugins in some cases. While this
// operation is waiting it only blocks other operations on the same device,
// other devices are not affected.
waitForAttachTimeout = 10 * time.Minute
// VolumeAttachmentLimitExceededReason is the reason for rejecting a pod
// when the node has reached its volume attachment limit.
VolumeAttachmentLimitExceededReason = "VolumeAttachmentLimitExceeded"
)
// VolumeManager runs a set of asynchronous loops that figure out which volumes
// need to be attached/mounted/unmounted/detached based on the pods scheduled on
// this node and makes it so.
type VolumeManager interface {
// Starts the volume manager and all the asynchronous loops that it controls
Run(ctx context.Context, sourcesReady config.SourcesReady)
// WaitForAttachAndMount processes the volumes referenced in the specified
// pod and blocks until they are all attached and mounted (reflected in
// actual state of the world).
// An error is returned if all volumes are not attached and mounted within
// the duration defined in podAttachAndMountTimeout.
WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error
// WaitForUnmount processes the volumes referenced in the specified
// pod and blocks until they are all unmounted (reflected in the actual
// state of the world).
// An error is returned if all volumes are not unmounted within
// the duration defined in podAttachAndMountTimeout.
WaitForUnmount(ctx context.Context, pod *v1.Pod) error
// WaitForAllPodsUnmount is a version of WaitForUnmount that blocks and
// waits until all the volumes belonging to all the pods are unmounted.
// An error is returned if there's at least one Pod with volumes not unmounted
// within the duration defined in podAttachAndMountTimeout.
WaitForAllPodsUnmount(ctx context.Context, pods []*v1.Pod) error
// GetMountedVolumesForPod returns a VolumeMap containing the volumes
// referenced by the specified pod that are desired and actually attached and
// mounted. The key in the map is the OuterVolumeSpecName (i.e.
// pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no
// volumes.
GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap
// HasPossiblyMountedVolumesForPod returns whether the pod has
// any volumes that are either successfully attached
// and mounted or are "uncertain", i.e. a volume plugin may be mounting
// them right now.
HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
// GetVolumesInUse returns a list of all volumes that implement the volume.Attacher
// interface and are currently in use according to the actual and desired
// state of the world caches. A volume is considered "in use" as soon as it
// is added to the desired state of world, indicating it *should* be
// attached to this node and remains "in use" until it is removed from both
// the desired state of the world and the actual state of the world, or it
// has been unmounted (as indicated in actual state of world).
GetVolumesInUse() []v1.UniqueVolumeName
// ReconcilerStatesHasBeenSynced returns true only after the actual states in the reconciler
// have been synced at least once after kubelet starts, so that it is safe to update the mounted
// volume list retrieved from the actual state.
ReconcilerStatesHasBeenSynced() bool
// VolumeIsAttached returns true if the given volume is attached to this
// node.
VolumeIsAttached(volumeName v1.UniqueVolumeName) bool
// Marks the specified volumes as having successfully been reported as "in
// use" in the node's volume status.
MarkVolumesAsReportedInUse(volumesReportedAsInUse []v1.UniqueVolumeName)
}
// PodStateProvider can determine if a pod is going to be terminated
type PodStateProvider interface {
ShouldPodContainersBeTerminating(k8stypes.UID) bool
ShouldPodRuntimeBeRemoved(k8stypes.UID) bool
}
// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.
// See pkg/k8s.io/kubernetes/pkg/kubelet/pod.Manager for method godoc.
type PodManager interface {
GetPodByUID(k8stypes.UID) (*v1.Pod, bool)
GetPods() []*v1.Pod
}
// NewVolumeManager returns a new concrete instance implementing the
// VolumeManager interface.
//
// kubeClient - kubeClient is the kube API client used by DesiredStateOfWorldPopulator
// to communicate with the API server to fetch PV and PVC objects
//
// volumePluginMgr - the volume plugin manager used to access volume plugins.
// Must be pre-initialized.
func NewVolumeManager(
controllerAttachDetachEnabled bool,
nodeName k8stypes.NodeName,
podManager PodManager,
podStateProvider PodStateProvider,
kubeClient clientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
mounter mount.Interface,
hostutil hostutil.HostUtils,
kubeletPodsDir string,
recorder record.EventRecorder,
blockVolumePathHandler volumepathhandler.BlockVolumePathHandler) VolumeManager {
seLinuxTranslator := util.NewSELinuxLabelTranslator()
vm := &volumeManager{
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator),
actualStateOfWorld: cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
operationExecutor: operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
volumePluginMgr,
recorder,
blockVolumePathHandler)),
}
intreeToCSITranslator := csitrans.New()
csiMigratedPluginManager := csimigration.NewPluginManager(intreeToCSITranslator, utilfeature.DefaultFeatureGate)
vm.intreeToCSITranslator = intreeToCSITranslator
vm.csiMigratedPluginManager = csiMigratedPluginManager
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
kubeClient,
desiredStateOfWorldPopulatorLoopSleepPeriod,
podManager,
podStateProvider,
vm.desiredStateOfWorld,
vm.actualStateOfWorld,
csiMigratedPluginManager,
intreeToCSITranslator,
volumePluginMgr)
vm.reconciler = reconciler.NewReconciler(
kubeClient,
controllerAttachDetachEnabled,
reconcilerLoopSleepPeriod,
waitForAttachTimeout,
nodeName,
vm.desiredStateOfWorld,
vm.actualStateOfWorld,
vm.desiredStateOfWorldPopulator.HasAddedPods,
vm.operationExecutor,
mounter,
hostutil,
volumePluginMgr,
kubeletPodsDir)
return vm
}
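// Illustrative usage sketch (kubelet performs this wiring internally; the
// helper variables here are assumptions for the example):
//
//	vm := NewVolumeManager(controllerAttachDetachEnabled, nodeName, podManager,
//		podStateProvider, kubeClient, volumePluginMgr, mounter, hostutil,
//		kubeletPodsDir, recorder, blockVolumePathHandler)
//	go vm.Run(ctx, sourcesReady)
//	// Before starting a pod's containers:
//	if err := vm.WaitForAttachAndMount(ctx, pod); err != nil {
//		// handle mount failure, including attachment-limit rejection
//	}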
// volumeManager implements the VolumeManager interface
type volumeManager struct {
// kubeClient is the kube API client used by DesiredStateOfWorldPopulator to
// communicate with the API server to fetch PV and PVC objects
kubeClient clientset.Interface
// volumePluginMgr is the volume plugin manager used to access volume
// plugins. It must be pre-initialized.
volumePluginMgr *volume.VolumePluginMgr
// desiredStateOfWorld is a data structure containing the desired state of
// the world according to the volume manager: i.e. what volumes should be
// attached and which pods are referencing the volumes).
// The data structure is populated by the desired state of the world
// populator using the kubelet pod manager.
desiredStateOfWorld cache.DesiredStateOfWorld
// actualStateOfWorld is a data structure containing the actual state of
// the world according to the manager: i.e. which volumes are attached to
// this node and what pods the volumes are mounted to.
// The data structure is populated upon successful completion of attach,
// detach, mount, and unmount actions triggered by the reconciler.
actualStateOfWorld cache.ActualStateOfWorld
// operationExecutor is used to start asynchronous attach, detach, mount,
// and unmount operations.
operationExecutor operationexecutor.OperationExecutor
// reconciler runs an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering attach,
// detach, mount, and unmount operations using the operationExecutor.
reconciler reconciler.Reconciler
// desiredStateOfWorldPopulator runs an asynchronous periodic loop to
// populate the desiredStateOfWorld using the kubelet PodManager.
desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator
// csiMigratedPluginManager keeps track of CSI migration status of plugins
csiMigratedPluginManager csimigration.PluginManager
// intreeToCSITranslator translates in-tree volume specs to CSI
intreeToCSITranslator csimigration.InTreeToCSITranslator
}
type VolumeAttachLimitExceededError struct {
UnmountedVolumes []string
UnattachedVolumes []string
VolumesNotInDSW []string
OriginalError error
}
func (e *VolumeAttachLimitExceededError) Error() string {
return fmt.Sprintf("Node has reached its volume attachment limit, rejecting pod. unmounted volumes=%v, unattached volumes=%v, failed to process volumes=%v: %v",
e.UnmountedVolumes, e.UnattachedVolumes, e.VolumesNotInDSW, e.OriginalError)
}
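// Callers can detect this typed error with errors.As. A minimal sketch:
//
//	var limitErr *VolumeAttachLimitExceededError
//	if errors.As(err, &limitErr) {
//		// reject the pod, e.g. with reason VolumeAttachmentLimitExceededReason
//	}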
func (vm *volumeManager) Run(ctx context.Context, sourcesReady config.SourcesReady) {
logger := klog.FromContext(ctx)
defer runtime.HandleCrash()
if vm.kubeClient != nil {
// start informer for CSIDriver
go vm.volumePluginMgr.Run(ctx.Done())
}
go vm.desiredStateOfWorldPopulator.Run(ctx, sourcesReady)
logger.V(2).Info("The desired_state_of_world populator starts")
logger.Info("Starting Kubelet Volume Manager")
go vm.reconciler.Run(ctx, ctx.Done())
metrics.Register(vm.actualStateOfWorld, vm.desiredStateOfWorld, vm.volumePluginMgr)
<-ctx.Done()
logger.Info("Shutting down Kubelet Volume Manager")
}
func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
podVolumes := make(container.VolumeMap)
for name, mountedVolume := range vm.getMountedVolumes(podName) {
podVolumes[name] = container.VolumeInfo{
Mounter: mountedVolume.Mounter,
BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
}
}
return podVolumes
}
func (vm *volumeManager) HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool {
return len(vm.actualStateOfWorld.GetPossiblyMountedVolumesForPod(podName)) > 0
}
func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
podName := util.GetUniquePodName(pod)
supplementalGroups := sets.New[string]()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
if mountedVolume.VolumeGIDValue != "" {
supplementalGroups.Insert(mountedVolume.VolumeGIDValue)
}
}
result := make([]int64, 0, supplementalGroups.Len())
for _, group := range supplementalGroups.UnsortedList() {
iGroup, extra := getExtraSupplementalGID(group, pod)
if !extra {
continue
}
result = append(result, int64(iGroup))
}
return result
}
func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
// Report volumes in desired state of world and actual state of world so
// that volumes are marked in use as soon as the decision is made that the
// volume *should* be attached to this node until it is safely unmounted.
desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
allAttachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes()
volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(allAttachedVolumes))
for _, volume := range desiredVolumes {
if volume.PluginIsAttachable {
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
for _, volume := range allAttachedVolumes {
if volume.PluginIsAttachable {
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
slices.Sort(volumesToReportInUse)
return slices.Compact(volumesToReportInUse)
}
func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool {
return vm.reconciler.StatesHasBeenSynced()
}
func (vm *volumeManager) VolumeIsAttached(
volumeName v1.UniqueVolumeName) bool {
return vm.actualStateOfWorld.VolumeExists(volumeName)
}
func (vm *volumeManager) MarkVolumesAsReportedInUse(
volumesReportedAsInUse []v1.UniqueVolumeName) {
vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse)
}
func (vm *volumeManager) WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error {
logger := klog.FromContext(ctx)
if pod == nil {
return nil
}
expectedVolumes := getExpectedVolumes(pod)
if len(expectedVolumes) == 0 {
// No volumes to verify
return nil
}
logger.V(3).Info("Waiting for volumes to attach and mount for pod", "pod", klog.KObj(pod))
uniquePodName := util.GetUniquePodName(pod)
// Some pods expect to have SetUp called over and over again to update.
// Remount plugins for which this is true. (Atomically updating volumes,
// like the downward API, depend on this to update the contents of the
// volume.)
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
err := wait.PollUntilContextTimeout(
ctx,
podAttachAndMountRetryInterval,
podAttachAndMountTimeout,
true,
vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))
if err != nil {
unmountedVolumes :=
vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
// Also get unattached volumes and volumes not in DSW for the error message
unattachedVolumeMounts :=
vm.getUnattachedVolumes(uniquePodName)
volumesNotInDSW :=
vm.getVolumesNotInDSW(uniquePodName, expectedVolumes)
if len(unmountedVolumes) == 0 {
return nil
}
unattachedVolumes := []string{}
for _, volumeToMount := range unattachedVolumeMounts {
unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecNames...)
}
slices.Sort(unattachedVolumes)
if utilfeature.DefaultFeatureGate.Enabled(features.MutableCSINodeAllocatableCount) {
for _, volumeToMount := range unattachedVolumeMounts {
attachablePlugin, findErr := vm.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
if findErr != nil || attachablePlugin == nil {
// This volume type doesn't support the attachable interface, so we can skip our check.
continue
}
if attachablePlugin.VerifyExhaustedResource(volumeToMount.VolumeSpec) {
// Return error to the kubelet, which will then trigger the pod termination logic.
return &VolumeAttachLimitExceededError{
UnmountedVolumes: unmountedVolumes,
UnattachedVolumes: unattachedVolumes,
VolumesNotInDSW: volumesNotInDSW,
OriginalError: err,
}
}
}
}
return fmt.Errorf(
"unmounted volumes=%v, unattached volumes=%v, failed to process volumes=%v: %w",
unmountedVolumes,
unattachedVolumes,
volumesNotInDSW,
err)
}
logger.V(3).Info("All volumes are attached and mounted for pod", "pod", klog.KObj(pod))
return nil
}
func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error {
logger := klog.FromContext(ctx)
if pod == nil {
return nil
}
logger.V(3).Info("Waiting for volumes to unmount for pod", "pod", klog.KObj(pod))
uniquePodName := util.GetUniquePodName(pod)
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
err := wait.PollUntilContextTimeout(
ctx,
podAttachAndMountRetryInterval,
podAttachAndMountTimeout,
true,
vm.verifyVolumesUnmountedFunc(uniquePodName))
if err != nil {
var mountedVolumes []v1.UniqueVolumeName
for _, v := range vm.actualStateOfWorld.GetMountedVolumesForPod(uniquePodName) {
mountedVolumes = append(mountedVolumes, v.VolumeName)
}
if len(mountedVolumes) == 0 {
return nil
}
slices.Sort(mountedVolumes)
return fmt.Errorf(
"mounted volumes=%v: %w",
mountedVolumes,
err)
}
logger.V(3).Info("All volumes are unmounted for pod", "pod", klog.KObj(pod))
return nil
}
func (vm *volumeManager) WaitForAllPodsUnmount(ctx context.Context, pods []*v1.Pod) error {
var (
errors []error
mu     sync.Mutex
wg     sync.WaitGroup
)
wg.Add(len(pods))
for _, pod := range pods {
go func(pod *v1.Pod) {
defer wg.Done()
if err := vm.WaitForUnmount(ctx, pod); err != nil {
// The goroutines append concurrently, so the shared slice
// must be guarded to avoid a data race.
mu.Lock()
errors = append(errors, err)
mu.Unlock()
}
}(pod)
}
wg.Wait()
return utilerrors.NewAggregate(errors)
}
func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, expectedVolumes []string) []string {
volumesNotInDSW := sets.New(expectedVolumes...)
for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() {
if volumeToMount.PodName == uniquePodName {
volumesNotInDSW.Delete(volumeToMount.OuterVolumeSpecNames...)
}
}
return sets.List(volumesNotInDSW)
}
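// A minimal sketch of the set arithmetic above (hypothetical names):
// sets.New builds the set, Delete removes the volumes found in the DSW,
// and sets.List returns the remainder sorted.
//
//	s := sets.New("vol-a", "vol-b", "vol-c")
//	s.Delete("vol-b")
//	_ = sets.List(s) // ["vol-a", "vol-c"]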
// getUnattachedVolumes returns a list of the volumes that are expected to be attached but
// are not currently attached to the node
func (vm *volumeManager) getUnattachedVolumes(uniquePodName types.UniquePodName) []cache.VolumeToMount {
unattachedVolumes := []cache.VolumeToMount{}
for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() {
if volumeToMount.PodName == uniquePodName &&
volumeToMount.PluginIsAttachable &&
!vm.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) {
unattachedVolumes = append(unattachedVolumes, volumeToMount)
}
}
return unattachedVolumes
}
// verifyVolumesMountedFunc returns a function that reports true when all
// expected volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionWithContextFunc {
return func(_ context.Context) (done bool, err error) {
if errs := vm.desiredStateOfWorld.PopPodErrors(podName); len(errs) > 0 {
return true, errors.New(strings.Join(errs, "; "))
}
return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
}
}
// verifyVolumesUnmountedFunc returns a function that reports true when there
// are no mounted volumes for this pod.
func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName) wait.ConditionWithContextFunc {
return func(_ context.Context) (done bool, err error) {
if errs := vm.desiredStateOfWorld.PopPodErrors(podName); len(errs) > 0 {
return true, errors.New(strings.Join(errs, "; "))
}
return !vm.actualStateOfWorld.PodHasMountedVolumes(podName), nil
}
}
// getMountedVolumes returns volumes that are desired and actually mounted,
// indexed by the outer volume spec name.
func (vm *volumeManager) getMountedVolumes(podName types.UniquePodName) map[string]*cache.MountedVolume {
volumes := vm.actualStateOfWorld.GetMountedVolumesForPod(podName)
volumesByName := make(map[v1.UniqueVolumeName]*cache.MountedVolume, len(volumes))
for i, mountedVolume := range volumes {
volumesByName[mountedVolume.VolumeName] = &volumes[i]
}
volumeNames := vm.desiredStateOfWorld.GetVolumeNamesForPod(podName)
volumesByOuterName := make(map[string]*cache.MountedVolume, len(volumeNames))
for outerName, volumeName := range volumeNames {
mountedVolume, ok := volumesByName[volumeName]
if ok {
volumesByOuterName[outerName] = mountedVolume
}
}
return volumesByOuterName
}
// getUnmountedVolumes returns a list of unmounted volumes.
// This includes volumes that are in expectedVolumes but missing from the DSW or the ASW.
// The list also includes volumes that may be mounted in an uncertain state.
func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string {
unmountedVolumes := []string{}
volumeNames := vm.desiredStateOfWorld.GetVolumeNamesForPod(podName)
for _, outerName := range expectedVolumes {
volumeName, ok := volumeNames[outerName]
if !ok {
unmountedVolumes = append(unmountedVolumes, outerName)
continue
}
_, ok = vm.actualStateOfWorld.GetMountedVolumeForPod(podName, volumeName)
if !ok {
unmountedVolumes = append(unmountedVolumes, outerName)
}
}
slices.Sort(unmountedVolumes)
return unmountedVolumes
}
// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
mounts, devices, _ := util.GetPodVolumeNames(pod, false /* collectSELinuxOptions */)
return mounts.Union(devices).UnsortedList()
}
// getExtraSupplementalGID returns the value of an extra supplemental GID as
// defined by an annotation on a volume and a boolean indicating whether the
// volume defined a GID that the pod doesn't already request.
func getExtraSupplementalGID(volumeGIDValue string, pod *v1.Pod) (int64, bool) {
if volumeGIDValue == "" {
return 0, false
}
gid, err := strconv.ParseInt(volumeGIDValue, 10, 64)
if err != nil {
return 0, false
}
if pod.Spec.SecurityContext != nil {
for _, existingGID := range pod.Spec.SecurityContext.SupplementalGroups {
if gid == int64(existingGID) {
return 0, false
}
}
}
return gid, true
}
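// Illustrative behavior of getExtraSupplementalGID (hypothetical values):
// a parsable GID that the pod does not already request is returned with
// true; anything else yields (0, false).
//
//	pod := &v1.Pod{} // no SecurityContext, so no existing groups
//	getExtraSupplementalGID("1000", pod) // (1000, true)
//	getExtraSupplementalGID("", pod)     // (0, false): empty value
//	getExtraSupplementalGID("abc", pod)  // (0, false): not an integer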
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumemanager
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/volume/util/types"
)
// FakeVolumeManager is a test implementation that just tracks calls
type FakeVolumeManager struct {
volumes map[v1.UniqueVolumeName]bool
reportedInUse map[v1.UniqueVolumeName]bool
unmountDelay time.Duration
unmountError error
volumeAttachLimitExceeded bool
}
var _ VolumeManager = &FakeVolumeManager{}
// NewFakeVolumeManager creates a new VolumeManager test instance
func NewFakeVolumeManager(initialVolumes []v1.UniqueVolumeName, unmountDelay time.Duration, unmountError error, volumeAttachLimitExceeded bool) *FakeVolumeManager {
volumes := map[v1.UniqueVolumeName]bool{}
for _, v := range initialVolumes {
volumes[v] = true
}
return &FakeVolumeManager{
volumes: volumes,
reportedInUse: map[v1.UniqueVolumeName]bool{},
unmountDelay: unmountDelay,
unmountError: unmountError,
volumeAttachLimitExceeded: volumeAttachLimitExceeded,
}
}
// Run is not implemented
func (f *FakeVolumeManager) Run(ctx context.Context, sourcesReady config.SourcesReady) {
}
// WaitForAttachAndMount returns a VolumeAttachLimitExceededError when the fake
// is configured with volumeAttachLimitExceeded; otherwise it is a no-op
func (f *FakeVolumeManager) WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) error {
if f.volumeAttachLimitExceeded {
return &VolumeAttachLimitExceededError{}
}
return nil
}
// WaitForUnmount is not implemented
func (f *FakeVolumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error {
return nil
}
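// WaitForAllPodsUnmount waits for the configured unmountDelay and then
// returns the configured unmountError, or ctx.Err() if the context is
// cancelled first.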
func (f *FakeVolumeManager) WaitForAllPodsUnmount(ctx context.Context, pods []*v1.Pod) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(f.unmountDelay):
return f.unmountError
}
}
// GetMountedVolumesForPod is not implemented
func (f *FakeVolumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
return nil
}
// HasPossiblyMountedVolumesForPod is not implemented
func (f *FakeVolumeManager) HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool {
return false
}
// GetExtraSupplementalGroupsForPod is not implemented
func (f *FakeVolumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return nil
}
// GetVolumesInUse returns a list of the initial volumes
func (f *FakeVolumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
inuse := []v1.UniqueVolumeName{}
for v := range f.volumes {
inuse = append(inuse, v)
}
return inuse
}
// ReconcilerStatesHasBeenSynced is not implemented
func (f *FakeVolumeManager) ReconcilerStatesHasBeenSynced() bool {
return true
}
// VolumeIsAttached is not implemented
func (f *FakeVolumeManager) VolumeIsAttached(volumeName v1.UniqueVolumeName) bool {
return false
}
// MarkVolumesAsReportedInUse adds the given volumes to the reportedInUse map
func (f *FakeVolumeManager) MarkVolumesAsReportedInUse(volumesReportedAsInUse []v1.UniqueVolumeName) {
for _, reportedVolume := range volumesReportedAsInUse {
if _, ok := f.volumes[reportedVolume]; ok {
f.reportedInUse[reportedVolume] = true
}
}
}
// GetVolumesReportedInUse is a test-only helper that returns the volumes
// recorded in the reportedInUse map
func (f *FakeVolumeManager) GetVolumesReportedInUse() []v1.UniqueVolumeName {
inuse := []v1.UniqueVolumeName{}
for reportedVolume := range f.reportedInUse {
inuse = append(inuse, reportedVolume)
}
return inuse
}
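// Illustrative usage in a test (hypothetical volume names): only volumes
// that were part of the initial set are recorded as reported-in-use.
//
//	fake := NewFakeVolumeManager([]v1.UniqueVolumeName{"vol-a"}, 0, nil, false)
//	fake.MarkVolumesAsReportedInUse([]v1.UniqueVolumeName{"vol-a", "vol-b"})
//	_ = fake.GetVolumesReportedInUse() // ["vol-a"]; "vol-b" was never tracked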
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watchdog
import (
"context"
"fmt"
"sync/atomic"
"time"
"github.com/coreos/go-systemd/v22/daemon"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/klog/v2"
)
// WatchdogClient defines the interface for interacting with the systemd watchdog.
type WatchdogClient interface {
SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error)
SdNotify(unsetEnvironment bool) (bool, error)
}
// DefaultWatchdogClient implements the WatchdogClient interface using the actual systemd daemon functions.
type DefaultWatchdogClient struct{}
var _ WatchdogClient = &DefaultWatchdogClient{}
func (d *DefaultWatchdogClient) SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
return daemon.SdWatchdogEnabled(unsetEnvironment)
}
func (d *DefaultWatchdogClient) SdNotify(unsetEnvironment bool) (bool, error) {
return daemon.SdNotify(unsetEnvironment, daemon.SdNotifyWatchdog)
}
// Option defines optional parameters for initializing the healthChecker
// structure.
type Option func(*healthChecker)
func WithWatchdogClient(watchdog WatchdogClient) Option {
return func(hc *healthChecker) {
hc.watchdog = watchdog
}
}
type healthChecker struct {
checkers atomic.Value
retryBackoff wait.Backoff
interval time.Duration
watchdog WatchdogClient
}
var _ HealthChecker = &healthChecker{}
const minimalNotifyInterval = time.Second
// NewHealthChecker creates a new HealthChecker instance.
// This function initializes the health checker and configures its behavior based on the status of the systemd watchdog.
// If the watchdog is not enabled, a no-op checker is returned; an error is
// returned only when the watchdog configuration cannot be read or its
// timeout is too small.
func NewHealthChecker(logger klog.Logger, opts ...Option) (HealthChecker, error) {
hc := &healthChecker{
watchdog: &DefaultWatchdogClient{},
}
for _, o := range opts {
o(hc)
}
// get watchdog information
watchdogVal, err := hc.watchdog.SdWatchdogEnabled(false)
if err != nil {
// Failed to get watchdog configuration information.
// This occurs when we want to start the watchdog but the configuration is incorrect,
// for example, the time is not configured correctly.
return nil, fmt.Errorf("configure watchdog: %w", err)
}
if watchdogVal == 0 {
logger.Info("Systemd watchdog is not enabled")
return &healthChecker{}, nil
}
if watchdogVal <= minimalNotifyInterval {
return nil, fmt.Errorf("configure watchdog timeout too small: %v", watchdogVal)
}
retryBackoff := wait.Backoff{
Duration: time.Second,
Factor: 2.0,
Jitter: 0.1,
Steps: 2,
}
hc.retryBackoff = retryBackoff
hc.interval = watchdogVal / 2
return hc, nil
}
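// Notifying at half the watchdog timeout follows the usual systemd
// guidance: with WatchdogSec=30s, for example, watchdogVal is 30s and the
// kubelet notifies every 15s, leaving one missed notification of headroom.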
func (hc *healthChecker) SetHealthCheckers(syncLoop syncLoopHealthChecker, checkers []healthz.HealthChecker) {
// Define the default set of health checkers that should always be present
defaultCheckers := []healthz.HealthChecker{
healthz.PingHealthz,
healthz.LogHealthz,
healthz.NamedCheck("syncloop", syncLoop.SyncLoopHealthCheck),
}
var combined []healthz.HealthChecker
combined = append(combined, defaultCheckers...)
combined = append(combined, checkers...)
hc.checkers.Store(combined)
}
func (hc *healthChecker) getHealthCheckers() []healthz.HealthChecker {
if v := hc.checkers.Load(); v != nil {
return v.([]healthz.HealthChecker)
}
return []healthz.HealthChecker{}
}
func (hc *healthChecker) Start(ctx context.Context) {
logger := klog.FromContext(ctx)
if hc.interval <= 0 {
logger.Info("Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started.")
return
}
logger.Info("Starting systemd watchdog with interval", "interval", hc.interval)
go wait.UntilWithContext(ctx, func(ctx context.Context) {
if err := hc.doCheck(); err != nil {
logger.Error(err, "Do not notify watchdog this iteration as the kubelet is reportedly not healthy")
return
}
err := wait.ExponentialBackoffWithContext(ctx, hc.retryBackoff, func(_ context.Context) (bool, error) {
ack, err := hc.watchdog.SdNotify(false)
if err != nil {
logger.V(5).Info("Failed to notify systemd watchdog, retrying", "error", err)
return false, nil
}
if !ack {
return false, fmt.Errorf("failed to notify systemd watchdog, notification not supported - (i.e. NOTIFY_SOCKET is unset)")
}
logger.V(5).Info("Watchdog plugin notified", "acknowledgment", ack, "state", daemon.SdNotifyWatchdog)
return true, nil
})
if err != nil {
logger.Error(err, "Failed to notify watchdog")
}
}, hc.interval)
}
func (hc *healthChecker) doCheck() error {
// Use a distinct name for the loop variable so it does not shadow the
// *healthChecker receiver.
for _, checker := range hc.getHealthCheckers() {
if err := checker.Check(nil); err != nil {
return fmt.Errorf("checker %s failed: %w", checker.Name(), err)
}
}
return nil
}
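// A minimal wiring sketch (assumes the HealthChecker interface exposes
// Start and that a cancellable ctx is in scope):
//
//	hc, err := NewHealthChecker(klog.Background())
//	if err != nil {
//		// watchdog misconfigured; see NewHealthChecker
//	}
//	hc.Start(ctx) // returns immediately; the notify loop runs in a goroutine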
//go:build !windows
// +build !windows
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package probe
import (
"net"
"syscall"
)
// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state.
// The dialer reduces the TIME-WAIT period to 1 second instead of the OS default of 60 seconds.
// Using 1 second instead of 0 because setting the SO_LINGER socket option to 0 causes pending data to be
// discarded and the connection to be aborted with an RST rather than for the pending data to be
// transmitted and the connection closed cleanly with a FIN.
// Ref: https://issues.k8s.io/89898
func ProbeDialer() *net.Dialer {
dialer := &net.Dialer{
Control: func(network, address string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1})
})
},
}
return dialer
}
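// A minimal usage sketch (hypothetical address); DoTCPProbe later in this
// document follows the same pattern:
//
//	d := ProbeDialer()
//	d.Timeout = time.Second
//	conn, err := d.Dial("tcp", "10.0.0.1:9090")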
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exec
import (
"bytes"
"errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
remote "k8s.io/cri-client/pkg"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/klog/v2"
"k8s.io/utils/exec"
)
const (
maxReadLength = 10 * 1 << 10 // 10KB
)
// New creates a Prober.
func New() Prober {
return execProber{}
}
// Prober is an interface defining the Probe object for container readiness/liveness checks.
type Prober interface {
Probe(e exec.Cmd) (probe.Result, string, error)
}
type execProber struct{}
// Probe executes a command to check the liveness/readiness of a container.
// Returns the Result status, command output, and errors if any.
func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) {
var dataBuffer bytes.Buffer
writer := ioutils.LimitWriter(&dataBuffer, maxReadLength)
e.SetStderr(writer)
e.SetStdout(writer)
err := e.Start()
if err == nil {
err = e.Wait()
}
data := dataBuffer.Bytes()
klog.V(4).Infof("Exec probe response: %q", string(data))
if err != nil {
exit, ok := err.(exec.ExitError)
if ok {
if exit.ExitStatus() == 0 {
return probe.Success, string(data), nil
}
return probe.Failure, string(data), nil
}
if errors.Is(err, remote.ErrCommandTimedOut) {
if utilfeature.DefaultFeatureGate.Enabled(features.ExecProbeTimeout) {
return probe.Failure, err.Error(), nil
}
klog.Warningf("Exec probe timed out but ExecProbeTimeout feature gate was disabled")
}
return probe.Unknown, "", err
}
return probe.Success, string(data), nil
}
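// To summarize the mapping above: exit status 0 yields probe.Success, any
// non-zero exit status yields probe.Failure, a timeout yields probe.Failure
// when the ExecProbeTimeout feature gate is enabled (and falls through to
// probe.Unknown when it is disabled), and every other error yields
// probe.Unknown.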
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package grpc
import (
"context"
"fmt"
"net"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
grpchealth "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"k8s.io/component-base/version"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/probe"
)
// Prober is an interface that defines the Probe function for doing GRPC readiness/liveness/startup checks.
type Prober interface {
Probe(host, service string, port int, timeout time.Duration) (probe.Result, string, error)
}
type grpcProber struct{}
// New returns a Prober that performs gRPC health probes.
func New() Prober {
return grpcProber{}
}
// Probe executes a grpc call to check the liveness/readiness/startup of container.
// Returns the Result status, command output, and errors if any.
// Any failure is considered as a probe failure to mimic grpc_health_probe tool behavior.
// err is always nil
func (p grpcProber) Probe(host, service string, port int, timeout time.Duration) (probe.Result, string, error) {
v := version.Get()
opts := []grpc.DialOption{
grpc.WithUserAgent(fmt.Sprintf("kube-probe/%s.%s", v.Major, v.Minor)),
grpc.WithBlock(),
grpc.WithTransportCredentials(insecure.NewCredentials()), //credentials are currently not supported
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
return probe.ProbeDialer().DialContext(ctx, "tcp", addr)
}),
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
addr := net.JoinHostPort(host, fmt.Sprintf("%d", port))
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
if err == context.DeadlineExceeded {
klog.V(4).ErrorS(err, "failed to connect grpc service due to timeout", "addr", addr, "service", service, "timeout", timeout)
return probe.Failure, fmt.Sprintf("timeout: failed to connect service %q within %v: %+v", addr, timeout, err), nil
} else {
klog.V(4).ErrorS(err, "failed to connect grpc service", "service", addr)
return probe.Failure, fmt.Sprintf("error: failed to connect service at %q: %+v", addr, err), nil
}
}
defer func() {
_ = conn.Close()
}()
client := grpchealth.NewHealthClient(conn)
resp, err := client.Check(metadata.NewOutgoingContext(ctx, make(metadata.MD)), &grpchealth.HealthCheckRequest{
Service: service,
})
if err != nil {
stat, ok := status.FromError(err)
if ok {
switch stat.Code() {
case codes.Unimplemented:
klog.V(4).ErrorS(err, "server does not implement the grpc health protocol (grpc.health.v1.Health)", "addr", addr, "service", service)
return probe.Failure, fmt.Sprintf("error: this server does not implement the grpc health protocol (grpc.health.v1.Health): %s", stat.Message()), nil
case codes.DeadlineExceeded:
klog.V(4).ErrorS(err, "rpc request not finished within timeout", "addr", addr, "service", service, "timeout", timeout)
return probe.Failure, fmt.Sprintf("timeout: health rpc did not complete within %v", timeout), nil
default:
klog.V(4).ErrorS(err, "rpc probe failed")
}
} else {
klog.V(4).ErrorS(err, "health rpc probe failed")
}
return probe.Failure, fmt.Sprintf("error: health rpc probe failed: %+v", err), nil
}
if resp.GetStatus() != grpchealth.HealthCheckResponse_SERVING {
return probe.Failure, fmt.Sprintf("service unhealthy (responded with %q)", resp.GetStatus().String()), nil
}
return probe.Success, "service healthy", nil
}
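// Illustrative usage (hypothetical address): per the gRPC health-checking
// protocol, an empty service name asks about the server's overall health.
//
//	result, msg, _ := New().Probe("127.0.0.1", "", 8080, time.Second)
//	// result is probe.Success only for HealthCheckResponse_SERVING;
//	// msg carries the detail; the returned error is always nil.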
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"crypto/tls"
"errors"
"fmt"
"net/http"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/klog/v2"
utilio "k8s.io/utils/io"
)
const (
maxRespBodyLength = 10 * 1 << 10 // 10KB
)
// New creates Prober that will skip TLS verification while probing.
// followNonLocalRedirects configures whether the prober should follow redirects to a different hostname.
// If disabled, redirects to other hosts will trigger a warning result.
func New(followNonLocalRedirects bool) Prober {
tlsConfig := &tls.Config{InsecureSkipVerify: true}
return NewWithTLSConfig(tlsConfig, followNonLocalRedirects)
}
// NewWithTLSConfig takes tls config as parameter.
// followNonLocalRedirects configures whether the prober should follow redirects to a different hostname.
// If disabled, redirects to other hosts will trigger a warning result.
func NewWithTLSConfig(config *tls.Config, followNonLocalRedirects bool) Prober {
// We do not want the probe to use the node's local proxy settings.
transport := utilnet.SetTransportDefaults(
&http.Transport{
TLSClientConfig: config,
DisableKeepAlives: true,
Proxy: http.ProxyURL(nil),
DisableCompression: true, // removes Accept-Encoding header
// DialContext creates unencrypted TCP connections
// and is also used by the transport for HTTPS connection
DialContext: probe.ProbeDialer().DialContext,
})
return httpProber{transport, followNonLocalRedirects}
}
// Prober is an interface that defines the Probe function for doing HTTP readiness/liveness checks.
type Prober interface {
Probe(req *http.Request, timeout time.Duration) (probe.Result, string, error)
}
type httpProber struct {
transport *http.Transport
followNonLocalRedirects bool
}
// Probe returns a ProbeRunner capable of running an HTTP check.
func (pr httpProber) Probe(req *http.Request, timeout time.Duration) (probe.Result, string, error) {
client := &http.Client{
Timeout: timeout,
Transport: pr.transport,
CheckRedirect: RedirectChecker(pr.followNonLocalRedirects),
}
return DoHTTPProbe(req, client)
}
// GetHTTPInterface is an interface for making HTTP requests, that returns a response and error.
type GetHTTPInterface interface {
Do(req *http.Request) (*http.Response, error)
}
// DoHTTPProbe checks if a GET request to the url succeeds.
// If the HTTP response code is successful (i.e. 400 > code >= 200), it returns Success.
// If the HTTP response code is unsuccessful or HTTP communication fails, it returns Failure.
// This is exported because some other packages may want to do direct HTTP probes.
func DoHTTPProbe(req *http.Request, client GetHTTPInterface) (probe.Result, string, error) {
url := req.URL
headers := req.Header
res, err := client.Do(req)
if err != nil {
// Convert errors into failures to catch timeouts.
return probe.Failure, err.Error(), nil
}
defer res.Body.Close()
b, err := utilio.ReadAtMost(res.Body, maxRespBodyLength)
if err != nil {
if err == utilio.ErrLimitReached {
klog.V(4).Infof("Non fatal body truncation for %s, Response: %v", url.String(), *res)
} else {
return probe.Failure, "", err
}
}
body := string(b)
if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest {
if res.StatusCode >= http.StatusMultipleChoices { // Redirect
klog.V(4).Infof("Probe terminated redirects for %s, Response: %v", url.String(), *res)
return probe.Warning, fmt.Sprintf("Probe terminated redirects, Response body: %v", body), nil
}
klog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res)
return probe.Success, body, nil
}
klog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body)
// Note: Until https://issue.k8s.io/99425 is addressed, this user-facing failure message must not contain the response body.
failureMsg := fmt.Sprintf("HTTP probe failed with statuscode: %d", res.StatusCode)
return probe.Failure, failureMsg, nil
}
// RedirectChecker returns a function that can be used to check HTTP redirects.
func RedirectChecker(followNonLocalRedirects bool) func(*http.Request, []*http.Request) error {
if followNonLocalRedirects {
return nil // Use the default http client checker.
}
return func(req *http.Request, via []*http.Request) error {
if req.URL.Hostname() != via[0].URL.Hostname() {
return http.ErrUseLastResponse
}
// Default behavior: stop after 10 redirects.
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
return nil
}
}
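// A usage sketch mirroring httpProber.Probe above (hypothetical URL):
// returning http.ErrUseLastResponse makes the client stop and hand back the
// redirect response itself, which DoHTTPProbe then reports as probe.Warning
// for 3xx status codes.
//
//	client := &http.Client{CheckRedirect: RedirectChecker(false)}
//	resp, err := client.Get("http://10.0.0.1:8080/healthz")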
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/component-base/version"
"k8s.io/kubernetes/pkg/probe"
)
// NewProbeRequest returns an http.Request suitable for use as a request for a
// probe.
func NewProbeRequest(url *url.URL, headers http.Header) (*http.Request, error) {
return newProbeRequest(url, headers, "probe")
}
// NewRequestForHTTPGetAction returns an http.Request derived from httpGet.
// When httpGet.Host is empty, podIP will be used instead.
func NewRequestForHTTPGetAction(httpGet *v1.HTTPGetAction, container *v1.Container, podIP string, userAgentFragment string) (*http.Request, error) {
scheme := strings.ToLower(string(httpGet.Scheme))
if scheme == "" {
scheme = "http"
}
host := httpGet.Host
if host == "" {
host = podIP
}
port, err := probe.ResolveContainerPort(httpGet.Port, container)
if err != nil {
return nil, err
}
path := httpGet.Path
url := formatURL(scheme, host, port, path)
headers := v1HeaderToHTTPHeader(httpGet.HTTPHeaders)
return newProbeRequest(url, headers, userAgentFragment)
}
func newProbeRequest(url *url.URL, headers http.Header, userAgentFragment string) (*http.Request, error) {
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
if headers == nil {
headers = http.Header{}
}
if _, ok := headers["User-Agent"]; !ok {
// User-Agent header was not defined, set it
headers.Set("User-Agent", userAgent(userAgentFragment))
}
if _, ok := headers["Accept"]; !ok {
// Accept header was not defined; accept all media types
headers.Set("Accept", "*/*")
} else if headers.Get("Accept") == "" {
// Accept header was overridden but is empty; remove it
headers.Del("Accept")
}
req.Header = headers
req.Host = headers.Get("Host")
return req, nil
}
func userAgent(purpose string) string {
v := version.Get()
return fmt.Sprintf("kube-%s/%s.%s", purpose, v.Major, v.Minor)
}
// formatURL formats a URL from args. For testability.
func formatURL(scheme string, host string, port int, path string) *url.URL {
u, err := url.Parse(path)
// Something is busted with the path, but it's too late to reject it. Pass it along as is.
//
// This construction of a URL may be wrong in some cases, but it preserves
// legacy prober behavior.
if err != nil {
u = &url.URL{
Path: path,
}
}
u.Scheme = scheme
u.Host = net.JoinHostPort(host, strconv.Itoa(port))
return u
}
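// Worked examples (hypothetical hosts); net.JoinHostPort adds the brackets
// that IPv6 hosts require:
//
//	formatURL("http", "10.0.0.1", 8080, "/healthz").String()
//	// "http://10.0.0.1:8080/healthz"
//	formatURL("https", "fd00::1", 443, "/readyz").String()
//	// "https://[fd00::1]:443/readyz"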
// v1HeaderToHTTPHeader takes a list of HTTPHeader <name, value> string pairs
// and returns a populated string->[]string http.Header map.
func v1HeaderToHTTPHeader(headerList []v1.HTTPHeader) http.Header {
headers := make(http.Header)
for _, header := range headerList {
headers.Add(header.Name, header.Value)
}
return headers
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tcp
import (
"net"
"strconv"
"time"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/klog/v2"
)
// New creates Prober.
func New() Prober {
return tcpProber{}
}
// Prober is an interface that defines the Probe function for doing TCP readiness/liveness checks.
type Prober interface {
Probe(host string, port int, timeout time.Duration) (probe.Result, string, error)
}
type tcpProber struct{}
// Probe checks that a TCP connection to the address can be opened.
func (pr tcpProber) Probe(host string, port int, timeout time.Duration) (probe.Result, string, error) {
return DoTCPProbe(net.JoinHostPort(host, strconv.Itoa(port)), timeout)
}
// DoTCPProbe checks that a TCP socket to the address can be opened.
// If the socket can be opened, it returns Success
// If the socket fails to open, it returns Failure.
// This is exported because some other packages may want to do direct TCP probes.
func DoTCPProbe(addr string, timeout time.Duration) (probe.Result, string, error) {
d := probe.ProbeDialer()
d.Timeout = timeout
conn, err := d.Dial("tcp", addr)
if err != nil {
// Convert errors to failures to handle timeouts.
return probe.Failure, err.Error(), nil
}
err = conn.Close()
if err != nil {
klog.Errorf("Unexpected error closing TCP probe socket: %v (%#v)", err, err)
}
return probe.Success, "", nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package probe
import (
"fmt"
"strconv"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func ResolveContainerPort(param intstr.IntOrString, container *v1.Container) (int, error) {
port := -1
var err error
switch param.Type {
case intstr.Int:
port = param.IntValue()
case intstr.String:
if port, err = findPortByName(container, param.StrVal); err != nil {
// Last ditch effort - maybe it was an int stored as string?
if port, err = strconv.Atoi(param.StrVal); err != nil {
return port, err
}
}
default:
return port, fmt.Errorf("intOrString had no kind: %+v", param)
}
if port > 0 && port < 65536 {
return port, nil
}
return port, fmt.Errorf("invalid port number: %v", port)
}
// findPortByName is a helper function to look up a port in a container by name.
func findPortByName(container *v1.Container, portName string) (int, error) {
for _, port := range container.Ports {
if port.Name == portName {
return int(port.ContainerPort), nil
}
}
return 0, fmt.Errorf("port %s not found", portName)
}
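// Worked examples for ResolveContainerPort (hypothetical container):
//
//	ctr := &v1.Container{Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}}}
//	ResolveContainerPort(intstr.FromInt32(9090), ctr)    // 9090, nil
//	ResolveContainerPort(intstr.FromString("http"), ctr) // 8080, nil (named lookup)
//	ResolveContainerPort(intstr.FromString("70000"), ctr)
//	// name lookup fails, Atoi succeeds, then the 1-65535 range check rejects it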
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
"net/netip"
"time"
"sigs.k8s.io/randfill"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/utils/ptr"
)
// generateRandomIP is copied from pkg/apis/networking/fuzzer/fuzzer.go
func generateRandomIP(is6 bool, c randfill.Continue) string {
n := 4
if is6 {
n = 16
}
bytes := make([]byte, n)
for i := 0; i < n; i++ {
bytes[i] = uint8(c.Rand.Intn(256)) // Intn(256), not 255, so the full 0-255 byte range is reachable
}
ip, ok := netip.AddrFromSlice(bytes)
if ok {
return ip.String()
}
// this should not happen
panic(fmt.Sprintf("invalid IP %v", bytes))
}
// generateRandomCIDR is copied from pkg/apis/networking/fuzzer/fuzzer.go
func generateRandomCIDR(is6 bool, c randfill.Continue) string {
ip, err := netip.ParseAddr(generateRandomIP(is6, c))
if err != nil {
// generateRandomIP already panics if it cannot produce a valid IP
panic(err)
}
n := 32
if is6 {
n = 128
}
bits := c.Rand.Intn(n)
prefix := netip.PrefixFrom(ip, bits)
return prefix.Masked().String()
}
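// Worked example: Masked zeroes the host bits so the result is a canonical
// CIDR.
//
//	p := netip.PrefixFrom(netip.MustParseAddr("192.168.23.5"), 16)
//	p.Masked().String() // "192.168.0.0/16"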
// getRandomDualStackCIDR returns one or two random CIDRs, covering both
// single-stack and dual-stack shapes in random family order.
func getRandomDualStackCIDR(c randfill.Continue) []string {
cidrIPv4 := generateRandomCIDR(false, c)
cidrIPv6 := generateRandomCIDR(true, c)
cidrs := []string{cidrIPv4, cidrIPv6}
if c.Bool() {
cidrs = []string{cidrIPv6, cidrIPv4}
}
return cidrs[:1+c.Intn(2)]
}
// Funcs returns the fuzzer functions for the kube-proxy apis.
func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(obj *kubeproxyconfig.KubeProxyConfiguration, c randfill.Continue) {
c.FillNoCustom(obj)
obj.BindAddress = fmt.Sprintf("%d.%d.%d.%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256))
obj.ClientConnection.ContentType = c.String(0)
obj.DetectLocal.ClusterCIDRs = getRandomDualStackCIDR(c)
obj.Linux.Conntrack.MaxPerCore = ptr.To(c.Int31())
obj.Linux.Conntrack.Min = ptr.To(c.Int31())
obj.Linux.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour}
obj.Linux.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour}
obj.FeatureGates = map[string]bool{c.String(0): true}
obj.HealthzBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536))
obj.IPTables.MasqueradeBit = ptr.To(c.Int31())
obj.IPTables.LocalhostNodePorts = ptr.To(c.Bool())
obj.NFTables.MasqueradeBit = ptr.To(c.Int31())
obj.MetricsBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536))
obj.Linux.OOMScoreAdj = ptr.To(c.Int31())
obj.ClientConnection.ContentType = "bar"
obj.NodePortAddresses = []string{"1.2.3.0/24"}
if obj.Logging.Format == "" {
obj.Logging.Format = "text"
}
},
}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeProxyConfiguration{},
)
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1"
)
var (
// Scheme defines methods for serializing and deserializing API objects.
Scheme = runtime.NewScheme()
// Codecs provides methods for retrieving codecs and serializers for specific
// versions and content types.
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
func init() {
AddToScheme(Scheme)
}
// AddToScheme adds the types of this group into the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
utilruntime.Must(v1alpha1.AddToScheme(scheme))
utilruntime.Must(config.AddToScheme(scheme))
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
logsapi "k8s.io/component-base/logs/api/v1"
)
// KubeProxyLinuxConfiguration contains Linux platform related configuration details for the
// Kubernetes proxy server that aren't specific to a particular backend.
type KubeProxyLinuxConfiguration struct {
// conntrack contains conntrack-related configuration options.
Conntrack KubeProxyConntrackConfiguration
// masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs. This may
// be required with some CNI plugins.
MasqueradeAll bool
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
// the range [-1000, 1000]
OOMScoreAdj *int32
}
// KubeProxyWindowsConfiguration contains Windows platform related configuration details for the
// Kubernetes proxy server that aren't specific to a particular backend
type KubeProxyWindowsConfiguration struct {
// runAsService, if true, enables Windows service control manager API integration.
RunAsService bool
}
// KubeProxyIPTablesConfiguration contains iptables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the iptables or ipvs proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32
// localhostNodePorts, if false, tells kube-proxy to disable the legacy behavior
// of allowing NodePort services to be accessed via localhost. (Applies only to
// iptables mode and IPv4; localhost NodePorts are never allowed with other proxy
// modes or with IPv6.)
LocalhostNodePorts *bool
}
// KubeProxyIPVSConfiguration contains ipvs-related configuration
// details for the Kubernetes proxy server.
type KubeProxyIPVSConfiguration struct {
// scheduler is the IPVS scheduler to use
Scheduler string
// excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch
// when cleaning up ipvs services.
ExcludeCIDRs []string
// strictARP configures arp_ignore and arp_announce to avoid answering ARP queries
// from kube-ipvs0 interface
StrictARP bool
// tcpTimeout is the timeout value used for idle IPVS TCP sessions.
// The default value is 0, which preserves the current timeout value on the system.
TCPTimeout metav1.Duration
// tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN.
// The default value is 0, which preserves the current timeout value on the system.
TCPFinTimeout metav1.Duration
// udpTimeout is the timeout value used for IPVS UDP packets.
// The default value is 0, which preserves the current timeout value on the system.
UDPTimeout metav1.Duration
}
// KubeProxyNFTablesConfiguration contains nftables-related configuration
// details for the Kubernetes proxy server.
type KubeProxyNFTablesConfiguration struct {
// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
// the nftables proxy mode. Values must be within the range [0, 31].
MasqueradeBit *int32
}
// KubeProxyConntrackConfiguration contains conntrack settings for
// the Kubernetes proxy server.
type KubeProxyConntrackConfiguration struct {
// maxPerCore is the maximum number of NAT connections to track
// per CPU core (0 to leave the limit as-is and ignore min).
MaxPerCore *int32
// min is the minimum number of connection-tracking records to allocate,
// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
Min *int32
// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '2s'). Must be greater than 0 to set.
TCPEstablishedTimeout *metav1.Duration
// tcpCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
TCPCloseWaitTimeout *metav1.Duration
// tcpBeLiberal, if true, tells kube-proxy to configure conntrack
// to run in liberal mode for TCP connections, so that packets with
// out-of-window sequence numbers are not marked INVALID.
// udpTimeout is how long an idle UDP conntrack entry in
// UNREPLIED state will remain in the conntrack table
// (e.g. '30s'). Must be greater than 0 to set.
UDPTimeout metav1.Duration
// udpStreamTimeout is how long an idle UDP conntrack entry in
// ASSURED state will remain in the conntrack table
// (e.g. '300s'). Must be greater than 0 to set.
UDPStreamTimeout metav1.Duration
}
// KubeProxyWinkernelConfiguration contains Windows/HNS settings for
// the Kubernetes proxy server.
type KubeProxyWinkernelConfiguration struct {
// networkName is the name of the network kube-proxy will use
// to create endpoints and policies
NetworkName string
// sourceVip is the IP address of the source VIP endpoint used for
// NAT when loadbalancing
SourceVip string
// enableDSR tells kube-proxy whether HNS policies should be created
// with DSR
EnableDSR bool
// rootHnsEndpointName is the name of hnsendpoint that is attached to
// l2bridge for root network namespace
RootHnsEndpointName string
// forwardHealthCheckVip forwards service VIP for health check port on
// Windows
ForwardHealthCheckVip bool
}
// DetectLocalConfiguration contains optional settings related to DetectLocalMode option
type DetectLocalConfiguration struct {
// bridgeInterface is a bridge interface name. When DetectLocalMode is set to
// LocalModeBridgeInterface, kube-proxy will consider traffic to be local if
// it originates from this bridge.
BridgeInterface string
// clusterCIDRs is the dual-stack list of CIDR ranges of the pods in the cluster. When
// DetectLocalMode is set to LocalModeClusterCIDR, kube-proxy will consider
// traffic to be local if its source IP is in the range of any given CIDR.
ClusterCIDRs []string
// interfaceNamePrefix is an interface name prefix. When DetectLocalMode is set to
// LocalModeInterfaceNamePrefix, kube-proxy will consider traffic to be local if
// it originates from any interface whose name begins with this prefix.
InterfaceNamePrefix string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeProxyConfiguration contains everything necessary to configure the
// Kubernetes proxy server.
type KubeProxyConfiguration struct {
metav1.TypeMeta
// linux contains Linux-related configuration options.
Linux KubeProxyLinuxConfiguration
// windows contains Windows-related configuration options.
Windows KubeProxyWindowsConfiguration
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
FeatureGates map[string]bool
// clientConnection specifies the kubeconfig file and client connection settings for the proxy
// server to use when communicating with the apiserver.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
// logging specifies the options of logging.
// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go)
// for more information.
Logging logsapi.LoggingConfiguration
// hostnameOverride, if non-empty, will be used as the name of the Node that
// kube-proxy is running on. If unset, the node name is assumed to be the same as
// the node's hostname.
HostnameOverride string
// bindAddress can be used to override kube-proxy's idea of what its node's
// primary IP is. Note that the name is a historical artifact, and kube-proxy does
// not actually bind any sockets to this IP.
BindAddress string
// healthzBindAddress is the IP address and port for the health check server to
// serve on, defaulting to "0.0.0.0:10256" (if bindAddress is unset or IPv4), or
// "[::]:10256" (if bindAddress is IPv6).
HealthzBindAddress string
// metricsBindAddress is the IP address and port for the metrics server to serve
// on, defaulting to "127.0.0.1:10249" (if bindAddress is unset or IPv4), or
// "[::1]:10249" (if bindAddress is IPv6). (Set to "0.0.0.0:10249" / "[::]:10249"
// to bind on all interfaces.)
MetricsBindAddress string
// bindAddressHardFail, if true, tells kube-proxy to treat failure to bind to a
// port as fatal and exit
BindAddressHardFail bool
// enableProfiling enables profiling via web interface on /debug/pprof handler.
// Profiling handlers will be handled by metrics server.
EnableProfiling bool
// showHiddenMetricsForVersion is the version for which you want to show hidden metrics.
ShowHiddenMetricsForVersion string
// mode specifies which proxy mode to use.
Mode ProxyMode
// iptables contains iptables-related configuration options.
IPTables KubeProxyIPTablesConfiguration
// ipvs contains ipvs-related configuration options.
IPVS KubeProxyIPVSConfiguration
// winkernel contains winkernel-related configuration options.
Winkernel KubeProxyWinkernelConfiguration
// nftables contains nftables-related configuration options.
NFTables KubeProxyNFTablesConfiguration
// detectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR
DetectLocalMode LocalMode
// detectLocal contains optional configuration settings related to DetectLocalMode.
DetectLocal DetectLocalConfiguration
// nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or
// alternatively, the single string 'primary'. If set to a list of CIDRs,
// connections to NodePort services will only be accepted on node IPs in one of
// the indicated ranges. If set to 'primary', NodePort services will only be
// accepted on the node's primary IPv4 and/or IPv6 address according to the Node
// object. If unset, NodePort connections will be accepted on all local IPs.
NodePortAddresses []string
// syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently
// various re-synchronizing and cleanup operations are performed. Must be greater
// than 0.
SyncPeriod metav1.Duration
// minSyncPeriod is the minimum period between proxier rule resyncs (e.g. '5s',
// '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will
// result in an immediate proxier resync.
MinSyncPeriod metav1.Duration
// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
// than 0.
ConfigSyncPeriod metav1.Duration
}
// ProxyMode represents modes used by the Kubernetes proxy server.
//
// Three modes of proxy are available on Linux platforms: `iptables`, `ipvs`, and
// `nftables`. One mode of proxy is available on Windows platforms: `kernelspace`.
//
// If the proxy mode is unspecified, a default proxy mode will be used (currently this
// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be
// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy
// will exit with an error.
type ProxyMode string
const (
ProxyModeIPTables ProxyMode = "iptables"
ProxyModeIPVS ProxyMode = "ipvs"
ProxyModeNFTables ProxyMode = "nftables"
ProxyModeKernelspace ProxyMode = "kernelspace"
)
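// Set, String, and Type let *ProxyMode be used directly as a command-line
// flag value (they match the method set expected by pflag.Value).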
func (m *ProxyMode) Set(s string) error {
*m = ProxyMode(s)
return nil
}
func (m *ProxyMode) String() string {
if m != nil {
return string(*m)
}
return ""
}
func (m *ProxyMode) Type() string {
return "ProxyMode"
}
// LocalMode represents modes to detect local traffic from the node
type LocalMode string
// Currently supported modes for LocalMode
const (
LocalModeClusterCIDR LocalMode = "ClusterCIDR"
LocalModeNodeCIDR LocalMode = "NodeCIDR"
LocalModeBridgeInterface LocalMode = "BridgeInterface"
LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix"
)
func (m *LocalMode) Set(s string) error {
*m = LocalMode(s)
return nil
}
func (m *LocalMode) String() string {
if m != nil {
return string(*m)
}
return ""
}
func (m *LocalMode) Type() string {
return "LocalMode"
}
// NodePortAddressesPrimary is a special value for NodePortAddresses indicating that it
// should only use the primary node IPs.
const NodePortAddressesPrimary string = "primary"
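// For illustration, a minimal versioned (v1alpha1) configuration as a user
// would write it (hypothetical values; note the versioned API uses a single
// clusterCIDR string, per the conversion code elsewhere in this document):
//
//	apiVersion: kubeproxy.config.k8s.io/v1alpha1
//	kind: KubeProxyConfiguration
//	mode: "nftables"
//	clusterCIDR: "10.0.0.0/8"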
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"strings"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kube-proxy/config/v1alpha1"
"k8s.io/kubernetes/pkg/proxy/apis/config"
)
// Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, scope conversion.Scope) error {
if err := autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, scope); err != nil {
return err
}
out.WindowsRunAsService = in.Windows.RunAsService
out.Conntrack = v1alpha1.KubeProxyConntrackConfiguration(in.Linux.Conntrack)
out.OOMScoreAdj = in.Linux.OOMScoreAdj
switch in.Mode {
case config.ProxyModeNFTables:
out.NFTables.MasqueradeAll = in.Linux.MasqueradeAll
default:
out.IPTables.MasqueradeAll = in.Linux.MasqueradeAll
}
switch in.Mode {
case config.ProxyModeIPVS:
out.IPVS.SyncPeriod = in.SyncPeriod
out.IPVS.MinSyncPeriod = in.MinSyncPeriod
case config.ProxyModeNFTables:
out.NFTables.SyncPeriod = in.SyncPeriod
out.NFTables.MinSyncPeriod = in.MinSyncPeriod
default:
out.IPTables.SyncPeriod = in.SyncPeriod
out.IPTables.MinSyncPeriod = in.MinSyncPeriod
}
if len(in.DetectLocal.ClusterCIDRs) > 0 {
out.ClusterCIDR = strings.Join(in.DetectLocal.ClusterCIDRs, ",")
}
return nil
}
// Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, scope conversion.Scope) error {
if err := autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in, out, scope); err != nil {
return err
}
out.Windows.RunAsService = in.WindowsRunAsService
out.Linux.Conntrack = config.KubeProxyConntrackConfiguration(in.Conntrack)
out.Linux.OOMScoreAdj = in.OOMScoreAdj
switch config.ProxyMode(in.Mode) {
case config.ProxyModeNFTables:
out.Linux.MasqueradeAll = in.NFTables.MasqueradeAll
default:
out.Linux.MasqueradeAll = in.IPTables.MasqueradeAll
}
switch config.ProxyMode(in.Mode) {
case config.ProxyModeIPVS:
out.SyncPeriod = in.IPVS.SyncPeriod
out.MinSyncPeriod = in.IPVS.MinSyncPeriod
case config.ProxyModeNFTables:
out.SyncPeriod = in.NFTables.SyncPeriod
out.MinSyncPeriod = in.NFTables.MinSyncPeriod
default:
out.SyncPeriod = in.IPTables.SyncPeriod
out.MinSyncPeriod = in.IPTables.MinSyncPeriod
}
if len(in.ClusterCIDR) > 0 {
out.DetectLocal.ClusterCIDRs = strings.Split(in.ClusterCIDR, ",")
}
return nil
}
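// To summarize, the conversions above are deliberately mode-dependent: for
// Mode="nftables", SyncPeriod/MinSyncPeriod and MasqueradeAll travel via the
// NFTables section; for Mode="ipvs" the sync periods travel via the IPVS
// section (MasqueradeAll still uses IPTables); every other mode uses the
// IPTables section. The two directions mirror each other, so a round trip
// preserves the internal fields.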
// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *v1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, scope conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in, out, scope)
}
// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *v1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, scope conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in, out, scope)
}
// Convert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration(in *v1alpha1.KubeProxyNFTablesConfiguration, out *config.KubeProxyNFTablesConfiguration, scope conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration(in, out, scope)
}
// Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration is defined here, because public conversion is not auto-generated due to existing warnings.
func Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in *config.DetectLocalConfiguration, out *v1alpha1.DetectLocalConfiguration, s conversion.Scope) error {
return autoConvert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in, out, s)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kruntime "k8s.io/apimachinery/pkg/runtime"
kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
logsapi "k8s.io/component-base/logs/api/v1"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/kubelet/qos"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
)
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyConfiguration) {
if len(obj.BindAddress) == 0 {
obj.BindAddress = "0.0.0.0"
}
defaultHealthzAddress, defaultMetricsAddress := getDefaultAddresses(obj.BindAddress)
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = fmt.Sprintf("%s:%v", defaultHealthzAddress, ports.ProxyHealthzPort)
} else {
obj.HealthzBindAddress = proxyutil.AppendPortIfNeeded(obj.HealthzBindAddress, ports.ProxyHealthzPort)
}
if obj.MetricsBindAddress == "" {
obj.MetricsBindAddress = fmt.Sprintf("%s:%v", defaultMetricsAddress, ports.ProxyStatusPort)
} else {
obj.MetricsBindAddress = proxyutil.AppendPortIfNeeded(obj.MetricsBindAddress, ports.ProxyStatusPort)
}
if obj.OOMScoreAdj == nil {
temp := int32(qos.KubeProxyOOMScoreAdj)
obj.OOMScoreAdj = &temp
}
if obj.IPTables.SyncPeriod.Duration == 0 {
obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
}
if obj.IPTables.MinSyncPeriod.Duration == 0 {
obj.IPTables.MinSyncPeriod = metav1.Duration{Duration: 1 * time.Second}
}
if obj.IPTables.LocalhostNodePorts == nil {
obj.IPTables.LocalhostNodePorts = ptr.To(true)
}
if obj.IPVS.SyncPeriod.Duration == 0 {
obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
}
if obj.NFTables.SyncPeriod.Duration == 0 {
obj.NFTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
}
if obj.NFTables.MinSyncPeriod.Duration == 0 {
obj.NFTables.MinSyncPeriod = metav1.Duration{Duration: 1 * time.Second}
}
if obj.Conntrack.MaxPerCore == nil {
obj.Conntrack.MaxPerCore = ptr.To[int32](32 * 1024)
}
if obj.Conntrack.Min == nil {
obj.Conntrack.Min = ptr.To[int32](128 * 1024)
}
if obj.IPTables.MasqueradeBit == nil {
temp := int32(14)
obj.IPTables.MasqueradeBit = &temp
}
if obj.NFTables.MasqueradeBit == nil {
temp := int32(14)
obj.NFTables.MasqueradeBit = &temp
}
if obj.Conntrack.TCPEstablishedTimeout == nil {
obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 of the kernel's 5-day default)
}
if obj.Conntrack.TCPCloseWaitTimeout == nil {
// See https://github.com/kubernetes/kubernetes/issues/32551.
//
// CLOSE_WAIT conntrack state occurs when the Linux kernel
// sees a FIN from the remote server. Note: this is a half-close
// condition that persists as long as the local side keeps the
// socket open. The condition is rare as it is typical in most
// protocols for both sides to issue a close; this typically
// occurs when the local socket is lazily garbage collected.
//
// If the CLOSE_WAIT conntrack entry expires, then FINs from the
// local socket will not be properly SNAT'd and will not reach the
// remote server (if the connection was subject to SNAT). If the
// remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT
// timeout, then there will be an inconsistency in the state of
// the connection and a new connection reusing the SNAT (src,
// port) pair may be rejected by the remote side with RST. This
// can cause new calls to connect(2) to return with ECONNREFUSED.
//
// We set CLOSE_WAIT to one hour by default to better match
// typical server timeouts.
obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: 1 * time.Hour}
}
if obj.ConfigSyncPeriod.Duration == 0 {
obj.ConfigSyncPeriod.Duration = 15 * time.Minute
}
if len(obj.ClientConnection.ContentType) == 0 {
obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
}
if obj.ClientConnection.QPS == 0.0 {
obj.ClientConnection.QPS = 5.0
}
if obj.ClientConnection.Burst == 0 {
obj.ClientConnection.Burst = 10
}
if obj.FeatureGates == nil {
obj.FeatureGates = make(map[string]bool)
}
// Apply the recommended LoggingConfiguration defaults
logsapi.SetRecommendedLoggingConfiguration(&obj.Logging)
}
// getDefaultAddresses returns the default addresses for the healthz and metrics
// servers, based on the given bind address. IPv6 addresses are enclosed in square
// brackets so that a port can be appended.
func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) {
if netutils.ParseIPSloppy(bindAddress).To4() != nil {
return "0.0.0.0", "127.0.0.1"
}
return "[::]", "[::1]"
}
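// As an illustrative sketch (not part of the original source), defaulting an
// otherwise-empty config with an IPv6 bind address:
//
// obj := &kubeproxyconfigv1alpha1.KubeProxyConfiguration{BindAddress: "::"}
// SetDefaults_KubeProxyConfiguration(obj)
// // obj.HealthzBindAddress == "[::]:10256" (ports.ProxyHealthzPort)
// // obj.MetricsBindAddress == "[::1]:10249" (ports.ProxyStatusPort)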
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)
// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &kubeproxyconfigv1alpha1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
configv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
config "k8s.io/kubernetes/pkg/proxy/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1alpha1.DetectLocalConfiguration)(nil), (*config.DetectLocalConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(a.(*configv1alpha1.DetectLocalConfiguration), b.(*config.DetectLocalConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.KubeProxyConntrackConfiguration)(nil), (*config.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(a.(*configv1alpha1.KubeProxyConntrackConfiguration), b.(*config.KubeProxyConntrackConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyConntrackConfiguration)(nil), (*configv1alpha1.KubeProxyConntrackConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(a.(*config.KubeProxyConntrackConfiguration), b.(*configv1alpha1.KubeProxyConntrackConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPTablesConfiguration)(nil), (*configv1alpha1.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(a.(*config.KubeProxyIPTablesConfiguration), b.(*configv1alpha1.KubeProxyIPTablesConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyIPVSConfiguration)(nil), (*configv1alpha1.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(a.(*config.KubeProxyIPVSConfiguration), b.(*configv1alpha1.KubeProxyIPVSConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyNFTablesConfiguration)(nil), (*configv1alpha1.KubeProxyNFTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration(a.(*config.KubeProxyNFTablesConfiguration), b.(*configv1alpha1.KubeProxyNFTablesConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1alpha1.KubeProxyWinkernelConfiguration)(nil), (*config.KubeProxyWinkernelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(a.(*configv1alpha1.KubeProxyWinkernelConfiguration), b.(*config.KubeProxyWinkernelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeProxyWinkernelConfiguration)(nil), (*configv1alpha1.KubeProxyWinkernelConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(a.(*config.KubeProxyWinkernelConfiguration), b.(*configv1alpha1.KubeProxyWinkernelConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.DetectLocalConfiguration)(nil), (*configv1alpha1.DetectLocalConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(a.(*config.DetectLocalConfiguration), b.(*configv1alpha1.DetectLocalConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.KubeProxyConfiguration)(nil), (*configv1alpha1.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(a.(*config.KubeProxyConfiguration), b.(*configv1alpha1.KubeProxyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.KubeProxyConfiguration)(nil), (*config.KubeProxyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(a.(*configv1alpha1.KubeProxyConfiguration), b.(*config.KubeProxyConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.KubeProxyIPTablesConfiguration)(nil), (*config.KubeProxyIPTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(a.(*configv1alpha1.KubeProxyIPTablesConfiguration), b.(*config.KubeProxyIPTablesConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.KubeProxyIPVSConfiguration)(nil), (*config.KubeProxyIPVSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(a.(*configv1alpha1.KubeProxyIPVSConfiguration), b.(*config.KubeProxyIPVSConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1alpha1.KubeProxyNFTablesConfiguration)(nil), (*config.KubeProxyNFTablesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration(a.(*configv1alpha1.KubeProxyNFTablesConfiguration), b.(*config.KubeProxyNFTablesConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in *configv1alpha1.DetectLocalConfiguration, out *config.DetectLocalConfiguration, s conversion.Scope) error {
out.BridgeInterface = in.BridgeInterface
out.InterfaceNamePrefix = in.InterfaceNamePrefix
return nil
}
// Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in *configv1alpha1.DetectLocalConfiguration, out *config.DetectLocalConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(in, out, s)
}
func autoConvert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(in *config.DetectLocalConfiguration, out *configv1alpha1.DetectLocalConfiguration, s conversion.Scope) error {
out.BridgeInterface = in.BridgeInterface
// WARNING: in.ClusterCIDRs requires manual conversion: does not exist in peer-type
out.InterfaceNamePrefix = in.InterfaceNamePrefix
return nil
}
func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *configv1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error {
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
if err := componentbaseconfigv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
out.Logging = in.Logging
out.HostnameOverride = in.HostnameOverride
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.BindAddressHardFail = in.BindAddressHardFail
out.EnableProfiling = in.EnableProfiling
out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion
out.Mode = config.ProxyMode(in.Mode)
if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration(&in.NFTables, &out.NFTables, s); err != nil {
return err
}
if err := Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil {
return err
}
out.DetectLocalMode = config.LocalMode(in.DetectLocalMode)
if err := Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil {
return err
}
// WARNING: in.ClusterCIDR requires manual conversion: does not exist in peer-type
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
// WARNING: in.OOMScoreAdj requires manual conversion: does not exist in peer-type
// WARNING: in.Conntrack requires manual conversion: does not exist in peer-type
out.ConfigSyncPeriod = in.ConfigSyncPeriod
// WARNING: in.PortRange requires manual conversion: does not exist in peer-type
// WARNING: in.WindowsRunAsService requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *configv1alpha1.KubeProxyConfiguration, s conversion.Scope) error {
// WARNING: in.Linux requires manual conversion: does not exist in peer-type
// WARNING: in.Windows requires manual conversion: does not exist in peer-type
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
if err := componentbaseconfigv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
out.Logging = in.Logging
out.HostnameOverride = in.HostnameOverride
out.BindAddress = in.BindAddress
out.HealthzBindAddress = in.HealthzBindAddress
out.MetricsBindAddress = in.MetricsBindAddress
out.BindAddressHardFail = in.BindAddressHardFail
out.EnableProfiling = in.EnableProfiling
out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion
out.Mode = configv1alpha1.ProxyMode(in.Mode)
if err := Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil {
return err
}
if err := Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil {
return err
}
if err := Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil {
return err
}
if err := Convert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration(&in.NFTables, &out.NFTables, s); err != nil {
return err
}
out.DetectLocalMode = configv1alpha1.LocalMode(in.DetectLocalMode)
if err := Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil {
return err
}
out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses))
// WARNING: in.SyncPeriod requires manual conversion: does not exist in peer-type
// WARNING: in.MinSyncPeriod requires manual conversion: does not exist in peer-type
out.ConfigSyncPeriod = in.ConfigSyncPeriod
return nil
}
func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *configv1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
out.TCPBeLiberal = in.TCPBeLiberal
out.UDPTimeout = in.UDPTimeout
out.UDPStreamTimeout = in.UDPStreamTimeout
return nil
}
// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in *configv1alpha1.KubeProxyConntrackConfiguration, out *config.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *configv1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error {
out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore))
out.Min = (*int32)(unsafe.Pointer(in.Min))
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout))
out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout))
out.TCPBeLiberal = in.TCPBeLiberal
out.UDPTimeout = in.UDPTimeout
out.UDPStreamTimeout = in.UDPStreamTimeout
return nil
}
// Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *config.KubeProxyConntrackConfiguration, out *configv1alpha1.KubeProxyConntrackConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(in *configv1alpha1.KubeProxyIPTablesConfiguration, out *config.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
// WARNING: in.MasqueradeAll requires manual conversion: does not exist in peer-type
out.LocalhostNodePorts = (*bool)(unsafe.Pointer(in.LocalhostNodePorts))
// WARNING: in.SyncPeriod requires manual conversion: does not exist in peer-type
// WARNING: in.MinSyncPeriod requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *configv1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
out.LocalhostNodePorts = (*bool)(unsafe.Pointer(in.LocalhostNodePorts))
return nil
}
// Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *config.KubeProxyIPTablesConfiguration, out *configv1alpha1.KubeProxyIPTablesConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(in *configv1alpha1.KubeProxyIPVSConfiguration, out *config.KubeProxyIPVSConfiguration, s conversion.Scope) error {
// WARNING: in.SyncPeriod requires manual conversion: does not exist in peer-type
// WARNING: in.MinSyncPeriod requires manual conversion: does not exist in peer-type
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
out.StrictARP = in.StrictARP
out.TCPTimeout = in.TCPTimeout
out.TCPFinTimeout = in.TCPFinTimeout
out.UDPTimeout = in.UDPTimeout
return nil
}
func autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *configv1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error {
out.Scheduler = in.Scheduler
out.ExcludeCIDRs = *(*[]string)(unsafe.Pointer(&in.ExcludeCIDRs))
out.StrictARP = in.StrictARP
out.TCPTimeout = in.TCPTimeout
out.TCPFinTimeout = in.TCPFinTimeout
out.UDPTimeout = in.UDPTimeout
return nil
}
// Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *config.KubeProxyIPVSConfiguration, out *configv1alpha1.KubeProxyIPVSConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyNFTablesConfiguration_To_config_KubeProxyNFTablesConfiguration(in *configv1alpha1.KubeProxyNFTablesConfiguration, out *config.KubeProxyNFTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
// WARNING: in.MasqueradeAll requires manual conversion: does not exist in peer-type
// WARNING: in.SyncPeriod requires manual conversion: does not exist in peer-type
// WARNING: in.MinSyncPeriod requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration(in *config.KubeProxyNFTablesConfiguration, out *configv1alpha1.KubeProxyNFTablesConfiguration, s conversion.Scope) error {
out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit))
return nil
}
// Convert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration(in *config.KubeProxyNFTablesConfiguration, out *configv1alpha1.KubeProxyNFTablesConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyNFTablesConfiguration_To_v1alpha1_KubeProxyNFTablesConfiguration(in, out, s)
}
func autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in *configv1alpha1.KubeProxyWinkernelConfiguration, out *config.KubeProxyWinkernelConfiguration, s conversion.Scope) error {
out.NetworkName = in.NetworkName
out.SourceVip = in.SourceVip
out.EnableDSR = in.EnableDSR
out.RootHnsEndpointName = in.RootHnsEndpointName
out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
return nil
}
// Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in *configv1alpha1.KubeProxyWinkernelConfiguration, out *config.KubeProxyWinkernelConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(in, out, s)
}
func autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in *config.KubeProxyWinkernelConfiguration, out *configv1alpha1.KubeProxyWinkernelConfiguration, s conversion.Scope) error {
out.NetworkName = in.NetworkName
out.SourceVip = in.SourceVip
out.EnableDSR = in.EnableDSR
out.RootHnsEndpointName = in.RootHnsEndpointName
out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
return nil
}
// Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration is an autogenerated conversion function.
func Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in *config.KubeProxyWinkernelConfiguration, out *configv1alpha1.KubeProxyWinkernelConfiguration, s conversion.Scope) error {
return autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
configv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&configv1alpha1.KubeProxyConfiguration{}, func(obj interface{}) {
SetObjectDefaults_KubeProxyConfiguration(obj.(*configv1alpha1.KubeProxyConfiguration))
})
return nil
}
func SetObjectDefaults_KubeProxyConfiguration(in *configv1alpha1.KubeProxyConfiguration) {
SetDefaults_KubeProxyConfiguration(in)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DetectLocalConfiguration) DeepCopyInto(out *DetectLocalConfiguration) {
*out = *in
if in.ClusterCIDRs != nil {
in, out := &in.ClusterCIDRs, &out.ClusterCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetectLocalConfiguration.
func (in *DetectLocalConfiguration) DeepCopy() *DetectLocalConfiguration {
if in == nil {
return nil
}
out := new(DetectLocalConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Linux.DeepCopyInto(&out.Linux)
out.Windows = in.Windows
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
in.IPTables.DeepCopyInto(&out.IPTables)
in.IPVS.DeepCopyInto(&out.IPVS)
out.Winkernel = in.Winkernel
in.NFTables.DeepCopyInto(&out.NFTables)
in.DetectLocal.DeepCopyInto(&out.DetectLocal)
if in.NodePortAddresses != nil {
in, out := &in.NodePortAddresses, &out.NodePortAddresses
*out = make([]string, len(*in))
copy(*out, *in)
}
out.SyncPeriod = in.SyncPeriod
out.MinSyncPeriod = in.MinSyncPeriod
out.ConfigSyncPeriod = in.ConfigSyncPeriod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration.
func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) {
*out = *in
if in.MaxPerCore != nil {
in, out := &in.MaxPerCore, &out.MaxPerCore
*out = new(int32)
**out = **in
}
if in.Min != nil {
in, out := &in.Min, &out.Min
*out = new(int32)
**out = **in
}
if in.TCPEstablishedTimeout != nil {
in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout
*out = new(v1.Duration)
**out = **in
}
if in.TCPCloseWaitTimeout != nil {
in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout
*out = new(v1.Duration)
**out = **in
}
out.UDPTimeout = in.UDPTimeout
out.UDPStreamTimeout = in.UDPStreamTimeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration.
func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyConntrackConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) {
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
*out = new(int32)
**out = **in
}
if in.LocalhostNodePorts != nil {
in, out := &in.LocalhostNodePorts, &out.LocalhostNodePorts
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration.
func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPTablesConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) {
*out = *in
if in.ExcludeCIDRs != nil {
in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
out.TCPTimeout = in.TCPTimeout
out.TCPFinTimeout = in.TCPFinTimeout
out.UDPTimeout = in.UDPTimeout
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration.
func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyIPVSConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyLinuxConfiguration) DeepCopyInto(out *KubeProxyLinuxConfiguration) {
*out = *in
in.Conntrack.DeepCopyInto(&out.Conntrack)
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyLinuxConfiguration.
func (in *KubeProxyLinuxConfiguration) DeepCopy() *KubeProxyLinuxConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyLinuxConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyNFTablesConfiguration) DeepCopyInto(out *KubeProxyNFTablesConfiguration) {
*out = *in
if in.MasqueradeBit != nil {
in, out := &in.MasqueradeBit, &out.MasqueradeBit
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyNFTablesConfiguration.
func (in *KubeProxyNFTablesConfiguration) DeepCopy() *KubeProxyNFTablesConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyNFTablesConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyWindowsConfiguration) DeepCopyInto(out *KubeProxyWindowsConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyWindowsConfiguration.
func (in *KubeProxyWindowsConfiguration) DeepCopy() *KubeProxyWindowsConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyWindowsConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyWinkernelConfiguration) DeepCopyInto(out *KubeProxyWinkernelConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyWinkernelConfiguration.
func (in *KubeProxyWinkernelConfiguration) DeepCopy() *KubeProxyWinkernelConfiguration {
if in == nil {
return nil
}
out := new(KubeProxyWinkernelConfiguration)
in.DeepCopyInto(out)
return out
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well.
func IPPart(s string) string {
if ip := netutils.ParseIPSloppy(s); ip != nil {
// IP address without port
return s
}
// Must be IP:port
host, _, err := net.SplitHostPort(s)
if err != nil {
klog.ErrorS(err, "Failed to parse host-port", "input", s)
return ""
}
// Check if host string is a valid IP address
ip := netutils.ParseIPSloppy(host)
if ip == nil {
klog.ErrorS(nil, "Failed to parse IP", "input", host)
return ""
}
return ip.String()
}
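// exampleIPPart is an illustrative sketch (not part of the original source)
// showing the three input shapes IPPart handles.
func exampleIPPart() {
_ = IPPart("10.0.0.1") // bare IP: returned unchanged
_ = IPPart("10.0.0.1:80") // IP:port: returns "10.0.0.1"
_ = IPPart("[fd00:1::5]:9999") // bracketed IPv6: returns "fd00:1::5", brackets stripped
}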
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"fmt"
"strings"
"github.com/go-logr/logr"
)
// LineBuffer is an interface for writing lines of input to a bytes.Buffer
type LineBuffer interface {
// Write takes a list of arguments, each a string or []string, joins all the
// individual strings with spaces, terminates with newline, and writes them to the
// buffer. Any other argument type will panic.
Write(args ...interface{})
// WriteBytes writes bytes to the buffer, and terminates with newline.
WriteBytes(bytes []byte)
// Reset clears the buffer
Reset()
// Bytes returns the contents of the buffer as a []byte
Bytes() []byte
// String returns the contents of the buffer as a string
String() string
// Lines returns the number of lines in the buffer. More precisely, it returns the
// number of times Write() or WriteBytes() was called; the count is only accurate if
// you never wrote any newlines to the buffer yourself.
Lines() int
}
var _ logr.Marshaler = &realLineBuffer{}
type realLineBuffer struct {
b bytes.Buffer
lines int
}
// NewLineBuffer returns a new "real" LineBuffer
func NewLineBuffer() LineBuffer {
return &realLineBuffer{}
}
// Write is part of LineBuffer
func (buf *realLineBuffer) Write(args ...interface{}) {
for i, arg := range args {
if i > 0 {
buf.b.WriteByte(' ')
}
switch x := arg.(type) {
case string:
buf.b.WriteString(x)
case []string:
for j, s := range x {
if j > 0 {
buf.b.WriteByte(' ')
}
buf.b.WriteString(s)
}
default:
panic(fmt.Sprintf("unknown argument type: %T", x))
}
}
buf.b.WriteByte('\n')
buf.lines++
}
// WriteBytes is part of LineBuffer
func (buf *realLineBuffer) WriteBytes(bytes []byte) {
buf.b.Write(bytes)
buf.b.WriteByte('\n')
buf.lines++
}
// Reset is part of LineBuffer
func (buf *realLineBuffer) Reset() {
buf.b.Reset()
buf.lines = 0
}
// Bytes is part of LineBuffer
func (buf *realLineBuffer) Bytes() []byte {
return buf.b.Bytes()
}
// String is part of LineBuffer
func (buf *realLineBuffer) String() string {
return buf.b.String()
}
// Lines is part of LineBuffer
func (buf *realLineBuffer) Lines() int {
return buf.lines
}
// MarshalLog implements the logr.Marshaler interface
func (buf *realLineBuffer) MarshalLog() any {
return strings.Split(buf.b.String(), "\n")
}
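// exampleLineBuffer is an illustrative sketch (not part of the original
// source) of assembling a rule line from mixed string and []string arguments:
// Write joins every individual string with single spaces and appends a newline.
func exampleLineBuffer() string {
buf := NewLineBuffer()
buf.Write("-A", "KUBE-NODEPORTS", []string{"-m", "tcp", "--dport", "30080"}, "-j", "ACCEPT")
// buf.String() == "-A KUBE-NODEPORTS -m tcp --dport 30080 -j ACCEPT\n"
// buf.Lines() == 1
return buf.String()
}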
type discardLineBuffer struct {
lines int
}
// NewDiscardLineBuffer returns a dummy LineBuffer that counts the number of writes but
// throws away the data. (This is used for iptables proxy partial syncs, to keep track of
// how many rules we managed to avoid having to sync.)
func NewDiscardLineBuffer() LineBuffer {
return &discardLineBuffer{}
}
// Write is part of LineBuffer
func (buf *discardLineBuffer) Write(args ...interface{}) {
buf.lines++
}
// WriteBytes is part of LineBuffer
func (buf *discardLineBuffer) WriteBytes(bytes []byte) {
buf.lines++
}
// Reset is part of LineBuffer
func (buf *discardLineBuffer) Reset() {
buf.lines = 0
}
// Bytes is part of LineBuffer
func (buf *discardLineBuffer) Bytes() []byte {
return []byte{}
}
// String is part of LineBuffer
func (buf *discardLineBuffer) String() string {
return ""
}
// Lines is part of LineBuffer
func (buf *discardLineBuffer) Lines() int {
return buf.lines
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
netutils "k8s.io/utils/net"
)
// LocalTrafficDetector generates iptables or nftables rules to detect traffic from local pods.
type LocalTrafficDetector interface {
// IsImplemented returns true if the implementation does something, false
// otherwise. You should not call the other methods if IsImplemented() returns
// false.
IsImplemented() bool
// IfLocal returns iptables arguments that will match traffic from a local pod.
IfLocal() []string
// IfNotLocal returns iptables arguments that will match traffic that is not from
// a local pod.
IfNotLocal() []string
// IfLocalNFT returns nftables arguments that will match traffic from a local pod.
IfLocalNFT() []string
// IfNotLocalNFT returns nftables arguments that will match traffic that is not
// from a local pod.
IfNotLocalNFT() []string
}
type detectLocal struct {
ifLocal []string
ifNotLocal []string
ifLocalNFT []string
ifNotLocalNFT []string
}
func (d *detectLocal) IsImplemented() bool {
return len(d.ifLocal) > 0
}
func (d *detectLocal) IfLocal() []string {
return d.ifLocal
}
func (d *detectLocal) IfNotLocal() []string {
return d.ifNotLocal
}
func (d *detectLocal) IfLocalNFT() []string {
return d.ifLocalNFT
}
func (d *detectLocal) IfNotLocalNFT() []string {
return d.ifNotLocalNFT
}
// NewNoOpLocalDetector returns a no-op implementation of LocalTrafficDetector.
func NewNoOpLocalDetector() LocalTrafficDetector {
return &detectLocal{}
}
// NewDetectLocalByCIDR returns a LocalTrafficDetector that considers traffic from the
// provided cidr to be from a local pod, and other traffic to be non-local. cidr is
// assumed to be valid.
func NewDetectLocalByCIDR(cidr string) LocalTrafficDetector {
nftFamily := "ip"
if netutils.IsIPv6CIDRString(cidr) {
nftFamily = "ip6"
}
return &detectLocal{
ifLocal: []string{"-s", cidr},
ifNotLocal: []string{"!", "-s", cidr},
ifLocalNFT: []string{nftFamily, "saddr", cidr},
ifNotLocalNFT: []string{nftFamily, "saddr", "!=", cidr},
}
}
// NewDetectLocalByBridgeInterface returns a LocalTrafficDetector that considers traffic
// from interfaceName to be from a local pod, and traffic from other interfaces to be
// non-local.
func NewDetectLocalByBridgeInterface(interfaceName string) LocalTrafficDetector {
return &detectLocal{
ifLocal: []string{"-i", interfaceName},
ifNotLocal: []string{"!", "-i", interfaceName},
ifLocalNFT: []string{"iif", interfaceName},
ifNotLocalNFT: []string{"iif", "!=", interfaceName},
}
}
// NewDetectLocalByInterfaceNamePrefix returns a LocalTrafficDetector that considers
// traffic from interfaces starting with interfacePrefix to be from a local pod, and
// traffic from other interfaces to be non-local.
func NewDetectLocalByInterfaceNamePrefix(interfacePrefix string) LocalTrafficDetector {
return &detectLocal{
ifLocal: []string{"-i", interfacePrefix + "+"},
ifNotLocal: []string{"!", "-i", interfacePrefix + "+"},
ifLocalNFT: []string{"iif", interfacePrefix + "*"},
ifNotLocalNFT: []string{"iif", "!=", interfacePrefix + "*"},
}
}
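// exampleDetectLocal is an illustrative sketch (not part of the original
// source) showing the match fragments produced for a pod CIDR; callers splice
// these into complete iptables or nftables rules.
func exampleDetectLocal() {
d := NewDetectLocalByCIDR("10.0.0.0/8")
_ = d.IfLocal() // ["-s", "10.0.0.0/8"] (iptables)
_ = d.IfNotLocal() // ["!", "-s", "10.0.0.0/8"] (iptables)
_ = d.IfLocalNFT() // ["ip", "saddr", "10.0.0.0/8"] (nftables)
_ = d.IfNotLocalNFT() // ["ip", "saddr", "!=", "10.0.0.0/8"] (nftables)
}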
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
)
// NetworkInterfacer defines an interface for several net library functions. Production
// code will forward to net library functions, and unit tests will override the methods
// for testing purposes.
type NetworkInterfacer interface {
InterfaceAddrs() ([]net.Addr, error)
}
// RealNetwork implements the NetworkInterfacer interface for production code, just
// wrapping the underlying net library function calls.
type RealNetwork struct{}
// InterfaceAddrs wraps net.InterfaceAddrs(); it is part of the NetworkInterfacer interface.
func (RealNetwork) InterfaceAddrs() ([]net.Addr, error) {
return net.InterfaceAddrs()
}
var _ NetworkInterfacer = &RealNetwork{}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"k8s.io/api/core/v1"
netutils "k8s.io/utils/net"
)
// NodePortAddresses is used to handle the --nodeport-addresses flag
type NodePortAddresses struct {
cidrStrings []string
cidrs []*net.IPNet
containsIPv4Loopback bool
matchAll bool
}
// RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address
var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0)
// NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is
// assumed to contain only valid CIDRs, potentially of both IP families) and returns a
// NodePortAddresses object for the given family. If there are no CIDRs of the given
// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of
// the other family).
func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
npa := &NodePortAddresses{}
// Filter CIDRs to correct family
for _, str := range cidrStrings {
if (family == v1.IPv4Protocol) == netutils.IsIPv4CIDRString(str) {
npa.cidrStrings = append(npa.cidrStrings, str)
}
}
if len(npa.cidrStrings) == 0 {
if family == v1.IPv4Protocol {
npa.cidrStrings = []string{IPv4ZeroCIDR}
} else {
npa.cidrStrings = []string{IPv6ZeroCIDR}
}
}
// Now parse
for _, str := range npa.cidrStrings {
_, cidr, _ := netutils.ParseCIDRSloppy(str)
if netutils.IsIPv4CIDR(cidr) {
if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
npa.containsIPv4Loopback = true
}
}
if IsZeroCIDR(cidr) {
// Ignore everything else
npa.cidrs = []*net.IPNet{cidr}
npa.matchAll = true
break
}
npa.cidrs = append(npa.cidrs, cidr)
}
return npa
}
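// exampleNodePortAddresses is an illustrative sketch (not part of the
// original source) of the per-family filtering and zero-CIDR fallback above.
func exampleNodePortAddresses() {
cidrs := []string{"10.0.0.0/8", "2001:db8::/64"}
v4 := NewNodePortAddresses(v1.IPv4Protocol, cidrs)
_ = v4.MatchAll() // false: only "10.0.0.0/8" was kept for IPv4
v6 := NewNodePortAddresses(v1.IPv6Protocol, []string{"10.0.0.0/8"})
_ = v6.MatchAll() // true: no IPv6 CIDRs given, so "::/0" was substituted
}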
func (npa *NodePortAddresses) String() string {
return fmt.Sprintf("%v", npa.cidrStrings)
}
// MatchAll returns true if npa matches all node IPs (of npa's given family)
func (npa *NodePortAddresses) MatchAll() bool {
return npa.matchAll
}
// GetNodeIPs returns all matched node IP addresses for npa's CIDRs. If no matching
// IPs are found, it returns an empty list.
// NetworkInterfacer is injected for testing purposes.
func (npa *NodePortAddresses) GetNodeIPs(nw NetworkInterfacer) ([]net.IP, error) {
addrs, err := nw.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err)
}
// Use a map to dedup matches
addresses := make(map[string]net.IP)
for _, cidr := range npa.cidrs {
for _, addr := range addrs {
var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on Windows; on Linux it always returns net.IPNet.
switch v := addr.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if cidr.Contains(ip) {
addresses[ip.String()] = ip
}
}
}
ips := make([]net.IP, 0, len(addresses))
for _, ip := range addresses {
ips = append(ips, ip)
}
return ips, nil
}
// ContainsIPv4Loopback returns true if npa's CIDRs contain an IPv4 loopback address.
func (npa *NodePortAddresses) ContainsIPv4Loopback() bool {
return npa.containsIPv4Loopback
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
)
const (
// IPv4ZeroCIDR is the CIDR block for the whole IPv4 address space
IPv4ZeroCIDR = "0.0.0.0/0"
// IPv6ZeroCIDR is the CIDR block for the whole IPv6 address space
IPv6ZeroCIDR = "::/0"
// FullSyncPeriod is iptables and nftables proxier full sync period
FullSyncPeriod = 1 * time.Hour
)
// IsZeroCIDR checks whether the input CIDR is the IPv4 or IPv6 zero CIDR,
// i.e. whether it has a prefix length of 0
func IsZeroCIDR(cidr *net.IPNet) bool {
if cidr == nil {
return false
}
prefixLen, _ := cidr.Mask.Size()
return prefixLen == 0
}
// ShouldSkipService checks if a given service should skip proxying
func ShouldSkipService(service *v1.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
klog.V(3).InfoS("Skipping service due to cluster IP", "service", klog.KObj(service), "clusterIP", service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == v1.ServiceTypeExternalName {
klog.V(3).InfoS("Skipping service due to Type=ExternalName", "service", klog.KObj(service))
return true
}
return false
}
// AddressSet validates the addresses in the slice using the "isValid" function.
// Addresses that pass the validation are returned as a string Set.
func AddressSet(isValid func(ip net.IP) bool, addrs []net.Addr) sets.Set[string] {
ips := sets.New[string]()
for _, a := range addrs {
var ip net.IP
switch v := a.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if isValid(ip) {
ips.Insert(ip.String())
}
}
return ips
}
// MapIPsByIPFamily maps a slice of IPs to their respective IP families (v4 or v6)
func MapIPsByIPFamily(ipStrings []string) map[v1.IPFamily][]net.IP {
ipFamilyMap := map[v1.IPFamily][]net.IP{}
for _, ipStr := range ipStrings {
ip := netutils.ParseIPSloppy(ipStr)
if ip != nil {
// Since ip is parsed ok, GetIPFamilyFromIP will never return v1.IPFamilyUnknown
ipFamily := GetIPFamilyFromIP(ip)
ipFamilyMap[ipFamily] = append(ipFamilyMap[ipFamily], ip)
} else {
// ExternalIPs may not be validated by the api-server. In particular,
// empty strings may slip through, and logging an error for each of them
// would yield a lot of bad error logs.
if len(strings.TrimSpace(ipStr)) != 0 {
klog.ErrorS(nil, "Skipping invalid IP", "ip", ipStr)
}
}
}
return ipFamilyMap
}
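// exampleMapIPsByIPFamily is an illustrative sketch (not part of the original
// source): mixed-family input is split into one slice per family, and invalid
// entries are logged and dropped.
func exampleMapIPsByIPFamily() {
m := MapIPsByIPFamily([]string{"1.2.3.4", "fd00::1", "bad-ip"})
_ = m[v1.IPv4Protocol] // [1.2.3.4]
_ = m[v1.IPv6Protocol] // [fd00::1] ("bad-ip" was skipped)
}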
// MapCIDRsByIPFamily maps a slice of CIDRs to their respective IP families (v4 or v6)
func MapCIDRsByIPFamily(cidrsStrings []string) map[v1.IPFamily][]*net.IPNet {
ipFamilyMap := map[v1.IPFamily][]*net.IPNet{}
for _, cidrStrUntrimmed := range cidrsStrings {
cidrStr := strings.TrimSpace(cidrStrUntrimmed)
_, cidr, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil {
// Ignore empty strings. Same as in MapIPsByIPFamily
if len(cidrStr) != 0 {
klog.ErrorS(err, "Invalid CIDR ignored", "CIDR", cidrStr)
}
continue
}
// Since we just successfully parsed the CIDR, IPFamilyOfCIDR will never return "IPFamilyUnknown"
ipFamily := convertToV1IPFamily(netutils.IPFamilyOfCIDR(cidr))
ipFamilyMap[ipFamily] = append(ipFamilyMap[ipFamily], cidr)
}
return ipFamilyMap
}
// GetIPFamilyFromIP returns the IP family of ip, or v1.IPFamilyUnknown if it cannot be determined
func GetIPFamilyFromIP(ip net.IP) v1.IPFamily {
return convertToV1IPFamily(netutils.IPFamilyOf(ip))
}
// Convert netutils.IPFamily to v1.IPFamily
func convertToV1IPFamily(ipFamily netutils.IPFamily) v1.IPFamily {
switch ipFamily {
case netutils.IPv4:
return v1.IPv4Protocol
case netutils.IPv6:
return v1.IPv6Protocol
}
return v1.IPFamilyUnknown
}
// OtherIPFamily returns the other IP family (IPv4 for IPv6 and vice versa)
func OtherIPFamily(ipFamily v1.IPFamily) v1.IPFamily {
if ipFamily == v1.IPv6Protocol {
return v1.IPv4Protocol
}
return v1.IPv6Protocol
}
// AppendPortIfNeeded appends the given port to the IP address unless it is already
// in "ipv4:port" or "[ipv6]:port" format.
func AppendPortIfNeeded(addr string, port int32) string {
// Return if address is already in "ipv4:port" or "[ipv6]:port" format.
if _, _, err := net.SplitHostPort(addr); err == nil {
return addr
}
// Simply return for invalid case. This should be caught by validation instead.
ip := netutils.ParseIPSloppy(addr)
if ip == nil {
return addr
}
// Append port to address.
if ip.To4() != nil {
return fmt.Sprintf("%s:%d", addr, port)
}
return fmt.Sprintf("[%s]:%d", addr, port)
}
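// exampleAppendPortIfNeeded is an illustrative sketch (not part of the
// original source) covering AppendPortIfNeeded's three cases.
func exampleAppendPortIfNeeded() {
_ = AppendPortIfNeeded("1.2.3.4", 9090) // "1.2.3.4:9090"
_ = AppendPortIfNeeded("1.2.3.4:80", 9090) // unchanged: already has a port
_ = AppendPortIfNeeded("fd00::5", 9090) // "[fd00::5]:9090"
}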
// EnsureSysctl sets a kernel sysctl to a given numeric value.
func EnsureSysctl(sysctl utilsysctl.Interface, name string, newVal int) error {
if oldVal, _ := sysctl.GetSysctl(name); oldVal != newVal {
if err := sysctl.SetSysctl(name, newVal); err != nil {
return fmt.Errorf("can't set sysctl %s to %d: %v", name, newVal, err)
}
klog.V(1).InfoS("Changed sysctl", "name", name, "before", oldVal, "after", newVal)
}
return nil
}
// GetClusterIPByFamily returns the service's ClusterIP for the given IP family, or "" if the service has none
func GetClusterIPByFamily(ipFamily v1.IPFamily, service *v1.Service) string {
// allow version skew: services created before dual-stack may not have IPFamilies set
if len(service.Spec.IPFamilies) == 0 {
if len(service.Spec.ClusterIP) == 0 || service.Spec.ClusterIP == v1.ClusterIPNone {
return ""
}
IsIPv6Family := (ipFamily == v1.IPv6Protocol)
if IsIPv6Family == netutils.IsIPv6String(service.Spec.ClusterIP) {
return service.Spec.ClusterIP
}
return ""
}
for idx, family := range service.Spec.IPFamilies {
if family == ipFamily {
if idx < len(service.Spec.ClusterIPs) {
return service.Spec.ClusterIPs[idx]
}
}
}
return ""
}
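// exampleGetClusterIPByFamily is an illustrative sketch (not part of the
// original source): for a dual-stack service, the ClusterIP is looked up by
// the index of the requested family in Spec.IPFamilies.
func exampleGetClusterIPByFamily(svc *v1.Service) string {
// Assume svc.Spec.IPFamilies == [IPv4Protocol, IPv6Protocol] and
// svc.Spec.ClusterIPs == ["10.0.0.1", "fd00::1"].
return GetClusterIPByFamily(v1.IPv6Protocol, svc) // "fd00::1"
}
// IsVIPMode returns true if the given LoadBalancerIngress should be treated
// as a VIP, i.e. traffic is delivered to the node with the load balancer IP
// as the destination. If the LoadBalancerIPMode feature gate is disabled, or
// IPMode is unset, this defaults to true for backwards compatibility.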
func IsVIPMode(ing v1.LoadBalancerIngress) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) {
return true // backwards compat
}
if ing.IPMode == nil {
return true
}
return *ing.IPMode == v1.LoadBalancerIPModeVIP
}
//go:build linux
// +build linux
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"golang.org/x/sys/unix"
"k8s.io/apimachinery/pkg/util/wait"
)
// MaxAttemptsEINTR bounds how many times a syscall interrupted by EINTR is retried.
var MaxAttemptsEINTR = wait.Backoff{Steps: 5}

// ShouldRetryOnEINTR reports whether err is unix.EINTR, i.e. whether the
// interrupted operation should be retried.
var ShouldRetryOnEINTR = func(err error) bool { return errors.Is(err, unix.EINTR) }
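
// Illustrative usage sketch (not part of the upstream file): retrying an
// EINTR-prone syscall with the backoff defined above. The wrapped call and
// fd are hypothetical placeholders.
//
//	err := wait.ExponentialBackoff(MaxAttemptsEINTR, func() (bool, error) {
//		err := unix.Flock(fd, unix.LOCK_EX)
//		if ShouldRetryOnEINTR(err) {
//			return false, nil // interrupted; try again
//		}
//		return true, err // done: success or a non-EINTR error
//	})
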
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
import (
"errors"
"fmt"
"math/big"
"math/rand"
"sync"
"time"
)
// AllocationBitmap is a contiguous block of resources that can be allocated atomically.
//
// Each resource has an offset. The internal structure is a bitmap, with a bit for each offset.
//
// If a resource is taken, the bit at that offset is set to one.
// r.count is always equal to the number of set bits and can be recalculated at any time
// by counting the set bits in r.allocated.
//
// TODO: use RLE and compact the allocator to minimize space.
type AllocationBitmap struct {
// strategy carries the details of how to choose the next available item out of the range
strategy bitAllocator
// max is the maximum size of the usable items in the range
max int
// rangeSpec is the range specifier, matching RangeAllocation.Range
rangeSpec string
// lock guards the following members
lock sync.Mutex
// count is the number of currently allocated elements in the range
count int
// allocated is a bit array of the allocated items in the range
allocated *big.Int
}
// AllocationBitmap implements Interface and Snapshottable
var _ Interface = &AllocationBitmap{}
var _ Snapshottable = &AllocationBitmap{}
// bitAllocator represents a search strategy in the allocation map for a valid item.
type bitAllocator interface {
AllocateBit(allocated *big.Int, max, count int) (int, bool)
}
// NewAllocationMap creates an allocation bitmap using the random scan strategy.
func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap {
return NewAllocationMapWithOffset(max, rangeSpec, 0)
}
// NewAllocationMapWithOffset creates an allocation bitmap using a random scan strategy that
// allows passing an offset dividing the allocation bitmap into two blocks.
// The first block of values will not be used for random assignment by the AllocateNext()
// method until the second block of values has been exhausted.
// For example, with max=16 and offset=8, AllocateNext draws randomly from offsets [8,16)
// until that block is full, and only then falls back to [0,8).
// The offset value must always be smaller than the bitmap size.
func NewAllocationMapWithOffset(max int, rangeSpec string, offset int) *AllocationBitmap {
a := AllocationBitmap{
strategy: randomScanStrategyWithOffset{
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
offset: offset,
},
allocated: big.NewInt(0),
count: 0,
max: max,
rangeSpec: rangeSpec,
}
return &a
}
// Allocate attempts to reserve the provided item.
// Returns true if it was allocated, false if it was already in use
func (r *AllocationBitmap) Allocate(offset int) (bool, error) {
r.lock.Lock()
defer r.lock.Unlock()
// max is the maximum size of the usable items in the range
if offset < 0 || offset >= r.max {
return false, fmt.Errorf("offset %d out of range [0,%d)", offset, r.max)
}
if r.allocated.Bit(offset) == 1 {
return false, nil
}
r.allocated = r.allocated.SetBit(r.allocated, offset, 1)
r.count++
return true, nil
}
// AllocateNext reserves one of the items from the pool.
// (0, false, nil) may be returned if there are no items left.
func (r *AllocationBitmap) AllocateNext() (int, bool, error) {
r.lock.Lock()
defer r.lock.Unlock()
next, ok := r.strategy.AllocateBit(r.allocated, r.max, r.count)
if !ok {
return 0, false, nil
}
r.count++
r.allocated = r.allocated.SetBit(r.allocated, next, 1)
return next, true, nil
}
// Release releases the item back to the pool. Releasing an
// unallocated item or an item out of the range is a no-op and
// returns no error.
func (r *AllocationBitmap) Release(offset int) error {
r.lock.Lock()
defer r.lock.Unlock()
if r.allocated.Bit(offset) == 0 {
return nil
}
r.allocated = r.allocated.SetBit(r.allocated, offset, 0)
r.count--
return nil
}
const (
// Find the size of a big.Word in bytes: wordPower evaluates to 2 on 32-bit
// platforms and 3 on 64-bit platforms, so wordSize is 4 or 8 respectively.
notZero = uint64(^big.Word(0))
wordPower = (notZero>>8)&1 + (notZero>>16)&1 + (notZero>>32)&1
wordSize = 1 << wordPower
)
// ForEach calls the provided function for each allocated bit. The
// AllocationBitmap may not be modified while this loop is running.
func (r *AllocationBitmap) ForEach(fn func(int)) {
r.lock.Lock()
defer r.lock.Unlock()
words := r.allocated.Bits()
for wordIdx, word := range words {
bit := 0
for word > 0 {
if (word & 1) != 0 {
fn((wordIdx * wordSize * 8) + bit)
word = word &^ 1
}
bit++
word = word >> 1
}
}
}
// Has returns true if the provided item is already allocated and a call
// to Allocate(offset) would fail.
func (r *AllocationBitmap) Has(offset int) bool {
r.lock.Lock()
defer r.lock.Unlock()
return r.allocated.Bit(offset) == 1
}
// Free returns the count of items left in the range.
func (r *AllocationBitmap) Free() int {
r.lock.Lock()
defer r.lock.Unlock()
return r.max - r.count
}
// Snapshot saves the current state of the pool.
func (r *AllocationBitmap) Snapshot() (string, []byte) {
r.lock.Lock()
defer r.lock.Unlock()
return r.rangeSpec, r.allocated.Bytes()
}
// Restore restores the pool to the previously captured state.
func (r *AllocationBitmap) Restore(rangeSpec string, data []byte) error {
r.lock.Lock()
defer r.lock.Unlock()
if r.rangeSpec != rangeSpec {
return errors.New("the provided range does not match the current range")
}
r.allocated = big.NewInt(0).SetBytes(data)
r.count = countBits(r.allocated)
return nil
}
// Destroy cleans up everything on shutdown.
func (r *AllocationBitmap) Destroy() {
}
// randomScanStrategy chooses a random address from the provided big.Int, and then
// scans forward looking for the next available address (it will wrap the range if
// necessary).
type randomScanStrategy struct {
rand *rand.Rand
}
func (rss randomScanStrategy) AllocateBit(allocated *big.Int, max, count int) (int, bool) {
if count >= max {
return 0, false
}
offset := rss.rand.Intn(max)
for i := 0; i < max; i++ {
at := (offset + i) % max
if allocated.Bit(at) == 0 {
return at, true
}
}
return 0, false
}
var _ bitAllocator = randomScanStrategy{}
// randomScanStrategyWithOffset chooses a random address from the provided big.Int and then scans
// forward looking for the next available address. The big.Int range is subdivided so it will try
// to allocate first from the reserved upper range of addresses (it will wrap the upper subrange if necessary).
// If there is no free address it will try to allocate one from the lower range too.
type randomScanStrategyWithOffset struct {
rand *rand.Rand
offset int
}
func (rss randomScanStrategyWithOffset) AllocateBit(allocated *big.Int, max, count int) (int, bool) {
if count >= max {
return 0, false
}
// size of the upper subrange, prioritized for random allocation
subrangeMax := max - rss.offset
// Try to get a value from the upper subrange [rss.offset, max).
start := rss.rand.Intn(subrangeMax)
for i := 0; i < subrangeMax; i++ {
at := rss.offset + ((start + i) % subrangeMax)
if allocated.Bit(at) == 0 {
return at, true
}
}
start = rss.rand.Intn(rss.offset)
// subrange full, try to get the value from the first block before giving up.
for i := 0; i < rss.offset; i++ {
at := (start + i) % rss.offset
if allocated.Bit(at) == 0 {
return at, true
}
}
return 0, false
}
var _ bitAllocator = randomScanStrategyWithOffset{}
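
// exampleBitmapUsage is an illustrative sketch (added for documentation, not
// part of the upstream file) showing typical AllocationBitmap usage.
func exampleBitmapUsage() {
	m := NewAllocationMap(16, "sample-range")
	// Reserve a specific offset; ok is false if it was already taken.
	ok, err := m.Allocate(3)
	_, _ = ok, err
	// Reserve an arbitrary free offset chosen by the random-scan strategy.
	if next, found, _ := m.AllocateNext(); found {
		_ = m.Release(next) // return the offset to the pool
	}
}
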
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
import (
"math/big"
"math/bits"
)
// countBits returns the number of set bits in n
func countBits(n *big.Int) int {
count := 0
for _, w := range n.Bits() {
count += bits.OnesCount64(uint64(w))
}
return count
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"context"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)
func ConfirmNoEscalationInternal(ctx context.Context, ruleResolver AuthorizationRuleResolver, inRules []rbac.PolicyRule) error {
rules := []rbacv1.PolicyRule{}
for i := range inRules {
v1Rule := rbacv1.PolicyRule{}
err := rbacv1helpers.Convert_rbac_PolicyRule_To_v1_PolicyRule(&inRules[i], &v1Rule, nil)
if err != nil {
return err
}
rules = append(rules, v1Rule)
}
return ConfirmNoEscalation(ctx, ruleResolver, rules)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"reflect"
rbacv1 "k8s.io/api/rbac/v1"
)
type simpleResource struct {
Group string
Resource string
ResourceNameExist bool
ResourceName string
}
// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes.
// This is a fast check, and works well with the decomposed "missing rules" list from a Covers check.
func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) {
compacted := make([]rbacv1.PolicyRule, 0, len(rules))
simpleRules := map[simpleResource]*rbacv1.PolicyRule{}
for _, rule := range rules {
if resource, isSimple := isSimpleResourceRule(&rule); isSimple {
if existingRule, ok := simpleRules[resource]; ok {
// Add the new verbs to the existing simple resource rule
if existingRule.Verbs == nil {
existingRule.Verbs = []string{}
}
existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...)
} else {
// Copy the rule to accumulate matching simple resource rules into
simpleRules[resource] = rule.DeepCopy()
}
} else {
compacted = append(compacted, rule)
}
}
// Once we've consolidated the simple resource rules, add them to the compacted list
for _, simpleRule := range simpleRules {
compacted = append(compacted, *simpleRule)
}
return compacted, nil
}
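
// Illustrative behavior (derived from the implementation above, not part of
// the upstream file): two rules that differ only by verb are merged into one.
//
//	in:  {APIGroups: [""], Resources: ["pods"], Verbs: ["get"]}
//	     {APIGroups: [""], Resources: ["pods"], Verbs: ["list"]}
//	out: {APIGroups: [""], Resources: ["pods"], Verbs: ["get", "list"]}
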
// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values
func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) {
resource := simpleResource{}
// If we have "complex" rule attributes, return early without allocations or expensive comparisons
if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 {
return resource, false
}
// If we have multiple api groups or resources, return early
if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 {
return resource, false
}
// Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames
simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames}
if !reflect.DeepEqual(simpleRule, rule) {
return resource, false
}
if len(rule.ResourceNames) == 0 {
resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false}
} else {
resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]}
}
return resource, true
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"context"
"errors"
"fmt"
"strings"
"k8s.io/klog/v2"
rbacv1 "k8s.io/api/rbac/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/component-helpers/auth/rbac/validation"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)
type AuthorizationRuleResolver interface {
// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace
// of the role binding, or the empty string for a cluster role binding.
GetRoleReferenceRules(ctx context.Context, roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error)
// RulesFor returns the list of rules that apply to a given user in a given namespace, along with any error encountered. If an error is returned, the slice of
// PolicyRules may not be complete, but it contains all retrievable rules. This is done because policy rules are purely additive and policy determinations
// can be made on the basis of those rules that are found.
RulesFor(ctx context.Context, user user.Info, namespace string) ([]rbacv1.PolicyRule, error)
// VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace, and each error encountered resolving those rules.
// If visitor() returns false, visiting is short-circuited.
VisitRulesFor(ctx context.Context, user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool)
}
// ConfirmNoEscalation determines if the roles for a given user in a given namespace encompass the provided rules.
func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleResolver, rules []rbacv1.PolicyRule) error {
ruleResolutionErrors := []error{}
user, ok := genericapirequest.UserFrom(ctx)
if !ok {
return fmt.Errorf("no user on context")
}
namespace, _ := genericapirequest.NamespaceFrom(ctx)
ownerRules, err := ruleResolver.RulesFor(ctx, user, namespace)
if err != nil {
// As per the AuthorizationRuleResolver contract, this may return a non-fatal error with an incomplete list of policies. Log the error and continue.
klog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err)
ruleResolutionErrors = append(ruleResolutionErrors, err)
}
ownerRightsCover, missingRights := validation.Covers(ownerRules, rules)
if !ownerRightsCover {
compactMissingRights := missingRights
if compact, err := CompactRules(missingRights); err == nil {
compactMissingRights = compact
}
missingDescriptions := sets.NewString()
for _, missing := range compactMissingRights {
missingDescriptions.Insert(rbacv1helpers.CompactString(missing))
}
msg := fmt.Sprintf("user %q (groups=%q) is attempting to grant RBAC permissions not currently held:\n%s", user.GetName(), user.GetGroups(), strings.Join(missingDescriptions.List(), "\n"))
if len(ruleResolutionErrors) > 0 {
msg = msg + fmt.Sprintf("; resolution errors: %v", ruleResolutionErrors)
}
return errors.New(msg)
}
return nil
}
type DefaultRuleResolver struct {
roleGetter RoleGetter
roleBindingLister RoleBindingLister
clusterRoleGetter ClusterRoleGetter
clusterRoleBindingLister ClusterRoleBindingLister
}
func NewDefaultRuleResolver(roleGetter RoleGetter, roleBindingLister RoleBindingLister, clusterRoleGetter ClusterRoleGetter, clusterRoleBindingLister ClusterRoleBindingLister) *DefaultRuleResolver {
return &DefaultRuleResolver{roleGetter, roleBindingLister, clusterRoleGetter, clusterRoleBindingLister}
}
type RoleGetter interface {
GetRole(ctx context.Context, namespace, name string) (*rbacv1.Role, error)
}
type RoleBindingLister interface {
ListRoleBindings(ctx context.Context, namespace string) ([]*rbacv1.RoleBinding, error)
}
type ClusterRoleGetter interface {
GetClusterRole(ctx context.Context, name string) (*rbacv1.ClusterRole, error)
}
type ClusterRoleBindingLister interface {
ListClusterRoleBindings(ctx context.Context) ([]*rbacv1.ClusterRoleBinding, error)
}
func (r *DefaultRuleResolver) RulesFor(ctx context.Context, user user.Info, namespace string) ([]rbacv1.PolicyRule, error) {
visitor := &ruleAccumulator{}
r.VisitRulesFor(ctx, user, namespace, visitor.visit)
return visitor.rules, utilerrors.NewAggregate(visitor.errors)
}
type ruleAccumulator struct {
rules []rbacv1.PolicyRule
errors []error
}
func (r *ruleAccumulator) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool {
if rule != nil {
r.rules = append(r.rules, *rule)
}
if err != nil {
r.errors = append(r.errors, err)
}
return true
}
func describeSubject(s *rbacv1.Subject, bindingNamespace string) string {
switch s.Kind {
case rbacv1.ServiceAccountKind:
if len(s.Namespace) > 0 {
return fmt.Sprintf("%s %q", s.Kind, s.Name+"/"+s.Namespace)
}
return fmt.Sprintf("%s %q", s.Kind, s.Name+"/"+bindingNamespace)
default:
return fmt.Sprintf("%s %q", s.Kind, s.Name)
}
}
type clusterRoleBindingDescriber struct {
binding *rbacv1.ClusterRoleBinding
subject *rbacv1.Subject
}
func (d *clusterRoleBindingDescriber) String() string {
return fmt.Sprintf("ClusterRoleBinding %q of %s %q to %s",
d.binding.Name,
d.binding.RoleRef.Kind,
d.binding.RoleRef.Name,
describeSubject(d.subject, ""),
)
}
type roleBindingDescriber struct {
binding *rbacv1.RoleBinding
subject *rbacv1.Subject
}
func (d *roleBindingDescriber) String() string {
return fmt.Sprintf("RoleBinding %q of %s %q to %s",
d.binding.Name+"/"+d.binding.Namespace,
d.binding.RoleRef.Kind,
d.binding.RoleRef.Name,
describeSubject(d.subject, d.binding.Namespace),
)
}
func (r *DefaultRuleResolver) VisitRulesFor(ctx context.Context, user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) {
if clusterRoleBindings, err := r.clusterRoleBindingLister.ListClusterRoleBindings(ctx); err != nil {
if !visitor(nil, nil, err) {
return
}
} else {
sourceDescriber := &clusterRoleBindingDescriber{}
for _, clusterRoleBinding := range clusterRoleBindings {
subjectIndex, applies := appliesTo(user, clusterRoleBinding.Subjects, "")
if !applies {
continue
}
rules, err := r.GetRoleReferenceRules(ctx, clusterRoleBinding.RoleRef, "")
if err != nil {
if !visitor(nil, nil, err) {
return
}
continue
}
sourceDescriber.binding = clusterRoleBinding
sourceDescriber.subject = &clusterRoleBinding.Subjects[subjectIndex]
for i := range rules {
if !visitor(sourceDescriber, &rules[i], nil) {
return
}
}
}
}
if len(namespace) > 0 {
if roleBindings, err := r.roleBindingLister.ListRoleBindings(ctx, namespace); err != nil {
if !visitor(nil, nil, err) {
return
}
} else {
sourceDescriber := &roleBindingDescriber{}
for _, roleBinding := range roleBindings {
subjectIndex, applies := appliesTo(user, roleBinding.Subjects, namespace)
if !applies {
continue
}
rules, err := r.GetRoleReferenceRules(ctx, roleBinding.RoleRef, namespace)
if err != nil {
if !visitor(nil, nil, err) {
return
}
continue
}
sourceDescriber.binding = roleBinding
sourceDescriber.subject = &roleBinding.Subjects[subjectIndex]
for i := range rules {
if !visitor(sourceDescriber, &rules[i], nil) {
return
}
}
}
}
}
}
// GetRoleReferenceRules attempts to resolve the RoleBinding or ClusterRoleBinding.
func (r *DefaultRuleResolver) GetRoleReferenceRules(ctx context.Context, roleRef rbacv1.RoleRef, bindingNamespace string) ([]rbacv1.PolicyRule, error) {
switch roleRef.Kind {
case "Role":
role, err := r.roleGetter.GetRole(ctx, bindingNamespace, roleRef.Name)
if err != nil {
return nil, err
}
return role.Rules, nil
case "ClusterRole":
clusterRole, err := r.clusterRoleGetter.GetClusterRole(ctx, roleRef.Name)
if err != nil {
return nil, err
}
return clusterRole.Rules, nil
default:
return nil, fmt.Errorf("unsupported role reference kind: %q", roleRef.Kind)
}
}
// appliesTo returns whether any of the bindingSubjects applies to the specified subject,
// and if true, the index of the first subject that applies
func appliesTo(user user.Info, bindingSubjects []rbacv1.Subject, namespace string) (int, bool) {
for i, bindingSubject := range bindingSubjects {
if appliesToUser(user, bindingSubject, namespace) {
return i, true
}
}
return 0, false
}
func has(set []string, ele string) bool {
for _, s := range set {
if s == ele {
return true
}
}
return false
}
func appliesToUser(user user.Info, subject rbacv1.Subject, namespace string) bool {
switch subject.Kind {
case rbacv1.UserKind:
return user.GetName() == subject.Name
case rbacv1.GroupKind:
return has(user.GetGroups(), subject.Name)
case rbacv1.ServiceAccountKind:
// Default the namespace to the namespace we're working in if it's available. This allows role bindings that reference
// service accounts in the local namespace to avoid having to qualify them.
saNamespace := namespace
if len(subject.Namespace) > 0 {
saNamespace = subject.Namespace
}
if len(saNamespace) == 0 {
return false
}
// use a more efficient comparison for RBAC checking
return serviceaccount.MatchesUsername(saNamespace, subject.Name, user.GetName())
default:
return false
}
}
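
// For example, given a subject of kind ServiceAccount with namespace
// "kube-system" and name "builder", appliesToUser above matches the user
// name "system:serviceaccount:kube-system:builder".
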
// NewTestRuleResolver returns a rule resolver from lists of role objects.
func NewTestRuleResolver(roles []*rbacv1.Role, roleBindings []*rbacv1.RoleBinding, clusterRoles []*rbacv1.ClusterRole, clusterRoleBindings []*rbacv1.ClusterRoleBinding) (AuthorizationRuleResolver, *StaticRoles) {
r := StaticRoles{
roles: roles,
roleBindings: roleBindings,
clusterRoles: clusterRoles,
clusterRoleBindings: clusterRoleBindings,
}
return newMockRuleResolver(&r), &r
}
func newMockRuleResolver(r *StaticRoles) AuthorizationRuleResolver {
return NewDefaultRuleResolver(r, r, r, r)
}
// StaticRoles is a rule resolver that resolves from lists of role objects.
type StaticRoles struct {
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding
}
func (r *StaticRoles) GetRole(ctx context.Context, namespace, name string) (*rbacv1.Role, error) {
if len(namespace) == 0 {
return nil, errors.New("must provide namespace when getting role")
}
for _, role := range r.roles {
if role.Namespace == namespace && role.Name == name {
return role, nil
}
}
return nil, errors.New("role not found")
}
func (r *StaticRoles) GetClusterRole(ctx context.Context, name string) (*rbacv1.ClusterRole, error) {
for _, clusterRole := range r.clusterRoles {
if clusterRole.Name == name {
return clusterRole, nil
}
}
return nil, errors.New("clusterrole not found")
}
func (r *StaticRoles) ListRoleBindings(ctx context.Context, namespace string) ([]*rbacv1.RoleBinding, error) {
if len(namespace) == 0 {
return nil, errors.New("must provide namespace when listing role bindings")
}
roleBindingList := []*rbacv1.RoleBinding{}
for _, roleBinding := range r.roleBindings {
if roleBinding.Namespace != namespace {
continue
}
// TODO(ericchiang): need to implement label selectors?
roleBindingList = append(roleBindingList, roleBinding)
}
return roleBindingList, nil
}
func (r *StaticRoles) ListClusterRoleBindings(ctx context.Context) ([]*rbacv1.ClusterRoleBinding, error) {
return r.clusterRoleBindings, nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubescheduler.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeSchedulerConfiguration{},
&DefaultPreemptionArgs{},
&InterPodAffinityArgs{},
&NodeResourcesFitArgs{},
&PodTopologySpreadArgs{},
&VolumeBindingArgs{},
&NodeResourcesBalancedAllocationArgs{},
&NodeAffinityArgs{},
&DynamicResourcesArgs{},
)
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
config "k8s.io/kubernetes/pkg/scheduler/apis/config"
configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
)
var (
// Scheme is the runtime.Scheme to which all kubescheduler api types are registered.
Scheme = runtime.NewScheme()
// Codecs provides access to encoding and decoding for the scheme.
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
func init() {
AddToScheme(Scheme)
}
// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api.
func AddToScheme(scheme *runtime.Scheme) {
utilruntime.Must(config.AddToScheme(scheme))
utilruntime.Must(configv1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(
configv1.SchemeGroupVersion,
))
}
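
// Illustrative usage sketch (an assumption, not part of the upstream file):
// strict decoding of a serialized KubeSchedulerConfiguration. With no target
// versions given, UniversalDecoder converts into the internal version.
//
//	obj, _, err := Codecs.UniversalDecoder().Decode(data, nil, nil)
//	if err != nil {
//		// handle strict-decoding or conversion errors
//	}
//	cfg, ok := obj.(*config.KubeSchedulerConfiguration)
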
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"math"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
componentbaseconfig "k8s.io/component-base/config"
)
const (
// DefaultKubeSchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
DefaultKubeSchedulerPort = 10259
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KubeSchedulerConfiguration configures a scheduler
type KubeSchedulerConfiguration struct {
// TypeMeta contains the API version and kind. In kube-scheduler, after
// conversion from the versioned KubeSchedulerConfiguration type to this
// internal type, we set the APIVersion field to the scheme group/version of
// the type we converted from. This is done in cmd/kube-scheduler in two
// places: (1) when loading config from a file, (2) generating the default
// config. Based on the versioned type set in this field, we make decisions;
// for example (1) during validation to check for usage of removed plugins,
// (2) writing config to a file, (3) initialising the scheduler.
metav1.TypeMeta
// Parallelism defines the amount of parallelism in algorithms for scheduling Pods. Must be greater than 0. Defaults to 16.
Parallelism int32
// LeaderElection defines the configuration of leader election client.
LeaderElection componentbaseconfig.LeaderElectionConfiguration
// ClientConnection specifies the kubeconfig file and client connection
// settings for the proxy server to use when communicating with the apiserver.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
// DebuggingConfiguration holds configuration for Debugging related features
// TODO: We might want to make this a substruct, like Debugging componentbaseconfig.DebuggingConfiguration
componentbaseconfig.DebuggingConfiguration
// PercentageOfNodesToScore sets the percentage of all nodes that, once found feasible
// for running a pod, is enough for the scheduler to stop searching for more feasible
// nodes in the cluster. This helps improve the scheduler's performance. The scheduler
// always tries to find at least "minFeasibleNodesToFind" feasible nodes no matter what
// the value of this flag is.
// Example: if the cluster size is 500 nodes and the value of this flag is 30,
// then the scheduler stops looking for further feasible nodes once it finds 150.
// When the value is 0, a default percentage (5%--50%, based on the size of the cluster)
// of the nodes will be scored. It is overridden by the profile-level PercentageOfNodesToScore.
PercentageOfNodesToScore *int32
// PodInitialBackoffSeconds is the initial backoff for unschedulable pods.
// If specified, it must be greater than 0. If this value is null, the default value (1s)
// will be used.
PodInitialBackoffSeconds int64
// PodMaxBackoffSeconds is the max backoff for unschedulable pods.
// If specified, it must be greater than or equal to podInitialBackoffSeconds. If this value is null,
// the default value (10s) will be used.
PodMaxBackoffSeconds int64
// Profiles are scheduling profiles that kube-scheduler supports. Pods can
// choose to be scheduled under a particular profile by setting its associated
// scheduler name. Pods that don't specify any scheduler name are scheduled
// with the "default-scheduler" profile, if present here.
Profiles []KubeSchedulerProfile
// Extenders are the list of scheduler extenders, each holding the values of how to communicate
// with the extender. These extenders are shared by all scheduler profiles.
Extenders []Extender
// DelayCacheUntilActive specifies when to start caching. If this is true and leader election is enabled,
// the scheduler will wait to fill informer caches until it is the leader. Doing so will have slower
// failover with the benefit of lower memory overhead while waiting to become leader.
// Defaults to false.
DelayCacheUntilActive bool
}
// KubeSchedulerProfile is a scheduling profile.
type KubeSchedulerProfile struct {
// SchedulerName is the name of the scheduler associated to this profile.
// If SchedulerName matches with the pod's "spec.schedulerName", then the pod
// is scheduled with this profile.
SchedulerName string
// PercentageOfNodesToScore sets the percentage of all nodes that, once found feasible
// for running a pod, is enough for the scheduler to stop searching for more feasible
// nodes in the cluster. This helps improve the scheduler's performance. The scheduler
// always tries to find at least "minFeasibleNodesToFind" feasible nodes no matter what
// the value of this flag is.
// Example: if the cluster size is 500 nodes and the value of this flag is 30,
// then the scheduler stops looking for further feasible nodes once it finds 150.
// When the value is 0, a default percentage (5%--50%, based on the size of the cluster)
// of the nodes will be scored. It overrides the global PercentageOfNodesToScore; if it
// is empty, the global PercentageOfNodesToScore will be used.
PercentageOfNodesToScore *int32
// Plugins specify the set of plugins that should be enabled or disabled.
// Enabled plugins are the ones that should be enabled in addition to the
// default plugins. Disabled plugins are any of the default plugins that
// should be disabled.
// When no enabled or disabled plugin is specified for an extension point,
// default plugins for that extension point will be used if there is any.
// If a QueueSort plugin is specified, the same QueueSort Plugin and
// PluginConfig must be specified for all profiles.
Plugins *Plugins
// PluginConfig is an optional set of custom plugin arguments for each plugin.
// Omitting config args for a plugin is equivalent to using the default config
// for that plugin.
PluginConfig []PluginConfig
}
// Plugins include multiple extension points. When specified, the list of plugins for
// a particular extension point are the only ones enabled. If an extension point is
// omitted from the config, then the default set of plugins is used for that extension point.
// Enabled plugins are called in the order specified here, after default plugins. If they need to
// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order.
type Plugins struct {
// PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue.
PreEnqueue PluginSet
// QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.
QueueSort PluginSet
// PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.
PreFilter PluginSet
// Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.
Filter PluginSet
// PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod.
PostFilter PluginSet
// PreScore is a list of plugins that are invoked before scoring.
PreScore PluginSet
// Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.
Score PluginSet
// Reserve is a list of plugins invoked when reserving/unreserving resources
// after a node is assigned to run the pod.
Reserve PluginSet
// Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.
Permit PluginSet
// PreBind is a list of plugins that should be invoked before a pod is bound.
PreBind PluginSet
// Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework.
// The scheduler calls these plugins in order, and skips the rest as soon as one returns success.
Bind PluginSet
// PostBind is a list of plugins that should be invoked after a pod is successfully bound.
PostBind PluginSet
// MultiPoint is a simplified config field for enabling plugins for all valid extension points
MultiPoint PluginSet
}
// PluginSet specifies enabled and disabled plugins for an extension point.
// If an array is empty, missing, or nil, default plugins at that extension point will be used.
type PluginSet struct {
// Enabled specifies plugins that should be enabled in addition to default plugins.
// These are called after default plugins and in the same order specified here.
Enabled []Plugin
// Disabled specifies default plugins that should be disabled.
// When all default plugins need to be disabled, an array containing only one "*" should be provided.
Disabled []Plugin
}
// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins.
type Plugin struct {
// Name defines the name of plugin
Name string
// Weight defines the weight of plugin, only used for Score plugins.
Weight int32
}
// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization.
// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure.
// It is up to the plugin to process these Args.
type PluginConfig struct {
// Name defines the name of plugin being configured
Name string
// Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.
Args runtime.Object
}
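
// Illustrative configuration sketch (field names follow the versioned API in
// lowerCamelCase; not part of the upstream file): a profile that replaces the
// default Score plugins with a single weighted plugin, per the PluginSet
// semantics described above.
//
//	apiVersion: kubescheduler.config.k8s.io/v1
//	kind: KubeSchedulerConfiguration
//	profiles:
//	- schedulerName: my-scheduler
//	  plugins:
//	    score:
//	      disabled:
//	      - name: "*"
//	      enabled:
//	      - name: NodeResourcesFit
//	        weight: 2
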
/*
* NOTE: The following variables and methods are intentionally left out of the staging mirror.
*/
const (
// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
// that once found feasible, the scheduler stops looking for more nodes.
// A value of 0 means adaptive, meaning the scheduler figures out a proper default.
DefaultPercentageOfNodesToScore = 0
// MaxCustomPriorityScore is the max score UtilizationShapePoint expects.
MaxCustomPriorityScore int64 = 10
// MaxTotalScore is the maximum total score.
MaxTotalScore int64 = math.MaxInt64
// MaxWeight defines the max weight value allowed for custom PriorityPolicy
MaxWeight = MaxTotalScore / MaxCustomPriorityScore
)
// Names returns the list of enabled plugin names.
func (p *Plugins) Names() []string {
if p == nil {
return nil
}
extensions := []PluginSet{
p.PreEnqueue,
p.PreFilter,
p.Filter,
p.PostFilter,
p.Reserve,
p.PreScore,
p.Score,
p.PreBind,
p.Bind,
p.PostBind,
p.Permit,
p.QueueSort,
}
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return sets.List(n)
}
// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty,
// it is assumed that the extender chose not to provide that extension.
type Extender struct {
// URLPrefix at which the extender is available
URLPrefix string
// Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
FilterVerb string
// Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
PreemptVerb string
// Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
PrioritizeVerb string
// The numeric multiplier for the node scores that the prioritize call generates.
// The weight should be a positive integer
Weight int64
// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
BindVerb string
// EnableHTTPS specifies whether https should be used to communicate with the extender
EnableHTTPS bool
// TLSConfig specifies the transport layer security config
TLSConfig *ExtenderTLSConfig
// HTTPTimeout specifies the timeout duration for a call to the extender. A filter timeout fails the scheduling of the pod. A prioritize
// timeout is ignored; k8s/other extenders' priorities are used to select the node.
HTTPTimeout metav1.Duration
// NodeCacheCapable specifies that the extender is capable of caching node information,
// so the scheduler should only send minimal information about the eligible nodes
// assuming that the extender already cached full details of all nodes in the cluster
NodeCacheCapable bool
// ManagedResources is a list of extended resources that are managed by
// this extender.
// - A pod will be sent to the extender on the Filter, Prioritize and Bind
// (if the extender is the binder) phases iff the pod requests at least
// one of the extended resources in this list. If empty or unspecified,
// all pods will be sent to this extender.
// - If IgnoredByScheduler is set to true for a resource, kube-scheduler
// will skip checking the resource in predicates.
// +optional
ManagedResources []ExtenderManagedResource
// Ignorable specifies if the extender is ignorable, i.e. scheduling should not
// fail when the extender returns an error or is not reachable.
Ignorable bool
}
// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {
// Name is the extended resource name.
Name string
// IgnoredByScheduler indicates whether kube-scheduler should ignore this
// resource when applying predicates.
IgnoredByScheduler bool
}
// ExtenderTLSConfig contains settings to enable TLS with extender
type ExtenderTLSConfig struct {
// Server should be accessed without verifying the TLS certificate. For testing only.
Insecure bool
// ServerName is passed to the server for SNI and is used in the client to check server
// certificates against. If ServerName is empty, the hostname used to contact the
// server is used.
ServerName string
// Server requires TLS client certificate authentication
CertFile string
// Server requires TLS client certificate authentication
KeyFile string
// Trusted root certificates for server
CAFile string
// CertData holds PEM-encoded bytes (typically read from a client certificate file).
// CertData takes precedence over CertFile
CertData []byte
// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
// KeyData takes precedence over KeyFile
KeyData []byte `datapolicy:"security-key"`
// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
// CAData takes precedence over CAFile
CAData []byte
}
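
// Illustrative configuration sketch (versioned field names are the
// lowerCamelCase counterparts of the struct fields above; not part of the
// upstream file):
//
//	extenders:
//	- urlPrefix: "https://extender.example.com:8888/"
//	  filterVerb: "filter"
//	  prioritizeVerb: "prioritize"
//	  weight: 1
//	  nodeCacheCapable: true
//	  ignorable: false
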
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
v1 "k8s.io/kube-scheduler/config/v1"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
var (
// pluginArgConversionScheme is a scheme with internal and v1 registered,
// used for defaulting/converting typed PluginConfig Args.
// Access via GetPluginArgConversionScheme()
pluginArgConversionScheme *runtime.Scheme
initPluginArgConversionScheme sync.Once
)
func GetPluginArgConversionScheme() *runtime.Scheme {
initPluginArgConversionScheme.Do(func() {
// set up the scheme used for plugin arg conversion
pluginArgConversionScheme = runtime.NewScheme()
utilruntime.Must(AddToScheme(pluginArgConversionScheme))
utilruntime.Must(config.AddToScheme(pluginArgConversionScheme))
})
return pluginArgConversionScheme
}
func Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
if err := autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil {
return err
}
return convertToInternalPluginConfigArgs(out)
}
// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal
// types using a scheme, after applying defaults.
func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
prof := &out.Profiles[i]
for j := range prof.PluginConfig {
args := prof.PluginConfig[j].Args
if args == nil {
continue
}
if _, isUnknown := args.(*runtime.Unknown); isUnknown {
continue
}
internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err)
}
prof.PluginConfig[j].Args = internalArgs
}
}
return nil
}
func Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1.KubeSchedulerConfiguration, s conversion.Scope) error {
if err := autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in, out, s); err != nil {
return err
}
return convertToExternalPluginConfigArgs(out)
}
// convertToExternalPluginConfigArgs converts PluginConfig#Args into
// external (versioned) types using a scheme.
func convertToExternalPluginConfigArgs(out *v1.KubeSchedulerConfiguration) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
for j := range out.Profiles[i].PluginConfig {
args := out.Profiles[i].PluginConfig[j].Args
if args.Object == nil {
continue
}
if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown {
continue
}
externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion)
if err != nil {
return err
}
out.Profiles[i].PluginConfig[j].Args.Object = externalArgs
}
}
return nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
v1 "k8s.io/kube-scheduler/config/v1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/utils/ptr"
)
// getDefaultPlugins returns the default set of plugins.
func getDefaultPlugins() *v1.Plugins {
plugins := &v1.Plugins{
MultiPoint: v1.PluginSet{
Enabled: []v1.Plugin{
{Name: names.SchedulingGates},
{Name: names.PrioritySort},
{Name: names.NodeUnschedulable},
{Name: names.NodeName},
{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
{Name: names.NodePorts},
{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
{Name: names.VolumeRestrictions},
{Name: names.NodeVolumeLimits},
{Name: names.VolumeBinding},
{Name: names.VolumeZone},
{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
{Name: names.DefaultPreemption},
{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
{Name: names.DefaultBinder},
},
},
}
applyFeatureGates(plugins)
return plugins
}
func applyFeatureGates(config *v1.Plugins) {
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
applyDynamicResources(config)
}
}
func applyDynamicResources(config *v1.Plugins) {
// This plugin should come before DefaultPreemption because if
// there is a problem with a Pod and PostFilter gets called to
// resolve the problem, it is better to first deallocate an
// idle ResourceClaim than it is to evict some Pod that might
// be doing useful work.
for i := range config.MultiPoint.Enabled {
if config.MultiPoint.Enabled[i].Name == names.DefaultPreemption {
extended := make([]v1.Plugin, 0, len(config.MultiPoint.Enabled)+1)
extended = append(extended, config.MultiPoint.Enabled[:i]...)
extended = append(extended, v1.Plugin{Name: names.DynamicResources})
extended = append(extended, config.MultiPoint.Enabled[i:]...)
config.MultiPoint.Enabled = extended
break
}
}
}
// mergePlugins merges the custom set into the given default one, handling disabled sets.
func mergePlugins(logger klog.Logger, defaultPlugins, customPlugins *v1.Plugins) *v1.Plugins {
if customPlugins == nil {
return defaultPlugins
}
defaultPlugins.MultiPoint = mergePluginSet(logger, defaultPlugins.MultiPoint, customPlugins.MultiPoint)
defaultPlugins.PreEnqueue = mergePluginSet(logger, defaultPlugins.PreEnqueue, customPlugins.PreEnqueue)
defaultPlugins.QueueSort = mergePluginSet(logger, defaultPlugins.QueueSort, customPlugins.QueueSort)
defaultPlugins.PreFilter = mergePluginSet(logger, defaultPlugins.PreFilter, customPlugins.PreFilter)
defaultPlugins.Filter = mergePluginSet(logger, defaultPlugins.Filter, customPlugins.Filter)
defaultPlugins.PostFilter = mergePluginSet(logger, defaultPlugins.PostFilter, customPlugins.PostFilter)
defaultPlugins.PreScore = mergePluginSet(logger, defaultPlugins.PreScore, customPlugins.PreScore)
defaultPlugins.Score = mergePluginSet(logger, defaultPlugins.Score, customPlugins.Score)
defaultPlugins.Reserve = mergePluginSet(logger, defaultPlugins.Reserve, customPlugins.Reserve)
defaultPlugins.Permit = mergePluginSet(logger, defaultPlugins.Permit, customPlugins.Permit)
defaultPlugins.PreBind = mergePluginSet(logger, defaultPlugins.PreBind, customPlugins.PreBind)
defaultPlugins.Bind = mergePluginSet(logger, defaultPlugins.Bind, customPlugins.Bind)
defaultPlugins.PostBind = mergePluginSet(logger, defaultPlugins.PostBind, customPlugins.PostBind)
return defaultPlugins
}
type pluginIndex struct {
index int
plugin v1.Plugin
}
func mergePluginSet(logger klog.Logger, defaultPluginSet, customPluginSet v1.PluginSet) v1.PluginSet {
disabledPlugins := sets.New[string]()
enabledCustomPlugins := make(map[string]pluginIndex)
// replacedPluginIndex is the set of indices of custom plugins that have replaced default plugins.
replacedPluginIndex := sets.New[int]()
var disabled []v1.Plugin
for _, disabledPlugin := range customPluginSet.Disabled {
// if the user is manually disabling any (or all, with "*") default plugins for an extension point,
// we need to track that so that the MultiPoint extension logic in the framework can know to skip
// inserting unspecified default plugins to this point.
disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name})
disabledPlugins.Insert(disabledPlugin.Name)
}
// With MultiPoint, the default plugin set may now also carry disabled plugins.
// For example, we enable PluginX with Filter+Score through MultiPoint but disable its Score plugin by default.
for _, disabledPlugin := range defaultPluginSet.Disabled {
disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name})
disabledPlugins.Insert(disabledPlugin.Name)
}
for index, enabledPlugin := range customPluginSet.Enabled {
enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin}
}
var enabledPlugins []v1.Plugin
if !disabledPlugins.Has("*") {
for _, defaultEnabledPlugin := range defaultPluginSet.Enabled {
if disabledPlugins.Has(defaultEnabledPlugin.Name) {
continue
}
// The default plugin is explicitly re-configured, update the default plugin accordingly.
if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok {
logger.Info("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name)
// Update the default plugin in place to preserve order.
defaultEnabledPlugin = customPlugin.plugin
replacedPluginIndex.Insert(customPlugin.index)
}
enabledPlugins = append(enabledPlugins, defaultEnabledPlugin)
}
}
// Append all the custom plugins which haven't replaced any default plugins.
// Note: duplicated custom plugins will still be appended here.
// If so, the instantiation of scheduler framework will detect it and abort.
for index, plugin := range customPluginSet.Enabled {
if !replacedPluginIndex.Has(index) {
enabledPlugins = append(enabledPlugins, plugin)
}
}
return v1.PluginSet{Enabled: enabledPlugins, Disabled: disabled}
}
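
// Illustrative walkthrough (derived from the implementation above, not part
// of the upstream file): with default Enabled=[A, B], custom
// Enabled=[B(weight:5), C], and custom Disabled=[A], the result is
// Enabled=[B(weight:5), C]: A is dropped, B is overridden in place
// (preserving default order), and C is appended afterwards.
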
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/util/feature"
componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/utils/ptr"
)
var defaultResourceSpec = []configv1.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
{Name: string(v1.ResourceMemory), Weight: 1},
}
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func pluginsNames(p *configv1.Plugins) []string {
if p == nil {
return nil
}
extensions := []configv1.PluginSet{
p.MultiPoint,
p.PreFilter,
p.Filter,
p.PostFilter,
p.Reserve,
p.PreScore,
p.Score,
p.PreBind,
p.Bind,
p.PostBind,
p.Permit,
p.PreEnqueue,
p.QueueSort,
}
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return sets.List(n)
}
func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSchedulerProfile) {
// Set default plugins.
prof.Plugins = mergePlugins(logger, getDefaultPlugins(), prof.Plugins)
// Set default plugin configs.
scheme := GetPluginArgConversionScheme()
existingConfigs := sets.New[string]()
for j := range prof.PluginConfig {
existingConfigs.Insert(prof.PluginConfig[j].Name)
args := prof.PluginConfig[j].Args.Object
if _, isUnknown := args.(*runtime.Unknown); isUnknown {
continue
}
scheme.Default(args)
}
// Append default configs for plugins that didn't have one explicitly set.
for _, name := range pluginsNames(prof.Plugins) {
if existingConfigs.Has(name) {
continue
}
gvk := configv1.SchemeGroupVersion.WithKind(name + "Args")
args, err := scheme.New(gvk)
if err != nil {
// This plugin is out-of-tree or doesn't require configuration.
continue
}
scheme.Default(args)
args.GetObjectKind().SetGroupVersionKind(gvk)
prof.PluginConfig = append(prof.PluginConfig, configv1.PluginConfig{
Name: name,
Args: runtime.RawExtension{Object: args},
})
}
}
// SetDefaults_KubeSchedulerConfiguration sets additional defaults
func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfiguration) {
logger := klog.TODO() // called by generated code that doesn't pass a logger. See #115724
if obj.Parallelism == nil {
obj.Parallelism = ptr.To[int32](16)
}
if len(obj.Profiles) == 0 {
obj.Profiles = append(obj.Profiles, configv1.KubeSchedulerProfile{})
}
// Only apply a default scheduler name when there is a single profile.
// Validation will ensure that every profile has a non-empty unique name.
if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil {
obj.Profiles[0].SchedulerName = ptr.To(v1.DefaultSchedulerName)
}
// Add the default set of plugins and apply the configuration.
for i := range obj.Profiles {
prof := &obj.Profiles[i]
setDefaults_KubeSchedulerProfile(logger, prof)
}
if obj.PercentageOfNodesToScore == nil {
obj.PercentageOfNodesToScore = ptr.To[int32](config.DefaultPercentageOfNodesToScore)
}
if len(obj.LeaderElection.ResourceLock) == 0 {
// Use lease-based leader election to reduce cost.
// We migrated to the EndpointsLease lock in 1.17 and, starting in 1.20,
// migrated to the Lease lock.
obj.LeaderElection.ResourceLock = "leases"
}
if len(obj.LeaderElection.ResourceNamespace) == 0 {
obj.LeaderElection.ResourceNamespace = configv1.SchedulerDefaultLockObjectNamespace
}
if len(obj.LeaderElection.ResourceName) == 0 {
obj.LeaderElection.ResourceName = configv1.SchedulerDefaultLockObjectName
}
if len(obj.ClientConnection.ContentType) == 0 {
obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf"
}
// The scheduler has an opinion about QPS/Burst: it sets specific defaults for itself instead of the generic client settings.
if obj.ClientConnection.QPS == 0.0 {
obj.ClientConnection.QPS = 50.0
}
if obj.ClientConnection.Burst == 0 {
obj.ClientConnection.Burst = 100
}
// Use the default LeaderElectionConfiguration options
componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection)
if obj.PodInitialBackoffSeconds == nil {
obj.PodInitialBackoffSeconds = ptr.To[int64](1)
}
if obj.PodMaxBackoffSeconds == nil {
obj.PodMaxBackoffSeconds = ptr.To[int64](10)
}
// Enable profiling by default in the scheduler
if obj.EnableProfiling == nil {
obj.EnableProfiling = ptr.To(true)
}
// Enable contention profiling by default if profiling is enabled
if *obj.EnableProfiling && obj.EnableContentionProfiling == nil {
obj.EnableContentionProfiling = ptr.To(true)
}
}
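// illustrateConfigDefaulting is a hedged sketch (not part of the original
// file) showing the effect of defaulting on a zero-value configuration: one
// profile named v1.DefaultSchedulerName is added, parallelism becomes 16, and
// client QPS/Burst become 50/100.
func illustrateConfigDefaulting() *configv1.KubeSchedulerConfiguration {
obj := &configv1.KubeSchedulerConfiguration{}
SetDefaults_KubeSchedulerConfiguration(obj)
// *obj.Parallelism == 16
// *obj.Profiles[0].SchedulerName == v1.DefaultSchedulerName
return obj
}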
func SetDefaults_DefaultPreemptionArgs(obj *configv1.DefaultPreemptionArgs) {
if obj.MinCandidateNodesPercentage == nil {
obj.MinCandidateNodesPercentage = ptr.To[int32](10)
}
if obj.MinCandidateNodesAbsolute == nil {
obj.MinCandidateNodesAbsolute = ptr.To[int32](100)
}
}
func SetDefaults_InterPodAffinityArgs(obj *configv1.InterPodAffinityArgs) {
if obj.HardPodAffinityWeight == nil {
obj.HardPodAffinityWeight = ptr.To[int32](1)
}
}
func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) {
if obj.BindTimeoutSeconds == nil {
obj.BindTimeoutSeconds = ptr.To[int64](600)
}
if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring) {
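// The default shape prefers less-utilized nodes: the score decreases
// linearly from MaxCustomPriorityScore at 0% utilization to 0 at 100%.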
obj.Shape = []configv1.UtilizationShapePoint{
{
Utilization: 0,
Score: int32(config.MaxCustomPriorityScore),
},
{
Utilization: 100,
Score: 0,
},
}
}
}
func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *configv1.NodeResourcesBalancedAllocationArgs) {
if len(obj.Resources) == 0 {
obj.Resources = defaultResourceSpec
return
}
// If the weight is not set, or is explicitly set to 0, apply the default weight (1) instead.
for i := range obj.Resources {
if obj.Resources[i].Weight == 0 {
obj.Resources[i].Weight = 1
}
}
}
func SetDefaults_PodTopologySpreadArgs(obj *configv1.PodTopologySpreadArgs) {
if obj.DefaultingType == "" {
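// SystemDefaulting applies the scheduler's built-in spreading constraints
// (hostname and zone topologies) when a pod defines none.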
obj.DefaultingType = configv1.SystemDefaulting
}
}
func SetDefaults_NodeResourcesFitArgs(obj *configv1.NodeResourcesFitArgs) {
if obj.ScoringStrategy == nil {
obj.ScoringStrategy = &configv1.ScoringStrategy{
Type: configv1.ScoringStrategyType(config.LeastAllocated),
Resources: defaultResourceSpec,
}
}
if len(obj.ScoringStrategy.Resources) == 0 {
// If no resources are specified, use the default set.
obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...)
}
for i := range obj.ScoringStrategy.Resources {
if obj.ScoringStrategy.Resources[i].Weight == 0 {
obj.ScoringStrategy.Resources[i].Weight = 1
}
}
}
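// illustrateFitDefaults is a hedged sketch (not part of the original file):
// with no scoring strategy set, defaulting selects LeastAllocated over the
// default cpu and memory resources, each with weight 1.
func illustrateFitDefaults() configv1.ScoringStrategyType {
obj := &configv1.NodeResourcesFitArgs{}
SetDefaults_NodeResourcesFitArgs(obj)
return obj.ScoringStrategy.Type // "LeastAllocated"
}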
func SetDefaults_DynamicResourcesArgs(obj *configv1.DynamicResourcesArgs) {
if obj.FilterTimeout == nil && feature.DefaultFeatureGate.Enabled(features.DRASchedulerFilterTimeout) {
obj.FilterTimeout = &metav1.Duration{Duration: configv1.DynamicResourcesFilterTimeoutDefault}
}
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/kube-scheduler/config/v1"
)
// GroupName is the group name used in this package
const GroupName = v1.GroupName
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = v1.SchemeGroupVersion
var (
// localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
// defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &v1.SchemeBuilder
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
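// Usage sketch (illustrative, not part of the original file): callers build a
// runtime.Scheme and register this group/version before decoding config:
//
//	scheme := runtime.NewScheme()
//	utilruntime.Must(AddToScheme(scheme))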
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
v1alpha1 "k8s.io/component-base/config/v1alpha1"
configv1 "k8s.io/kube-scheduler/config/v1"
config "k8s.io/kubernetes/pkg/scheduler/apis/config"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
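// A typical call site (illustrative): RegisterConversions is invoked via
// localSchemeBuilder during AddToScheme, so direct calls are only needed when
// assembling a custom scheme:
//
//	s := runtime.NewScheme()
//	if err := RegisterConversions(s); err != nil {
//		// handle registration error
//	}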
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*configv1.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*configv1.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*configv1.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*configv1.DefaultPreemptionArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.DynamicResourcesArgs)(nil), (*config.DynamicResourcesArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DynamicResourcesArgs_To_config_DynamicResourcesArgs(a.(*configv1.DynamicResourcesArgs), b.(*config.DynamicResourcesArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.DynamicResourcesArgs)(nil), (*configv1.DynamicResourcesArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_DynamicResourcesArgs_To_v1_DynamicResourcesArgs(a.(*config.DynamicResourcesArgs), b.(*configv1.DynamicResourcesArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Extender_To_config_Extender(a.(*configv1.Extender), b.(*config.Extender), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*configv1.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_Extender_To_v1_Extender(a.(*config.Extender), b.(*configv1.Extender), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*configv1.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*configv1.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*configv1.ExtenderManagedResource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*configv1.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*configv1.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*configv1.ExtenderTLSConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*configv1.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*configv1.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*configv1.InterPodAffinityArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*configv1.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerProfile)(nil), (*configv1.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*configv1.KubeSchedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*configv1.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*configv1.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*configv1.NodeAffinityArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*configv1.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*configv1.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*configv1.NodeResourcesBalancedAllocationArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*configv1.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*configv1.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*configv1.NodeResourcesFitArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Plugin_To_config_Plugin(a.(*configv1.Plugin), b.(*config.Plugin), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*configv1.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_Plugin_To_v1_Plugin(a.(*config.Plugin), b.(*configv1.Plugin), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PluginConfig_To_config_PluginConfig(a.(*configv1.PluginConfig), b.(*config.PluginConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*configv1.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PluginConfig_To_v1_PluginConfig(a.(*config.PluginConfig), b.(*configv1.PluginConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PluginSet_To_config_PluginSet(a.(*configv1.PluginSet), b.(*config.PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*configv1.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PluginSet_To_v1_PluginSet(a.(*config.PluginSet), b.(*configv1.PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Plugins_To_config_Plugins(a.(*configv1.Plugins), b.(*config.Plugins), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.Plugins)(nil), (*configv1.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_Plugins_To_v1_Plugins(a.(*config.Plugins), b.(*configv1.Plugins), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*configv1.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*configv1.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*configv1.PodTopologySpreadArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*configv1.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*configv1.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*configv1.RequestedToCapacityRatioParam), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceSpec_To_config_ResourceSpec(a.(*configv1.ResourceSpec), b.(*config.ResourceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*configv1.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ResourceSpec_To_v1_ResourceSpec(a.(*config.ResourceSpec), b.(*configv1.ResourceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ScoringStrategy_To_config_ScoringStrategy(a.(*configv1.ScoringStrategy), b.(*config.ScoringStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*configv1.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_ScoringStrategy_To_v1_ScoringStrategy(a.(*config.ScoringStrategy), b.(*configv1.ScoringStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*configv1.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*configv1.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*configv1.UtilizationShapePoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*configv1.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*configv1.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*configv1.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*configv1.VolumeBindingArgs), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*configv1.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*configv1.KubeSchedulerConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*configv1.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*configv1.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
return err
}
return nil
}
// Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function.
func Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error {
return autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s)
}
func autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *configv1.DefaultPreemptionArgs, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
return err
}
if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
return err
}
return nil
}
// Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs is an autogenerated conversion function.
func Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *configv1.DefaultPreemptionArgs, s conversion.Scope) error {
return autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in, out, s)
}
func autoConvert_v1_DynamicResourcesArgs_To_config_DynamicResourcesArgs(in *configv1.DynamicResourcesArgs, out *config.DynamicResourcesArgs, s conversion.Scope) error {
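// conversion-gen emits a direct unsafe.Pointer cast when the source and
// destination types share an identical memory layout, avoiding a copy.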
out.FilterTimeout = (*metav1.Duration)(unsafe.Pointer(in.FilterTimeout))
return nil
}
// Convert_v1_DynamicResourcesArgs_To_config_DynamicResourcesArgs is an autogenerated conversion function.
func Convert_v1_DynamicResourcesArgs_To_config_DynamicResourcesArgs(in *configv1.DynamicResourcesArgs, out *config.DynamicResourcesArgs, s conversion.Scope) error {
return autoConvert_v1_DynamicResourcesArgs_To_config_DynamicResourcesArgs(in, out, s)
}
func autoConvert_config_DynamicResourcesArgs_To_v1_DynamicResourcesArgs(in *config.DynamicResourcesArgs, out *configv1.DynamicResourcesArgs, s conversion.Scope) error {
out.FilterTimeout = (*metav1.Duration)(unsafe.Pointer(in.FilterTimeout))
return nil
}
// Convert_config_DynamicResourcesArgs_To_v1_DynamicResourcesArgs is an autogenerated conversion function.
func Convert_config_DynamicResourcesArgs_To_v1_DynamicResourcesArgs(in *config.DynamicResourcesArgs, out *configv1.DynamicResourcesArgs, s conversion.Scope) error {
return autoConvert_config_DynamicResourcesArgs_To_v1_DynamicResourcesArgs(in, out, s)
}
func autoConvert_v1_Extender_To_config_Extender(in *configv1.Extender, out *config.Extender, s conversion.Scope) error {
out.URLPrefix = in.URLPrefix
out.FilterVerb = in.FilterVerb
out.PreemptVerb = in.PreemptVerb
out.PrioritizeVerb = in.PrioritizeVerb
out.Weight = in.Weight
out.BindVerb = in.BindVerb
out.EnableHTTPS = in.EnableHTTPS
out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig))
out.HTTPTimeout = in.HTTPTimeout
out.NodeCacheCapable = in.NodeCacheCapable
out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))
out.Ignorable = in.Ignorable
return nil
}
// Convert_v1_Extender_To_config_Extender is an autogenerated conversion function.
func Convert_v1_Extender_To_config_Extender(in *configv1.Extender, out *config.Extender, s conversion.Scope) error {
return autoConvert_v1_Extender_To_config_Extender(in, out, s)
}
func autoConvert_config_Extender_To_v1_Extender(in *config.Extender, out *configv1.Extender, s conversion.Scope) error {
out.URLPrefix = in.URLPrefix
out.FilterVerb = in.FilterVerb
out.PreemptVerb = in.PreemptVerb
out.PrioritizeVerb = in.PrioritizeVerb
out.Weight = in.Weight
out.BindVerb = in.BindVerb
out.EnableHTTPS = in.EnableHTTPS
out.TLSConfig = (*configv1.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig))
out.HTTPTimeout = in.HTTPTimeout
out.NodeCacheCapable = in.NodeCacheCapable
out.ManagedResources = *(*[]configv1.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))
out.Ignorable = in.Ignorable
return nil
}
// Convert_config_Extender_To_v1_Extender is an autogenerated conversion function.
func Convert_config_Extender_To_v1_Extender(in *config.Extender, out *configv1.Extender, s conversion.Scope) error {
return autoConvert_config_Extender_To_v1_Extender(in, out, s)
}
func autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *configv1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
out.Name = in.Name
out.IgnoredByScheduler = in.IgnoredByScheduler
return nil
}
// Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function.
func Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *configv1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
return autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s)
}
func autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *configv1.ExtenderManagedResource, s conversion.Scope) error {
out.Name = in.Name
out.IgnoredByScheduler = in.IgnoredByScheduler
return nil
}
// Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource is an autogenerated conversion function.
func Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *configv1.ExtenderManagedResource, s conversion.Scope) error {
return autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in, out, s)
}
func autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *configv1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
out.Insecure = in.Insecure
out.ServerName = in.ServerName
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
out.CAFile = in.CAFile
out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
return nil
}
// Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function.
func Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *configv1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
return autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s)
}
func autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *configv1.ExtenderTLSConfig, s conversion.Scope) error {
out.Insecure = in.Insecure
out.ServerName = in.ServerName
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
out.CAFile = in.CAFile
out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
return nil
}
// Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig is an autogenerated conversion function.
func Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *configv1.ExtenderTLSConfig, s conversion.Scope) error {
return autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in, out, s)
}
func autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
return err
}
out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods
return nil
}
// Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function.
func Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
return autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s)
}
func autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *configv1.InterPodAffinityArgs, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
return err
}
out.IgnorePreferredTermsOfExistingPods = in.IgnorePreferredTermsOfExistingPods
return nil
}
// Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs is an autogenerated conversion function.
func Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *configv1.InterPodAffinityArgs, s conversion.Scope) error {
return autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in, out, s)
}
func autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *configv1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
return err
}
if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
return err
}
if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
return err
}
out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
if err := metav1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
return err
}
if err := metav1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
return err
}
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]config.KubeSchedulerProfile, len(*in))
for i := range *in {
if err := Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders))
out.DelayCacheUntilActive = in.DelayCacheUntilActive
return nil
}
func autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *configv1.KubeSchedulerConfiguration, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
return err
}
if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
return err
}
if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
return err
}
if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
return err
}
out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
if err := metav1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
return err
}
if err := metav1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
return err
}
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]configv1.KubeSchedulerProfile, len(*in))
for i := range *in {
if err := Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.Extenders = *(*[]configv1.Extender)(unsafe.Pointer(&in.Extenders))
out.DelayCacheUntilActive = in.DelayCacheUntilActive
return nil
}
func autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *configv1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
if err := metav1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
return err
}
out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = new(config.Plugins)
if err := Convert_v1_Plugins_To_config_Plugins(*in, *out, s); err != nil {
return err
}
} else {
out.Plugins = nil
}
if in.PluginConfig != nil {
in, out := &in.PluginConfig, &out.PluginConfig
*out = make([]config.PluginConfig, len(*in))
for i := range *in {
if err := Convert_v1_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfig = nil
}
return nil
}
// Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function.
func Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *configv1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
return autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s)
}
func autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *configv1.KubeSchedulerProfile, s conversion.Scope) error {
if err := metav1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
return err
}
out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = new(configv1.Plugins)
if err := Convert_config_Plugins_To_v1_Plugins(*in, *out, s); err != nil {
return err
}
} else {
out.Plugins = nil
}
if in.PluginConfig != nil {
in, out := &in.PluginConfig, &out.PluginConfig
*out = make([]configv1.PluginConfig, len(*in))
for i := range *in {
if err := Convert_config_PluginConfig_To_v1_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfig = nil
}
return nil
}
// Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile is an autogenerated conversion function.
func Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *configv1.KubeSchedulerProfile, s conversion.Scope) error {
return autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in, out, s)
}
func autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *configv1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
return nil
}
// Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function.
func Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *configv1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
return autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s)
}
func autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *configv1.NodeAffinityArgs, s conversion.Scope) error {
out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
return nil
}
// Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs is an autogenerated conversion function.
func Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *configv1.NodeAffinityArgs, s conversion.Scope) error {
return autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in, out, s)
}
func autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
func Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
return autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s)
}
func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *configv1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
out.Resources = *(*[]configv1.ResourceSpec)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *configv1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in, out, s)
}
func autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
return nil
}
// Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function.
func Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
return autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s)
}
func autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *configv1.NodeResourcesFitArgs, s conversion.Scope) error {
out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
out.ScoringStrategy = (*configv1.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
return nil
}
// Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs is an autogenerated conversion function.
func Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *configv1.NodeResourcesFitArgs, s conversion.Scope) error {
return autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in, out, s)
}
func autoConvert_v1_Plugin_To_config_Plugin(in *configv1.Plugin, out *config.Plugin, s conversion.Scope) error {
out.Name = in.Name
if err := metav1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil {
return err
}
return nil
}
// Convert_v1_Plugin_To_config_Plugin is an autogenerated conversion function.
func Convert_v1_Plugin_To_config_Plugin(in *configv1.Plugin, out *config.Plugin, s conversion.Scope) error {
return autoConvert_v1_Plugin_To_config_Plugin(in, out, s)
}
func autoConvert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *configv1.Plugin, s conversion.Scope) error {
out.Name = in.Name
if err := metav1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil {
return err
}
return nil
}
// Convert_config_Plugin_To_v1_Plugin is an autogenerated conversion function.
func Convert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *configv1.Plugin, s conversion.Scope) error {
return autoConvert_config_Plugin_To_v1_Plugin(in, out, s)
}
func autoConvert_v1_PluginConfig_To_config_PluginConfig(in *configv1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
// Convert_v1_PluginConfig_To_config_PluginConfig is an autogenerated conversion function.
func Convert_v1_PluginConfig_To_config_PluginConfig(in *configv1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
return autoConvert_v1_PluginConfig_To_config_PluginConfig(in, out, s)
}
func autoConvert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *configv1.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
// Convert_config_PluginConfig_To_v1_PluginConfig is an autogenerated conversion function.
func Convert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *configv1.PluginConfig, s conversion.Scope) error {
return autoConvert_config_PluginConfig_To_v1_PluginConfig(in, out, s)
}
func autoConvert_v1_PluginSet_To_config_PluginSet(in *configv1.PluginSet, out *config.PluginSet, s conversion.Scope) error {
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]config.Plugin, len(*in))
for i := range *in {
if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Enabled = nil
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]config.Plugin, len(*in))
for i := range *in {
if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Disabled = nil
}
return nil
}
// Convert_v1_PluginSet_To_config_PluginSet is an autogenerated conversion function.
func Convert_v1_PluginSet_To_config_PluginSet(in *configv1.PluginSet, out *config.PluginSet, s conversion.Scope) error {
return autoConvert_v1_PluginSet_To_config_PluginSet(in, out, s)
}
func autoConvert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *configv1.PluginSet, s conversion.Scope) error {
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]configv1.Plugin, len(*in))
for i := range *in {
if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Enabled = nil
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]configv1.Plugin, len(*in))
for i := range *in {
if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Disabled = nil
}
return nil
}
// Convert_config_PluginSet_To_v1_PluginSet is an autogenerated conversion function.
func Convert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *configv1.PluginSet, s conversion.Scope) error {
return autoConvert_config_PluginSet_To_v1_PluginSet(in, out, s)
}
func autoConvert_v1_Plugins_To_config_Plugins(in *configv1.Plugins, out *config.Plugins, s conversion.Scope) error {
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
return err
}
if err := Convert_v1_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
return err
}
return nil
}
// Convert_v1_Plugins_To_config_Plugins is an autogenerated conversion function.
func Convert_v1_Plugins_To_config_Plugins(in *configv1.Plugins, out *config.Plugins, s conversion.Scope) error {
return autoConvert_v1_Plugins_To_config_Plugins(in, out, s)
}
func autoConvert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *configv1.Plugins, s conversion.Scope) error {
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Score, &out.Score, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Permit, &out.Permit, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Bind, &out.Bind, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
return err
}
if err := Convert_config_PluginSet_To_v1_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
return err
}
return nil
}
// Convert_config_Plugins_To_v1_Plugins is an autogenerated conversion function.
func Convert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *configv1.Plugins, s conversion.Scope) error {
return autoConvert_config_Plugins_To_v1_Plugins(in, out, s)
}
func autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
return nil
}
// Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function.
func Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
return autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s)
}
func autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *configv1.PodTopologySpreadArgs, s conversion.Scope) error {
out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
out.DefaultingType = configv1.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
return nil
}
// Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs is an autogenerated conversion function.
func Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *configv1.PodTopologySpreadArgs, s conversion.Scope) error {
return autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in, out, s)
}
func autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *configv1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
return nil
}
// Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function.
func Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *configv1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
return autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s)
}
func autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *configv1.RequestedToCapacityRatioParam, s conversion.Scope) error {
out.Shape = *(*[]configv1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
return nil
}
// Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam is an autogenerated conversion function.
func Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *configv1.RequestedToCapacityRatioParam, s conversion.Scope) error {
return autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in, out, s)
}
func autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in *configv1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
out.Name = in.Name
out.Weight = in.Weight
return nil
}
// Convert_v1_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function.
func Convert_v1_ResourceSpec_To_config_ResourceSpec(in *configv1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
return autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in, out, s)
}
func autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *configv1.ResourceSpec, s conversion.Scope) error {
out.Name = in.Name
out.Weight = in.Weight
return nil
}
// Convert_config_ResourceSpec_To_v1_ResourceSpec is an autogenerated conversion function.
func Convert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *configv1.ResourceSpec, s conversion.Scope) error {
return autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in, out, s)
}
func autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in *configv1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
out.Type = config.ScoringStrategyType(in.Type)
out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
return nil
}
// Convert_v1_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function.
func Convert_v1_ScoringStrategy_To_config_ScoringStrategy(in *configv1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
return autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in, out, s)
}
func autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *configv1.ScoringStrategy, s conversion.Scope) error {
out.Type = configv1.ScoringStrategyType(in.Type)
out.Resources = *(*[]configv1.ResourceSpec)(unsafe.Pointer(&in.Resources))
out.RequestedToCapacityRatio = (*configv1.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
return nil
}
// Convert_config_ScoringStrategy_To_v1_ScoringStrategy is an autogenerated conversion function.
func Convert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *configv1.ScoringStrategy, s conversion.Scope) error {
return autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in, out, s)
}
func autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *configv1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
out.Utilization = in.Utilization
out.Score = in.Score
return nil
}
// Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function.
func Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *configv1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
return autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s)
}
func autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *configv1.UtilizationShapePoint, s conversion.Scope) error {
out.Utilization = in.Utilization
out.Score = in.Score
return nil
}
// Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint is an autogenerated conversion function.
func Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *configv1.UtilizationShapePoint, s conversion.Scope) error {
return autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in, out, s)
}
func autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *configv1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
if err := metav1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
return err
}
out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
return nil
}
// Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function.
func Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *configv1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
return autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s)
}
func autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *configv1.VolumeBindingArgs, s conversion.Scope) error {
if err := metav1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
return err
}
out.Shape = *(*[]configv1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
return nil
}
// Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs is an autogenerated conversion function.
func Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *configv1.VolumeBindingArgs, s conversion.Scope) error {
return autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in, out, s)
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
configv1 "k8s.io/kube-scheduler/config/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering: they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&configv1.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*configv1.DefaultPreemptionArgs)) })
scheme.AddTypeDefaultingFunc(&configv1.DynamicResourcesArgs{}, func(obj interface{}) { SetObjectDefaults_DynamicResourcesArgs(obj.(*configv1.DynamicResourcesArgs)) })
scheme.AddTypeDefaultingFunc(&configv1.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*configv1.InterPodAffinityArgs)) })
scheme.AddTypeDefaultingFunc(&configv1.KubeSchedulerConfiguration{}, func(obj interface{}) {
SetObjectDefaults_KubeSchedulerConfiguration(obj.(*configv1.KubeSchedulerConfiguration))
})
scheme.AddTypeDefaultingFunc(&configv1.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) {
SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*configv1.NodeResourcesBalancedAllocationArgs))
})
scheme.AddTypeDefaultingFunc(&configv1.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*configv1.NodeResourcesFitArgs)) })
scheme.AddTypeDefaultingFunc(&configv1.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*configv1.PodTopologySpreadArgs)) })
scheme.AddTypeDefaultingFunc(&configv1.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*configv1.VolumeBindingArgs)) })
return nil
}
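// Illustrative sketch (not part of the generated file): RegisterDefaults is
// typically handed to a SchemeBuilder so decoded objects are defaulted
// automatically; the localSchemeBuilder name below is assumed:
//
//	var localSchemeBuilder = runtime.NewSchemeBuilder(RegisterDefaults)
//
//	func AddToScheme(scheme *runtime.Scheme) error {
//		return localSchemeBuilder.AddToScheme(scheme)
//	}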
func SetObjectDefaults_DefaultPreemptionArgs(in *configv1.DefaultPreemptionArgs) {
SetDefaults_DefaultPreemptionArgs(in)
}
func SetObjectDefaults_DynamicResourcesArgs(in *configv1.DynamicResourcesArgs) {
SetDefaults_DynamicResourcesArgs(in)
}
func SetObjectDefaults_InterPodAffinityArgs(in *configv1.InterPodAffinityArgs) {
SetDefaults_InterPodAffinityArgs(in)
}
func SetObjectDefaults_KubeSchedulerConfiguration(in *configv1.KubeSchedulerConfiguration) {
SetDefaults_KubeSchedulerConfiguration(in)
}
func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *configv1.NodeResourcesBalancedAllocationArgs) {
SetDefaults_NodeResourcesBalancedAllocationArgs(in)
}
func SetObjectDefaults_NodeResourcesFitArgs(in *configv1.NodeResourcesFitArgs) {
SetDefaults_NodeResourcesFitArgs(in)
}
func SetObjectDefaults_PodTopologySpreadArgs(in *configv1.PodTopologySpreadArgs) {
SetDefaults_PodTopologySpreadArgs(in)
}
func SetObjectDefaults_VolumeBindingArgs(in *configv1.VolumeBindingArgs) {
SetDefaults_VolumeBindingArgs(in)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"reflect"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/util/feature"
componentbasevalidation "k8s.io/component-base/config/validation"
configv1 "k8s.io/kube-scheduler/config/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
schedfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
)
// ValidateKubeSchedulerConfiguration ensures validation of the KubeSchedulerConfiguration struct
func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) utilerrors.Aggregate {
var errs []error
errs = append(errs, componentbasevalidation.ValidateClientConnectionConfiguration(&cc.ClientConnection, field.NewPath("clientConnection")).ToAggregate())
errs = append(errs, componentbasevalidation.ValidateLeaderElectionConfiguration(&cc.LeaderElection, field.NewPath("leaderElection")).ToAggregate())
// TODO: This can be removed when ResourceLock is not available
// Only ResourceLock values with leases are allowed
if cc.LeaderElection.LeaderElect && cc.LeaderElection.ResourceLock != "leases" {
leaderElectionPath := field.NewPath("leaderElection")
errs = append(errs, field.Invalid(leaderElectionPath.Child("resourceLock"), cc.LeaderElection.ResourceLock, `resourceLock value must be "leases"`))
}
profilesPath := field.NewPath("profiles")
if cc.Parallelism <= 0 {
errs = append(errs, field.Invalid(field.NewPath("parallelism"), cc.Parallelism, "should be an integer value greater than zero"))
}
if len(cc.Profiles) == 0 {
errs = append(errs, field.Required(profilesPath, ""))
} else {
existingProfiles := make(map[string]int, len(cc.Profiles))
for i := range cc.Profiles {
profile := &cc.Profiles[i]
path := profilesPath.Index(i)
errs = append(errs, validateKubeSchedulerProfile(path, cc.APIVersion, profile)...)
if idx, ok := existingProfiles[profile.SchedulerName]; ok {
errs = append(errs, field.Duplicate(path.Child("schedulerName"), profilesPath.Index(idx).Child("schedulerName")))
}
existingProfiles[profile.SchedulerName] = i
}
errs = append(errs, validateCommonQueueSort(profilesPath, cc.Profiles)...)
}
errs = append(errs, validatePercentageOfNodesToScore(field.NewPath("percentageOfNodesToScore"), cc.PercentageOfNodesToScore))
if cc.PodInitialBackoffSeconds <= 0 {
errs = append(errs, field.Invalid(field.NewPath("podInitialBackoffSeconds"),
cc.PodInitialBackoffSeconds, "must be greater than 0"))
}
if cc.PodMaxBackoffSeconds < cc.PodInitialBackoffSeconds {
errs = append(errs, field.Invalid(field.NewPath("podMaxBackoffSeconds"),
cc.PodMaxBackoffSeconds, "must be greater than or equal to PodInitialBackoffSeconds"))
}
errs = append(errs, validateExtenders(field.NewPath("extenders"), cc.Extenders)...)
return utilerrors.Flatten(utilerrors.NewAggregate(errs))
}
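// Illustrative usage sketch: cfg is an assumed, already-decoded
// *config.KubeSchedulerConfiguration; the aggregate is surfaced as one error:
//
//	if err := ValidateKubeSchedulerConfiguration(cfg); err != nil {
//		return fmt.Errorf("invalid KubeSchedulerConfiguration: %w", err)
//	}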
func validatePercentageOfNodesToScore(path *field.Path, percentageOfNodesToScore *int32) error {
if percentageOfNodesToScore != nil {
if *percentageOfNodesToScore < 0 || *percentageOfNodesToScore > 100 {
return field.Invalid(path, *percentageOfNodesToScore, "not in valid range [0-100]")
}
}
return nil
}
type invalidPlugins struct {
schemeGroupVersion string
plugins []string
}
// invalidPluginsByVersion maintains a list of removed/deprecated plugins in each version.
// Remember to add an entry to that list when creating a new component config
// version (even if the list of invalid plugins is empty).
var invalidPluginsByVersion = []invalidPlugins{
{
schemeGroupVersion: configv1.SchemeGroupVersion.String(),
plugins: []string{
"AzureDiskLimits",
"CinderLimits",
"EBSLimits",
"GCEPDLimits",
},
},
}
// isPluginInvalid checks if a given plugin was removed/deprecated in the given component
// config version or earlier.
func isPluginInvalid(apiVersion string, name string) (bool, string) {
for _, dp := range invalidPluginsByVersion {
for _, plugin := range dp.plugins {
if name == plugin {
return true, dp.schemeGroupVersion
}
}
if apiVersion == dp.schemeGroupVersion {
break
}
}
return false, ""
}
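// For example, given the table above, enabling "EBSLimits" in a config at
// version "kubescheduler.config.k8s.io/v1" makes isPluginInvalid return
// (true, "kubescheduler.config.k8s.io/v1"), while a plugin that was never
// removed yields (false, "").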
func validatePluginSetForInvalidPlugins(path *field.Path, apiVersion string, ps config.PluginSet) []error {
var errs []error
for i, plugin := range ps.Enabled {
if invalid, invalidVersion := isPluginInvalid(apiVersion, plugin.Name); invalid {
errs = append(errs, field.Invalid(path.Child("enabled").Index(i), plugin.Name, fmt.Sprintf("was invalid in version %q (KubeSchedulerConfiguration is version %q)", invalidVersion, apiVersion)))
}
}
return errs
}
func validateKubeSchedulerProfile(path *field.Path, apiVersion string, profile *config.KubeSchedulerProfile) []error {
var errs []error
if len(profile.SchedulerName) == 0 {
errs = append(errs, field.Required(path.Child("schedulerName"), ""))
}
errs = append(errs, validatePercentageOfNodesToScore(path.Child("percentageOfNodesToScore"), profile.PercentageOfNodesToScore))
errs = append(errs, validatePluginConfig(path, apiVersion, profile)...)
return errs
}
func validatePluginConfig(path *field.Path, apiVersion string, profile *config.KubeSchedulerProfile) []error {
var errs []error
m := map[string]interface{}{
"DefaultPreemption": ValidateDefaultPreemptionArgs,
"InterPodAffinity": ValidateInterPodAffinityArgs,
"NodeAffinity": ValidateNodeAffinityArgs,
"NodeResourcesBalancedAllocation": ValidateNodeResourcesBalancedAllocationArgs,
"NodeResourcesFitArgs": ValidateNodeResourcesFitArgs,
"PodTopologySpread": ValidatePodTopologySpreadArgs,
"VolumeBinding": ValidateVolumeBindingArgs,
"DynamicResources": func(path *field.Path, args *config.DynamicResourcesArgs) error {
return ValidateDynamicResourcesArgs(path, args, schedfeature.NewSchedulerFeaturesFromGates(feature.DefaultFeatureGate))
},
}
if profile.Plugins != nil {
stagesToPluginSet := map[string]config.PluginSet{
"preEnqueue": profile.Plugins.PreEnqueue,
"queueSort": profile.Plugins.QueueSort,
"preFilter": profile.Plugins.PreFilter,
"filter": profile.Plugins.Filter,
"postFilter": profile.Plugins.PostFilter,
"preScore": profile.Plugins.PreScore,
"score": profile.Plugins.Score,
"reserve": profile.Plugins.Reserve,
"permit": profile.Plugins.Permit,
"preBind": profile.Plugins.PreBind,
"bind": profile.Plugins.Bind,
"postBind": profile.Plugins.PostBind,
}
pluginsPath := path.Child("plugins")
for s, p := range stagesToPluginSet {
errs = append(errs, validatePluginSetForInvalidPlugins(
pluginsPath.Child(s), apiVersion, p)...)
}
}
seenPluginConfig := sets.New[string]()
for i := range profile.PluginConfig {
pluginConfigPath := path.Child("pluginConfig").Index(i)
name := profile.PluginConfig[i].Name
args := profile.PluginConfig[i].Args
if seenPluginConfig.Has(name) {
errs = append(errs, field.Duplicate(pluginConfigPath, name))
} else {
seenPluginConfig.Insert(name)
}
if invalid, invalidVersion := isPluginInvalid(apiVersion, name); invalid {
errs = append(errs, field.Invalid(pluginConfigPath, name, fmt.Sprintf("was invalid in version %q (KubeSchedulerConfiguration is version %q)", invalidVersion, apiVersion)))
} else if validateFunc, ok := m[name]; ok {
// type mismatch, no need to validate the `args`.
if reflect.TypeOf(args) != reflect.ValueOf(validateFunc).Type().In(1) {
errs = append(errs, field.Invalid(pluginConfigPath.Child("args"), args, "has to match plugin args"))
} else {
in := []reflect.Value{reflect.ValueOf(pluginConfigPath.Child("args")), reflect.ValueOf(args)}
res := reflect.ValueOf(validateFunc).Call(in)
// It's possible that the validation function returns an Aggregate; just append it here and it will be flattened at the end of the KubeSchedulerConfiguration validation.
if res[0].Interface() != nil {
errs = append(errs, res[0].Interface().(error))
}
}
}
}
return errs
}
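// Illustrative sketch of the reflection dispatch above: the Args value must
// have exactly the type the registered validate function expects, e.g.:
//
//	profile.PluginConfig = []config.PluginConfig{{
//		Name: "InterPodAffinity",
//		Args: &config.InterPodAffinityArgs{HardPodAffinityWeight: 50},
//	}}
//
// Supplying, say, a *config.VolumeBindingArgs under the name "InterPodAffinity"
// would instead be rejected with "has to match plugin args".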
func validateCommonQueueSort(path *field.Path, profiles []config.KubeSchedulerProfile) []error {
var errs []error
var canon config.PluginSet
var queueSortName string
var queueSortArgs runtime.Object
if profiles[0].Plugins != nil {
canon = profiles[0].Plugins.QueueSort
if len(profiles[0].Plugins.QueueSort.Enabled) != 0 {
queueSortName = profiles[0].Plugins.QueueSort.Enabled[0].Name
}
length := len(profiles[0].Plugins.QueueSort.Enabled)
if length > 1 {
errs = append(errs, field.Invalid(path.Index(0).Child("plugins", "queueSort", "Enabled"), length, "only one queue sort plugin can be enabled"))
}
}
for _, cfg := range profiles[0].PluginConfig {
if len(queueSortName) > 0 && cfg.Name == queueSortName {
queueSortArgs = cfg.Args
}
}
for i := 1; i < len(profiles); i++ {
var curr config.PluginSet
if profiles[i].Plugins != nil {
curr = profiles[i].Plugins.QueueSort
}
if !apiequality.Semantic.DeepEqual(canon, curr) {
errs = append(errs, field.Invalid(path.Index(i).Child("plugins", "queueSort"), curr, "must be the same for all profiles"))
}
for _, cfg := range profiles[i].PluginConfig {
if cfg.Name == queueSortName && !apiequality.Semantic.DeepEqual(queueSortArgs, cfg.Args) {
errs = append(errs, field.Invalid(path.Index(i).Child("pluginConfig", "args"), cfg.Args, "queueSort must be the same for all profiles"))
}
}
}
return errs
}
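// For example, if profile 0 enables the "PrioritySort" queue sort plugin, every
// other profile must enable an identical queueSort plugin set and, if it
// configures args for that plugin, identical args as well.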
// validateExtenders validates the configured extenders for the Scheduler
func validateExtenders(fldPath *field.Path, extenders []config.Extender) []error {
var errs []error
binders := 0
extenderManagedResources := sets.New[string]()
for i, extender := range extenders {
path := fldPath.Index(i)
if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 {
errs = append(errs, field.Invalid(path.Child("weight"),
extender.Weight, "must have a positive weight applied to it"))
}
if extender.BindVerb != "" {
binders++
}
for j, resource := range extender.ManagedResources {
managedResourcesPath := path.Child("managedResources").Index(j)
validationErrors := validateExtendedResourceName(managedResourcesPath.Child("name"), v1.ResourceName(resource.Name))
errs = append(errs, validationErrors...)
if extenderManagedResources.Has(resource.Name) {
errs = append(errs, field.Invalid(managedResourcesPath.Child("name"),
resource.Name, "duplicate extender managed resource name"))
}
extenderManagedResources.Insert(resource.Name)
}
}
if binders > 1 {
errs = append(errs, field.Invalid(fldPath, fmt.Sprintf("found %d extenders implementing bind", binders), "only one extender can implement bind"))
}
return errs
}
// validateExtendedResourceName checks whether the specified name is a valid
// extended resource name.
func validateExtendedResourceName(path *field.Path, name v1.ResourceName) []error {
var validationErrors []error
for _, msg := range validation.IsQualifiedName(string(name)) {
validationErrors = append(validationErrors, field.Invalid(path, name, msg))
}
if len(validationErrors) != 0 {
return validationErrors
}
if !v1helper.IsExtendedResourceName(name) {
validationErrors = append(validationErrors, field.Invalid(path, string(name), "is an invalid extended resource name"))
}
return validationErrors
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
)
// supportedScoringStrategyTypes has to be a set of strings for use with field.Unsupported
var supportedScoringStrategyTypes = sets.New(
string(config.LeastAllocated),
string(config.MostAllocated),
string(config.RequestedToCapacityRatio),
)
// ValidateDefaultPreemptionArgs validates that DefaultPreemptionArgs are correct.
func ValidateDefaultPreemptionArgs(path *field.Path, args *config.DefaultPreemptionArgs) error {
var allErrs field.ErrorList
percentagePath := path.Child("minCandidateNodesPercentage")
absolutePath := path.Child("minCandidateNodesAbsolute")
if err := validateMinCandidateNodesPercentage(args.MinCandidateNodesPercentage, percentagePath); err != nil {
allErrs = append(allErrs, err)
}
if err := validateMinCandidateNodesAbsolute(args.MinCandidateNodesAbsolute, absolutePath); err != nil {
allErrs = append(allErrs, err)
}
if args.MinCandidateNodesPercentage == 0 && args.MinCandidateNodesAbsolute == 0 {
allErrs = append(allErrs,
field.Invalid(percentagePath, args.MinCandidateNodesPercentage, "cannot be zero at the same time as minCandidateNodesAbsolute"),
field.Invalid(absolutePath, args.MinCandidateNodesAbsolute, "cannot be zero at the same time as minCandidateNodesPercentage"))
}
return allErrs.ToAggregate()
}
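// For example, {MinCandidateNodesPercentage: 10, MinCandidateNodesAbsolute: 100}
// is accepted, while setting both fields to zero fails because at least one of
// the two candidate bounds must be positive.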
// validateMinCandidateNodesPercentage validates that
// minCandidateNodesPercentage is within the allowed range.
func validateMinCandidateNodesPercentage(minCandidateNodesPercentage int32, p *field.Path) *field.Error {
if minCandidateNodesPercentage < 0 || minCandidateNodesPercentage > 100 {
return field.Invalid(p, minCandidateNodesPercentage, "not in valid range [0, 100]")
}
return nil
}
// validateMinCandidateNodesAbsolute validates that minCandidateNodesAbsolute
// is within the allowed range.
func validateMinCandidateNodesAbsolute(minCandidateNodesAbsolute int32, p *field.Path) *field.Error {
if minCandidateNodesAbsolute < 0 {
return field.Invalid(p, minCandidateNodesAbsolute, "not in valid range [0, inf)")
}
return nil
}
// ValidateInterPodAffinityArgs validates that InterPodAffinityArgs are correct.
func ValidateInterPodAffinityArgs(path *field.Path, args *config.InterPodAffinityArgs) error {
return validateHardPodAffinityWeight(path.Child("hardPodAffinityWeight"), args.HardPodAffinityWeight)
}
// validateHardPodAffinityWeight validates that weight is within allowed range.
func validateHardPodAffinityWeight(path *field.Path, w int32) error {
const (
minHardPodAffinityWeight = 0
maxHardPodAffinityWeight = 100
)
if w < minHardPodAffinityWeight || w > maxHardPodAffinityWeight {
msg := fmt.Sprintf("not in valid range [%d, %d]", minHardPodAffinityWeight, maxHardPodAffinityWeight)
return field.Invalid(path, w, msg)
}
return nil
}
// ValidatePodTopologySpreadArgs validates that PodTopologySpreadArgs are correct.
// It replicates the validation from pkg/apis/core/validation.validateTopologySpreadConstraints
// with an additional check that .labelSelector is nil.
func ValidatePodTopologySpreadArgs(path *field.Path, args *config.PodTopologySpreadArgs) error {
var allErrs field.ErrorList
if err := validateDefaultingType(path.Child("defaultingType"), args.DefaultingType, args.DefaultConstraints); err != nil {
allErrs = append(allErrs, err)
}
defaultConstraintsPath := path.Child("defaultConstraints")
for i, c := range args.DefaultConstraints {
p := defaultConstraintsPath.Index(i)
if c.MaxSkew <= 0 {
f := p.Child("maxSkew")
allErrs = append(allErrs, field.Invalid(f, c.MaxSkew, "not in valid range (0, inf)"))
}
allErrs = append(allErrs, validateTopologyKey(p.Child("topologyKey"), c.TopologyKey)...)
if err := validateWhenUnsatisfiable(p.Child("whenUnsatisfiable"), c.WhenUnsatisfiable); err != nil {
allErrs = append(allErrs, err)
}
if c.LabelSelector != nil {
f := field.Forbidden(p.Child("labelSelector"), "constraint must not define a selector, as they deduced for each pod")
allErrs = append(allErrs, f)
}
if err := validateConstraintNotRepeat(defaultConstraintsPath, args.DefaultConstraints, i); err != nil {
allErrs = append(allErrs, err)
}
}
if len(allErrs) == 0 {
return nil
}
return allErrs.ToAggregate()
}
func validateDefaultingType(p *field.Path, v config.PodTopologySpreadConstraintsDefaulting, constraints []v1.TopologySpreadConstraint) *field.Error {
if v != config.SystemDefaulting && v != config.ListDefaulting {
return field.NotSupported(p, v, []string{string(config.SystemDefaulting), string(config.ListDefaulting)})
}
if v == config.SystemDefaulting && len(constraints) > 0 {
return field.Invalid(p, v, "when .defaultConstraints are not empty")
}
return nil
}
func validateTopologyKey(p *field.Path, v string) field.ErrorList {
var allErrs field.ErrorList
if len(v) == 0 {
allErrs = append(allErrs, field.Required(p, "can not be empty"))
} else {
allErrs = append(allErrs, metav1validation.ValidateLabelName(v, p)...)
}
return allErrs
}
func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error {
supportedScheduleActions := sets.New(string(v1.DoNotSchedule), string(v1.ScheduleAnyway))
if len(v) == 0 {
return field.Required(p, "can not be empty")
}
if !supportedScheduleActions.Has(string(v)) {
return field.NotSupported(p, v, sets.List(supportedScheduleActions))
}
return nil
}
func validateConstraintNotRepeat(path *field.Path, constraints []v1.TopologySpreadConstraint, idx int) *field.Error {
c := &constraints[idx]
for i := range constraints[:idx] {
other := &constraints[i]
if c.TopologyKey == other.TopologyKey && c.WhenUnsatisfiable == other.WhenUnsatisfiable {
return field.Duplicate(path.Index(idx), fmt.Sprintf("{%v, %v}", c.TopologyKey, c.WhenUnsatisfiable))
}
}
return nil
}
func validateFunctionShape(shape []config.UtilizationShapePoint, path *field.Path) field.ErrorList {
const (
minUtilization = 0
maxUtilization = 100
minScore = 0
maxScore = int32(config.MaxCustomPriorityScore)
)
var allErrs field.ErrorList
if len(shape) == 0 {
allErrs = append(allErrs, field.Required(path, "at least one point must be specified"))
return allErrs
}
for i := 1; i < len(shape); i++ {
if shape[i-1].Utilization >= shape[i].Utilization {
allErrs = append(allErrs, field.Invalid(path.Index(i).Child("utilization"), shape[i].Utilization, "values must be sorted in increasing order"))
break
}
}
for i, point := range shape {
if point.Utilization < minUtilization || point.Utilization > maxUtilization {
msg := fmt.Sprintf("not in valid range [%d, %d]", minUtilization, maxUtilization)
allErrs = append(allErrs, field.Invalid(path.Index(i).Child("utilization"), point.Utilization, msg))
}
if point.Score < minScore || point.Score > maxScore {
msg := fmt.Sprintf("not in valid range [%d, %d]", minScore, maxScore)
allErrs = append(allErrs, field.Invalid(path.Index(i).Child("score"), point.Score, msg))
}
}
return allErrs
}
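// Illustrative sketch: a shape accepted by validateFunctionShape, with strictly
// increasing utilization values and scores within [0, MaxCustomPriorityScore]
// (assuming the usual maximum of 10):
//
//	shape := []config.UtilizationShapePoint{
//		{Utilization: 0, Score: 0},
//		{Utilization: 100, Score: 10},
//	}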
func validateResources(resources []config.ResourceSpec, p *field.Path) field.ErrorList {
var allErrs field.ErrorList
for i, resource := range resources {
if resource.Weight <= 0 || resource.Weight > 100 {
msg := fmt.Sprintf("resource weight of %v not in valid range (0, 100]", resource.Name)
allErrs = append(allErrs, field.Invalid(p.Index(i).Child("weight"), resource.Weight, msg))
}
}
return allErrs
}
// ValidateNodeResourcesBalancedAllocationArgs validates that NodeResourcesBalancedAllocationArgs are set correctly.
func ValidateNodeResourcesBalancedAllocationArgs(path *field.Path, args *config.NodeResourcesBalancedAllocationArgs) error {
var allErrs field.ErrorList
seenResources := sets.New[string]()
for i, resource := range args.Resources {
if seenResources.Has(resource.Name) {
allErrs = append(allErrs, field.Duplicate(path.Child("resources").Index(i).Child("name"), resource.Name))
} else {
seenResources.Insert(resource.Name)
}
if resource.Weight != 1 {
allErrs = append(allErrs, field.Invalid(path.Child("resources").Index(i).Child("weight"), resource.Weight, "must be 1"))
}
}
return allErrs.ToAggregate()
}
// ValidateNodeAffinityArgs validates that NodeAffinityArgs are correct.
func ValidateNodeAffinityArgs(path *field.Path, args *config.NodeAffinityArgs) error {
if args.AddedAffinity == nil {
return nil
}
affinity := args.AddedAffinity
var errs []error
if ns := affinity.RequiredDuringSchedulingIgnoredDuringExecution; ns != nil {
_, err := nodeaffinity.NewNodeSelector(ns, field.WithPath(path.Child("addedAffinity", "requiredDuringSchedulingIgnoredDuringExecution")))
if err != nil {
errs = append(errs, err)
}
}
// TODO: Add validation for requiredDuringSchedulingRequiredDuringExecution when it gets added to the API.
if terms := affinity.PreferredDuringSchedulingIgnoredDuringExecution; len(terms) != 0 {
_, err := nodeaffinity.NewPreferredSchedulingTerms(terms, field.WithPath(path.Child("addedAffinity", "preferredDuringSchedulingIgnoredDuringExecution")))
if err != nil {
errs = append(errs, err)
}
}
return errors.Flatten(errors.NewAggregate(errs))
}
// VolumeBindingArgsValidationOptions contains the different settings for validation.
type VolumeBindingArgsValidationOptions struct {
AllowStorageCapacityScoring bool
}
// ValidateVolumeBindingArgs validates that VolumeBindingArgs are set correctly.
func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) error {
return ValidateVolumeBindingArgsWithOptions(path, args, VolumeBindingArgsValidationOptions{
AllowStorageCapacityScoring: utilfeature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
})
}
// ValidateVolumeBindingArgsWithOptions validates that VolumeBindingArgs are set correctly, honoring the given VolumeBindingArgsValidationOptions derived from scheduler features.
func ValidateVolumeBindingArgsWithOptions(path *field.Path, args *config.VolumeBindingArgs, opts VolumeBindingArgsValidationOptions) error {
var allErrs field.ErrorList
if args.BindTimeoutSeconds < 0 {
allErrs = append(allErrs, field.Invalid(path.Child("bindTimeoutSeconds"), args.BindTimeoutSeconds, "invalid BindTimeoutSeconds, should not be a negative value"))
}
if opts.AllowStorageCapacityScoring {
allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...)
} else if args.Shape != nil {
// When the feature is off, return an error if the config is not nil.
// This prevents unexpected configuration from taking effect when the
// feature turns on in the future.
allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate StorageCapacityScoring"))
}
return allErrs.ToAggregate()
}
func ValidateNodeResourcesFitArgs(path *field.Path, args *config.NodeResourcesFitArgs) error {
var allErrs field.ErrorList
resPath := path.Child("ignoredResources")
for i, res := range args.IgnoredResources {
path := resPath.Index(i)
if errs := metav1validation.ValidateLabelName(res, path); len(errs) != 0 {
allErrs = append(allErrs, errs...)
}
}
groupPath := path.Child("ignoredResourceGroups")
for i, group := range args.IgnoredResourceGroups {
path := groupPath.Index(i)
if strings.Contains(group, "/") {
allErrs = append(allErrs, field.Invalid(path, group, "resource group name can't contain '/'"))
}
if errs := metav1validation.ValidateLabelName(group, path); len(errs) != 0 {
allErrs = append(allErrs, errs...)
}
}
strategyPath := path.Child("scoringStrategy")
if args.ScoringStrategy != nil {
if !supportedScoringStrategyTypes.Has(string(args.ScoringStrategy.Type)) {
allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, sets.List(supportedScoringStrategyTypes)))
}
allErrs = append(allErrs, validateResources(args.ScoringStrategy.Resources, strategyPath.Child("resources"))...)
if args.ScoringStrategy.RequestedToCapacityRatio != nil {
allErrs = append(allErrs, validateFunctionShape(args.ScoringStrategy.RequestedToCapacityRatio.Shape, strategyPath.Child("shape"))...)
}
}
if len(allErrs) == 0 {
return nil
}
return allErrs.ToAggregate()
}
// ValidateDynamicResourcesArgs validates that DynamicResourcesArgs are correct.
// In contrast to the REST API, setting fields that have no effect because
// the corresponding feature is disabled is considered an error.
func ValidateDynamicResourcesArgs(path *field.Path, args *config.DynamicResourcesArgs, fts feature.Features) error {
var allErrs field.ErrorList
if fts.EnableDRASchedulerFilterTimeout {
if args.FilterTimeout != nil && args.FilterTimeout.Duration < 0 {
allErrs = append(allErrs, field.Invalid(path.Child("filterTimeout"), args.FilterTimeout, "must be zero or positive"))
}
} else {
if args.FilterTimeout != nil {
allErrs = append(allErrs, field.Forbidden(path.Child("filterTimeout"), "DRASchedulingFilterTimeout feature gate is disabled"))
}
}
return allErrs.ToAggregate()
}
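// For example, when fts.EnableDRASchedulerFilterTimeout is false, any non-nil
// args.FilterTimeout is forbidden; when it is true, only a negative duration
// (e.g. -1s) fails with "must be zero or positive".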
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package config
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs.
func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs {
if in == nil {
return nil
}
out := new(DefaultPreemptionArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DynamicResourcesArgs) DeepCopyInto(out *DynamicResourcesArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.FilterTimeout != nil {
in, out := &in.FilterTimeout, &out.FilterTimeout
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicResourcesArgs.
func (in *DynamicResourcesArgs) DeepCopy() *DynamicResourcesArgs {
if in == nil {
return nil
}
out := new(DynamicResourcesArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DynamicResourcesArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Extender) DeepCopyInto(out *Extender) {
*out = *in
if in.TLSConfig != nil {
in, out := &in.TLSConfig, &out.TLSConfig
*out = new(ExtenderTLSConfig)
(*in).DeepCopyInto(*out)
}
out.HTTPTimeout = in.HTTPTimeout
if in.ManagedResources != nil {
in, out := &in.ManagedResources, &out.ManagedResources
*out = make([]ExtenderManagedResource, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender.
func (in *Extender) DeepCopy() *Extender {
if in == nil {
return nil
}
out := new(Extender)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource.
func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
if in == nil {
return nil
}
out := new(ExtenderManagedResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) {
*out = *in
if in.CertData != nil {
in, out := &in.CertData, &out.CertData
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.KeyData != nil {
in, out := &in.KeyData, &out.KeyData
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.CAData != nil {
in, out := &in.CAData, &out.CAData
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig.
func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig {
if in == nil {
return nil
}
out := new(ExtenderTLSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs.
func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs {
if in == nil {
return nil
}
out := new(InterPodAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.DebuggingConfiguration = in.DebuggingConfiguration
if in.PercentageOfNodesToScore != nil {
in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore
*out = new(int32)
**out = **in
}
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]KubeSchedulerProfile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Extenders != nil {
in, out := &in.Extenders, &out.Extenders
*out = make([]Extender, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration.
func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration {
if in == nil {
return nil
}
out := new(KubeSchedulerConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) {
*out = *in
if in.PercentageOfNodesToScore != nil {
in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore
*out = new(int32)
**out = **in
}
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = new(Plugins)
(*in).DeepCopyInto(*out)
}
if in.PluginConfig != nil {
in, out := &in.PluginConfig, &out.PluginConfig
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile.
func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile {
if in == nil {
return nil
}
out := new(KubeSchedulerProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.AddedAffinity != nil {
in, out := &in.AddedAffinity, &out.AddedAffinity
*out = new(corev1.NodeAffinity)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs.
func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs {
if in == nil {
return nil
}
out := new(NodeAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceSpec, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs.
func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs {
if in == nil {
return nil
}
out := new(NodeResourcesBalancedAllocationArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.IgnoredResources != nil {
in, out := &in.IgnoredResources, &out.IgnoredResources
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IgnoredResourceGroups != nil {
in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ScoringStrategy != nil {
in, out := &in.ScoringStrategy, &out.ScoringStrategy
*out = new(ScoringStrategy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs.
func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs {
if in == nil {
return nil
}
out := new(NodeResourcesFitArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugin) DeepCopyInto(out *Plugin) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
func (in *Plugin) DeepCopy() *Plugin {
if in == nil {
return nil
}
out := new(Plugin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in
if in.Args != nil {
out.Args = in.Args.DeepCopyObject()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
func (in *PluginConfig) DeepCopy() *PluginConfig {
if in == nil {
return nil
}
out := new(PluginConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginSet) DeepCopyInto(out *PluginSet) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]Plugin, len(*in))
copy(*out, *in)
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]Plugin, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
func (in *PluginSet) DeepCopy() *PluginSet {
if in == nil {
return nil
}
out := new(PluginSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugins) DeepCopyInto(out *Plugins) {
*out = *in
in.PreEnqueue.DeepCopyInto(&out.PreEnqueue)
in.QueueSort.DeepCopyInto(&out.QueueSort)
in.PreFilter.DeepCopyInto(&out.PreFilter)
in.Filter.DeepCopyInto(&out.Filter)
in.PostFilter.DeepCopyInto(&out.PostFilter)
in.PreScore.DeepCopyInto(&out.PreScore)
in.Score.DeepCopyInto(&out.Score)
in.Reserve.DeepCopyInto(&out.Reserve)
in.Permit.DeepCopyInto(&out.Permit)
in.PreBind.DeepCopyInto(&out.PreBind)
in.Bind.DeepCopyInto(&out.Bind)
in.PostBind.DeepCopyInto(&out.PostBind)
in.MultiPoint.DeepCopyInto(&out.MultiPoint)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
func (in *Plugins) DeepCopy() *Plugins {
if in == nil {
return nil
}
out := new(Plugins)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.DefaultConstraints != nil {
in, out := &in.DefaultConstraints, &out.DefaultConstraints
*out = make([]corev1.TopologySpreadConstraint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs.
func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs {
if in == nil {
return nil
}
out := new(PodTopologySpreadArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) {
*out = *in
if in.Shape != nil {
in, out := &in.Shape, &out.Shape
*out = make([]UtilizationShapePoint, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam.
func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam {
if in == nil {
return nil
}
out := new(RequestedToCapacityRatioParam)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
func (in *ResourceSpec) DeepCopy() *ResourceSpec {
if in == nil {
return nil
}
out := new(ResourceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceSpec, len(*in))
copy(*out, *in)
}
if in.RequestedToCapacityRatio != nil {
in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio
*out = new(RequestedToCapacityRatioParam)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy.
func (in *ScoringStrategy) DeepCopy() *ScoringStrategy {
if in == nil {
return nil
}
out := new(ScoringStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint.
func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint {
if in == nil {
return nil
}
out := new(UtilizationShapePoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Shape != nil {
in, out := &in.Shape, &out.Shape
*out = make([]UtilizationShapePoint, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs.
func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs {
if in == nil {
return nil
}
out := new(VolumeBindingArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apicache
import (
"context"
v1 "k8s.io/api/core/v1"
fwk "k8s.io/kube-scheduler/framework"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
)
// APICache is responsible for sending API calls' requests through scheduling queue or cache.
type APICache struct {
schedulingQueue internalqueue.SchedulingQueue
cache internalcache.Cache
}
func New(schedulingQueue internalqueue.SchedulingQueue, cache internalcache.Cache) *APICache {
return &APICache{
schedulingQueue: schedulingQueue,
cache: cache,
}
}
// PatchPodStatus sends a patch request for a Pod's status through the scheduling queue.
// The patch may first be applied to the cached Pod object, with the API call then executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
func (c *APICache) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error) {
return c.schedulingQueue.PatchPodStatus(pod, condition, nominatingInfo)
}
// BindPod sends a binding request through the cache. The binding may first be applied to the cached
// Pod object, with the API call then executed asynchronously.
// It returns a channel that can be used to wait for the call's completion.
func (c *APICache) BindPod(binding *v1.Binding) (<-chan error, error) {
return c.cache.BindPod(binding)
}
// WaitOnFinish blocks until the result of an API call is sent to the given onFinish channel
// (as returned by methods such as PatchPodStatus or BindPod).
//
// It returns the error received from the channel.
// It also returns nil if the call was skipped or overwritten,
// as these are considered successful lifecycle outcomes.
func (c *APICache) WaitOnFinish(ctx context.Context, onFinish <-chan error) error {
select {
case err := <-onFinish:
if fwk.IsUnexpectedError(err) {
return err
}
case <-ctx.Done():
return ctx.Err()
}
return nil
}
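// Illustrative usage sketch: binding and ctx are assumed to be supplied by the
// caller; the bind is issued asynchronously and then awaited:
//
//	onFinish, err := apiCache.BindPod(binding)
//	if err != nil {
//		return err
//	}
//	return apiCache.WaitOnFinish(ctx, onFinish)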
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apidispatcher
import (
"context"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// APIDispatcher implements the fwk.APIDispatcher interface and allows for queueing and dispatching API calls asynchronously.
type APIDispatcher struct {
cancel func()
client clientset.Interface
callQueue *callQueue
goroutinesLimiter *goroutinesLimiter
}
// New returns a new APIDispatcher object.
func New(client clientset.Interface, parallelization int, apiCallRelevances fwk.APICallRelevances) *APIDispatcher {
d := APIDispatcher{
client: client,
callQueue: newCallQueue(apiCallRelevances),
goroutinesLimiter: newGoroutinesLimiter(parallelization),
}
return &d
}
// Add adds an API call to the dispatcher's queue. It returns an error if the call is not enqueued
// (e.g., if it's skipped). The caller should handle ErrCallSkipped if returned.
func (ad *APIDispatcher) Add(newAPICall fwk.APICall, opts fwk.APICallOptions) error {
apiCall := &queuedAPICall{
APICall: newAPICall,
onFinish: opts.OnFinish,
}
return ad.callQueue.add(apiCall)
}
// SyncObject performs a two-way synchronization between the given object
// and a pending API call held within the dispatcher and returns the modified object.
func (ad *APIDispatcher) SyncObject(obj metav1.Object) (metav1.Object, error) {
return ad.callQueue.syncObject(obj)
}
// Run starts the main processing loop of the APIDispatcher, which pops calls
// from the queue and dispatches them to worker goroutines for execution.
func (ad *APIDispatcher) Run(logger klog.Logger) {
// Create a new context to allow cancelling the API calls' execution when the APIDispatcher is closed.
ctx, cancel := context.WithCancel(context.Background())
ad.cancel = cancel
go func() {
for {
select {
case <-ctx.Done():
// APIDispatcher is closed.
return
default:
}
// Acquire a goroutine before popping a call. This ordering prevents a popped
// call from waiting (while already counted as in-flight) for a long time.
acquired := ad.goroutinesLimiter.acquire()
if !acquired {
// goroutinesLimiter is closed.
return
}
apiCall, err := ad.callQueue.pop()
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "popping API call from call controller failed")
ad.goroutinesLimiter.release()
continue
}
if apiCall == nil {
// callController is closed.
ad.goroutinesLimiter.release()
return
}
go func() {
defer ad.goroutinesLimiter.release()
startTime := time.Now()
err := apiCall.Execute(ctx, ad.client)
result := metrics.GoroutineResultSuccess
if err != nil {
result = metrics.GoroutineResultError
}
callType := string(apiCall.CallType())
metrics.AsyncAPICallsTotal.WithLabelValues(callType, result).Inc()
metrics.AsyncAPICallDuration.WithLabelValues(callType, result).Observe(time.Since(startTime).Seconds())
ad.callQueue.finalize(apiCall)
apiCall.sendOnFinish(err)
}()
}
}()
}
// Close shuts down the APIDispatcher.
func (ad *APIDispatcher) Close() {
ad.callQueue.close()
ad.goroutinesLimiter.close()
if ad.cancel != nil {
ad.cancel()
}
}
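// Illustrative wiring sketch: client, logger, relevances, call, and onFinish
// are assumed to be supplied by the caller, and 16 is an arbitrary parallelism:
//
//	dispatcher := New(client, 16, relevances)
//	dispatcher.Run(logger)
//	defer dispatcher.Close()
//	if err := dispatcher.Add(call, fwk.APICallOptions{OnFinish: onFinish}); err != nil {
//		// fwk.ErrCallSkipped is an expected, non-fatal outcome here.
//	}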
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apidispatcher
import (
"errors"
"fmt"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/utils/buffer"
)
// queuedAPICall contains an API call and other fields needed for its processing by the callQueue.
type queuedAPICall struct {
fwk.APICall
// onFinish is a channel to which the result of the API call's execution is sent in a non-blocking way.
// The sendOnFinish helper should always be used instead of writing to the channel directly.
onFinish chan<- error
// callID provides a unique identity for this enqueued call.
// It is used during finalization to differentiate this instance from a newer one that may have replaced it
// while this call was in-flight.
callID int
}
// sendOnFinish performs a non-blocking send of an error to the onFinish channel.
// This method should be used instead of operating on the channel directly.
func (qc *queuedAPICall) sendOnFinish(err error) {
if qc.onFinish != nil {
select {
case qc.onFinish <- err:
default:
}
}
}
// callQueue manages the state and lifecycle of API calls.
// It handles all logic related to merging, queuing, and finalization of calls.
//
// For any given object (identified by its UID), at most one API
// call is present in the callsQueue and apiCalls at any time. This prevents race conditions
// and ensures sequential execution for operations on the same object.
//
// When a new call is added for an object that is already being tracked,
// the controller reconciles them by merging the calls, overwriting the older one,
// or skipping the new one entirely based on relevance.
//
// When an existing call is in-flight and a new one for the same object is added,
// the new call is inserted into the callsQueue once the existing one is finished (finalized).
//
// The controller tracks the state of all calls (pending, in-flight)
// to coordinate the asynchronous execution flow managed by the APIDispatcher.
type callQueue struct {
lock sync.RWMutex
cond *sync.Cond
closed bool
// apiCallRelevances maps all possible APICallTypes to a relevance value.
// A more relevant API call should overwrite a less relevant one for the same object.
// Types of the same relevance should only be defined for different object types.
apiCallRelevances fwk.APICallRelevances
// callIDCounter is a monotonically increasing counter used to assign a unique callID
// to each queuedAPICall added to the callQueue.
callIDCounter int
// apiCalls stores details of API calls for objects that are currently enqueued or in-flight.
// Must be used under lock.
apiCalls map[types.UID]*queuedAPICall
// callsQueue is a FIFO queue that stores the object UIDs of (pending) API calls waiting to be executed.
// Must be used under lock.
callsQueue buffer.Ring[types.UID]
// inFlightEntities stores object UIDs of API calls that are currently in-flight
// (i.e., their execution goroutine has been dispatched).
// Must be used under lock.
inFlightEntities sets.Set[types.UID]
}
// newCallQueue returns a new callQueue object.
func newCallQueue(apiCallRelevances fwk.APICallRelevances) *callQueue {
q := callQueue{
apiCallRelevances: apiCallRelevances,
apiCalls: make(map[types.UID]*queuedAPICall),
inFlightEntities: sets.New[types.UID](),
}
q.cond = sync.NewCond(&q.lock)
return &q
}
// isLessRelevant returns true if newCall is less relevant than oldCall.
func (cq *callQueue) isLessRelevant(oldCall, newCall fwk.APICallType) bool {
return cq.apiCallRelevances[newCall] < cq.apiCallRelevances[oldCall]
}
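// For example, with an assumed relevance mapping where a status-patch call maps
// to 1 and a binding call maps to 2, isLessRelevant(binding, statusPatch)
// returns true, so a queued binding is never overwritten by a later patch for
// the same object.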
// merge compares a new API call with an existing one for the same object.
// Based on their relevance and type, it determines whether to skip the new call,
// overwrite the old one, or merge them.
// Must be called under cq.lock.
func (cq *callQueue) merge(oldAPICall, apiCall *queuedAPICall) error {
if cq.isLessRelevant(oldAPICall.CallType(), apiCall.CallType()) {
// The new API call is less relevant than the existing one, so skip it.
err := fmt.Errorf("a more relevant call is already enqueued for this object: %w", fwk.ErrCallSkipped)
apiCall.sendOnFinish(err)
return err
}
if oldAPICall.CallType() != apiCall.CallType() {
// API call types don't match, so we overwrite the old one.
// Update the pending calls metric if the old call is not in-flight.
if !cq.inFlightEntities.Has(oldAPICall.UID()) {
metrics.AsyncAPIPendingCalls.WithLabelValues(string(oldAPICall.CallType())).Dec()
metrics.AsyncAPIPendingCalls.WithLabelValues(string(apiCall.CallType())).Inc()
}
oldAPICall.sendOnFinish(fmt.Errorf("a more relevant call was enqueued for this object: %w", fwk.ErrCallOverwritten))
return nil
}
// Pass the concrete APICall (not the queuedAPICall wrapper) so Merge can type-assert it.
err := apiCall.Merge(oldAPICall.APICall)
if err != nil {
err := fmt.Errorf("failed to merge API calls: %w", err)
apiCall.sendOnFinish(err)
return err
}
oldAPICall.sendOnFinish(fmt.Errorf("a call of the same type was enqueued for this object: %w", fwk.ErrCallOverwritten))
// If we still need the API call or the previous one is in-flight, we proceed.
return nil
}
// enqueue handles the logic of adding a new or updated API call to the controller's state.
//
// First, it checks if the apiCall is a no-op. If it is, and no other call for the same object
// is currently in-flight, the call is skipped and an ErrCallSkipped is returned.
//
// If the call is not skipped, it is stored internally. If no previous call for the object
// was being tracked (oldCallPresent is false), the object's UID is added to the processing
// queue to be picked up by a worker.
//
// This logic ensures only one call per object is active in the queue at a time.
// Must be called under cq.lock.
func (cq *callQueue) enqueue(apiCall *queuedAPICall, oldCallPresent bool) error {
noOp := apiCall.IsNoOp()
if noOp && !cq.inFlightEntities.Has(apiCall.UID()) {
// The call can be skipped, as it is a no-op and the old call is not in-flight.
if oldCallPresent {
cq.removePending(apiCall.UID())
}
apiCall.sendOnFinish(fmt.Errorf("call does not need to be executed because it has no effect: %w", fwk.ErrCallSkipped))
return fmt.Errorf("call does not need to be executed because it has no effect: %w", fwk.ErrCallSkipped)
}
objectUID := apiCall.UID()
cq.apiCalls[objectUID] = apiCall
if oldCallPresent {
// If another API call for this object is already present (i.e., is pending or in-flight),
// don't add this new call to the queue. The new call will be processed
// after the currently in-flight call is finalized.
return nil
}
cq.callsQueue.WriteOne(objectUID)
metrics.AsyncAPIPendingCalls.WithLabelValues(string(apiCall.CallType())).Inc()
cq.cond.Broadcast()
return nil
}
// removeFromQueue removes the objectUID from the queue and returns the recreated queue.
func removeFromQueue(queue *buffer.Ring[types.UID], objectUID types.UID) *buffer.Ring[types.UID] {
newQueue := buffer.NewRing[types.UID](buffer.RingOptions{
InitialSize: queue.Len(),
NormalSize: queue.Cap(),
})
for {
uid, ok := queue.ReadOne()
if !ok {
break
}
if uid != objectUID {
newQueue.WriteOne(uid)
}
}
return newQueue
}
// removePending removes a pending API call for the given objectUID from the queue and its associated data.
// This function is intended to be used on calls that have not yet been popped for execution
// (i.e., are not in-flight).
// Must be called under cq.lock.
func (cq *callQueue) removePending(objectUID types.UID) {
apiCall, ok := cq.apiCalls[objectUID]
if !ok {
return
}
delete(cq.apiCalls, objectUID)
if !cq.inFlightEntities.Has(objectUID) {
cq.callsQueue = *removeFromQueue(&cq.callsQueue, objectUID)
callType := string(apiCall.CallType())
metrics.AsyncAPIPendingCalls.WithLabelValues(callType).Dec()
}
}
// add adds a new apiCall to the queue.
// If an API call for the same object is already present in the queue,
// it tries to skip, overwrite, or merge the calls based on their relevance and type.
func (cq *callQueue) add(apiCall *queuedAPICall) error {
cq.lock.Lock()
defer cq.lock.Unlock()
apiCall.callID = cq.callIDCounter
cq.callIDCounter++
oldAPICall, ok := cq.apiCalls[apiCall.UID()]
if ok {
err := cq.merge(oldAPICall, apiCall)
if err != nil {
return err
}
}
return cq.enqueue(apiCall, ok)
}
// pop pops the first object UID from the queue and returns the corresponding API call details.
// Note: The caller must always call finalize() for the popped apiCall; otherwise, it will result in a memory leak.
func (cq *callQueue) pop() (*queuedAPICall, error) {
cq.lock.Lock()
defer cq.lock.Unlock()
for cq.callsQueue.Len() == 0 {
if cq.closed {
return nil, nil
}
// Wait for an API call to become available.
cq.cond.Wait()
}
objectUID, ok := cq.callsQueue.ReadOne()
if !ok {
return nil, errors.New("api calls queue is empty")
}
apiCall, ok := cq.apiCalls[objectUID]
if !ok {
return nil, fmt.Errorf("object %s is not present in a map with API calls details", objectUID)
}
cq.inFlightEntities.Insert(objectUID)
callType := string(apiCall.CallType())
metrics.AsyncAPIPendingCalls.WithLabelValues(callType).Dec()
return apiCall, nil
}
// finalize handles a completed API call.
// If a new call for the same object arrived while the original was in-flight, it re-queues the object for processing.
// Otherwise, it removes the completed call's details from the queue.
// This method must be called after a call's execution is finished.
func (cq *callQueue) finalize(apiCall *queuedAPICall) {
cq.lock.Lock()
defer cq.lock.Unlock()
objectUID := apiCall.UID()
newAPICall := cq.apiCalls[objectUID]
if newAPICall.callID == apiCall.callID {
// The API call in the map hasn't changed, so we can remove it.
delete(cq.apiCalls, objectUID)
} else {
// The API call in the map has changed, so re-queue the object for the new call to be processed.
cq.callsQueue.WriteOne(objectUID)
callType := string(newAPICall.CallType())
metrics.AsyncAPIPendingCalls.WithLabelValues(callType).Inc()
cq.cond.Broadcast()
}
cq.inFlightEntities.Delete(objectUID)
}
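// A minimal lifecycle sketch for callQueue (illustrative only; executeCall is
// a hypothetical executor step, not part of this file):
//
//	q := newCallQueue(relevances)
//	_ = q.add(apiCall)   // enqueue, or skip/overwrite/merge with an existing call
//	call, err := q.pop() // blocks until a call is available or the queue is closed
//	if err == nil && call != nil {
//		call.sendOnFinish(executeCall(call))
//		q.finalize(call) // must always follow a successful pop
//	}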
// syncObject performs a two-way synchronization between the given object
// and a pending API call for that object, if one exists in the queue, and returns the synced object.
func (cq *callQueue) syncObject(obj metav1.Object) (metav1.Object, error) {
cq.lock.Lock()
defer cq.lock.Unlock()
objectUID := obj.GetUID()
apiCall, ok := cq.apiCalls[objectUID]
if !ok {
return obj, nil
}
syncedObj, err := apiCall.Sync(obj)
noOp := apiCall.IsNoOp()
if noOp && !cq.inFlightEntities.Has(apiCall.UID()) {
// The call can be removed, as the sync resulted in a no-op and the call is not in-flight.
cq.removePending(apiCall.UID())
apiCall.sendOnFinish(fmt.Errorf("call does not need to be executed because after sync it has no effect: %w", fwk.ErrCallSkipped))
}
return syncedObj, err
}
// close shuts down the callQueue.
func (cq *callQueue) close() {
cq.lock.Lock()
defer cq.lock.Unlock()
cq.closed = true
cq.cond.Broadcast()
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apidispatcher
import (
"sync"
)
// goroutinesLimiter limits the number of goroutines that can be dispatched.
type goroutinesLimiter struct {
lock sync.RWMutex
cond *sync.Cond
closed bool
// maxGoroutines is the maximum number of goroutines that can be dispatched concurrently by the APIDispatcher.
maxGoroutines int
// goroutines is the current number of goroutines allowed to run.
// Must be used under lock.
goroutines int
}
// newGoroutinesLimiter returns a new goroutinesLimiter object.
func newGoroutinesLimiter(maxGoroutines int) *goroutinesLimiter {
p := goroutinesLimiter{
maxGoroutines: maxGoroutines,
}
p.cond = sync.NewCond(&p.lock)
return &p
}
// acquire attempts to reserve a goroutine to run.
// It blocks until a goroutine is available.
// It returns true if the goroutine was successfully acquired.
// If the limiter is closed, it returns false.
// The acquired goroutine must be released after use.
func (gl *goroutinesLimiter) acquire() bool {
gl.lock.Lock()
defer gl.lock.Unlock()
for gl.goroutines >= gl.maxGoroutines {
if gl.closed {
return false
}
// Wait for a goroutine to become available.
gl.cond.Wait()
}
gl.goroutines++
return true
}
// release decrements the active goroutine count.
// The goroutine must have been acquired beforehand.
func (gl *goroutinesLimiter) release() {
gl.lock.Lock()
defer gl.lock.Unlock()
gl.goroutines--
gl.cond.Broadcast()
}
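// A minimal usage sketch (illustrative only; runCall is a hypothetical worker
// body):
//
//	gl := newGoroutinesLimiter(16)
//	for {
//		if !gl.acquire() { // returns false once the limiter is closed
//			return
//		}
//		go func() {
//			defer gl.release()
//			runCall()
//		}()
//	}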
// close shuts down the goroutinesLimiter.
func (gl *goroutinesLimiter) close() {
gl.lock.Lock()
defer gl.lock.Unlock()
gl.closed = true
gl.cond.Broadcast()
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"errors"
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
var (
cleanAssumedPeriod = 1 * time.Second
)
// New returns a Cache implementation.
// It automatically starts a goroutine that manages expiration of assumed pods.
// "ttl" is how long an assumed pod may stay in the cache before it expires.
// "ctx" is the context whose cancellation stops the background goroutine.
func New(ctx context.Context, ttl time.Duration, apiDispatcher fwk.APIDispatcher) Cache {
logger := klog.FromContext(ctx)
cache := newCache(ctx, ttl, cleanAssumedPeriod, apiDispatcher)
cache.run(logger)
return cache
}
// nodeInfoListItem holds a NodeInfo pointer and acts as an item in a doubly
// linked list. When a NodeInfo is updated, it goes to the head of the list.
// The items closer to the head are the most recently updated items.
type nodeInfoListItem struct {
info *framework.NodeInfo
next *nodeInfoListItem
prev *nodeInfoListItem
}
type cacheImpl struct {
stop <-chan struct{}
ttl time.Duration
period time.Duration
// This mutex guards all fields within this cache struct.
mu sync.RWMutex
// a set of assumed pod keys.
// The key could further be used to get an entry in podStates.
assumedPods sets.Set[string]
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*nodeInfoListItem
// headNode points to the most recently updated NodeInfo in "nodes". It is the
// head of the linked list.
headNode *nodeInfoListItem
nodeTree *nodeTree
// A map from image name to its ImageStateSummary.
imageStates map[string]*fwk.ImageStateSummary
// apiDispatcher is used for the methods that are expected to send API calls.
// It's non-nil only if the SchedulerAsyncAPICalls feature gate is enabled.
apiDispatcher fwk.APIDispatcher
}
type podState struct {
pod *v1.Pod
// Used by assumed pods to determine expiration.
// If deadline is nil, assumedPod will never expire.
deadline *time.Time
// Used to block the cache from expiring an assumed pod while binding is still in progress.
bindingFinished bool
}
func newCache(ctx context.Context, ttl, period time.Duration, apiDispatcher fwk.APIDispatcher) *cacheImpl {
logger := klog.FromContext(ctx)
return &cacheImpl{
ttl: ttl,
period: period,
stop: ctx.Done(),
nodes: make(map[string]*nodeInfoListItem),
nodeTree: newNodeTree(logger, nil),
assumedPods: sets.New[string](),
podStates: make(map[string]*podState),
imageStates: make(map[string]*fwk.ImageStateSummary),
apiDispatcher: apiDispatcher,
}
}
// newNodeInfoListItem initializes a new nodeInfoListItem.
func newNodeInfoListItem(ni *framework.NodeInfo) *nodeInfoListItem {
return &nodeInfoListItem{
info: ni,
}
}
// moveNodeInfoToHead moves a NodeInfo to the head of "cache.nodes" doubly
// linked list. The head is the most recently updated NodeInfo.
// We assume cache lock is already acquired.
func (cache *cacheImpl) moveNodeInfoToHead(logger klog.Logger, name string) {
ni, ok := cache.nodes[name]
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
return
}
// if the node info list item is already at the head, we are done.
if ni == cache.headNode {
return
}
if ni.prev != nil {
ni.prev.next = ni.next
}
if ni.next != nil {
ni.next.prev = ni.prev
}
if cache.headNode != nil {
cache.headNode.prev = ni
}
ni.next = cache.headNode
ni.prev = nil
cache.headNode = ni
}
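// For example, with a list head->A->B->C, updating B yields head->B->A->C.
// The tail therefore always holds the least recently updated NodeInfo, which
// is what allows UpdateSnapshot to stop iterating as soon as it reaches a
// generation older than the snapshot's.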
// removeNodeInfoFromList removes a NodeInfo from the "cache.nodes" doubly
// linked list.
// We assume cache lock is already acquired.
func (cache *cacheImpl) removeNodeInfoFromList(logger klog.Logger, name string) {
ni, ok := cache.nodes[name]
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "No node info with given name found in the cache", "node", klog.KRef("", name))
return
}
if ni.prev != nil {
ni.prev.next = ni.next
}
if ni.next != nil {
ni.next.prev = ni.prev
}
// if the removed item was at the head, we must update the head.
if ni == cache.headNode {
cache.headNode = ni.next
}
delete(cache.nodes, name)
}
// Dump produces a dump of the current scheduler cache. This is used for
// debugging purposes only and shouldn't be confused with UpdateSnapshot
// function.
// This method is expensive, and should be only used in non-critical path.
func (cache *cacheImpl) Dump() *Dump {
cache.mu.RLock()
defer cache.mu.RUnlock()
nodes := make(map[string]*framework.NodeInfo, len(cache.nodes))
for k, v := range cache.nodes {
nodes[k] = v.info.SnapshotConcrete()
}
return &Dump{
Nodes: nodes,
AssumedPods: cache.assumedPods.Union(nil),
}
}
// UpdateSnapshot takes a snapshot of cached NodeInfo map. This is called at
// beginning of every scheduling cycle.
// The snapshot only includes Nodes that are not deleted at the time this function is called.
// nodeInfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
// This function tracks generation number of NodeInfo and updates only the
// entries of an existing snapshot that have changed after the snapshot was taken.
func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapshot) error {
cache.mu.Lock()
defer cache.mu.Unlock()
// Get the last generation of the snapshot.
snapshotGeneration := nodeSnapshot.generation
// NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added
// or removed from the cache.
updateAllLists := false
// HavePodsWithAffinityNodeInfoList must be re-created if a node changed its
// status from having pods with affinity to NOT having pods with affinity or the other
// way around.
updateNodesHavePodsWithAffinity := false
// HavePodsWithRequiredAntiAffinityNodeInfoList must be re-created if a node changed its
// status from having pods with required anti-affinity to NOT having pods with required
// anti-affinity or the other way around.
updateNodesHavePodsWithRequiredAntiAffinity := false
// usedPVCSet must be re-created whenever the head node generation is greater than
// last snapshot generation.
updateUsedPVCSet := false
// Start from the head of the NodeInfo doubly linked list and update snapshot
// of NodeInfos updated after the last snapshot.
for node := cache.headNode; node != nil; node = node.next {
if node.info.Generation <= snapshotGeneration {
// all the nodes are updated before the existing snapshot. We are done.
break
}
if np := node.info.Node(); np != nil {
existing, ok := nodeSnapshot.nodeInfoMap[np.Name]
if !ok {
updateAllLists = true
existing = &framework.NodeInfo{}
nodeSnapshot.nodeInfoMap[np.Name] = existing
}
clone := node.info.SnapshotConcrete()
// We track nodes that have pods with affinity, here we check if this node changed its
// status from having pods with affinity to NOT having pods with affinity or the other
// way around.
if (len(existing.PodsWithAffinity) > 0) != (len(clone.PodsWithAffinity) > 0) {
updateNodesHavePodsWithAffinity = true
}
if (len(existing.PodsWithRequiredAntiAffinity) > 0) != (len(clone.PodsWithRequiredAntiAffinity) > 0) {
updateNodesHavePodsWithRequiredAntiAffinity = true
}
if !updateUsedPVCSet {
if len(existing.PVCRefCounts) != len(clone.PVCRefCounts) {
updateUsedPVCSet = true
} else {
for pvcKey := range clone.PVCRefCounts {
if _, found := existing.PVCRefCounts[pvcKey]; !found {
updateUsedPVCSet = true
break
}
}
}
}
// We need to preserve the original pointer of the NodeInfo struct since it
// is used in the NodeInfoList, which we may not update.
*existing = *clone
}
}
// Update the snapshot generation with the latest NodeInfo generation.
if cache.headNode != nil {
nodeSnapshot.generation = cache.headNode.info.Generation
}
// Comparing to pods in nodeTree.
// Deleted nodes get removed from the tree, but they might remain in the nodes map
// if they still have non-deleted Pods.
if len(nodeSnapshot.nodeInfoMap) > cache.nodeTree.numNodes {
cache.removeDeletedNodesFromSnapshot(nodeSnapshot)
updateAllLists = true
}
if updateAllLists || updateNodesHavePodsWithAffinity || updateNodesHavePodsWithRequiredAntiAffinity || updateUsedPVCSet {
cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, updateAllLists)
}
if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes {
errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+
", length of NodeInfoMap=%v, length of nodes in cache=%v"+
", trying to recover",
len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes,
len(nodeSnapshot.nodeInfoMap), len(cache.nodes))
logger.Error(nil, errMsg)
// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
// error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
return errors.New(errMsg)
}
return nil
}
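// A sketch of how a scheduling cycle typically drives this (illustrative
// only):
//
//	snapshot := NewEmptySnapshot()
//	// At the beginning of every scheduling cycle:
//	if err := cache.UpdateSnapshot(logger, snapshot); err != nil {
//		// The lists were force-rebuilt; the error is surfaced to the cycle.
//	}
//
// Because NodeInfos are kept in most-recently-updated order, only entries
// with a generation newer than snapshot.generation are copied on each call.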
func (cache *cacheImpl) updateNodeInfoSnapshotList(logger klog.Logger, snapshot *Snapshot, updateAll bool) {
snapshot.havePodsWithAffinityNodeInfoList = make([]fwk.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]fwk.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.usedPVCSet = sets.New[string]()
if updateAll {
// Take a snapshot of the nodes order in the tree
snapshot.nodeInfoList = make([]fwk.NodeInfo, 0, cache.nodeTree.numNodes)
nodesList, err := cache.nodeTree.list()
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Error occurred while retrieving the list of names of the nodes from node tree")
}
for _, nodeName := range nodesList {
if nodeInfo := snapshot.nodeInfoMap[nodeName]; nodeInfo != nil {
snapshot.nodeInfoList = append(snapshot.nodeInfoList, nodeInfo)
if len(nodeInfo.PodsWithAffinity) > 0 {
snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, nodeInfo)
}
if len(nodeInfo.PodsWithRequiredAntiAffinity) > 0 {
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, nodeInfo)
}
for key := range nodeInfo.PVCRefCounts {
snapshot.usedPVCSet.Insert(key)
}
} else {
utilruntime.HandleErrorWithLogger(logger, nil, "Node exists in nodeTree but not in NodeInfoMap, this should not happen", "node", klog.KRef("", nodeName))
}
}
} else {
for _, nodeInfo := range snapshot.nodeInfoList {
if len(nodeInfo.GetPodsWithAffinity()) > 0 {
snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, nodeInfo)
}
if len(nodeInfo.GetPodsWithRequiredAntiAffinity()) > 0 {
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, nodeInfo)
}
for key := range nodeInfo.GetPVCRefCounts() {
snapshot.usedPVCSet.Insert(key)
}
}
}
}
// If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot.
func (cache *cacheImpl) removeDeletedNodesFromSnapshot(snapshot *Snapshot) {
toDelete := len(snapshot.nodeInfoMap) - cache.nodeTree.numNodes
for name := range snapshot.nodeInfoMap {
if toDelete <= 0 {
break
}
if n, ok := cache.nodes[name]; !ok || n.info.Node() == nil {
delete(snapshot.nodeInfoMap, name)
toDelete--
}
}
}
// NodeCount returns the number of nodes in the cache.
// DO NOT use outside of tests.
func (cache *cacheImpl) NodeCount() int {
cache.mu.RLock()
defer cache.mu.RUnlock()
return len(cache.nodes)
}
// PodCount returns the number of pods in the cache (including those from deleted nodes).
// DO NOT use outside of tests.
func (cache *cacheImpl) PodCount() (int, error) {
cache.mu.RLock()
defer cache.mu.RUnlock()
count := 0
for _, n := range cache.nodes {
count += len(n.info.Pods)
}
return count, nil
}
func (cache *cacheImpl) AssumePod(logger klog.Logger, pod *v1.Pod) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
if _, ok := cache.podStates[key]; ok {
return fmt.Errorf("pod %v(%v) is in the cache, so can't be assumed", key, klog.KObj(pod))
}
return cache.addPod(logger, pod, true)
}
func (cache *cacheImpl) FinishBinding(logger klog.Logger, pod *v1.Pod) error {
return cache.finishBinding(logger, pod, time.Now())
}
// finishBinding exists to make tests deterministic by injecting now as an argument
func (cache *cacheImpl) finishBinding(logger klog.Logger, pod *v1.Pod, now time.Time) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
cache.mu.RLock()
defer cache.mu.RUnlock()
logger.V(5).Info("Finished binding for pod, can be expired", "podKey", key, "pod", klog.KObj(pod))
currState, ok := cache.podStates[key]
if ok && cache.assumedPods.Has(key) {
if cache.ttl == time.Duration(0) {
currState.deadline = nil
} else {
dl := now.Add(cache.ttl)
currState.deadline = &dl
}
currState.bindingFinished = true
}
return nil
}
func (cache *cacheImpl) ForgetPod(logger klog.Logger, pod *v1.Pod) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
if ok && currState.pod.Spec.NodeName != pod.Spec.NodeName {
return fmt.Errorf("pod %v(%v) was assumed on %v but assigned to %v", key, klog.KObj(pod), pod.Spec.NodeName, currState.pod.Spec.NodeName)
}
// Only assumed pod can be forgotten.
if ok && cache.assumedPods.Has(key) {
return cache.removePod(logger, pod)
}
return fmt.Errorf("pod %v(%v) wasn't assumed so cannot be forgotten", key, klog.KObj(pod))
}
// Assumes that lock is already acquired.
func (cache *cacheImpl) addPod(logger klog.Logger, pod *v1.Pod, assumePod bool) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
n, ok := cache.nodes[pod.Spec.NodeName]
if !ok {
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[pod.Spec.NodeName] = n
}
n.info.AddPod(pod)
cache.moveNodeInfoToHead(logger, pod.Spec.NodeName)
ps := &podState{
pod: pod,
}
cache.podStates[key] = ps
if assumePod {
cache.assumedPods.Insert(key)
}
return nil
}
// Assumes that lock is already acquired.
func (cache *cacheImpl) updatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error {
if err := cache.removePod(logger, oldPod); err != nil {
return err
}
return cache.addPod(logger, newPod, false)
}
// Assumes that lock is already acquired.
// Removes a pod from the cached node info. If the node information was already
// removed and there are no more pods left in the node, cleans up the node from
// the cache.
func (cache *cacheImpl) removePod(logger klog.Logger, pod *v1.Pod) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
n, ok := cache.nodes[pod.Spec.NodeName]
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Node not found when trying to remove pod", "node", klog.KRef("", pod.Spec.NodeName), "podKey", key, "pod", klog.KObj(pod))
} else {
if err := n.info.RemovePod(logger, pod); err != nil {
return err
}
if len(n.info.Pods) == 0 && n.info.Node() == nil {
cache.removeNodeInfoFromList(logger, pod.Spec.NodeName)
} else {
cache.moveNodeInfoToHead(logger, pod.Spec.NodeName)
}
}
delete(cache.podStates, key)
delete(cache.assumedPods, key)
return nil
}
func (cache *cacheImpl) AddPod(logger klog.Logger, pod *v1.Pod) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
switch {
case ok && cache.assumedPods.Has(key):
// When assuming, we've already added the Pod to the cache.
// Just update it here to make sure the Pod's status is up-to-date.
if err = cache.updatePod(logger, currState.pod, pod); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Error occurred while updating pod")
}
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
// The pod was added to a different node than it was assumed to.
logger.Info("Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
return nil
}
case !ok:
// Pod was expired. We should add it back.
if err = cache.addPod(logger, pod, false); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Error occurred while adding pod")
}
default:
return fmt.Errorf("pod %v(%v) was already in added state", key, klog.KObj(pod))
}
return nil
}
func (cache *cacheImpl) UpdatePod(logger klog.Logger, oldPod, newPod *v1.Pod) error {
key, err := framework.GetPodKey(oldPod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
if !ok {
return fmt.Errorf("pod %v(%v) is not added to scheduler cache, so cannot be updated", key, klog.KObj(oldPod))
}
// An assumed pod won't have Update/Remove event. It needs to have Add event
// before Update event, in which case the state would change from Assumed to Added.
if cache.assumedPods.Has(key) {
return fmt.Errorf("assumed pod %v(%v) should not be updated", key, klog.KObj(oldPod))
}
if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
utilruntime.HandleErrorWithLogger(logger, nil, "Pod updated on a different node than previously added to. Scheduler cache is corrupted and can badly affect scheduling decisions", "podKey", key, "pod", klog.KObj(oldPod))
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
return cache.updatePod(logger, oldPod, newPod)
}
func (cache *cacheImpl) RemovePod(logger klog.Logger, pod *v1.Pod) error {
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
cache.mu.Lock()
defer cache.mu.Unlock()
currState, ok := cache.podStates[key]
if !ok {
return fmt.Errorf("pod %v(%v) is not found in scheduler cache, so cannot be removed from it", key, klog.KObj(pod))
}
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
utilruntime.HandleErrorWithLogger(logger, nil, "Pod was added to a different node than it was assumed", "podKey", key, "pod", klog.KObj(pod), "assumedNode", klog.KRef("", pod.Spec.NodeName), "currentNode", klog.KRef("", currState.pod.Spec.NodeName))
if pod.Spec.NodeName != "" {
// An empty NodeName is possible when the scheduler misses a Delete
// event and it gets the last known state from the informer cache.
utilruntime.HandleErrorWithLogger(logger, nil, "Scheduler cache is corrupted and can badly affect scheduling decisions")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
}
return cache.removePod(logger, currState.pod)
}
func (cache *cacheImpl) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := framework.GetPodKey(pod)
if err != nil {
return false, err
}
cache.mu.RLock()
defer cache.mu.RUnlock()
return cache.assumedPods.Has(key), nil
}
// GetPod might return a pod for which its node has already been deleted from
// the main cache. This is useful to properly process pod update events.
func (cache *cacheImpl) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := framework.GetPodKey(pod)
if err != nil {
return nil, err
}
cache.mu.RLock()
defer cache.mu.RUnlock()
podState, ok := cache.podStates[key]
if !ok {
return nil, fmt.Errorf("pod %v(%v) does not exist in scheduler cache", key, klog.KObj(pod))
}
return podState.pod, nil
}
func (cache *cacheImpl) AddNode(logger klog.Logger, node *v1.Node) *framework.NodeInfo {
cache.mu.Lock()
defer cache.mu.Unlock()
n, ok := cache.nodes[node.Name]
if !ok {
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[node.Name] = n
} else {
cache.removeNodeImageStates(n.info.Node())
}
cache.moveNodeInfoToHead(logger, node.Name)
cache.nodeTree.addNode(logger, node)
cache.addNodeImageStates(node, n.info)
n.info.SetNode(node)
return n.info.SnapshotConcrete()
}
func (cache *cacheImpl) UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo {
cache.mu.Lock()
defer cache.mu.Unlock()
n, ok := cache.nodes[newNode.Name]
if !ok {
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[newNode.Name] = n
cache.nodeTree.addNode(logger, newNode)
} else {
cache.removeNodeImageStates(n.info.Node())
}
cache.moveNodeInfoToHead(logger, newNode.Name)
cache.nodeTree.updateNode(logger, oldNode, newNode)
cache.addNodeImageStates(newNode, n.info)
n.info.SetNode(newNode)
return n.info.SnapshotConcrete()
}
// RemoveNode removes a node from the cache's tree.
// The node might still have pods because their deletion events didn't arrive
// yet. Those pods are considered removed from the cache, as the node tree is
// the source of truth.
// However, we keep a ghost node with the list of pods until all pod deletion
// events have arrived. A ghost node is skipped from snapshots.
func (cache *cacheImpl) RemoveNode(logger klog.Logger, node *v1.Node) error {
cache.mu.Lock()
defer cache.mu.Unlock()
n, ok := cache.nodes[node.Name]
if !ok {
return fmt.Errorf("node %v is not found", node.Name)
}
n.info.RemoveNode()
// We remove NodeInfo for this node only if there aren't any pods on this node.
// We can't do it unconditionally, because notifications about pods are delivered
// in a different watch, and thus can potentially be observed later, even though
// they happened before node removal.
if len(n.info.Pods) == 0 {
cache.removeNodeInfoFromList(logger, node.Name)
} else {
cache.moveNodeInfoToHead(logger, node.Name)
}
if err := cache.nodeTree.removeNode(logger, node); err != nil {
return err
}
cache.removeNodeImageStates(node)
return nil
}
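// Ghost-node example: if RemoveNode(n) arrives while pod p on n is still
// cached, n.info.Node() becomes nil but the NodeInfo stays in cache.nodes and
// is skipped by snapshots. Once the deletion event for p arrives, removePod
// sees len(Pods) == 0 and Node() == nil and drops the NodeInfo entirely.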
// addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in
// scheduler cache. This function assumes the lock to scheduler cache has been acquired.
func (cache *cacheImpl) addNodeImageStates(node *v1.Node, nodeInfo *framework.NodeInfo) {
newSum := make(map[string]*fwk.ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
// update the entry in imageStates
state, ok := cache.imageStates[name]
if !ok {
state = &fwk.ImageStateSummary{
Size: image.SizeBytes,
Nodes: sets.New(node.Name),
}
cache.imageStates[name] = state
} else {
state.Nodes.Insert(node.Name)
}
// create the ImageStateSummary for this image
if _, ok := newSum[name]; !ok {
newSum[name] = state
}
}
}
nodeInfo.ImageStates = newSum
}
// removeNodeImageStates removes the given node record from image entries having the node
// in imageStates cache. After the removal, if any image becomes free, i.e., the image
// is no longer available on any node, the image entry will be removed from imageStates.
func (cache *cacheImpl) removeNodeImageStates(node *v1.Node) {
if node == nil {
return
}
for _, image := range node.Status.Images {
for _, name := range image.Names {
state, ok := cache.imageStates[name]
if ok {
state.Nodes.Delete(node.Name)
if state.Nodes.Len() == 0 {
// Remove the unused image to make sure the length of
// imageStates represents the total number of different
// images on all nodes
delete(cache.imageStates, name)
}
}
}
}
}
func (cache *cacheImpl) run(logger klog.Logger) {
go wait.Until(func() {
cache.cleanupAssumedPods(logger, time.Now())
}, cache.period, cache.stop)
}
// cleanupAssumedPods exists to make tests deterministic by taking the time as an input argument.
// It also reports metrics on the cache size for nodes, pods, and assumed pods.
func (cache *cacheImpl) cleanupAssumedPods(logger klog.Logger, now time.Time) {
cache.mu.Lock()
defer cache.mu.Unlock()
defer cache.updateMetrics()
// The size of assumedPods should be small
for key := range cache.assumedPods {
ps, ok := cache.podStates[key]
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Key found in assumed set but not in podStates, potentially a logical error")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
}
if !ps.bindingFinished {
logger.V(5).Info("Could not expire cache for pod as binding is still in progress", "podKey", key, "pod", klog.KObj(ps.pod))
continue
}
if cache.ttl != 0 && now.After(*ps.deadline) {
logger.Info("Pod expired", "podKey", key, "pod", klog.KObj(ps.pod))
if err := cache.removePod(logger, ps.pod); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "ExpirePod failed", "podKey", key, "pod", klog.KObj(ps.pod))
}
}
}
}
// updateMetrics updates cache size metric values for pods, assumed pods, and nodes
func (cache *cacheImpl) updateMetrics() {
metrics.CacheSize.WithLabelValues("assumed_pods").Set(float64(len(cache.assumedPods)))
metrics.CacheSize.WithLabelValues("pods").Set(float64(len(cache.podStates)))
metrics.CacheSize.WithLabelValues("nodes").Set(float64(len(cache.nodes)))
}
// BindPod handles the pod binding by adding a bind API call to the dispatcher.
// This method should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
func (cache *cacheImpl) BindPod(binding *v1.Binding) (<-chan error, error) {
// Don't store anything in the cache, as the pod is already assumed, and in case of a binding failure, it will be forgotten.
onFinish := make(chan error, 1)
err := cache.apiDispatcher.Add(apicalls.Implementations.PodBinding(binding), fwk.APICallOptions{
OnFinish: onFinish,
})
if fwk.IsUnexpectedError(err) {
return onFinish, err
}
return onFinish, nil
}
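// A minimal caller-side sketch (illustrative only):
//
//	onFinish, err := cache.BindPod(binding)
//	if err != nil {
//		return err // an unexpected dispatcher error
//	}
//	if err := <-onFinish; err != nil {
//		// The binding failed, was skipped, or was overwritten;
//		// the assumed pod should be forgotten.
//	}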
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package debugger
import (
"sort"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// CacheComparer is an implementation of the Scheduler's cache comparer.
type CacheComparer struct {
NodeLister corelisters.NodeLister
PodLister corelisters.PodLister
Cache internalcache.Cache
PodQueue internalqueue.SchedulingQueue
}
// Compare compares the nodes and pods of NodeLister with Cache.Snapshot.
func (c *CacheComparer) Compare(logger klog.Logger) error {
logger.V(3).Info("Cache comparer started")
defer logger.V(3).Info("Cache comparer finished")
nodes, err := c.NodeLister.List(labels.Everything())
if err != nil {
return err
}
pods, err := c.PodLister.List(labels.Everything())
if err != nil {
return err
}
dump := c.Cache.Dump()
pendingPods, _ := c.PodQueue.PendingPods()
if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 {
logger.Info("Cache mismatch", "missedNodes", missed, "redundantNodes", redundant)
}
if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 {
logger.Info("Cache mismatch", "missedPods", missed, "redundantPods", redundant)
}
return nil
}
// CompareNodes compares actual nodes with cached nodes.
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, node := range nodes {
actual = append(actual, node.Name)
}
cached := []string{}
for nodeName := range nodeinfos {
cached = append(cached, nodeName)
}
return compareStrings(actual, cached)
}
// ComparePods compares actual pods with cached pods.
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, pod := range pods {
actual = append(actual, string(pod.UID))
}
cached := []string{}
for _, nodeinfo := range nodeinfos {
for _, p := range nodeinfo.Pods {
cached = append(cached, string(p.GetPod().UID))
}
}
for _, pod := range waitingPods {
cached = append(cached, string(pod.UID))
}
return compareStrings(actual, cached)
}
func compareStrings(actual, cached []string) (missed, redundant []string) {
missed, redundant = []string{}, []string{}
sort.Strings(actual)
sort.Strings(cached)
compare := func(i, j int) int {
if i == len(actual) {
return 1
} else if j == len(cached) {
return -1
}
return strings.Compare(actual[i], cached[j])
}
for i, j := 0, 0; i < len(actual) || j < len(cached); {
switch compare(i, j) {
case 0:
i++
j++
case -1:
missed = append(missed, actual[i])
i++
case 1:
redundant = append(redundant, cached[j])
j++
}
}
return
}
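// For example, compareStrings([]string{"a", "b", "c"}, []string{"b", "c", "d"})
// returns missed=["a"] (known to the lister but absent from the cache) and
// redundant=["d"] (present in the cache but unknown to the lister).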
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package debugger
import (
"context"
"os"
"os/signal"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
)
// CacheDebugger provides ways to check and write cache information for debugging.
type CacheDebugger struct {
Comparer CacheComparer
Dumper CacheDumper
}
// New creates a CacheDebugger.
func New(
nodeLister corelisters.NodeLister,
podLister corelisters.PodLister,
cache internalcache.Cache,
podQueue internalqueue.SchedulingQueue,
) *CacheDebugger {
return &CacheDebugger{
Comparer: CacheComparer{
NodeLister: nodeLister,
PodLister: podLister,
Cache: cache,
PodQueue: podQueue,
},
Dumper: CacheDumper{
cache: cache,
podQueue: podQueue,
},
}
}
// ListenForSignal starts a goroutine that will trigger the CacheDebugger's
// behavior when the process receives SIGINT (Windows) or SIGUSR2 (non-Windows).
func (d *CacheDebugger) ListenForSignal(ctx context.Context) {
logger := klog.FromContext(ctx)
stopCh := ctx.Done()
ch := make(chan os.Signal, 1)
signal.Notify(ch, compareSignal)
go func() {
for {
select {
case <-stopCh:
return
case <-ch:
d.Comparer.Compare(logger)
d.Dumper.DumpAll(logger)
}
}
}()
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package debugger
import (
"fmt"
"strings"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// CacheDumper writes some information from the scheduler cache and the scheduling queue to the
// scheduler logs for debugging purposes.
type CacheDumper struct {
cache internalcache.Cache
podQueue queue.SchedulingQueue
}
// DumpAll writes cached nodes and scheduling queue information to the scheduler logs.
func (d *CacheDumper) DumpAll(logger klog.Logger) {
d.dumpNodes(logger)
d.dumpSchedulingQueue(logger)
}
// dumpNodes writes NodeInfo to the scheduler logs.
func (d *CacheDumper) dumpNodes(logger klog.Logger) {
dump := d.cache.Dump()
nodeInfos := make([]string, 0, len(dump.Nodes))
for name, nodeInfo := range dump.Nodes {
nodeInfos = append(nodeInfos, d.printNodeInfo(name, nodeInfo))
}
// Extra blank line added between node entries for readability.
logger.Info("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n"))
}
// dumpSchedulingQueue writes pods in the scheduling queue to the scheduler logs.
func (d *CacheDumper) dumpSchedulingQueue(logger klog.Logger) {
pendingPods, s := d.podQueue.PendingPods()
var podData strings.Builder
for _, p := range pendingPods {
podData.WriteString(printPod(p))
}
logger.Info("Dump of scheduling queue", "summary", s, "pods", podData.String())
}
// printNodeInfo writes parts of NodeInfo to a string.
func (d *CacheDumper) printNodeInfo(name string, n *framework.NodeInfo) string {
var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("Node name: %s\nDeleted: %t\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
name, n.Node() == nil, n.Requested, n.Allocatable, len(n.Pods)))
// Dumping Pod Info
for _, p := range n.Pods {
nodeData.WriteString(printPod(p.GetPod()))
}
// Dumping nominated pods info on the node
nominatedPodInfos := d.podQueue.NominatedPodsForNode(name)
if len(nominatedPodInfos) != 0 {
nodeData.WriteString(fmt.Sprintf("Nominated Pods(number: %v):\n", len(nominatedPodInfos)))
for _, pi := range nominatedPodInfos {
nodeData.WriteString(printPod(pi.GetPod()))
}
}
return nodeData.String()
}
// printPod writes parts of a Pod object to a string.
func printPod(p *v1.Pod) string {
return fmt.Sprintf("name: %v, namespace: %v, uid: %v, phase: %v, nominated node: %v\n", p.Name, p.Namespace, p.UID, p.Status.Phase, p.Status.NominatedNodeName)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
v1 "k8s.io/api/core/v1"
utilnode "k8s.io/component-helpers/node/topology"
"k8s.io/klog/v2"
)
// nodeTree is a tree-like data structure that holds node names in each zone. Zone names are
// keys to "NodeTree.tree" and values of "NodeTree.tree" are arrays of node names.
// NodeTree is NOT thread-safe, any concurrent updates/reads from it must be synchronized by the caller.
// It is used only by schedulerCache, and should stay as such.
type nodeTree struct {
tree map[string][]string // a map from zone (region-zone) to an array of nodes in the zone.
zones []string // a list of all the zones in the tree (keys)
numNodes int
}
// newNodeTree creates a NodeTree from nodes.
func newNodeTree(logger klog.Logger, nodes []*v1.Node) *nodeTree {
nt := &nodeTree{
tree: make(map[string][]string, len(nodes)),
}
for _, n := range nodes {
nt.addNode(logger, n)
}
return nt
}
// addNode adds a node and its corresponding zone to the tree. If the zone already exists, the node
// is added to the array of nodes in that zone.
func (nt *nodeTree) addNode(logger klog.Logger, n *v1.Node) {
zone := utilnode.GetZoneKey(n)
if na, ok := nt.tree[zone]; ok {
for _, nodeName := range na {
if nodeName == n.Name {
logger.Info("Did not add to the NodeTree because it already exists", "node", klog.KObj(n))
return
}
}
nt.tree[zone] = append(na, n.Name)
} else {
nt.zones = append(nt.zones, zone)
nt.tree[zone] = []string{n.Name}
}
logger.V(2).Info("Added node to NodeTree", "node", klog.KObj(n), "zone", zone)
nt.numNodes++
}
// removeNode removes a node from the NodeTree.
func (nt *nodeTree) removeNode(logger klog.Logger, n *v1.Node) error {
zone := utilnode.GetZoneKey(n)
if na, ok := nt.tree[zone]; ok {
for i, nodeName := range na {
if nodeName == n.Name {
nt.tree[zone] = append(na[:i], na[i+1:]...)
if len(nt.tree[zone]) == 0 {
nt.removeZone(zone)
}
logger.V(2).Info("Removed node from NodeTree", "node", klog.KObj(n), "zone", zone)
nt.numNodes--
return nil
}
}
}
logger.Error(nil, "Did not remove Node in NodeTree because it was not found", "node", klog.KObj(n), "zone", zone)
return fmt.Errorf("node %q in group %q was not found", n.Name, zone)
}
// removeZone removes a zone from tree.
// This function must be called while the writer lock is held.
func (nt *nodeTree) removeZone(zone string) {
delete(nt.tree, zone)
for i, z := range nt.zones {
if z == zone {
nt.zones = append(nt.zones[:i], nt.zones[i+1:]...)
return
}
}
}
// updateNode updates a node in the NodeTree.
func (nt *nodeTree) updateNode(logger klog.Logger, old, new *v1.Node) {
var oldZone string
if old != nil {
oldZone = utilnode.GetZoneKey(old)
}
newZone := utilnode.GetZoneKey(new)
// If the zone ID of the node has not changed, we don't need to do anything. The name of a node
// cannot change in an update.
if oldZone == newZone {
return
}
nt.removeNode(logger, old) // No error checking. We ignore whether the old node exists or not.
nt.addNode(logger, new)
}
// list returns the list of node names. NodeTree iterates over zones and within each zone iterates
// over nodes in a round-robin fashion.
func (nt *nodeTree) list() ([]string, error) {
if len(nt.zones) == 0 {
return nil, nil
}
nodesList := make([]string, 0, nt.numNodes)
numExhaustedZones := 0
nodeIndex := 0
for len(nodesList) < nt.numNodes {
if numExhaustedZones >= len(nt.zones) { // all zones are exhausted.
return nodesList, errors.New("all zones exhausted before reaching count of nodes expected")
}
for zoneIndex := 0; zoneIndex < len(nt.zones); zoneIndex++ {
na := nt.tree[nt.zones[zoneIndex]]
if nodeIndex >= len(na) { // If the zone is exhausted, continue
if nodeIndex == len(na) { // If it is the first time the zone is exhausted
numExhaustedZones++
}
continue
}
nodesList = append(nodesList, na[nodeIndex])
}
nodeIndex++
}
return nodesList, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a
// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle.
type Snapshot struct {
// nodeInfoMap a map of node name to a snapshot of its NodeInfo.
nodeInfoMap map[string]*framework.NodeInfo
// nodeInfoList is the list of nodes as ordered in the cache's nodeTree.
nodeInfoList []fwk.NodeInfo
// havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
havePodsWithAffinityNodeInfoList []fwk.NodeInfo
// havePodsWithRequiredAntiAffinityNodeInfoList is the list of nodes with at least one pod declaring
// required anti-affinity terms.
havePodsWithRequiredAntiAffinityNodeInfoList []fwk.NodeInfo
// usedPVCSet contains a set of PVC names that have one or more scheduled pods using them,
// keyed in the format "namespace/name".
usedPVCSet sets.Set[string]
generation int64
}
var _ fwk.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot {
return &Snapshot{
nodeInfoMap: make(map[string]*framework.NodeInfo),
usedPVCSet: sets.New[string](),
}
}
// NewSnapshot initializes a Snapshot struct and returns it.
func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
nodeInfoMap := createNodeInfoMap(pods, nodes)
nodeInfoList := make([]fwk.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]fwk.NodeInfo, 0, len(nodeInfoMap))
havePodsWithRequiredAntiAffinityNodeInfoList := make([]fwk.NodeInfo, 0, len(nodeInfoMap))
for _, v := range nodeInfoMap {
nodeInfoList = append(nodeInfoList, v)
if len(v.PodsWithAffinity) > 0 {
havePodsWithAffinityNodeInfoList = append(havePodsWithAffinityNodeInfoList, v)
}
if len(v.PodsWithRequiredAntiAffinity) > 0 {
havePodsWithRequiredAntiAffinityNodeInfoList = append(havePodsWithRequiredAntiAffinityNodeInfoList, v)
}
}
s := NewEmptySnapshot()
s.nodeInfoMap = nodeInfoMap
s.nodeInfoList = nodeInfoList
s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList
s.havePodsWithRequiredAntiAffinityNodeInfoList = havePodsWithRequiredAntiAffinityNodeInfoList
s.usedPVCSet = createUsedPVCSet(pods)
return s
}
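// A minimal usage sketch (illustrative only):
//
//	pods := []*v1.Pod{...}   // pods already assigned via Spec.NodeName
//	nodes := []*v1.Node{...}
//	snapshot := NewSnapshot(pods, nodes)
//	_ = snapshot.NumNodes()
//	_ = snapshot.IsPVCUsedByPods("namespace/claim-name")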
// createNodeInfoMap obtains a list of pods and pivots that list into a map
// where the keys are node names and the values are the aggregated information
// for that node.
func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.NodeInfo {
nodeNameToInfo := make(map[string]*framework.NodeInfo)
for _, pod := range pods {
nodeName := pod.Spec.NodeName
if _, ok := nodeNameToInfo[nodeName]; !ok {
nodeNameToInfo[nodeName] = framework.NewNodeInfo()
}
nodeNameToInfo[nodeName].AddPod(pod)
}
imageExistenceMap := createImageExistenceMap(nodes)
for _, node := range nodes {
if _, ok := nodeNameToInfo[node.Name]; !ok {
nodeNameToInfo[node.Name] = framework.NewNodeInfo()
}
nodeInfo := nodeNameToInfo[node.Name]
nodeInfo.SetNode(node)
nodeInfo.ImageStates = getNodeImageStates(node, imageExistenceMap)
}
return nodeNameToInfo
}
func createUsedPVCSet(pods []*v1.Pod) sets.Set[string] {
usedPVCSet := sets.New[string]()
for _, pod := range pods {
if pod.Spec.NodeName == "" {
continue
}
for _, v := range pod.Spec.Volumes {
if v.PersistentVolumeClaim == nil {
continue
}
key := framework.GetNamespacedName(pod.Namespace, v.PersistentVolumeClaim.ClaimName)
usedPVCSet.Insert(key)
}
}
return usedPVCSet
}
// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.Set[string]) map[string]*fwk.ImageStateSummary {
imageStates := make(map[string]*fwk.ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageStates[name] = &fwk.ImageStateSummary{
Size: image.SizeBytes,
NumNodes: imageExistenceMap[name].Len(),
}
}
}
return imageStates
}
// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.Set[string] {
imageExistenceMap := make(map[string]sets.Set[string])
for _, node := range nodes {
for _, image := range node.Status.Images {
for _, name := range image.Names {
if _, ok := imageExistenceMap[name]; !ok {
imageExistenceMap[name] = sets.New(node.Name)
} else {
imageExistenceMap[name].Insert(node.Name)
}
}
}
}
return imageExistenceMap
}
// NodeInfos returns a NodeInfoLister.
func (s *Snapshot) NodeInfos() fwk.NodeInfoLister {
return s
}
// StorageInfos returns a StorageInfoLister.
func (s *Snapshot) StorageInfos() fwk.StorageInfoLister {
return s
}
// NumNodes returns the number of nodes in the snapshot.
func (s *Snapshot) NumNodes() int {
return len(s.nodeInfoList)
}
// List returns the list of nodes in the snapshot.
func (s *Snapshot) List() ([]fwk.NodeInfo, error) {
return s.nodeInfoList, nil
}
// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity
func (s *Snapshot) HavePodsWithAffinityList() ([]fwk.NodeInfo, error) {
return s.havePodsWithAffinityNodeInfoList, nil
}
// HavePodsWithRequiredAntiAffinityList returns the list of nodes with at least one pod with
// required inter-pod anti-affinity
func (s *Snapshot) HavePodsWithRequiredAntiAffinityList() ([]fwk.NodeInfo, error) {
return s.havePodsWithRequiredAntiAffinityNodeInfoList, nil
}
// Get returns the NodeInfo of the given node name.
func (s *Snapshot) Get(nodeName string) (fwk.NodeInfo, error) {
if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
return v, nil
}
return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName)
}
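// IsPVCUsedByPods returns true if the given PVC key (in the form
// "namespace/name") is used by one or more scheduled pods.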
func (s *Snapshot) IsPVCUsedByPods(key string) bool {
return s.usedPVCSet.Has(key)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Below is the implementation of a heap. The logic is pretty much the same
// as cache.heap; however, this heap does not perform synchronization. It leaves
// synchronization to the SchedulingQueue.
package heap
import (
"container/heap"
"fmt"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// KeyFunc is a function type to get the key from an object.
type KeyFunc[T any] func(obj T) string
type heapItem[T any] struct {
obj T // The object which is stored in the heap.
index int // The index of the object's key in the Heap.queue.
}
type itemKeyValue[T any] struct {
key string
obj T
}
// data is an internal struct that implements the standard heap interface
// and keeps the data stored in the heap.
type data[T any] struct {
// items is a map from key of the objects to the objects and their index.
// We depend on the property that items in the map are in the queue and vice versa.
items map[string]*heapItem[T]
// queue implements a heap data structure and keeps the order of elements
// according to the heap invariant. The queue keeps the keys of objects stored
// in "items".
queue []string
// keyFunc is used to make the key used for queued item insertion and retrieval, and
// should be deterministic.
keyFunc KeyFunc[T]
// lessFunc is used to compare two objects in the heap.
lessFunc LessFunc[T]
}
var (
_ = heap.Interface(&data[any]{}) // data implements a standard heap
)
// Less compares two objects and returns true if the first one should go
// in front of the second one in the heap.
func (h *data[T]) Less(i, j int) bool {
if i >= len(h.queue) || j >= len(h.queue) {
return false
}
itemi, ok := h.items[h.queue[i]]
if !ok {
return false
}
itemj, ok := h.items[h.queue[j]]
if !ok {
return false
}
return h.lessFunc(itemi.obj, itemj.obj)
}
// Len returns the number of items in the Heap.
func (h *data[T]) Len() int { return len(h.queue) }
// Swap implements swapping of two elements in the heap. This is a part of standard
// heap interface and should never be called directly.
func (h *data[T]) Swap(i, j int) {
if i < 0 || j < 0 {
return
}
h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
item := h.items[h.queue[i]]
item.index = i
item = h.items[h.queue[j]]
item.index = j
}
// Push is supposed to be called by container/heap.Push only.
func (h *data[T]) Push(kv interface{}) {
keyValue := kv.(*itemKeyValue[T])
n := len(h.queue)
h.items[keyValue.key] = &heapItem[T]{keyValue.obj, n}
h.queue = append(h.queue, keyValue.key)
}
// Pop is supposed to be called by container/heap.Pop only.
func (h *data[T]) Pop() interface{} {
if len(h.queue) == 0 {
return nil
}
key := h.queue[len(h.queue)-1]
h.queue = h.queue[0 : len(h.queue)-1]
item, ok := h.items[key]
if !ok {
// This should never happen: the key was in the queue but is missing from items.
return nil
}
delete(h.items, key)
return item.obj
}
// Peek returns the head of the heap without removing it.
func (h *data[T]) Peek() (T, bool) {
if len(h.queue) > 0 {
return h.items[h.queue[0]].obj, true
}
var zero T
return zero, false
}
// Heap is a producer/consumer queue that implements a heap data structure.
// It can be used to implement priority queues and similar data structures.
type Heap[T any] struct {
// data stores objects and has a queue that keeps their ordering according
// to the heap invariant.
data *data[T]
// metricRecorder updates the counter when elements of a heap get added or
// removed, and it does nothing if it's nil
metricRecorder metrics.MetricRecorder
}
// AddOrUpdate inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap[T]) AddOrUpdate(obj T) {
key := h.data.keyFunc(obj)
if _, exists := h.data.items[key]; exists {
h.data.items[key].obj = obj
heap.Fix(h.data, h.data.items[key].index)
} else {
heap.Push(h.data, &itemKeyValue[T]{key, obj})
if h.metricRecorder != nil {
h.metricRecorder.Inc()
}
}
}
// Delete removes an item.
func (h *Heap[T]) Delete(obj T) error {
key := h.data.keyFunc(obj)
if item, ok := h.data.items[key]; ok {
heap.Remove(h.data, item.index)
if h.metricRecorder != nil {
h.metricRecorder.Dec()
}
return nil
}
return fmt.Errorf("object not found")
}
// Peek returns the head of the heap without removing it.
func (h *Heap[T]) Peek() (T, bool) {
return h.data.Peek()
}
// Pop returns the head of the heap and removes it.
func (h *Heap[T]) Pop() (T, error) {
obj := heap.Pop(h.data)
if obj != nil {
if h.metricRecorder != nil {
h.metricRecorder.Dec()
}
return obj.(T), nil
}
var zero T
return zero, fmt.Errorf("heap is empty")
}
// Get returns the requested item, or false if it does not exist.
func (h *Heap[T]) Get(obj T) (T, bool) {
key := h.data.keyFunc(obj)
return h.GetByKey(key)
}
// GetByKey returns the requested item, or false if it does not exist.
func (h *Heap[T]) GetByKey(key string) (T, bool) {
item, exists := h.data.items[key]
if !exists {
var zero T
return zero, false
}
return item.obj, true
}
// Has returns whether the given object is present in the heap.
func (h *Heap[T]) Has(obj T) bool {
key := h.data.keyFunc(obj)
_, ok := h.GetByKey(key)
return ok
}
// List returns a list of all the items.
func (h *Heap[T]) List() []T {
list := make([]T, 0, len(h.data.items))
for _, item := range h.data.items {
list = append(list, item.obj)
}
return list
}
// Len returns the number of items in the heap.
func (h *Heap[T]) Len() int {
return len(h.data.queue)
}
// New returns a Heap which can be used to queue up items to process.
func New[T any](keyFn KeyFunc[T], lessFn LessFunc[T]) *Heap[T] {
return NewWithRecorder(keyFn, lessFn, nil)
}
// NewWithRecorder returns a Heap whose size changes are reported through the optional metricRecorder.
func NewWithRecorder[T any](keyFn KeyFunc[T], lessFn LessFunc[T], metricRecorder metrics.MetricRecorder) *Heap[T] {
return &Heap[T]{
data: &data[T]{
items: map[string]*heapItem[T]{},
queue: []string{},
keyFunc: keyFn,
lessFunc: lessFn,
},
metricRecorder: metricRecorder,
}
}
// LessFunc is a function that receives two items and returns true if the first
// item should be placed before the second one when the list is sorted.
type LessFunc[T any] func(item1, item2 T) bool
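// Illustrative usage sketch (not part of the original source): a small
// program exercising the exported Heap API above. The task type and its
// fields are hypothetical, and KeyFunc is assumed to be func(T) string,
// matching its use with string map keys above.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/backend/heap"
)

type task struct {
	name     string
	priority int
}

func main() {
	// Key tasks by name; order so that the highest priority sits at the head.
	h := heap.New[*task](
		func(t *task) string { return t.name },
		func(a, b *task) bool { return a.priority > b.priority },
	)
	h.AddOrUpdate(&task{name: "a", priority: 1})
	h.AddOrUpdate(&task{name: "b", priority: 5})
	h.AddOrUpdate(&task{name: "a", priority: 7}) // same key: updates "a" in place and re-heapifies

	if t, ok := h.Peek(); ok {
		fmt.Println(t.name, t.priority) // a 7
	}
	t, _ := h.Pop()
	fmt.Println(t.name, h.Len()) // a 1
}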
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
"container/list"
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/backend/heap"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// activeQueuer is a wrapper for activeQ related operations.
// Its methods take the lock inside.
type activeQueuer interface {
underLock(func(unlockedActiveQ unlockedActiveQueuer))
underRLock(func(unlockedActiveQ unlockedActiveQueueReader))
delete(pInfo *framework.QueuedPodInfo) error
pop(logger klog.Logger) (*framework.QueuedPodInfo, error)
list() []*v1.Pod
len() int
has(pInfo *framework.QueuedPodInfo) bool
listInFlightEvents() []interface{}
listInFlightPods() []*v1.Pod
clusterEventsForPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) ([]*clusterEvent, error)
addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []fwk.ClusterEvent) bool
addEventIfAnyInFlight(oldObj, newObj interface{}, event fwk.ClusterEvent) bool
schedulingCycle() int64
done(pod types.UID)
close()
broadcast()
}
// unlockedActiveQueuer defines activeQ methods that are not protected by the lock itself.
// The underLock() method should be used to protect these methods.
type unlockedActiveQueuer interface {
unlockedActiveQueueReader
// add adds a new pod to the activeQ.
// The event should show which event triggered this addition and is used for the metric recording.
// This method should be called in activeQueue.underLock().
add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string)
// update updates the pod in activeQ if oldPodInfo is already in the queue.
// It returns new pod info if updated, nil otherwise.
update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo
// addEventsIfPodInFlight adds events to inFlightEvents if the newPod is in inFlightPods.
// It returns true if it pushed the events to inFlightEvents.
addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []fwk.ClusterEvent) bool
}
// unlockedActiveQueueReader defines activeQ read-only methods that are not protected by the lock itself.
// The underLock() or underRLock() method should be used to protect these methods.
type unlockedActiveQueueReader interface {
// get returns the pod matching pInfo inside the activeQ.
// Returns false if the pInfo doesn't exist in the queue.
// This method should be called in activeQueue.underLock() or activeQueue.underRLock().
get(pInfo *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool)
// has returns whether pInfo exists in the queue.
// This method should be called in activeQueue.underLock() or activeQueue.underRLock().
has(pInfo *framework.QueuedPodInfo) bool
}
// unlockedActiveQueue implements the activeQ methods that are not protected by the lock itself.
// The activeQueue.underLock() or activeQueue.underRLock() method should be used to protect these methods.
type unlockedActiveQueue struct {
queue *heap.Heap[*framework.QueuedPodInfo]
inFlightPods map[types.UID]*list.Element
inFlightEvents *list.List
metricsRecorder *metrics.MetricAsyncRecorder
}
func newUnlockedActiveQueue(queue *heap.Heap[*framework.QueuedPodInfo], inFlightPods map[types.UID]*list.Element, inFlightEvents *list.List, metricsRecorder *metrics.MetricAsyncRecorder) *unlockedActiveQueue {
return &unlockedActiveQueue{
queue: queue,
inFlightPods: inFlightPods,
inFlightEvents: inFlightEvents,
metricsRecorder: metricsRecorder,
}
}
// add adds a new pod to the activeQ.
// The event should show which event triggered this addition and is used for the metric recording.
// This method should be called in activeQueue.underLock().
func (uaq *unlockedActiveQueue) add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) {
uaq.queue.AddOrUpdate(pInfo)
metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event).Inc()
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", activeQ)
}
// update updates the pod in activeQ if oldPodInfo is already in the queue.
// It returns new pod info if updated, nil otherwise.
func (uaq *unlockedActiveQueue) update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo {
if pInfo, exists := uaq.queue.Get(oldPodInfo); exists {
_ = pInfo.Update(newPod)
uaq.queue.AddOrUpdate(pInfo)
return pInfo
}
return nil
}
// addEventsIfPodInFlight adds events to inFlightEvents if the newPod is in inFlightPods.
// It returns true if it pushed the events to inFlightEvents.
func (uaq *unlockedActiveQueue) addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []fwk.ClusterEvent) bool {
_, ok := uaq.inFlightPods[newPod.UID]
if ok {
for _, event := range events {
uaq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false)
uaq.inFlightEvents.PushBack(&clusterEvent{
event: event,
oldObj: oldPod,
newObj: newPod,
})
}
}
return ok
}
// get returns the pod matching pInfo inside the activeQ.
// Returns false if the pInfo doesn't exist in the queue.
// This method should be called in activeQueue.underLock() or activeQueue.underRLock().
func (uaq *unlockedActiveQueue) get(pInfo *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) {
return uaq.queue.Get(pInfo)
}
// has returns whether pInfo exists in the queue.
// This method should be called in activeQueue.underLock() or activeQueue.underRLock().
func (uaq *unlockedActiveQueue) has(pInfo *framework.QueuedPodInfo) bool {
return uaq.queue.Has(pInfo)
}
// backoffQPopper defines the method used to pop from the backoffQ when the activeQ is empty.
type backoffQPopper interface {
// popBackoff pops the pInfo from the podBackoffQ.
popBackoff() (*framework.QueuedPodInfo, error)
// lenBackoff returns the length of the podBackoffQ.
lenBackoff() int
}
// activeQueue implements activeQueuer. All of the fields have to be protected using the lock.
type activeQueue struct {
// lock synchronizes all operations related to activeQ.
// It protects activeQ, inFlightPods, inFlightEvents, schedulingCycle and closed fields.
// Caution: DO NOT take "SchedulingQueue.lock" after taking "lock".
// You should always take "SchedulingQueue.lock" first, otherwise the queue could end up in deadlock.
// "lock" should not be taken after taking "backoffQueue.lock" or "nominator.nLock".
// Correct locking order is: SchedulingQueue.lock > lock > backoffQueue.lock > nominator.nLock.
lock sync.RWMutex
// queue is the heap structure that the scheduler actively looks at to find pods to
// schedule. The head of the heap is the highest-priority pod.
queue *heap.Heap[*framework.QueuedPodInfo]
// unlockedQueue is a wrapper of queue providing methods that are not locked themselves
// and can be used in the underLock() or underRLock().
unlockedQueue *unlockedActiveQueue
// cond is a condition that is notified when the pod is added to activeQ.
// When SchedulerPopFromBackoffQ feature is enabled,
// condition is also notified when the pod is added to backoffQ.
// It is used with lock.
cond sync.Cond
// inFlightPods holds the UID of all pods which have been popped out for which Done
// hasn't been called yet - in other words, all pods that are currently being
// processed (being scheduled, in permit, or in the binding cycle).
//
// The values in the map are the entry of each pod in the inFlightEvents list.
// The value of that entry is the *v1.Pod at the time that scheduling of that
// pod started, which can be useful for logging or debugging.
inFlightPods map[types.UID]*list.Element
// inFlightEvents holds the events received by the scheduling queue
// (entry value is clusterEvent) together with in-flight pods (entry
// value is *v1.Pod). Entries get added at the end while the mutex is
// locked, so they get serialized.
//
// The pod entries are added in Pop and used to track which events
// occurred after the pod scheduling attempt for that pod started.
// They get removed when the scheduling attempt is done, at which
// point all events that occurred in the meantime are processed.
//
// After removal of a pod, events at the start of the list are no
// longer needed because all of the other in-flight pods started
// later. Those events can be removed.
inFlightEvents *list.List
// schedCycle represents sequence number of scheduling cycle and is incremented
// when a pod is popped.
schedCycle int64
// closed indicates that the queue is closed.
// It is mainly used to let Pop() exit its control loop while waiting for an item.
closed bool
// isSchedulingQueueHintEnabled indicates whether the SchedulerQueueingHints feature gate is enabled.
isSchedulingQueueHintEnabled bool
metricsRecorder *metrics.MetricAsyncRecorder
// backoffQPopper is used to pop from backoffQ when activeQ is empty.
// It is non-nil only when SchedulerPopFromBackoffQ feature is enabled.
backoffQPopper backoffQPopper
}
func newActiveQueue(queue *heap.Heap[*framework.QueuedPodInfo], isSchedulingQueueHintEnabled bool, metricRecorder *metrics.MetricAsyncRecorder, backoffQPopper backoffQPopper) *activeQueue {
aq := &activeQueue{
queue: queue,
inFlightPods: make(map[types.UID]*list.Element),
inFlightEvents: list.New(),
isSchedulingQueueHintEnabled: isSchedulingQueueHintEnabled,
metricsRecorder: metricRecorder,
backoffQPopper: backoffQPopper,
}
aq.cond.L = &aq.lock
aq.unlockedQueue = newUnlockedActiveQueue(queue, aq.inFlightPods, aq.inFlightEvents, metricRecorder)
return aq
}
// underLock runs the fn function under the lock.Lock.
// fn can run unlockedActiveQueuer methods but should NOT run any other activeQueue method,
// as it would end up in deadlock.
func (aq *activeQueue) underLock(fn func(unlockedActiveQ unlockedActiveQueuer)) {
aq.lock.Lock()
defer aq.lock.Unlock()
fn(aq.unlockedQueue)
}
// underRLock runs the fn function under the lock.RLock.
// fn can run unlockedActiveQueueReader methods but should NOT run any other activeQueue method,
// as it would end up in deadlock.
func (aq *activeQueue) underRLock(fn func(unlockedActiveQ unlockedActiveQueueReader)) {
aq.lock.RLock()
defer aq.lock.RUnlock()
fn(aq.unlockedQueue)
}
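// Illustrative sketch (not part of the original source): how a caller is
// expected to batch several operations over the unlocked queue under a single
// lock acquisition. pInfo, logger, and the event label are assumed to exist;
// note that fn must not call locked activeQueue methods such as pop() or
// delete(), as that would self-deadlock.
//
//	aq.underLock(func(unlockedActiveQ unlockedActiveQueuer) {
//		if _, exists := unlockedActiveQ.get(pInfo); !exists {
//			unlockedActiveQ.add(logger, pInfo, "SomeEvent")
//		}
//	})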
// delete deletes the pod info from activeQ.
func (aq *activeQueue) delete(pInfo *framework.QueuedPodInfo) error {
aq.lock.Lock()
defer aq.lock.Unlock()
return aq.queue.Delete(pInfo)
}
// pop removes the head of the queue and returns it.
// It blocks if the queue is empty and waits until a new item is added to the queue.
// It increments scheduling cycle when a pod is popped.
func (aq *activeQueue) pop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
aq.lock.Lock()
defer aq.lock.Unlock()
return aq.unlockedPop(logger)
}
func (aq *activeQueue) unlockedPop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
var pInfo *framework.QueuedPodInfo
for aq.queue.Len() == 0 {
// backoffQPopper is non-nil only if SchedulerPopFromBackoffQ feature is enabled.
// In case of non-empty backoffQ, try popping from there.
if aq.backoffQPopper != nil && aq.backoffQPopper.lenBackoff() != 0 {
break
}
// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
// When Close() is called, aq.closed is set and the condition is broadcast,
// which lets this loop observe the closed flag and return from Pop().
if aq.closed {
logger.V(2).Info("Scheduling queue is closed")
return nil, nil
}
aq.cond.Wait()
}
pInfo, err := aq.queue.Pop()
if err != nil {
if aq.backoffQPopper == nil {
return nil, err
}
// Try to pop from backoffQ when activeQ is empty.
pInfo, err = aq.backoffQPopper.popBackoff()
if err != nil {
return nil, err
}
metrics.SchedulerQueueIncomingPods.WithLabelValues("active", framework.PopFromBackoffQ).Inc()
}
pInfo.Attempts++
// In flight, no concurrent events yet.
if aq.isSchedulingQueueHintEnabled {
// If the pod is already in the map, we shouldn't overwrite the inFlightPods otherwise it'd lead to a memory leak.
// https://github.com/kubernetes/kubernetes/pull/127016
if _, ok := aq.inFlightPods[pInfo.Pod.UID]; ok {
// Just report it as an error, but no need to stop the scheduler
// because it likely doesn't cause any visible issues from the scheduling perspective.
utilruntime.HandleErrorWithLogger(logger, nil, "The same pod is tracked in multiple places in the scheduler, and just discard it", "pod", klog.KObj(pInfo.Pod))
// Just ignore/discard this duplicated pod and try to pop the next one.
return aq.unlockedPop(logger)
}
aq.metricsRecorder.ObserveInFlightEventsAsync(metrics.PodPoppedInFlightEvent, 1, false)
aq.inFlightPods[pInfo.Pod.UID] = aq.inFlightEvents.PushBack(pInfo.Pod)
}
aq.schedCycle++
// Update metrics and reset the set of unschedulable plugins for the next attempt.
for plugin := range pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) {
metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Dec()
}
pInfo.UnschedulablePlugins.Clear()
pInfo.PendingPlugins.Clear()
pInfo.GatingPlugin = ""
pInfo.GatingPluginEvents = nil
return pInfo, nil
}
// list returns all pods that are in the queue.
func (aq *activeQueue) list() []*v1.Pod {
aq.lock.RLock()
defer aq.lock.RUnlock()
var result []*v1.Pod
for _, pInfo := range aq.queue.List() {
result = append(result, pInfo.GetPod())
}
return result
}
// len returns length of the queue.
func (aq *activeQueue) len() int {
return aq.queue.Len()
}
// has returns whether pInfo exists in the queue.
func (aq *activeQueue) has(pInfo *framework.QueuedPodInfo) bool {
aq.lock.RLock()
defer aq.lock.RUnlock()
return aq.queue.Has(pInfo)
}
// listInFlightEvents returns all inFlightEvents.
func (aq *activeQueue) listInFlightEvents() []interface{} {
aq.lock.RLock()
defer aq.lock.RUnlock()
var values []interface{}
for event := aq.inFlightEvents.Front(); event != nil; event = event.Next() {
values = append(values, event.Value)
}
return values
}
// listInFlightPods returns all inFlightPods.
func (aq *activeQueue) listInFlightPods() []*v1.Pod {
aq.lock.RLock()
defer aq.lock.RUnlock()
var pods []*v1.Pod
for _, obj := range aq.inFlightPods {
pods = append(pods, obj.Value.(*v1.Pod))
}
return pods
}
// clusterEventsForPod gets all cluster events that have happened while the pod referenced by pInfo was being scheduled.
func (aq *activeQueue) clusterEventsForPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) ([]*clusterEvent, error) {
aq.lock.RLock()
defer aq.lock.RUnlock()
logger.V(5).Info("Checking events for in-flight pod", "pod", klog.KObj(pInfo.Pod), "unschedulablePlugins", pInfo.UnschedulablePlugins, "inFlightEventsSize", aq.inFlightEvents.Len(), "inFlightPodsSize", len(aq.inFlightPods))
// AddUnschedulableIfNotPresent is called with the Pod at the end of scheduling or binding.
// So, given that pInfo should have been Pop()ed earlier,
// we can assume pInfo must be recorded in inFlightPods and thus inFlightEvents.
inFlightPod, ok := aq.inFlightPods[pInfo.Pod.UID]
if !ok {
return nil, fmt.Errorf("in flight Pod isn't found in the scheduling queue. If you see this error log, it's likely a bug in the scheduler")
}
var events []*clusterEvent
for event := inFlightPod.Next(); event != nil; event = event.Next() {
e, ok := event.Value.(*clusterEvent)
if !ok {
// Must be another in-flight Pod (*v1.Pod). Can be ignored.
continue
}
events = append(events, e)
}
return events, nil
}
// addEventsIfPodInFlight adds events to inFlightEvents if the newPod is in inFlightPods.
// It returns true if it pushed the events to inFlightEvents.
func (aq *activeQueue) addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []fwk.ClusterEvent) bool {
aq.lock.Lock()
defer aq.lock.Unlock()
return aq.unlockedQueue.addEventsIfPodInFlight(oldPod, newPod, events)
}
// addEventIfAnyInFlight adds clusterEvent to inFlightEvents if any pod is in inFlightPods.
// It returns true if it pushed the event to inFlightEvents.
func (aq *activeQueue) addEventIfAnyInFlight(oldObj, newObj interface{}, event fwk.ClusterEvent) bool {
aq.lock.Lock()
defer aq.lock.Unlock()
if len(aq.inFlightPods) != 0 {
aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false)
aq.inFlightEvents.PushBack(&clusterEvent{
event: event,
oldObj: oldObj,
newObj: newObj,
})
return true
}
return false
}
func (aq *activeQueue) schedulingCycle() int64 {
aq.lock.RLock()
defer aq.lock.RUnlock()
return aq.schedCycle
}
// done must be called for pod returned by Pop. This allows the queue to
// keep track of which pods are currently being processed.
func (aq *activeQueue) done(pod types.UID) {
aq.lock.Lock()
defer aq.lock.Unlock()
aq.unlockedDone(pod)
}
// unlockedDone is used by the activeQueue internally and doesn't take the lock itself.
// It assumes the lock is already taken outside before the method is called.
func (aq *activeQueue) unlockedDone(pod types.UID) {
inFlightPod, ok := aq.inFlightPods[pod]
if !ok {
// This Pod is already done()ed.
return
}
delete(aq.inFlightPods, pod)
// Remove the pod from the list.
aq.inFlightEvents.Remove(inFlightPod)
aggrMetricsCounter := map[string]int{}
// Remove events which are only referred to by this Pod
// so that the inFlightEvents list doesn't grow infinitely.
// If the pod was at the head of the list, then all
// events between it and the next pod are no longer needed
// and can be removed.
for {
e := aq.inFlightEvents.Front()
if e == nil {
// Empty list.
break
}
ev, ok := e.Value.(*clusterEvent)
if !ok {
// Reached an in-flight pod entry; stop pruning.
break
}
aq.inFlightEvents.Remove(e)
aggrMetricsCounter[ev.event.Label()]--
}
for evLabel, count := range aggrMetricsCounter {
aq.metricsRecorder.ObserveInFlightEventsAsync(evLabel, float64(count), false)
}
aq.metricsRecorder.ObserveInFlightEventsAsync(metrics.PodPoppedInFlightEvent, -1,
// If it's the last Pod in inFlightPods, we should force-flush the metrics.
// Otherwise, especially in small clusters, which don't get a new Pod frequently,
// the metrics might not be flushed for a long time.
len(aq.inFlightPods) == 0)
}
// close closes the activeQueue.
func (aq *activeQueue) close() {
aq.lock.Lock()
defer aq.lock.Unlock()
// We should call done() for all in-flight pods to clean up the inFlightEvents metrics.
// It's safe even if a binding cycle running asynchronously calls done() afterwards;
// done() will just be a no-op.
for pod := range aq.inFlightPods {
aq.unlockedDone(pod)
}
aq.closed = true
}
// broadcast notifies the pop() operation that new pod(s) were added to the activeQueue.
func (aq *activeQueue) broadcast() {
aq.cond.Broadcast()
}
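// Minimal self-contained sketch (not part of the original source) of the
// sync.Cond pattern that pop(), broadcast(), and close() above rely on:
// consumers wait on the condition while the queue is empty, and producers
// (and close()) broadcast to wake every waiter so it can re-check the state.
package main

import (
	"fmt"
	"sync"
)

type condQueue struct {
	lock   sync.Mutex
	cond   sync.Cond
	items  []string
	closed bool
}

func newCondQueue() *condQueue {
	q := &condQueue{}
	q.cond.L = &q.lock // the condition shares the queue's lock, as in newActiveQueue
	return q
}

// pop blocks until an item is available or the queue is closed,
// mirroring the wait loop in activeQueue.unlockedPop.
func (q *condQueue) pop() (string, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()
	for len(q.items) == 0 {
		if q.closed {
			return "", false
		}
		q.cond.Wait()
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

// add appends an item and wakes waiting consumers, like broadcast().
func (q *condQueue) add(item string) {
	q.lock.Lock()
	q.items = append(q.items, item)
	q.lock.Unlock()
	q.cond.Broadcast()
}

// close marks the queue closed and wakes all waiters so pop can return.
func (q *condQueue) close() {
	q.lock.Lock()
	q.closed = true
	q.lock.Unlock()
	q.cond.Broadcast()
}

func main() {
	q := newCondQueue()
	go q.add("pod-a")
	if item, ok := q.pop(); ok {
		fmt.Println(item) // pod-a
	}
	q.close()
}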
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
"sync"
"time"
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/backend/heap"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/utils/clock"
)
// backoffQOrderingWindowDuration is a duration of an ordering window in the podBackoffQ.
// In each window, represented as a whole second, pods are ordered by priority.
// It is the same as the interval at which pods are flushed from the podBackoffQ to the activeQ, so that whole windows are flushed at once.
// This works only if PopFromBackoffQ feature is enabled.
// See the KEP-5142 (http://kep.k8s.io/5142) for rationale.
const backoffQOrderingWindowDuration = time.Second
// backoffQueuer is a wrapper for backoffQ related operations.
// Its methods that rely on the queues take the lock internally.
type backoffQueuer interface {
// isPodBackingoff returns true if a pod is still waiting for its backoff timer.
// If this returns true, the pod should not be re-tried.
// If the pod's backoff time falls within the current ordering window, it is still considered to be backing off.
isPodBackingoff(podInfo *framework.QueuedPodInfo) bool
// popAllBackoffCompleted pops all pods from podBackoffQ and podErrorBackoffQ that completed backoff.
popAllBackoffCompleted(logger klog.Logger) []*framework.QueuedPodInfo
// podInitialBackoffDuration returns the initial backoff duration that a pod can get.
podInitialBackoffDuration() time.Duration
// podMaxBackoffDuration returns the maximum backoff duration that a pod can get.
podMaxBackoffDuration() time.Duration
// waitUntilAlignedWithOrderingWindow waits until the time reaches a multiple of backoffQOrderingWindowDuration.
// It then runs the f function at the backoffQOrderingWindowDuration interval using a ticker.
// It's important to align the flushing time, because podBackoffQ's ordering is based on the windows
// and whole windows have to be flushed at one time without a visible latency.
waitUntilAlignedWithOrderingWindow(f func(), stopCh <-chan struct{})
// add adds the pInfo to backoffQueue.
// The event should show which event triggered this addition and is used for the metric recording.
// It also ensures that pInfo is not in both queues.
add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string)
// update updates the pod in backoffQueue if oldPodInfo is already in the queue.
// It returns new pod info if updated, nil otherwise.
update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo
// delete deletes the pInfo from backoffQueue.
// It returns true if the pod was deleted.
delete(pInfo *framework.QueuedPodInfo) bool
// get returns the pInfo matching given pInfoLookup, if exists.
get(pInfoLookup *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool)
// has returns whether pInfo exists in the queue.
has(pInfo *framework.QueuedPodInfo) bool
// list returns all pods that are in the queue.
list() []*v1.Pod
// len returns length of the queue.
len() int
}
// backoffQueue implements backoffQueuer and wraps two queues inside,
// providing seamless access as if it were one queue.
type backoffQueue struct {
// lock synchronizes all operations related to backoffQ.
// It protects both podBackoffQ and podErrorBackoffQ.
// Caution: DO NOT take "SchedulingQueue.lock" or "activeQueue.lock" after taking "lock".
// You should always take "SchedulingQueue.lock" and "activeQueue.lock" first, otherwise the queue could end up in deadlock.
// "lock" should not be taken after taking "nominator.nLock".
// Correct locking order is: SchedulingQueue.lock > activeQueue.lock > lock > nominator.nLock.
lock sync.RWMutex
clock clock.WithTicker
// podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff
// are popped from this heap before the scheduler looks at activeQ
podBackoffQ *heap.Heap[*framework.QueuedPodInfo]
// podErrorBackoffQ is a heap ordered by error backoff expiry. Pods which have completed backoff
// are popped from this heap before the scheduler looks at activeQ
podErrorBackoffQ *heap.Heap[*framework.QueuedPodInfo]
podInitialBackoff time.Duration
podMaxBackoff time.Duration
// activeQLessFn is used as a tie-breaking less function when two backoff times
// are equal, when the SchedulerPopFromBackoffQ feature is enabled.
activeQLessFn fwk.LessFunc
// isPopFromBackoffQEnabled indicates whether the feature gate SchedulerPopFromBackoffQ is enabled.
isPopFromBackoffQEnabled bool
}
func newBackoffQueue(clock clock.WithTicker, podInitialBackoffDuration time.Duration, podMaxBackoffDuration time.Duration, activeQLessFn fwk.LessFunc, popFromBackoffQEnabled bool) *backoffQueue {
bq := &backoffQueue{
clock: clock,
podInitialBackoff: podInitialBackoffDuration,
podMaxBackoff: podMaxBackoffDuration,
isPopFromBackoffQEnabled: popFromBackoffQEnabled,
activeQLessFn: activeQLessFn,
}
podBackoffQLessFn := bq.lessBackoffCompleted
if popFromBackoffQEnabled {
podBackoffQLessFn = bq.lessBackoffCompletedWithPriority
}
bq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, podBackoffQLessFn, metrics.NewBackoffPodsRecorder())
bq.podErrorBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, bq.lessBackoffCompleted, metrics.NewBackoffPodsRecorder())
return bq
}
// podInitialBackoffDuration returns the initial backoff duration that a pod can get.
func (bq *backoffQueue) podInitialBackoffDuration() time.Duration {
return bq.podInitialBackoff
}
// podMaxBackoffDuration returns the maximum backoff duration that a pod can get.
func (bq *backoffQueue) podMaxBackoffDuration() time.Duration {
return bq.podMaxBackoff
}
// alignToWindow truncates the provided time to the podBackoffQ ordering window.
// It returns the lowest possible timestamp in the window.
func (bq *backoffQueue) alignToWindow(t time.Time) time.Time {
if !bq.isPopFromBackoffQEnabled {
return t
}
return t.Truncate(backoffQOrderingWindowDuration)
}
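// Illustrative sketch (not part of the original source): with the one-second
// window, alignToWindow maps every timestamp inside a window to the window's
// start, so backoff expirations within the same second compare equal:
//
//	t := time.Date(2025, 1, 1, 0, 0, 3, 400_000_000, time.UTC) // 3.4s into the minute
//	t.Truncate(backoffQOrderingWindowDuration)                 // 00:00:03 exactly
//	// Expirations at 3.1s and 3.9s truncate to the same instant, so
//	// lessBackoffCompletedWithPriority falls through to activeQ ordering.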
// waitUntilAlignedWithOrderingWindow waits until the time reaches a multiple of backoffQOrderingWindowDuration.
// It then runs the f function at the backoffQOrderingWindowDuration interval using a ticker.
// It's important to align the flushing time, because podBackoffQ's ordering is based on the windows
// and whole windows have to be flushed at one time without a visible latency.
func (bq *backoffQueue) waitUntilAlignedWithOrderingWindow(f func(), stopCh <-chan struct{}) {
now := bq.clock.Now()
// Wait until the time reaches the multiple of backoffQOrderingWindowDuration.
durationToNextWindow := bq.alignToWindow(now.Add(backoffQOrderingWindowDuration)).Sub(now)
timer := bq.clock.NewTimer(durationToNextWindow)
select {
case <-stopCh:
timer.Stop()
return
case <-timer.C():
}
// Run a ticker to make sure the invocations of f function
// are aligned with the backoffQ's ordering window.
ticker := bq.clock.NewTicker(backoffQOrderingWindowDuration)
for {
select {
case <-stopCh:
return
default:
}
f()
// NOTE: because select in Go does not prioritize any case,
// it is possible for this to race: ticker.C and stopCh may both be
// ready, and the ticker.C case may be chosen.
// To mitigate this, we re-check stopCh at the beginning
// of every loop iteration to prevent extra executions of f().
select {
case <-stopCh:
ticker.Stop()
return
case <-ticker.C():
}
}
}
// lessBackoffCompletedWithPriority is the less function for podBackoffQ when the PopFromBackoffQ feature is enabled.
// Within the same ordering window it orders pods the same way activeQ would, to improve the popping order from backoffQ when activeQ is empty.
func (bq *backoffQueue) lessBackoffCompletedWithPriority(pInfo1, pInfo2 *framework.QueuedPodInfo) bool {
bo1 := bq.getBackoffTime(pInfo1)
bo2 := bq.getBackoffTime(pInfo2)
if !bo1.Equal(bo2) {
return bo1.Before(bo2)
}
// If the backoff time is the same, sort the pod in the same manner as activeQ does.
return bq.activeQLessFn(pInfo1, pInfo2)
}
// lessBackoffCompleted is the less function for podErrorBackoffQ, and for podBackoffQ when the PopFromBackoffQ feature is disabled.
func (bq *backoffQueue) lessBackoffCompleted(pInfo1, pInfo2 *framework.QueuedPodInfo) bool {
bo1 := bq.getBackoffTime(pInfo1)
bo2 := bq.getBackoffTime(pInfo2)
return bo1.Before(bo2)
}
// isPodBackingoff returns true if a pod is still waiting for its backoff timer.
// If this returns true, the pod should not be re-tried.
// If the pod's backoff time falls within the current ordering window, it is still considered to be backing off.
func (bq *backoffQueue) isPodBackingoff(podInfo *framework.QueuedPodInfo) bool {
boTime := bq.getBackoffTime(podInfo)
// Don't use After, because when the windows are equal we want to return true.
return !boTime.Before(bq.alignToWindow(bq.clock.Now()))
}
// getBackoffTime returns the time that podInfo completes backoff.
// It caches the result in podInfo.BackoffExpiration and returns this value in subsequent calls.
// The cache will be cleared when this pod is popped from the scheduling queue again (i.e., at activeQ's pop),
// because the backoff time is calculated based on podInfo.Attempts,
// which doesn't change until the pod's scheduling is retried.
func (bq *backoffQueue) getBackoffTime(podInfo *framework.QueuedPodInfo) time.Time {
if bq.podMaxBackoff == 0 {
// If podMaxBackoff is set to 0, the backoff should be disabled completely.
return time.Time{}
}
count := podInfo.UnschedulableCount
if podInfo.ConsecutiveErrorsCount > 0 {
// This Pod has experienced an error status at the last scheduling cycle,
// and we should consider the error count for the backoff duration.
count = podInfo.ConsecutiveErrorsCount
}
if count == 0 {
// When the Pod hasn't experienced any scheduling attempts,
// it doesn't need any backoff.
return time.Time{}
}
if podInfo.BackoffExpiration.IsZero() {
duration := bq.calculateBackoffDuration(count)
podInfo.BackoffExpiration = bq.alignToWindow(podInfo.Timestamp.Add(duration))
}
return podInfo.BackoffExpiration
}
// calculateBackoffDuration is a helper function for calculating the backoffDuration
// based on the number of attempts the pod has made.
func (bq *backoffQueue) calculateBackoffDuration(count int) time.Duration {
if count == 0 {
return 0
}
shift := count - 1
if bq.podInitialBackoff > bq.podMaxBackoff>>shift {
return bq.podMaxBackoff
}
return time.Duration(bq.podInitialBackoff << shift)
}
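// Worked example (not part of the original source), using the package
// defaults of podInitialBackoff=1s and podMaxBackoff=10s:
//
//	count=1: shift=0, 10s>>0 = 10s   >= 1s -> return 1s<<0 = 1s
//	count=2: shift=1, 10s>>1 = 5s    >= 1s -> return 1s<<1 = 2s
//	count=4: shift=3, 10s>>3 = 1.25s >= 1s -> return 1s<<3 = 8s
//	count=5: shift=4, 10s>>4 = 625ms <  1s -> return the 10s cap
//
// Comparing against podMaxBackoff>>shift instead of podInitialBackoff<<shift
// avoids the overflow that left-shifting by a large count could cause.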
func (bq *backoffQueue) popAllBackoffCompletedWithQueue(logger klog.Logger, queue *heap.Heap[*framework.QueuedPodInfo]) []*framework.QueuedPodInfo {
var poppedPods []*framework.QueuedPodInfo
for {
pInfo, ok := queue.Peek()
if !ok || pInfo == nil {
break
}
pod := pInfo.Pod
if bq.isPodBackingoff(pInfo) {
break
}
_, err := queue.Pop()
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Unable to pop pod from backoff queue despite backoff completion", "pod", klog.KObj(pod))
break
}
poppedPods = append(poppedPods, pInfo)
}
return poppedPods
}
// popAllBackoffCompleted pops all pods from podBackoffQ and podErrorBackoffQ that completed backoff.
func (bq *backoffQueue) popAllBackoffCompleted(logger klog.Logger) []*framework.QueuedPodInfo {
bq.lock.Lock()
defer bq.lock.Unlock()
// Pop from both queues and combine the results.
return append(bq.popAllBackoffCompletedWithQueue(logger, bq.podBackoffQ), bq.popAllBackoffCompletedWithQueue(logger, bq.podErrorBackoffQ)...)
}
// add adds the pInfo to backoffQueue.
// The event should show which event triggered this addition and is used for the metric recording.
// It also ensures that pInfo is not in both queues.
func (bq *backoffQueue) add(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) {
bq.lock.Lock()
defer bq.lock.Unlock()
// If the pod has both its unschedulable plugins and pending plugins empty,
// it failed because of an error and should be moved to podErrorBackoffQ.
if pInfo.UnschedulablePlugins.Len() == 0 && pInfo.PendingPlugins.Len() == 0 {
bq.podErrorBackoffQ.AddOrUpdate(pInfo)
// Ensure the pod is not in the podBackoffQ and report the error if it happens.
err := bq.podBackoffQ.Delete(pInfo)
if err == nil {
logger.Error(nil, "BackoffQueue add() was called with a pod that was already in the podBackoffQ", "pod", klog.KObj(pInfo.Pod))
return
}
metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc()
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", backoffQ)
return
}
bq.podBackoffQ.AddOrUpdate(pInfo)
// Ensure the pod is not in the podErrorBackoffQ and report the error if it happens.
err := bq.podErrorBackoffQ.Delete(pInfo)
if err == nil {
logger.Error(nil, "BackoffQueue add() was called with a pod that was already in the podErrorBackoffQ", "pod", klog.KObj(pInfo.Pod))
return
}
metrics.SchedulerQueueIncomingPods.WithLabelValues("backoff", event).Inc()
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", backoffQ)
}
// update updates the pod in backoffQueue if oldPodInfo is already in the queue.
// It returns new pod info if updated, nil otherwise.
func (bq *backoffQueue) update(newPod *v1.Pod, oldPodInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo {
bq.lock.Lock()
defer bq.lock.Unlock()
// If the pod is in the backoff queue, update it there.
if pInfo, exists := bq.podBackoffQ.Get(oldPodInfo); exists {
_ = pInfo.Update(newPod)
bq.podBackoffQ.AddOrUpdate(pInfo)
return pInfo
}
// If the pod is in the error backoff queue, update it there.
if pInfo, exists := bq.podErrorBackoffQ.Get(oldPodInfo); exists {
_ = pInfo.Update(newPod)
bq.podErrorBackoffQ.AddOrUpdate(pInfo)
return pInfo
}
return nil
}
// delete deletes the pInfo from backoffQueue.
// It returns true if the pod was deleted.
func (bq *backoffQueue) delete(pInfo *framework.QueuedPodInfo) bool {
bq.lock.Lock()
defer bq.lock.Unlock()
if bq.podBackoffQ.Delete(pInfo) == nil {
return true
}
return bq.podErrorBackoffQ.Delete(pInfo) == nil
}
// popBackoff pops the pInfo from the podBackoffQ.
// It returns error if the queue is empty.
// This doesn't pop the pods from the podErrorBackoffQ.
func (bq *backoffQueue) popBackoff() (*framework.QueuedPodInfo, error) {
bq.lock.Lock()
defer bq.lock.Unlock()
return bq.podBackoffQ.Pop()
}
// get returns the pInfo matching given pInfoLookup, if exists.
func (bq *backoffQueue) get(pInfoLookup *framework.QueuedPodInfo) (*framework.QueuedPodInfo, bool) {
bq.lock.RLock()
defer bq.lock.RUnlock()
pInfo, exists := bq.podBackoffQ.Get(pInfoLookup)
if exists {
return pInfo, true
}
return bq.podErrorBackoffQ.Get(pInfoLookup)
}
// has returns whether pInfo exists in the queue.
func (bq *backoffQueue) has(pInfo *framework.QueuedPodInfo) bool {
bq.lock.RLock()
defer bq.lock.RUnlock()
return bq.podBackoffQ.Has(pInfo) || bq.podErrorBackoffQ.Has(pInfo)
}
// list returns all pods that are in the queue.
func (bq *backoffQueue) list() []*v1.Pod {
bq.lock.RLock()
defer bq.lock.RUnlock()
var result []*v1.Pod
for _, pInfo := range bq.podBackoffQ.List() {
result = append(result, pInfo.Pod)
}
for _, pInfo := range bq.podErrorBackoffQ.List() {
result = append(result, pInfo.Pod)
}
return result
}
// len returns length of the queue.
func (bq *backoffQueue) len() int {
bq.lock.RLock()
defer bq.lock.RUnlock()
return bq.podBackoffQ.Len() + bq.podErrorBackoffQ.Len()
}
// lenBackoff returns length of the podBackoffQ.
func (bq *backoffQueue) lenBackoff() int {
bq.lock.RLock()
defer bq.lock.RUnlock()
return bq.podBackoffQ.Len()
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
"slices"
"sync"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
)
// nominator is a structure that stores pods nominated to run on nodes.
// It exists because the nominatedNodeName of pod objects stored in the structure
// may differ from what the scheduler has here. We should be able to find pods
// by their UID and update/delete them.
type nominator struct {
// nLock synchronizes all operations related to nominator.
// It should not be used anywhere else.
// Caution: DO NOT take ("SchedulingQueue.lock" or "activeQueue.lock" or "backoffQueue.lock") after taking "nLock".
// You should always take "SchedulingQueue.lock" and "activeQueue.lock" and "backoffQueue.lock" first,
// otherwise the nominator could end up in deadlock.
// Correct locking order is: SchedulingQueue.lock > activeQueue.lock = backoffQueue.lock > nLock.
nLock sync.RWMutex
// podLister is used to verify if the given pod is alive.
podLister listersv1.PodLister
// nominatedPods is a map keyed by a node name and the value is a list of
// pods which are nominated to run on the node. These are pods which can be in
// the activeQ or unschedulablePods.
nominatedPods map[string][]podRef
// nominatedPodToNode is a map keyed by a Pod UID to the node name where it is
// nominated.
nominatedPodToNode map[types.UID]string
}
func newPodNominator(podLister listersv1.PodLister) *nominator {
return &nominator{
podLister: podLister,
nominatedPods: make(map[string][]podRef),
nominatedPodToNode: make(map[types.UID]string),
}
}
// addNominatedPod adds a pod to the nominated pods of the given node.
// This is called during the preemption process when a node is nominated to run
// the pod. We update the nominator's structure before sending an API request to update the pod
// object, to avoid races with the following scheduling cycles.
func (npm *nominator) addNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
npm.nLock.Lock()
npm.addNominatedPodUnlocked(logger, pi, nominatingInfo)
npm.nLock.Unlock()
}
func (npm *nominator) addNominatedPodUnlocked(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
// Always delete the pod if it already exists, to ensure we never store more than
// one instance of the pod.
npm.deleteUnlocked(pi.GetPod())
var nodeName string
if nominatingInfo.Mode() == fwk.ModeOverride {
nodeName = nominatingInfo.NominatedNodeName
} else if nominatingInfo.Mode() == fwk.ModeNoop {
if pi.GetPod().Status.NominatedNodeName == "" {
return
}
nodeName = pi.GetPod().Status.NominatedNodeName
}
if npm.podLister != nil {
// If the pod was removed or if it was already scheduled, don't nominate it.
updatedPod, err := npm.podLister.Pods(pi.GetPod().Namespace).Get(pi.GetPod().Name)
if err != nil {
logger.V(4).Info("Pod doesn't exist in podLister, aborted adding it to the nominator", "pod", klog.KObj(pi.GetPod()))
return
}
if updatedPod.Spec.NodeName != "" {
logger.V(4).Info("Pod is already scheduled to a node, aborted adding it to the nominator", "pod", klog.KObj(pi.GetPod()), "node", updatedPod.Spec.NodeName)
return
}
}
npm.nominatedPodToNode[pi.GetPod().UID] = nodeName
for _, np := range npm.nominatedPods[nodeName] {
if np.uid == pi.GetPod().UID {
logger.V(4).Info("Pod already exists in the nominator", "pod", np.uid)
return
}
}
npm.nominatedPods[nodeName] = append(npm.nominatedPods[nodeName], podToRef(pi.GetPod()))
}
// UpdateNominatedPod updates the <oldPod> with <newPod>.
func (npm *nominator) UpdateNominatedPod(logger klog.Logger, oldPod *v1.Pod, newPodInfo fwk.PodInfo) {
npm.nLock.Lock()
defer npm.nLock.Unlock()
// In some cases, an Update event with no "NominatedNode" present is received right
// after a node("NominatedNode") is reserved for this pod in memory.
// In this case, we need to keep reserving the NominatedNode when updating the pod pointer.
var nominatingInfo *fwk.NominatingInfo
// We won't fall into the `if` block below if the Update event represents:
// (1) NominatedNode info is added
// (2) NominatedNode info is updated
// (3) NominatedNode info is removed
if nominatedNodeName(oldPod) == "" && nominatedNodeName(newPodInfo.GetPod()) == "" {
if nnn, ok := npm.nominatedPodToNode[oldPod.UID]; ok {
// This is the only case in which we should keep reserving the NominatedNode.
nominatingInfo = &fwk.NominatingInfo{
NominatingMode: fwk.ModeOverride,
NominatedNodeName: nnn,
}
}
}
// We update irrespective of whether the nominatedNodeName changed, to ensure
// that the pod pointer is updated.
npm.deleteUnlocked(oldPod)
npm.addNominatedPodUnlocked(logger, newPodInfo, nominatingInfo)
}
// DeleteNominatedPodIfExists deletes <pod> from nominatedPods.
func (npm *nominator) DeleteNominatedPodIfExists(pod *v1.Pod) {
npm.nLock.Lock()
npm.deleteUnlocked(pod)
npm.nLock.Unlock()
}
func (npm *nominator) deleteUnlocked(p *v1.Pod) {
nnn, ok := npm.nominatedPodToNode[p.UID]
if !ok {
return
}
for i, np := range npm.nominatedPods[nnn] {
if np.uid == p.UID {
npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...)
if len(npm.nominatedPods[nnn]) == 0 {
delete(npm.nominatedPods, nnn)
}
break
}
}
delete(npm.nominatedPodToNode, p.UID)
}
func (npm *nominator) nominatedPodsForNode(nodeName string) []podRef {
npm.nLock.RLock()
defer npm.nLock.RUnlock()
return slices.Clone(npm.nominatedPods[nodeName])
}
// nominatedNodeName returns nominated node name of a Pod.
func nominatedNodeName(pod *v1.Pod) string {
return pod.Status.NominatedNodeName
}
type podRef struct {
name string
namespace string
uid types.UID
}
func podToRef(pod *v1.Pod) podRef {
return podRef{
name: pod.Name,
namespace: pod.Namespace,
uid: pod.UID,
}
}
func (np podRef) toPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: np.name,
Namespace: np.namespace,
UID: np.uid,
},
}
}
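// Minimal self-contained sketch (not part of the original source) of the
// dual-index bookkeeping the nominator maintains above: a forward map from
// node name to nominated pods and a reverse map from pod UID to node name,
// which must always be mutated together, and where adding always deletes any
// previous entry first so a pod is stored at most once.
package main

import "fmt"

type dualIndex struct {
	podsByNode map[string][]string // node name -> nominated pod UIDs
	nodeByPod  map[string]string   // pod UID -> node name
}

func (d *dualIndex) add(uid, node string) {
	d.remove(uid) // never store more than one instance of a pod
	d.podsByNode[node] = append(d.podsByNode[node], uid)
	d.nodeByPod[uid] = node
}

func (d *dualIndex) remove(uid string) {
	node, ok := d.nodeByPod[uid]
	if !ok {
		return
	}
	pods := d.podsByNode[node]
	for i, p := range pods {
		if p == uid {
			d.podsByNode[node] = append(pods[:i], pods[i+1:]...)
			break
		}
	}
	if len(d.podsByNode[node]) == 0 {
		delete(d.podsByNode, node)
	}
	delete(d.nodeByPod, uid)
}

func main() {
	d := &dualIndex{podsByNode: map[string][]string{}, nodeByPod: map[string]string{}}
	d.add("uid-1", "node-a")
	d.add("uid-1", "node-b") // re-nomination moves the pod between nodes
	fmt.Println(d.podsByNode, d.nodeByPod) // map[node-b:[uid-1]] map[uid-1:node-b]
}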
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains structures that implement scheduling queue types.
// Scheduling queues hold pods waiting to be scheduled. This file implements a
// priority queue which has two sub-queues and an additional data structure,
// namely: activeQ, backoffQ and unschedulablePods.
// - activeQ holds pods that are being considered for scheduling.
// - backoffQ holds pods that moved from unschedulablePods and will move to
// activeQ when their backoff periods complete.
// - unschedulablePods holds pods that were already attempted for scheduling and
// are currently determined to be unschedulable.
package queue
import (
"context"
"fmt"
"math/rand"
"reflect"
"sync"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/backend/heap"
"k8s.io/kubernetes/pkg/scheduler/framework"
apicalls "k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/utils/clock"
)
const (
// DefaultPodMaxInUnschedulablePodsDuration is the default value for the maximum
// time a pod can stay in unschedulablePods. If a pod stays in unschedulablePods
// for longer than this value, the pod will be moved from unschedulablePods to
// backoffQ or activeQ. If this value is empty, the default value (5min)
// will be used.
DefaultPodMaxInUnschedulablePodsDuration time.Duration = 5 * time.Minute
// Scheduling queue names
activeQ = "Active"
backoffQ = "Backoff"
unschedulableQ = "Unschedulable"
preEnqueue = "PreEnqueue"
)
const (
// DefaultPodInitialBackoffDuration is the default value for the initial backoff duration
// for unschedulable pods. To change the default podInitialBackoffDurationSeconds used by the
// scheduler, update the ComponentConfig value in defaults.go
DefaultPodInitialBackoffDuration time.Duration = 1 * time.Second
// DefaultPodMaxBackoffDuration is the default value for the max backoff duration
// for unschedulable pods. To change the default podMaxBackoffDurationSeconds used by the
// scheduler, update the ComponentConfig value in defaults.go
DefaultPodMaxBackoffDuration time.Duration = 10 * time.Second
)
// PreEnqueueCheck is a function type. It's used to build functions that
// run against a Pod so that the caller can choose to enqueue or skip the Pod
// based on the result of the check.
type PreEnqueueCheck func(pod *v1.Pod) bool
// SchedulingQueue is an interface for a queue to store pods waiting to be scheduled.
// The interface follows a pattern similar to cache.FIFO and cache.Heap and
// makes it easy to use those data structures as a SchedulingQueue.
type SchedulingQueue interface {
fwk.PodNominator
Add(logger klog.Logger, pod *v1.Pod)
// Activate moves the given pods to activeQ.
// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
// the wildcard event is registered so that the pod will be requeued when it comes back.
// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., completely unknown pod),
// Activate would ignore the pod.
Activate(logger klog.Logger, pods map[string]*v1.Pod)
// AddUnschedulableIfNotPresent adds an unschedulable pod back to scheduling queue.
// The podSchedulingCycle represents the current scheduling cycle number which can be
// returned by calling SchedulingCycle().
AddUnschedulableIfNotPresent(logger klog.Logger, pod *framework.QueuedPodInfo, podSchedulingCycle int64) error
// SchedulingCycle returns the current scheduling cycle number, which is
// cached by the scheduling queue. Normally, incrementing this number whenever
// a pod is popped (i.e., when Pop() is called) is enough.
SchedulingCycle() int64
// Pop removes the head of the queue and returns it. It blocks if the
// queue is empty and waits until a new item is added to the queue.
Pop(logger klog.Logger) (*framework.QueuedPodInfo, error)
// Done must be called for pod returned by Pop. This allows the queue to
// keep track of which pods are currently being processed.
Done(types.UID)
Update(logger klog.Logger, oldPod, newPod *v1.Pod)
Delete(pod *v1.Pod)
// Important Note: preCheck shouldn't include anything that depends on the in-tree plugins' logic.
// (e.g., filter Pods based on added/updated Node's capacity, etc.)
// We know currently some do, but we'll eventually remove them in favor of the scheduling queue hint.
MoveAllToActiveOrBackoffQueue(logger klog.Logger, event fwk.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck)
AssignedPodAdded(logger klog.Logger, pod *v1.Pod)
AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v1.Pod, event fwk.ClusterEvent)
// Close closes the SchedulingQueue so that the goroutine which is
// waiting to pop items can exit gracefully.
Close()
// Run starts the goroutines managing the queue.
Run(logger klog.Logger)
// PatchPodStatus handles the pod status update by sending an update API call through API dispatcher.
// This method should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error)
// The following functions are supposed to be used only for testing or debugging.
GetPod(name, namespace string) (*framework.QueuedPodInfo, bool)
PendingPods() ([]*v1.Pod, string)
InFlightPods() []*v1.Pod
PodsInActiveQ() []*v1.Pod
// PodsInBackoffQ returns all the Pods in the backoffQ.
PodsInBackoffQ() []*v1.Pod
UnschedulablePods() []*v1.Pod
}
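// Illustrative sketch (not part of the original source) of the Pop/Done
// contract described above: a scheduling loop pops a pod, runs the scheduling
// attempt, and must call Done with the pod's UID once the attempt (including
// any asynchronous binding) finishes, so the queue stops tracking it as
// in-flight. q and logger are assumed to exist.
//
//	pInfo, err := q.Pop(logger)
//	if err != nil || pInfo == nil {
//		return // the queue was closed
//	}
//	defer q.Done(pInfo.Pod.UID)
//	// ... run the scheduling cycle for pInfo ...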
// NewSchedulingQueue initializes a priority queue as a new scheduling queue.
func NewSchedulingQueue(
lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option) SchedulingQueue {
return NewPriorityQueue(lessFn, informerFactory, opts...)
}
// PriorityQueue implements a scheduling queue.
// The head of PriorityQueue is the highest priority pending pod. This structure
// has two sub-queues and an additional data structure, namely: activeQ,
// backoffQ and unschedulablePods.
// - activeQ holds pods that are being considered for scheduling.
// - backoffQ holds pods that moved from unschedulablePods and will move to
// activeQ when their backoff periods complete.
// - unschedulablePods holds pods that were already attempted for scheduling and
// are currently determined to be unschedulable.
type PriorityQueue struct {
*nominator
stop chan struct{}
clock clock.WithTicker
// lock takes precedence and should be taken first,
// before any other locks in the queue (activeQueue.lock or backoffQueue.lock or nominator.nLock).
// Correct locking order is: lock > activeQueue.lock > backoffQueue.lock > nominator.nLock.
lock sync.RWMutex
// the maximum time a pod can stay in the unschedulablePods.
podMaxInUnschedulablePodsDuration time.Duration
activeQ activeQueuer
backoffQ backoffQueuer
// unschedulablePods holds pods that have been tried and determined unschedulable.
unschedulablePods *unschedulablePods
// moveRequestCycle caches the sequence number of scheduling cycle when we
// received a move request. Unschedulable pods in and before this scheduling
// cycle will be put back to activeQueue if we were trying to schedule them
// when we received the move request.
// TODO: this will be removed after SchedulingQueueHint goes to stable and the feature gate is removed.
moveRequestCycle int64
// preEnqueuePluginMap is keyed with profile and plugin name, valued with registered preEnqueue plugins.
preEnqueuePluginMap map[string]map[string]fwk.PreEnqueuePlugin
// queueingHintMap is keyed with profile name, valued with registered queueing hint functions.
queueingHintMap QueueingHintMapPerProfile
// pluginToEventsMap shows which plugin is interested in which events.
pluginToEventsMap map[string][]fwk.ClusterEvent
nsLister listersv1.NamespaceLister
metricsRecorder *metrics.MetricAsyncRecorder
// pluginMetricsSamplePercent is the percentage of plugin metrics to be sampled.
pluginMetricsSamplePercent int
// apiDispatcher is used for the methods that are expected to send API calls.
// It's non-nil only if the SchedulerAsyncAPICalls feature gate is enabled.
apiDispatcher fwk.APIDispatcher
// isSchedulingQueueHintEnabled indicates whether the SchedulerQueueingHints feature gate is enabled.
isSchedulingQueueHintEnabled bool
// isPopFromBackoffQEnabled indicates whether the feature gate SchedulerPopFromBackoffQ is enabled.
isPopFromBackoffQEnabled bool
}
// QueueingHintFunction is the wrapper of QueueingHintFn that has PluginName.
type QueueingHintFunction struct {
PluginName string
QueueingHintFn fwk.QueueingHintFn
}
// clusterEvent has the event and involved objects.
type clusterEvent struct {
event fwk.ClusterEvent
// oldObj is the object involved in this event.
oldObj interface{}
// newObj is the object involved in this event.
newObj interface{}
}
type priorityQueueOptions struct {
clock clock.WithTicker
podInitialBackoffDuration time.Duration
podMaxBackoffDuration time.Duration
podMaxInUnschedulablePodsDuration time.Duration
podLister listersv1.PodLister
metricsRecorder *metrics.MetricAsyncRecorder
pluginMetricsSamplePercent int
preEnqueuePluginMap map[string]map[string]fwk.PreEnqueuePlugin
queueingHintMap QueueingHintMapPerProfile
apiDispatcher fwk.APIDispatcher
}
// Option configures a PriorityQueue
type Option func(*priorityQueueOptions)
// WithClock sets clock for PriorityQueue, the default clock is clock.RealClock.
func WithClock(clock clock.WithTicker) Option {
return func(o *priorityQueueOptions) {
o.clock = clock
}
}
// WithPodInitialBackoffDuration sets pod initial backoff duration for PriorityQueue.
func WithPodInitialBackoffDuration(duration time.Duration) Option {
return func(o *priorityQueueOptions) {
o.podInitialBackoffDuration = duration
}
}
// WithPodMaxBackoffDuration sets pod max backoff duration for PriorityQueue.
func WithPodMaxBackoffDuration(duration time.Duration) Option {
return func(o *priorityQueueOptions) {
o.podMaxBackoffDuration = duration
}
}
// WithPodLister sets pod lister for PriorityQueue.
func WithPodLister(pl listersv1.PodLister) Option {
return func(o *priorityQueueOptions) {
o.podLister = pl
}
}
// WithPodMaxInUnschedulablePodsDuration sets podMaxInUnschedulablePodsDuration for PriorityQueue.
func WithPodMaxInUnschedulablePodsDuration(duration time.Duration) Option {
return func(o *priorityQueueOptions) {
o.podMaxInUnschedulablePodsDuration = duration
}
}
// QueueingHintMapPerProfile is keyed with profile name, valued with queueing hint map registered for the profile.
type QueueingHintMapPerProfile map[string]QueueingHintMap
// QueueingHintMap is keyed with ClusterEvent, valued with queueing hint functions registered for the event.
type QueueingHintMap map[fwk.ClusterEvent][]*QueueingHintFunction
// WithQueueingHintMapPerProfile sets queueingHintMap for PriorityQueue.
func WithQueueingHintMapPerProfile(m QueueingHintMapPerProfile) Option {
return func(o *priorityQueueOptions) {
o.queueingHintMap = m
}
}
// WithPreEnqueuePluginMap sets preEnqueuePluginMap for PriorityQueue.
func WithPreEnqueuePluginMap(m map[string]map[string]fwk.PreEnqueuePlugin) Option {
return func(o *priorityQueueOptions) {
o.preEnqueuePluginMap = m
}
}
// WithMetricsRecorder sets metrics recorder.
func WithMetricsRecorder(recorder *metrics.MetricAsyncRecorder) Option {
return func(o *priorityQueueOptions) {
o.metricsRecorder = recorder
}
}
// WithPluginMetricsSamplePercent sets the percentage of plugin metrics to be sampled.
func WithPluginMetricsSamplePercent(percent int) Option {
return func(o *priorityQueueOptions) {
o.pluginMetricsSamplePercent = percent
}
}
// WithAPIDispatcher sets the API dispatcher.
func WithAPIDispatcher(apiDispatcher fwk.APIDispatcher) Option {
return func(o *priorityQueueOptions) {
o.apiDispatcher = apiDispatcher
}
}
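// Illustrative sketch (not part of the original source): the Option values
// above are applied in order over defaultPriorityQueueOptions, so a later
// option overrides an earlier one for the same field. lessFn and
// informerFactory are assumed to exist.
//
//	q := NewPriorityQueue(lessFn, informerFactory,
//		WithPodInitialBackoffDuration(2*time.Second),
//		WithPodMaxBackoffDuration(30*time.Second),
//		WithPodMaxInUnschedulablePodsDuration(10*time.Minute),
//	)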
var defaultPriorityQueueOptions = priorityQueueOptions{
clock: clock.RealClock{},
podInitialBackoffDuration: DefaultPodInitialBackoffDuration,
podMaxBackoffDuration: DefaultPodMaxBackoffDuration,
podMaxInUnschedulablePodsDuration: DefaultPodMaxInUnschedulablePodsDuration,
}
// Making sure that PriorityQueue implements SchedulingQueue.
var _ SchedulingQueue = &PriorityQueue{}
// newQueuedPodInfoForLookup builds a QueuedPodInfo object for a lookup in the queue.
func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo {
// Since this is only used for a lookup in the queue, we only need to set the Pod,
// and so we avoid creating a full PodInfo, which is expensive to instantiate frequently.
return &framework.QueuedPodInfo{
PodInfo: &framework.PodInfo{Pod: pod},
UnschedulablePlugins: sets.New(plugins...),
}
}
// NewPriorityQueue creates a PriorityQueue object.
func NewPriorityQueue(
lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option,
) *PriorityQueue {
options := defaultPriorityQueueOptions
if options.podLister == nil {
options.podLister = informerFactory.Core().V1().Pods().Lister()
}
for _, opt := range opts {
opt(&options)
}
isSchedulingQueueHintEnabled := utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints)
isPopFromBackoffQEnabled := utilfeature.DefaultFeatureGate.Enabled(features.SchedulerPopFromBackoffQ)
lessConverted := convertLessFn(lessFn)
backoffQ := newBackoffQueue(options.clock, options.podInitialBackoffDuration, options.podMaxBackoffDuration, lessFn, isPopFromBackoffQEnabled)
pq := &PriorityQueue{
clock: options.clock,
stop: make(chan struct{}),
podMaxInUnschedulablePodsDuration: options.podMaxInUnschedulablePodsDuration,
backoffQ: backoffQ,
unschedulablePods: newUnschedulablePods(metrics.NewUnschedulablePodsRecorder(), metrics.NewGatedPodsRecorder()),
preEnqueuePluginMap: options.preEnqueuePluginMap,
queueingHintMap: options.queueingHintMap,
pluginToEventsMap: buildEventMap(options.queueingHintMap),
metricsRecorder: options.metricsRecorder,
pluginMetricsSamplePercent: options.pluginMetricsSamplePercent,
moveRequestCycle: -1,
apiDispatcher: options.apiDispatcher,
isSchedulingQueueHintEnabled: isSchedulingQueueHintEnabled,
isPopFromBackoffQEnabled: isPopFromBackoffQEnabled,
}
var backoffQPopper backoffQPopper
if isPopFromBackoffQEnabled {
backoffQPopper = backoffQ
}
pq.activeQ = newActiveQueue(heap.NewWithRecorder(podInfoKeyFunc, heap.LessFunc[*framework.QueuedPodInfo](lessConverted), metrics.NewActivePodsRecorder()), isSchedulingQueueHintEnabled, options.metricsRecorder, backoffQPopper)
pq.nsLister = informerFactory.Core().V1().Namespaces().Lister()
pq.nominator = newPodNominator(options.podLister)
return pq
}
// Helper function that wraps fwk.LessFunc and converts it to take *framework.QueuedPodInfo as arguments.
func convertLessFn(lessFn fwk.LessFunc) func(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
return func(podInfo1, podInfo2 *framework.QueuedPodInfo) bool {
return lessFn(podInfo1, podInfo2)
}
}
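// Illustrative sketch (assumption, not part of the original source): fwk.LessFunc
// compares two queued pod infos behind interfaces, so the concrete
// *framework.QueuedPodInfo values satisfy it directly; the wrapper above merely narrows
// the parameter types for the generic heap. A hypothetical lessFn might look like:
//
//	lessFn := func(p1, p2 fwk.QueuedPodInfo) bool {
//		// podPriority is a hypothetical helper extracting the pod's priority.
//		return podPriority(p1) > podPriority(p2)
//	}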
func buildEventMap(qHintMap QueueingHintMapPerProfile) map[string][]fwk.ClusterEvent {
eventMap := make(map[string][]fwk.ClusterEvent)
for _, hintMap := range qHintMap {
for event, qHints := range hintMap {
for _, qHint := range qHints {
eventMap[qHint.PluginName] = append(eventMap[qHint.PluginName], event)
}
}
}
return eventMap
}
// Run starts the goroutines that pump pods from backoffQ and unschedulablePods into activeQ.
func (p *PriorityQueue) Run(logger klog.Logger) {
go p.backoffQ.waitUntilAlignedWithOrderingWindow(func() {
p.flushBackoffQCompleted(logger)
}, p.stop)
go wait.Until(func() {
p.flushUnschedulablePodsLeftover(logger)
}, 30*time.Second, p.stop)
}
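// Illustrative lifecycle sketch (not part of the original source): Run starts the
// background flushing goroutines, and Close (defined further below) stops them by
// closing the p.stop channel.
//
//	pq := NewPriorityQueue(lessFn, informerFactory)
//	pq.Run(logger)
//	defer pq.Close()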
// queueingStrategy indicates how the scheduling queue should enqueue the Pod from the unschedulable pod pool.
type queueingStrategy int
const (
// queueSkip indicates that the scheduling queue should skip requeuing the Pod to activeQ/backoffQ.
queueSkip queueingStrategy = iota
// queueAfterBackoff indicates that the scheduling queue should requeue the Pod after backoff is completed.
queueAfterBackoff
// queueImmediately indicates that the scheduling queue should skip backoff and requeue the Pod immediately to activeQ.
queueImmediately
)
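// The three strategies form a strict precedence, queueImmediately > queueAfterBackoff > queueSkip;
// isPodWorthRequeuing below returns the strongest strategy suggested by any matching hint.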
// isEventOfInterest returns true if the event is of interest to some plugins.
func (p *PriorityQueue) isEventOfInterest(logger klog.Logger, event fwk.ClusterEvent) bool {
if framework.ClusterEventIsWildCard(event) {
// Wildcard event moves Pods that failed with any plugins.
return true
}
for _, hintMap := range p.queueingHintMap {
for eventToMatch := range hintMap {
if framework.MatchClusterEvents(eventToMatch, event) {
// Some plugins are interested in this event.
return true
}
}
}
logger.V(6).Info("Received an event that no enabled plugin is interested in", "event", event)
return false
}
// isPodWorthRequeuing calls QueueingHintFn of only plugins registered in pInfo.unschedulablePlugins and pInfo.PendingPlugins.
//
// If any of pInfo.PendingPlugins return Queue,
// the scheduling queue is supposed to enqueue this Pod to activeQ, skipping backoffQ.
// If any of pInfo.unschedulablePlugins return Queue,
// the scheduling queue is supposed to enqueue this Pod to activeQ/backoffQ depending on the remaining backoff time of the Pod.
// If all QueueingHintFns return Skip, the scheduling queue enqueues the Pod back to the unschedulable Pod pool
// because no plugin changes the scheduling result via the event.
func (p *PriorityQueue) isPodWorthRequeuing(logger klog.Logger, pInfo *framework.QueuedPodInfo, event fwk.ClusterEvent, oldObj, newObj interface{}) queueingStrategy {
rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins)
if rejectorPlugins.Len() == 0 {
logger.V(6).Info("Worth requeuing because no failed plugins", "pod", klog.KObj(pInfo.Pod))
return queueAfterBackoff
}
if framework.ClusterEventIsWildCard(event) {
// If the wildcard event has a Pod in newObj,
// that indicates the event is intended to be effective for that Pod only.
// Specifically, EventForceActivate could have a target Pod in newObj.
if newObj != nil {
if pod, ok := newObj.(*v1.Pod); !ok || pod.UID != pInfo.Pod.UID {
// This wildcard event is not for this Pod.
if ok {
logger.V(6).Info("Not worth requeuing because the event is wildcard, but for another pod", "pod", klog.KObj(pInfo.Pod), "event", event.Label(), "newObj", klog.KObj(pod))
}
return queueSkip
}
}
// Otherwise, this is the special wildcard event used to force all Pods to move to activeQ/backoffQ.
// We return queueAfterBackoff in this case, while resetting all blocked plugins.
logger.V(6).Info("Worth requeuing because the event is wildcard", "pod", klog.KObj(pInfo.Pod), "event", event.Label())
return queueAfterBackoff
}
hintMap, ok := p.queueingHintMap[pInfo.Pod.Spec.SchedulerName]
if !ok {
// Shouldn't reach here unless there is a bug.
utilruntime.HandleErrorWithLogger(logger, nil, "No QueueingHintMap is registered for this profile", "profile", pInfo.Pod.Spec.SchedulerName, "pod", klog.KObj(pInfo.Pod))
return queueAfterBackoff
}
pod := pInfo.Pod
queueStrategy := queueSkip
for eventToMatch, hintfns := range hintMap {
if !framework.MatchClusterEvents(eventToMatch, event) {
continue
}
for _, hintfn := range hintfns {
if !rejectorPlugins.Has(hintfn.PluginName) {
// Skip if this hint function wasn't registered by one of the rejector plugins.
continue
}
start := time.Now()
hint, err := hintfn.QueueingHintFn(logger, pod, oldObj, newObj)
if err != nil {
// If the QueueingHintFn returned an error, we should treat the event as Queue so that we can prevent
// the Pod from being stuck in the unschedulable pod pool.
oldObjMeta, newObjMeta, asErr := util.As[klog.KMetadata](oldObj, newObj)
if asErr != nil {
utilruntime.HandleErrorWithLogger(logger, err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod))
} else {
utilruntime.HandleErrorWithLogger(logger, err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod), "oldObj", klog.KObj(oldObjMeta), "newObj", klog.KObj(newObjMeta))
}
hint = fwk.Queue
}
p.metricsRecorder.ObserveQueueingHintDurationAsync(hintfn.PluginName, event.Label(), queueingHintToLabel(hint, err), metrics.SinceInSeconds(start))
if hint == fwk.QueueSkip {
continue
}
if pInfo.PendingPlugins.Has(hintfn.PluginName) {
// Interpret Queue from a Pending plugin as queueImmediately.
// We can return immediately because queueImmediately is the highest priority.
return queueImmediately
}
// Interpret Queue from an unschedulable plugin as queueAfterBackoff.
if pInfo.PendingPlugins.Len() == 0 {
// We can return immediately because this Pod has no Pending plugins registered
// (only Pending plugins can yield queueImmediately), and queueAfterBackoff is the second highest priority.
return queueAfterBackoff
}
// We can't return immediately because there are some Pending plugins registered in this Pod.
// We need to check if those plugins return Queue or not and if they do, we return queueImmediately.
queueStrategy = queueAfterBackoff
}
}
return queueStrategy
}
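// Illustrative sketch (assumption, not part of the original source): a minimal
// QueueingHintFn with the shape invoked above, for a hypothetical plugin that watches
// Node/Add events. It returns fwk.Queue when the new Node looks usable and
// fwk.QueueSkip otherwise; returned errors are treated as fwk.Queue by the caller.
//
//	func exampleHintFn(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
//		node, ok := newObj.(*v1.Node)
//		if !ok {
//			return fwk.Queue, fmt.Errorf("unexpected object type %T", newObj)
//		}
//		if node.Spec.Unschedulable {
//			// The node cannot accept the pod, so this event won't help.
//			return fwk.QueueSkip, nil
//		}
//		return fwk.Queue, nil
//	}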
// queueingHintToLabel converts a hint and an error from QHint to a label string.
func queueingHintToLabel(hint fwk.QueueingHint, err error) string {
if err != nil {
return metrics.QueueingHintResultError
}
switch hint {
case fwk.Queue:
return metrics.QueueingHintResultQueue
case fwk.QueueSkip:
return metrics.QueueingHintResultQueueSkip
}
// Shouldn't reach here.
return ""
}
// runPreEnqueuePlugins iterates PreEnqueue function in each registered PreEnqueuePlugin,
// and updates pInfo.GatingPlugin and pInfo.UnschedulablePlugins.
// Note: we need to associate the failed plugin to `pInfo`, so that the pod can be moved back
// to activeQ by related cluster event.
func (p *PriorityQueue) runPreEnqueuePlugins(ctx context.Context, pInfo *framework.QueuedPodInfo) {
var s *fwk.Status
pod := pInfo.Pod
startTime := p.clock.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(preEnqueue, s.Code().String(), pod.Spec.SchedulerName).Observe(metrics.SinceInSeconds(startTime))
}()
shouldRecordMetric := rand.Intn(100) < p.pluginMetricsSamplePercent
logger := klog.FromContext(ctx)
gatingPlugin := pInfo.GatingPlugin
if gatingPlugin != "" {
// Run the gating plugin first
s := p.runPreEnqueuePlugin(ctx, logger, p.preEnqueuePluginMap[pod.Spec.SchedulerName][gatingPlugin], pInfo, shouldRecordMetric)
if !s.IsSuccess() {
// No need to iterate other plugins
return
}
}
for _, pl := range p.preEnqueuePluginMap[pod.Spec.SchedulerName] {
if gatingPlugin != "" && pl.Name() == gatingPlugin {
// It was already run above.
continue
}
s := p.runPreEnqueuePlugin(ctx, logger, pl, pInfo, shouldRecordMetric)
if !s.IsSuccess() {
// No need to iterate other plugins
return
}
}
// all plugins passed
pInfo.GatingPlugin = ""
}
// runPreEnqueuePlugin runs the PreEnqueue plugin and updates pInfo's fields accordingly if needed.
func (p *PriorityQueue) runPreEnqueuePlugin(ctx context.Context, logger klog.Logger, pl fwk.PreEnqueuePlugin, pInfo *framework.QueuedPodInfo, shouldRecordMetric bool) *fwk.Status {
pod := pInfo.Pod
startTime := p.clock.Now()
s := pl.PreEnqueue(ctx, pod)
if shouldRecordMetric {
p.metricsRecorder.ObservePluginDurationAsync(preEnqueue, pl.Name(), s.Code().String(), p.clock.Since(startTime).Seconds())
}
if s.IsSuccess() {
// No need to change GatingPlugin; it's overwritten by the next PreEnqueue plugin if they gate this pod, or it's overwritten with an empty string if all PreEnqueue plugins pass.
return s
}
pInfo.UnschedulablePlugins.Insert(pl.Name())
metrics.UnschedulableReason(pl.Name(), pod.Spec.SchedulerName).Inc()
pInfo.GatingPlugin = pl.Name()
pInfo.GatingPluginEvents = p.pluginToEventsMap[pInfo.GatingPlugin]
if s.Code() == fwk.Error {
utilruntime.HandleErrorWithContext(ctx, s.AsError(), "Unexpected error running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name())
} else {
logger.V(4).Info("Status after running PreEnqueue plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", s)
}
return s
}
// AddNominatedPod adds the given pod to the nominator.
// It locks the PriorityQueue to make sure it won't race with any other method.
func (p *PriorityQueue) AddNominatedPod(logger klog.Logger, pi fwk.PodInfo, nominatingInfo *fwk.NominatingInfo) {
p.lock.Lock()
p.nominator.addNominatedPod(logger, pi, nominatingInfo)
p.lock.Unlock()
}
// moveToActiveQ tries to add the pod to the active queue.
// If the pod doesn't pass PreEnqueue plugins, it gets added to unschedulablePods instead.
// movesFromBackoffQ should be set to true if the pod moves directly from the backoffQ, so that the PreEnqueue call can be skipped.
// It returns a boolean flag to indicate whether the pod is added successfully.
func (p *PriorityQueue) moveToActiveQ(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string, movesFromBackoffQ bool) bool {
gatedBefore := pInfo.Gated()
// If SchedulerPopFromBackoffQ feature gate is enabled,
// PreEnqueue plugins were called when the pod was added to the backoffQ.
// Don't need to repeat it here when the pod is directly moved from the backoffQ.
skipPreEnqueue := p.isPopFromBackoffQEnabled && movesFromBackoffQ
if !skipPreEnqueue {
p.runPreEnqueuePlugins(context.Background(), pInfo)
}
added := false
p.activeQ.underLock(func(unlockedActiveQ unlockedActiveQueuer) {
if pInfo.Gated() {
// Add the Pod to unschedulablePods if it's not passing PreEnqueuePlugins.
if unlockedActiveQ.has(pInfo) {
return
}
if p.backoffQ.has(pInfo) {
return
}
if p.unschedulablePods.get(pInfo.Pod) != nil {
return
}
p.unschedulablePods.addOrUpdate(pInfo, event)
logger.V(5).Info("Pod moved to an internal scheduling queue, because the pod is gated", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", unschedulableQ)
return
}
if pInfo.InitialAttemptTimestamp == nil {
now := p.clock.Now()
pInfo.InitialAttemptTimestamp = &now
}
p.unschedulablePods.delete(pInfo.Pod, gatedBefore)
p.backoffQ.delete(pInfo)
unlockedActiveQ.add(logger, pInfo, event)
added = true
if event == framework.EventUnscheduledPodAdd.Label() || event == framework.EventUnscheduledPodUpdate.Label() {
p.nominator.addNominatedPod(logger, pInfo.PodInfo, nil)
}
})
return added
}
// moveToBackoffQ tries to add the pod to the backoff queue.
// If SchedulerPopFromBackoffQ feature gate is enabled and the pod doesn't pass PreEnqueue plugins, it gets added to unschedulablePods instead.
// It returns a boolean flag to indicate whether the pod is added successfully.
func (p *PriorityQueue) moveToBackoffQ(logger klog.Logger, pInfo *framework.QueuedPodInfo, event string) bool {
gatedBefore := pInfo.Gated()
// If SchedulerPopFromBackoffQ feature gate is enabled,
// PreEnqueue plugins are called when inserting pods into the backoffQ,
// so that they don't have to be called again when popping them out.
if p.isPopFromBackoffQEnabled {
p.runPreEnqueuePlugins(context.Background(), pInfo)
if pInfo.Gated() {
if p.unschedulablePods.get(pInfo.Pod) == nil {
p.unschedulablePods.addOrUpdate(pInfo, event)
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", unschedulableQ)
}
return false
}
}
p.unschedulablePods.delete(pInfo.Pod, gatedBefore)
p.backoffQ.add(logger, pInfo, event)
return true
}
// Add adds a pod to the active queue. It should be called only when a new pod
// is added so there is no chance the pod is already in active/unschedulable/backoff queues
func (p *PriorityQueue) Add(logger klog.Logger, pod *v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
pInfo := p.newQueuedPodInfo(pod)
if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodAdd.Label(), false); added {
p.activeQ.broadcast()
}
}
// Activate moves the given pods to activeQ.
// If a pod isn't found in unschedulablePods or backoffQ and it's in-flight,
// the wildcard event is registered so that the pod will be requeued when it comes back.
// But, if a pod isn't found in unschedulablePods or backoffQ and it's not in-flight (i.e., completely unknown pod),
// Activate would ignore the pod.
func (p *PriorityQueue) Activate(logger klog.Logger, pods map[string]*v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
activated := false
for _, pod := range pods {
if p.activate(logger, pod) {
activated = true
continue
}
// If this pod is in-flight, register the activation event (when QueueingHints are enabled) or update moveRequestCycle (when they are disabled)
// so that the pod will be requeued when it comes back.
// Specifically in the in-tree plugins, this is for the scenario with the preemption plugin
// where the async preemption API calls are all done or fail at some point before the Pod comes back to the queue.
p.activeQ.addEventsIfPodInFlight(nil, pod, []fwk.ClusterEvent{framework.EventForceActivate})
p.moveRequestCycle = p.activeQ.schedulingCycle()
}
if activated {
p.activeQ.broadcast()
}
}
func (p *PriorityQueue) activate(logger klog.Logger, pod *v1.Pod) bool {
var pInfo *framework.QueuedPodInfo
var movesFromBackoffQ bool
// Verify if the pod is present in unschedulablePods or backoffQ.
if pInfo = p.unschedulablePods.get(pod); pInfo == nil {
// If the pod doesn't belong to unschedulablePods or backoffQ, don't activate it.
// The pod may already be in activeQ.
var exists bool
pInfo, exists = p.backoffQ.get(newQueuedPodInfoForLookup(pod))
if !exists {
return false
}
// Delete pod from the backoffQ now to make sure it won't be popped from the backoffQ
// just before moving it to the activeQ
if deleted := p.backoffQ.delete(pInfo); !deleted {
// Pod was popped from the backoffQ in the meantime. Don't activate it.
return false
}
movesFromBackoffQ = true
}
if pInfo == nil {
// Redundant safety check. We shouldn't reach here.
utilruntime.HandleErrorWithLogger(logger, nil, "Internal error: cannot obtain pInfo")
return false
}
return p.moveToActiveQ(logger, pInfo, framework.ForceActivate, movesFromBackoffQ)
}
// SchedulingCycle returns current scheduling cycle.
func (p *PriorityQueue) SchedulingCycle() int64 {
return p.activeQ.schedulingCycle()
}
// determineSchedulingHintForInFlightPod looks at the unschedulable plugins of the given Pod
// and determines the scheduling hint for this Pod while checking the events that happened during in-flight.
func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) queueingStrategy {
if len(pInfo.UnschedulablePlugins) == 0 && len(pInfo.PendingPlugins) == 0 {
// No failed plugins are associated with this Pod.
// Meaning something unusual (a temporary failure on kube-apiserver, etc.) happened and this Pod gets moved back to the queue.
// In this case, we should retry scheduling it because this Pod may not be retried until the next flush.
return queueAfterBackoff
}
events, err := p.activeQ.clusterEventsForPod(logger, pInfo)
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Error getting cluster events for pod", "pod", klog.KObj(pInfo.Pod))
return queueAfterBackoff
}
// check if there is an event that makes this Pod schedulable based on pInfo.UnschedulablePlugins.
queueingStrategy := queueSkip
for _, e := range events {
logger.V(5).Info("Checking event for in-flight pod", "pod", klog.KObj(pInfo.Pod), "event", e.event.Label())
switch p.isPodWorthRequeuing(logger, pInfo, e.event, e.oldObj, e.newObj) {
case queueSkip:
continue
case queueImmediately:
// queueImmediately is the highest priority.
// No need to go through the rest of the events.
return queueImmediately
case queueAfterBackoff:
// replace schedulingHint with queueAfterBackoff
queueingStrategy = queueAfterBackoff
if pInfo.PendingPlugins.Len() == 0 {
// We can return immediately because this Pod has no Pending plugins registered
// (only Pending plugins can yield queueImmediately), and queueAfterBackoff is the second highest priority.
return queueAfterBackoff
}
}
}
return queueingStrategy
}
// addUnschedulableWithoutQueueingHint inserts a pod that cannot be scheduled into
// the queue, unless it is already in the queue. Normally, PriorityQueue puts
// unschedulable pods in `unschedulablePods`. But if there has been a recent move
// request, then the pod is put in `backoffQ`.
// TODO: This function is called only when p.isSchedulingQueueHintEnabled is false,
// and this will be removed after SchedulingQueueHint goes to stable and the feature gate is removed.
func (p *PriorityQueue) addUnschedulableWithoutQueueingHint(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
pod := pInfo.Pod
// When the queueing hint is enabled, UnschedulablePlugins and PendingPlugins are used differently.
// But when it isn't enabled, we use all of them as UnschedulablePlugins so that we don't break the old behaviour.
rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins)
// If a move request has been received, move it to the BackoffQ, otherwise move
// it to unschedulablePods.
for plugin := range rejectorPlugins {
metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc()
}
if p.moveRequestCycle >= podSchedulingCycle || len(rejectorPlugins) == 0 {
// Two cases to move a Pod to the active/backoff queue:
// - The Pod is rejected by some plugins, but a move request is received after this Pod's scheduling cycle is started.
// In this case, the received event may make the Pod schedulable and we should retry scheduling it.
// - No unschedulable plugins are associated with this Pod,
// meaning something unusual (a temporary failure on kube-apiserver, etc.) happened and this Pod gets moved back to the queue.
// In this case, we should retry scheduling it because this Pod may not be retried until the next flush.
if added := p.moveToBackoffQ(logger, pInfo, framework.ScheduleAttemptFailure); added {
if p.isPopFromBackoffQEnabled {
p.activeQ.broadcast()
}
}
} else {
p.unschedulablePods.addOrUpdate(pInfo, framework.ScheduleAttemptFailure)
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", framework.ScheduleAttemptFailure, "queue", unschedulableQ)
}
return nil
}
// AddUnschedulableIfNotPresent inserts a pod that cannot be scheduled into
// the queue, unless it is already in the queue. Normally, PriorityQueue puts
// unschedulable pods in `unschedulablePods`. But if there has been a recent move
// request, then the pod is put in `backoffQ`.
func (p *PriorityQueue) AddUnschedulableIfNotPresent(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error {
p.lock.Lock()
defer p.lock.Unlock()
// In any case, this Pod will be moved back to the queue and we should call Done.
defer p.Done(pInfo.Pod.UID)
pod := pInfo.Pod
if p.unschedulablePods.get(pod) != nil {
return fmt.Errorf("Pod %v is already present in the unschedulable queue", klog.KObj(pod))
}
if p.activeQ.has(pInfo) {
return fmt.Errorf("Pod %v is already present in the active queue", klog.KObj(pod))
}
if p.backoffQ.has(pInfo) {
return fmt.Errorf("Pod %v is already present in the backoff queue", klog.KObj(pod))
}
if len(pInfo.UnschedulablePlugins) == 0 && len(pInfo.PendingPlugins) == 0 {
// This Pod came back because of some unexpected errors (e.g., a network issue).
pInfo.ConsecutiveErrorsCount++
} else {
// This Pod is rejected by some plugins, not coming back due to unexpected errors (e.g., a network issue)
pInfo.UnschedulableCount++
// We should reset the error count because the error is gone.
pInfo.ConsecutiveErrorsCount = 0
}
// Refresh the timestamp since the pod is re-added.
pInfo.Timestamp = p.clock.Now()
// We changed ConsecutiveErrorsCount or UnschedulableCount plus Timestamp, so the calculated backoff time will be different;
// remove the cached backoff time.
pInfo.BackoffExpiration = time.Time{}
if !p.isSchedulingQueueHintEnabled {
// fall back to the old behavior which doesn't depend on the queueing hint.
return p.addUnschedulableWithoutQueueingHint(logger, pInfo, podSchedulingCycle)
}
// If a move request has been received, move it to the BackoffQ, otherwise move
// it to unschedulablePods.
rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins)
for plugin := range rejectorPlugins {
metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc()
}
// We check whether this Pod may change its scheduling result by any of events that happened during scheduling.
schedulingHint := p.determineSchedulingHintForInFlightPod(logger, pInfo)
// In this case, we try to requeue this Pod to activeQ/backoffQ.
queue := p.requeuePodWithQueueingStrategy(logger, pInfo, schedulingHint, framework.ScheduleAttemptFailure)
logger.V(3).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", framework.ScheduleAttemptFailure, "queue", queue, "schedulingCycle", podSchedulingCycle, "hint", schedulingHint, "unschedulable plugins", rejectorPlugins)
if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) {
// When the Pod is moved to activeQ, we need to let p.cond know so that the Pod will be pop()ed out.
p.activeQ.broadcast()
}
return nil
}
// flushBackoffQCompleted moves all pods from backoffQ that have completed backoff into activeQ.
func (p *PriorityQueue) flushBackoffQCompleted(logger klog.Logger) {
p.lock.Lock()
defer p.lock.Unlock()
activated := false
podsCompletedBackoff := p.backoffQ.popAllBackoffCompleted(logger)
for _, pInfo := range podsCompletedBackoff {
if added := p.moveToActiveQ(logger, pInfo, framework.BackoffComplete, true); added {
activated = true
}
}
if activated {
p.activeQ.broadcast()
}
}
// flushUnschedulablePodsLeftover moves pods which stay in unschedulablePods
// longer than podMaxInUnschedulablePodsDuration to backoffQ or activeQ.
func (p *PriorityQueue) flushUnschedulablePodsLeftover(logger klog.Logger) {
p.lock.Lock()
defer p.lock.Unlock()
var podsToMove []*framework.QueuedPodInfo
currentTime := p.clock.Now()
for _, pInfo := range p.unschedulablePods.podInfoMap {
lastScheduleTime := pInfo.Timestamp
if currentTime.Sub(lastScheduleTime) > p.podMaxInUnschedulablePodsDuration {
podsToMove = append(podsToMove, pInfo)
}
}
if len(podsToMove) > 0 {
p.movePodsToActiveOrBackoffQueue(logger, podsToMove, framework.EventUnschedulableTimeout, nil, nil)
}
}
// Pop removes the head of the active queue and returns it. It blocks if the
// activeQ is empty and waits until a new item is added to the queue. It
// increments scheduling cycle when a pod is popped.
// Note: This method should NOT be locked by the p.lock at any moment,
// as it would lead to scheduling throughput degradation.
func (p *PriorityQueue) Pop(logger klog.Logger) (*framework.QueuedPodInfo, error) {
return p.activeQ.pop(logger)
}
// Done must be called for the pod returned by Pop. This allows the queue to
// keep track of which pods are currently being processed.
func (p *PriorityQueue) Done(pod types.UID) {
if !p.isSchedulingQueueHintEnabled {
// do nothing if schedulingQueueHint is disabled.
// In that case, we don't have inFlightPods and inFlightEvents.
return
}
p.activeQ.done(pod)
}
func (p *PriorityQueue) InFlightPods() []*v1.Pod {
if !p.isSchedulingQueueHintEnabled {
// do nothing if schedulingQueueHint is disabled.
// In that case, we don't have inFlightPods and inFlightEvents.
return nil
}
return p.activeQ.listInFlightPods()
}
// isPodUpdated checks if the pod is updated in a way that it may have become
// schedulable. It drops the pod's status and compares it with the old version,
// except for pod.status.resourceClaimStatuses and
// pod.status.extendedResourceClaimStatus: changing that may have an
// effect on scheduling.
func isPodUpdated(oldPod, newPod *v1.Pod) bool {
strip := func(pod *v1.Pod) *v1.Pod {
p := pod.DeepCopy()
p.ResourceVersion = ""
p.Generation = 0
p.Status = v1.PodStatus{
ResourceClaimStatuses: pod.Status.ResourceClaimStatuses,
ExtendedResourceClaimStatus: pod.Status.ExtendedResourceClaimStatus,
}
p.ManagedFields = nil
p.Finalizers = nil
return p
}
return !reflect.DeepEqual(strip(oldPod), strip(newPod))
}
// Update updates a pod in the active or backoff queue if present. Otherwise, it removes
// the item from the unschedulable queue if pod is updated in a way that it may
// become schedulable and adds the updated one to the active queue.
// If pod is not present in any of the queues, it is added to the active queue.
func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
var events []fwk.ClusterEvent
if p.isSchedulingQueueHintEnabled {
events = framework.PodSchedulingPropertiesChange(newPod, oldPod)
}
updated := false
// Run the following code under the activeQ lock to make sure that in the meantime pod is not popped from either activeQ or backoffQ.
// This way, the event will be registered or the pod will be updated consistently.
// Locking only part of the Update method is sufficient, because in the other part the pod is in unschedulablePods,
// which is protected by p.lock anyway.
p.activeQ.underLock(func(unlockedActiveQ unlockedActiveQueuer) {
if p.isSchedulingQueueHintEnabled {
// The inflight pod will be requeued using the latest version from the informer cache, which matches what the event delivers.
// Record this Pod update because
// this update may make the Pod schedulable in case it gets rejected and comes back to the queue.
// We can clean it up once we change updatePodInSchedulingQueue to call MoveAllToActiveOrBackoffQueue.
// See https://github.com/kubernetes/kubernetes/pull/125578#discussion_r1648338033 for more context.
if exists := unlockedActiveQ.addEventsIfPodInFlight(oldPod, newPod, events); exists {
logger.V(6).Info("The pod doesn't need to be queued for now because it's being scheduled and will be queued back if necessary", "pod", klog.KObj(newPod))
updated = true
return
}
}
if oldPod != nil {
oldPodInfo := newQueuedPodInfoForLookup(oldPod)
// If the pod is already in the active queue, just update it there.
if pInfo := unlockedActiveQ.update(newPod, oldPodInfo); pInfo != nil {
p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo)
updated = true
return
}
// If the pod is in the backoff queue, update it there.
if pInfo := p.backoffQ.update(newPod, oldPodInfo); pInfo != nil {
p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo)
updated = true
return
}
}
})
if updated {
return
}
// If the pod is in the unschedulable queue, updating it may make it schedulable.
if pInfo := p.unschedulablePods.get(newPod); pInfo != nil {
_ = pInfo.Update(newPod)
p.UpdateNominatedPod(logger, oldPod, pInfo.PodInfo)
if p.isSchedulingQueueHintEnabled {
// When unscheduled Pods are updated, we check with QueueingHint
// whether the update may make the pods schedulable.
// Plugins have to implement a QueueingHint for Pod/Update event
// if the rejection from them could be resolved by updating unscheduled Pods itself.
for _, evt := range events {
hint := p.isPodWorthRequeuing(logger, pInfo, evt, oldPod, newPod)
queue := p.requeuePodWithQueueingStrategy(logger, pInfo, hint, evt.Label())
if queue != unschedulableQ {
logger.V(5).Info("Pod moved to an internal scheduling queue because the Pod is updated", "pod", klog.KObj(newPod), "event", evt.Label(), "queue", queue)
}
if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) {
p.activeQ.broadcast()
break
}
}
return
}
if isPodUpdated(oldPod, newPod) {
// Pod might have completed its backoff time while being in unschedulablePods,
// so we should check isPodBackingoff before moving the pod to backoffQ.
if p.backoffQ.isPodBackingoff(pInfo) {
if added := p.moveToBackoffQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label()); added {
if p.isPopFromBackoffQEnabled {
p.activeQ.broadcast()
}
}
return
}
if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label(), false); added {
p.activeQ.broadcast()
}
return
}
// Pod update didn't make it schedulable, keep it in the unschedulable queue.
p.unschedulablePods.addOrUpdate(pInfo, framework.EventUnscheduledPodUpdate.Label())
return
}
// If pod is not in any of the queues, we put it in the active queue.
pInfo := p.newQueuedPodInfo(newPod)
if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label(), false); added {
p.activeQ.broadcast()
}
}
// Delete deletes the item from either of the two queues. It assumes the pod is
// only in one queue.
func (p *PriorityQueue) Delete(pod *v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
p.DeleteNominatedPodIfExists(pod)
pInfo := newQueuedPodInfoForLookup(pod)
if err := p.activeQ.delete(pInfo); err == nil {
return
}
if deleted := p.backoffQ.delete(pInfo); deleted {
return
}
if pInfo = p.unschedulablePods.get(pod); pInfo != nil {
p.unschedulablePods.delete(pod, pInfo.Gated())
}
}
// AssignedPodAdded is called when a bound pod is added. Creation of this pod
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodAdded(logger klog.Logger, pod *v1.Pod) {
p.lock.Lock()
// Pre-filter Pods to move by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events shouldn't make Pods that were rejected by a single-node scheduling requirement schedulable.
p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, pod), framework.EventAssignedPodAdd, nil, pod)
p.lock.Unlock()
}
// AssignedPodUpdated is called when a bound pod is updated. Change of labels
// may make pending pods with matching affinity terms schedulable.
func (p *PriorityQueue) AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v1.Pod, event fwk.ClusterEvent) {
p.lock.Lock()
if (framework.MatchClusterEvents(fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.UpdatePodScaleDown}, event)) {
// In this case, we don't want to pre-filter Pods by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events may make Pods that were rejected by NodeResourceFit schedulable.
p.moveAllToActiveOrBackoffQueue(logger, event, oldPod, newPod, nil)
} else {
// Pre-filter Pods to move by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events only make Pods that were rejected by a cross-topology term schedulable.
p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, newPod), event, oldPod, newPod)
}
p.lock.Unlock()
}
// moveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
// This function adds all pods and then signals the condition variable to ensure that
// if Pop() is waiting for an item, it receives the signal after all the pods are in the
// queue and the head is the highest priority pod.
// NOTE: this function assumes a lock has been acquired in the caller.
func (p *PriorityQueue) moveAllToActiveOrBackoffQueue(logger klog.Logger, event fwk.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) {
if !p.isEventOfInterest(logger, event) {
// No plugin is interested in this event.
// Return early before iterating all pods in unschedulablePods for preCheck.
return
}
unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulablePods.podInfoMap))
for _, pInfo := range p.unschedulablePods.podInfoMap {
if preCheck == nil || preCheck(pInfo.Pod) {
unschedulablePods = append(unschedulablePods, pInfo)
}
}
p.movePodsToActiveOrBackoffQueue(logger, unschedulablePods, event, oldObj, newObj)
}
// MoveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
// This function adds all pods and then signals the condition variable to ensure that
// if Pop() is waiting for an item, it receives the signal after all the pods are in the
// queue and the head is the highest priority pod.
func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event fwk.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) {
p.lock.Lock()
defer p.lock.Unlock()
p.moveAllToActiveOrBackoffQueue(logger, event, oldObj, newObj, preCheck)
}
// requeuePodWithQueueingStrategy tries to requeue Pod to activeQ, backoffQ or unschedulable pod pool based on schedulingHint.
// It returns the name of the queue the Pod goes to.
//
// NOTE: this function assumes lock has been acquired in caller
func (p *PriorityQueue) requeuePodWithQueueingStrategy(logger klog.Logger, pInfo *framework.QueuedPodInfo, strategy queueingStrategy, event string) string {
if strategy == queueSkip {
p.unschedulablePods.addOrUpdate(pInfo, event)
return unschedulableQ
}
// Pod might have completed its backoff time while being in unschedulablePods,
// so we should check isPodBackingoff before moving the pod to backoffQ.
if strategy == queueAfterBackoff && p.backoffQ.isPodBackingoff(pInfo) {
if added := p.moveToBackoffQ(logger, pInfo, event); added {
return backoffQ
}
return unschedulableQ
}
// We reach here if the strategy is queueImmediately, or it is queueAfterBackoff but the pod is not backing off.
if added := p.moveToActiveQ(logger, pInfo, event, false); added {
return activeQ
}
// Pod is gated. We don't have to push it back to unschedulable queue, because moveToActiveQ should already have done that.
return unschedulableQ
}
// NOTE: this function assumes lock has been acquired in caller
func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podInfoList []*framework.QueuedPodInfo, event fwk.ClusterEvent, oldObj, newObj interface{}) {
if !p.isEventOfInterest(logger, event) {
// No plugin is interested in this event.
return
}
activated := false
for _, pInfo := range podInfoList {
if pInfo.Gated() && !framework.MatchAnyClusterEvent(event, pInfo.GatingPluginEvents) {
// This event doesn't interest the gating plugin of this Pod,
// which means this event never moves this Pod to activeQ.
continue
}
schedulingHint := p.isPodWorthRequeuing(logger, pInfo, event, oldObj, newObj)
if schedulingHint == queueSkip {
// QueueingHintFn determined that this Pod isn't worth moving to activeQ or backoffQ on this event.
logger.V(5).Info("Event is not making pod schedulable", "pod", klog.KObj(pInfo.Pod), "event", event.Label())
continue
}
p.unschedulablePods.delete(pInfo.Pod, pInfo.Gated())
queue := p.requeuePodWithQueueingStrategy(logger, pInfo, schedulingHint, event.Label())
if queue == activeQ || (p.isPopFromBackoffQEnabled && queue == backoffQ) {
activated = true
}
}
p.moveRequestCycle = p.activeQ.schedulingCycle()
if p.isSchedulingQueueHintEnabled {
// AddUnschedulableIfNotPresent might get called for in-flight Pods later, and in
// AddUnschedulableIfNotPresent we need to know whether events were
// observed while scheduling them.
if added := p.activeQ.addEventIfAnyInFlight(oldObj, newObj, event); added {
logger.V(5).Info("Event received while pods are in flight", "event", event.Label())
}
}
if activated {
p.activeQ.broadcast()
}
}
// getUnschedulablePodsWithCrossTopologyTerm returns unschedulable pods for which either of the following conditions is met:
// - they have an affinity term that matches "pod".
// - they were rejected by the PodTopologySpread plugin.
// NOTE: this function assumes lock has been acquired in caller.
func (p *PriorityQueue) getUnschedulablePodsWithCrossTopologyTerm(logger klog.Logger, pod *v1.Pod) []*framework.QueuedPodInfo {
nsLabels := interpodaffinity.GetNamespaceLabelsSnapshot(logger, pod.Namespace, p.nsLister)
var podsToMove []*framework.QueuedPodInfo
for _, pInfo := range p.unschedulablePods.podInfoMap {
if pInfo.UnschedulablePlugins.Has(podtopologyspread.Name) && pod.Namespace == pInfo.Pod.Namespace {
// This Pod may be schedulable now by this Pod event.
podsToMove = append(podsToMove, pInfo)
continue
}
for _, term := range pInfo.RequiredAffinityTerms {
if term.Matches(pod, nsLabels) {
podsToMove = append(podsToMove, pInfo)
break
}
}
}
return podsToMove
}
// PodsInActiveQ returns all the Pods in the activeQ.
func (p *PriorityQueue) PodsInActiveQ() []*v1.Pod {
return p.activeQ.list()
}
// PodsInBackoffQ returns all the Pods in the backoffQ.
func (p *PriorityQueue) PodsInBackoffQ() []*v1.Pod {
return p.backoffQ.list()
}
// UnschedulablePods returns all the pods in unschedulable state.
func (p *PriorityQueue) UnschedulablePods() []*v1.Pod {
var result []*v1.Pod
for _, pInfo := range p.unschedulablePods.podInfoMap {
result = append(result, pInfo.Pod)
}
return result
}
var pendingPodsSummary = "activeQ:%v; backoffQ:%v; unschedulablePods:%v"
// GetPod searches for a pod in the activeQ, backoffQ, and unschedulablePods.
func (p *PriorityQueue) GetPod(name, namespace string) (pInfo *framework.QueuedPodInfo, ok bool) {
p.lock.RLock()
defer p.lock.RUnlock()
pInfoLookup := &framework.QueuedPodInfo{
PodInfo: &framework.PodInfo{
Pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
},
},
}
if pInfo, ok = p.backoffQ.get(pInfoLookup); ok {
return pInfo, true
}
if pInfo = p.unschedulablePods.get(pInfoLookup.Pod); pInfo != nil {
return pInfo, true
}
p.activeQ.underRLock(func(unlockedActiveQ unlockedActiveQueueReader) {
pInfo, ok = unlockedActiveQ.get(pInfoLookup)
})
return
}
// PendingPods returns all the pending pods in the queue, accompanied by a debugging string
// showing the number of pods in each queue.
// This function is used for debugging purposes in the scheduler cache dumper and comparer.
func (p *PriorityQueue) PendingPods() ([]*v1.Pod, string) {
p.lock.RLock()
defer p.lock.RUnlock()
result := p.PodsInActiveQ()
activeQLen := len(result)
backoffQPods := p.PodsInBackoffQ()
backoffQLen := len(backoffQPods)
result = append(result, backoffQPods...)
for _, pInfo := range p.unschedulablePods.podInfoMap {
result = append(result, pInfo.Pod)
}
return result, fmt.Sprintf(pendingPodsSummary, activeQLen, backoffQLen, len(p.unschedulablePods.podInfoMap))
}
// PatchPodStatus handles the pod status update by sending an update API call through API dispatcher.
// This method should be used only if the SchedulerAsyncAPICalls feature gate is enabled.
func (p *PriorityQueue) PatchPodStatus(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) (<-chan error, error) {
// Don't store anything in the cache. This might be extended in future releases.
onFinish := make(chan error, 1)
err := p.apiDispatcher.Add(apicalls.Implementations.PodStatusPatch(pod, condition, nominatingInfo), fwk.APICallOptions{
OnFinish: onFinish,
})
if fwk.IsUnexpectedError(err) {
return onFinish, err
}
return onFinish, nil
}
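// Illustrative usage sketch (assumption, not part of the original source): the returned
// channel reports the eventual result of the asynchronous call, so a caller that needs
// the patch to have completed can block on it.
//
//	onFinish, err := pq.PatchPodStatus(pod, condition, nominatingInfo)
//	if err != nil {
//		// the call could not be enqueued at all
//	}
//	if callErr := <-onFinish; callErr != nil {
//		// the dispatched API call itself failed
//	}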
// Note: this function assumes the caller locks both p.lock.RLock and p.activeQ.getLock().RLock.
func (p *PriorityQueue) nominatedPodToInfo(np podRef, unlockedActiveQ unlockedActiveQueueReader) *framework.PodInfo {
pod := np.toPod()
pInfoLookup := newQueuedPodInfoForLookup(pod)
queuedPodInfo, exists := unlockedActiveQ.get(pInfoLookup)
if exists {
return queuedPodInfo.PodInfo
}
queuedPodInfo = p.unschedulablePods.get(pod)
if queuedPodInfo != nil {
return queuedPodInfo.PodInfo
}
queuedPodInfo, exists = p.backoffQ.get(pInfoLookup)
if exists {
return queuedPodInfo.PodInfo
}
return &framework.PodInfo{Pod: pod}
}
// Close closes the priority queue.
func (p *PriorityQueue) Close() {
p.lock.Lock()
defer p.lock.Unlock()
close(p.stop)
p.activeQ.close()
p.activeQ.broadcast()
}
// NominatedPodsForNode returns a copy of pods that are nominated to run on the given node,
// but they are waiting for other pods to be removed from the node.
// CAUTION: Make sure you don't call this function while taking any queue's lock in any scenario.
func (p *PriorityQueue) NominatedPodsForNode(nodeName string) []fwk.PodInfo {
p.lock.RLock()
defer p.lock.RUnlock()
nominatedPods := p.nominator.nominatedPodsForNode(nodeName)
pods := make([]fwk.PodInfo, len(nominatedPods))
p.activeQ.underRLock(func(unlockedActiveQ unlockedActiveQueueReader) {
for i, np := range nominatedPods {
pods[i] = p.nominatedPodToInfo(np, unlockedActiveQ).DeepCopy()
}
})
return pods
}
// newQueuedPodInfo builds a QueuedPodInfo object.
func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo {
now := p.clock.Now()
// ignore this err since apiserver doesn't properly validate affinity terms
// and we can't fix the validation for backwards compatibility.
podInfo, _ := framework.NewPodInfo(pod)
return &framework.QueuedPodInfo{
PodInfo: podInfo,
Timestamp: now,
InitialAttemptTimestamp: nil,
UnschedulablePlugins: sets.New(plugins...),
}
}
func podInfoKeyFunc(pInfo *framework.QueuedPodInfo) string {
return cache.NewObjectName(pInfo.Pod.Namespace, pInfo.Pod.Name).String()
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
"context"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// NewTestQueue creates a priority queue with an empty informer factory.
func NewTestQueue(ctx context.Context, lessFn fwk.LessFunc, opts ...Option) *PriorityQueue {
return NewTestQueueWithObjects(ctx, lessFn, nil, opts...)
}
// NewTestQueueWithObjects creates a priority queue with an informer factory
// populated with the provided objects.
func NewTestQueueWithObjects(
ctx context.Context,
lessFn fwk.LessFunc,
objs []runtime.Object,
opts ...Option,
) *PriorityQueue {
informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
// Because some major functions (e.g., Pop) require the metric recorder to be set,
// we always set a metric recorder here.
recorder := metrics.NewMetricsAsyncRecorder(10, 20*time.Microsecond, ctx.Done())
// We set it before the options that users provide, so that users can override it.
opts = append([]Option{WithMetricsRecorder(recorder)}, opts...)
return NewTestQueueWithInformerFactory(ctx, lessFn, informerFactory, opts...)
}
func NewTestQueueWithInformerFactory(
ctx context.Context,
lessFn fwk.LessFunc,
informerFactory informers.SharedInformerFactory,
opts ...Option,
) *PriorityQueue {
pq := NewPriorityQueue(lessFn, informerFactory, opts...)
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
return pq
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// unschedulablePods holds pods that cannot be scheduled.
type unschedulablePods struct {
// podInfoMap is a map keyed by a pod's full name, whose value is a pointer to the QueuedPodInfo.
podInfoMap map[string]*framework.QueuedPodInfo
keyFunc func(*v1.Pod) string
// unschedulableRecorder/gatedRecorder updates the counter when elements of an unschedulablePods
// get added or removed, and it does nothing if it's nil.
unschedulableRecorder, gatedRecorder metrics.MetricRecorder
}
// newUnschedulablePods initializes a new object of unschedulablePods.
func newUnschedulablePods(unschedulableRecorder, gatedRecorder metrics.MetricRecorder) *unschedulablePods {
return &unschedulablePods{
podInfoMap: make(map[string]*framework.QueuedPodInfo),
keyFunc: util.GetPodFullName,
unschedulableRecorder: unschedulableRecorder,
gatedRecorder: gatedRecorder,
}
}
// addOrUpdate adds a pod to the unschedulable podInfoMap.
// The event parameter indicates which cluster event triggered the addition and is used for metric recording.
func (u *unschedulablePods) addOrUpdate(pInfo *framework.QueuedPodInfo, event string) {
podID := u.keyFunc(pInfo.Pod)
if _, exists := u.podInfoMap[podID]; !exists {
if pInfo.Gated() && u.gatedRecorder != nil {
u.gatedRecorder.Inc()
} else if !pInfo.Gated() && u.unschedulableRecorder != nil {
u.unschedulableRecorder.Inc()
}
metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", event).Inc()
}
u.podInfoMap[podID] = pInfo
}
// delete deletes a pod from the unschedulable podInfoMap.
// The `gated` parameter is used to figure out which metric should be decreased.
func (u *unschedulablePods) delete(pod *v1.Pod, gated bool) {
podID := u.keyFunc(pod)
if _, exists := u.podInfoMap[podID]; exists {
if gated && u.gatedRecorder != nil {
u.gatedRecorder.Dec()
} else if !gated && u.unschedulableRecorder != nil {
u.unschedulableRecorder.Dec()
}
}
delete(u.podInfoMap, podID)
}
// get returns the QueuedPodInfo if a pod with the same key as the key of the given "pod"
// is found in the map. It returns nil otherwise.
func (u *unschedulablePods) get(pod *v1.Pod) *framework.QueuedPodInfo {
podKey := u.keyFunc(pod)
if pInfo, exists := u.podInfoMap[podKey]; exists {
return pInfo
}
return nil
}
// clear removes all the entries from the unschedulable podInfoMap.
func (u *unschedulablePods) clear() {
u.podInfoMap = make(map[string]*framework.QueuedPodInfo)
if u.unschedulableRecorder != nil {
u.unschedulableRecorder.Clear()
}
if u.gatedRecorder != nil {
u.gatedRecorder.Clear()
}
}
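// Illustrative usage sketch (not part of the original source): nil metric recorders are
// tolerated by the nil checks above, which keeps the structure easy to use in isolation.
// The pInfo value is assumed to be an existing *framework.QueuedPodInfo.
//
//	u := newUnschedulablePods(nil, nil)
//	u.addOrUpdate(pInfo, framework.ScheduleAttemptFailure)
//	stored := u.get(pInfo.Pod) // same key, so this returns pInfo
//	u.delete(pInfo.Pod, pInfo.Gated())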
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"context"
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
corev1nodeaffinity "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/profile"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)
func (sched *Scheduler) addNodeToCache(obj interface{}) {
evt := fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add}
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
node, ok := obj.(*v1.Node)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Node", "obj", obj)
return
}
logger.V(3).Info("Add event for node", "node", klog.KObj(node))
nodeInfo := sched.Cache.AddNode(logger, node)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil, node, preCheckForNode(nodeInfo))
}
func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) {
start := time.Now()
logger := sched.logger
oldNode, ok := oldObj.(*v1.Node)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert oldObj to *v1.Node", "oldObj", oldObj)
return
}
newNode, ok := newObj.(*v1.Node)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert newObj to *v1.Node", "newObj", newObj)
return
}
logger.V(4).Info("Update event for node", "node", klog.KObj(newNode))
nodeInfo := sched.Cache.UpdateNode(logger, oldNode, newNode)
events := framework.NodeSchedulingPropertiesChange(newNode, oldNode)
// Save the time it takes to update the node in the cache.
updatingDuration := metrics.SinceInSeconds(start)
// Only requeue unschedulable pods if the node became more schedulable.
for _, evt := range events {
startMoving := time.Now()
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, oldNode, newNode, preCheckForNode(nodeInfo))
movingDuration := metrics.SinceInSeconds(startMoving)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration)
}
}
func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
evt := fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Delete}
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var node *v1.Node
switch t := obj.(type) {
case *v1.Node:
node = t
case cache.DeletedFinalStateUnknown:
var ok bool
node, ok = t.Obj.(*v1.Node)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Node", "obj", t.Obj)
return
}
default:
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Node", "obj", t)
return
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, node, nil, nil)
logger.V(3).Info("Delete event for node", "node", klog.KObj(node))
if err := sched.Cache.RemoveNode(logger, node); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Scheduler cache RemoveNode failed")
}
}
func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodAdd.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
pod := obj.(*v1.Pod)
logger.V(3).Info("Add event for unscheduled pod", "pod", klog.KObj(pod))
sched.SchedulingQueue.Add(logger, pod)
}
func (sched *Scheduler) syncPodWithDispatcher(pod *v1.Pod) *v1.Pod {
enrichedObj, err := sched.APIDispatcher.SyncObject(pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync pod %s/%s with API dispatcher: %w", pod.Namespace, pod.Name, err))
return pod
}
enrichedPod, ok := enrichedObj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("cannot convert enrichedObj of type %T to *v1.Pod", enrichedObj))
return pod
}
return enrichedPod
}
func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) {
start := time.Now()
logger := sched.logger
oldPod, newPod := oldObj.(*v1.Pod), newObj.(*v1.Pod)
// Bypass update event that carries identical objects; otherwise, a duplicated
// Pod may go through scheduling and cause unexpected behavior (see #96071).
if oldPod.ResourceVersion == newPod.ResourceVersion {
return
}
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodUpdate.Label()).Observe(metrics.SinceInSeconds(start))
for _, evt := range framework.PodSchedulingPropertiesChange(newPod, oldPod) {
if evt.Label() != framework.EventUnscheduledPodUpdate.Label() {
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
if sched.APIDispatcher != nil {
// If the API dispatcher is available, sync the new pod with the details.
// However, at the moment the updated newPod is discarded; this logic will be handled in future releases.
_ = sched.syncPodWithDispatcher(newPod)
}
isAssumed, err := sched.Cache.IsAssumedPod(newPod)
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Failed to check whether pod is assumed", "pod", klog.KObj(newPod))
}
if isAssumed {
return
}
logger.V(4).Info("Update event for unscheduled pod", "pod", klog.KObj(newPod))
sched.SchedulingQueue.Update(logger, oldPod, newPod)
if hasNominatedNodeNameChanged(oldPod, newPod) {
// Nominated node changed in pod, so we need to treat it as if the pod was deleted from the old nominated node,
// because the scheduler treats such a pod as if it was already assigned when scheduling lower or equal priority pods.
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, oldPod, nil, getLEPriorityPreCheck(corev1helpers.PodPriority(oldPod)))
}
}
// hasNominatedNodeNameChanged returns true when nominated node name has existed but changed.
func hasNominatedNodeNameChanged(oldPod, newPod *v1.Pod) bool {
return len(oldPod.Status.NominatedNodeName) > 0 && oldPod.Status.NominatedNodeName != newPod.Status.NominatedNodeName
}
func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return
}
default:
utilruntime.HandleErrorWithLogger(logger, nil, "Unable to handle object", "objType", fmt.Sprintf("%T", obj), "obj", obj)
return
}
logger.V(3).Info("Delete event for unscheduled pod", "pod", klog.KObj(pod))
sched.SchedulingQueue.Delete(pod)
fwk, err := sched.frameworkForPod(pod)
if err != nil {
// This shouldn't happen, because we only accept for scheduling the pods
// which specify a scheduler name that matches one of the profiles.
utilruntime.HandleErrorWithLogger(logger, err, "Unable to get profile", "pod", klog.KObj(pod))
return
}
// If a waiting pod is rejected, it indicates it's previously assumed and we're
// removing it from the scheduler cache. In this case, signal an AssignedPodDelete
// event to immediately retry some unscheduled Pods.
// Similarly when a pod that had nominated node is deleted, it can unblock scheduling of other pods,
// because the lower or equal priority pods treat such a pod as if it was assigned.
if fwk.RejectWaitingPod(pod.UID) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
} else if pod.Status.NominatedNodeName != "" {
// Note that a nominated pod can fall into the `RejectWaitingPod` case as well,
// but in that case the `MoveAllToActiveOrBackoffQueue` call above already covers lower priority pods.
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, getLEPriorityPreCheck(corev1helpers.PodPriority(pod)))
}
}
// getLEPriorityPreCheck is a PreEnqueueCheck function that selects only lower or equal priority pods.
func getLEPriorityPreCheck(priority int32) queue.PreEnqueueCheck {
return func(pod *v1.Pod) bool {
return corev1helpers.PodPriority(pod) <= priority
}
}
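// Example (illustrative sketch): building a PreEnqueueCheck that requeues only
// pods whose priority is at or below a deleted pod's priority, mirroring the
// calls above:
//
//	check := getLEPriorityPreCheck(corev1helpers.PodPriority(deletedPod))
//	sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(
//		logger, framework.EventAssignedPodDelete, deletedPod, nil, check)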
func (sched *Scheduler) addPodToCache(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodAdd.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
pod, ok := obj.(*v1.Pod)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Pod", "obj", obj)
return
}
logger.V(3).Info("Add event for scheduled pod", "pod", klog.KObj(pod))
if err := sched.Cache.AddPod(logger, pod); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Scheduler cache AddPod failed", "pod", klog.KObj(pod))
}
// SchedulingQueue.AssignedPodAdded has a problem:
// It internally pre-filters Pods to move to activeQ,
// while taking only in-tree plugins into consideration.
// Consequently, if custom plugins that subscribe to Pod/Add events reject Pods,
// those Pods will never be requeued to activeQ by assigned-Pod-related events,
// and they may be stuck in unschedulableQ.
//
// Here we use MoveAllToActiveOrBackoffQueue only when QueueingHint is enabled.
// (We cannot switch to MoveAllToActiveOrBackoffQueue unconditionally because of throughput concerns.)
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodAdd, nil, pod, nil)
} else {
sched.SchedulingQueue.AssignedPodAdded(logger, pod)
}
}
func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodUpdate.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
return
}
if sched.APIDispatcher != nil {
// If the API dispatcher is available, sync the new pod with its details.
// For now the synced newPod is discarded; consuming it will be handled in future releases.
_ = sched.syncPodWithDispatcher(newPod)
}
logger.V(4).Info("Update event for scheduled pod", "pod", klog.KObj(oldPod))
if err := sched.Cache.UpdatePod(logger, oldPod, newPod); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Scheduler cache UpdatePod failed", "pod", klog.KObj(oldPod))
}
events := framework.PodSchedulingPropertiesChange(newPod, oldPod)
// Save the time it takes to update the pod in the cache.
updatingDuration := metrics.SinceInSeconds(start)
for _, evt := range events {
startMoving := time.Now()
// SchedulingQueue.AssignedPodUpdated has a problem:
// It internally pre-filters Pods to move to activeQ,
// while taking only in-tree plugins into consideration.
// Consequently, if custom plugins that subscribe to Pod/Update events reject Pods,
// those Pods will never be requeued to activeQ by assigned-Pod-related events,
// and they may be stuck in unschedulableQ.
//
// Here we use MoveAllToActiveOrBackoffQueue only when QueueingHint is enabled.
// (We cannot switch to MoveAllToActiveOrBackoffQueue unconditionally because of throughput concerns.)
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, oldPod, newPod, nil)
} else {
sched.SchedulingQueue.AssignedPodUpdated(logger, oldPod, newPod, evt)
}
movingDuration := metrics.SinceInSeconds(startMoving)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration)
}
}
func (sched *Scheduler) deletePodFromCache(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return
}
default:
utilruntime.HandleErrorWithLogger(logger, nil, "Unable to handle object", "objType", fmt.Sprintf("%T", obj), "obj", obj)
return
}
logger.V(3).Info("Delete event for scheduled pod", "pod", klog.KObj(pod))
if err := sched.Cache.RemovePod(logger, pod); err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod))
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
}
// assignedPod selects pods that are assigned (scheduled and running).
func assignedPod(pod *v1.Pod) bool {
return len(pod.Spec.NodeName) != 0
}
// responsibleForPod returns true if the pod has asked to be scheduled by the given scheduler.
func responsibleForPod(pod *v1.Pod, profiles profile.Map) bool {
return profiles.HandlesSchedulerName(pod.Spec.SchedulerName)
}
const (
// syncedPollPeriod controls how often to poll the status of the sync functions
syncedPollPeriod = 100 * time.Millisecond
)
// WaitForHandlersSync waits for EventHandlers to sync.
// It returns an error if the context is cancelled before all registered handlers have synced.
func (sched *Scheduler) WaitForHandlersSync(ctx context.Context) error {
return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
for _, handler := range sched.registeredHandlers {
if !handler.HasSynced() {
return false, nil
}
}
return true, nil
})
}
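// Example (illustrative sketch, assuming the caller owns ctx and logger):
// waiting for all registered handlers to sync before starting the scheduling loop:
//
//	if err := sched.WaitForHandlersSync(ctx); err != nil {
//		logger.Error(err, "Waiting for handlers to sync")
//		return
//	}
//	logger.V(3).Info("Handlers synced")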
// addAllEventHandlers is a helper function used in tests and in Scheduler
// to add event handlers for various informers.
func addAllEventHandlers(
sched *Scheduler,
informerFactory informers.SharedInformerFactory,
dynInformerFactory dynamicinformer.DynamicSharedInformerFactory,
resourceClaimCache *assumecache.AssumeCache,
resourceSliceTracker *resourceslicetracker.Tracker,
gvkMap map[fwk.EventResource]fwk.ActionType,
) error {
var (
handlerRegistration cache.ResourceEventHandlerRegistration
err error
handlers []cache.ResourceEventHandlerRegistration
)
logger := sched.logger
// scheduled pod cache
if handlerRegistration, err = informerFactory.Core().V1().Pods().Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return assignedPod(t)
case cache.DeletedFinalStateUnknown:
if _, ok := t.Obj.(*v1.Pod); ok {
// The carried object may be stale, so we don't use it to check whether
// it's assigned. Attempt the cleanup anyway.
return true
}
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return false
default:
utilruntime.HandleErrorWithLogger(logger, nil, "Unable to handle object", "objType", fmt.Sprintf("%T", obj), "obj", obj)
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sched.addPodToCache,
UpdateFunc: sched.updatePodInCache,
DeleteFunc: sched.deletePodFromCache,
},
},
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
// unscheduled pod queue
if handlerRegistration, err = informerFactory.Core().V1().Pods().Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return !assignedPod(t) && responsibleForPod(t, sched.Profiles)
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
// The carried object may be stale, so we don't use it to check if
// it's assigned or not.
return responsibleForPod(pod, sched.Profiles)
}
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return false
default:
utilruntime.HandleErrorWithLogger(logger, nil, "Unable to handle object", "objType", fmt.Sprintf("%T", obj), "obj", obj)
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sched.addPodToSchedulingQueue,
UpdateFunc: sched.updatePodInSchedulingQueue,
DeleteFunc: sched.deletePodFromSchedulingQueue,
},
},
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
if handlerRegistration, err = informerFactory.Core().V1().Nodes().Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sched.addNodeToCache,
UpdateFunc: sched.updateNodeInCache,
DeleteFunc: sched.deleteNodeFromCache,
},
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
buildEvtResHandler := func(at fwk.ActionType, resource fwk.EventResource) cache.ResourceEventHandlerFuncs {
funcs := cache.ResourceEventHandlerFuncs{}
if at&fwk.Add != 0 {
evt := fwk.ClusterEvent{Resource: resource, ActionType: fwk.Add}
funcs.AddFunc = func(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
if resource == fwk.StorageClass && !utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
sc, ok := obj.(*storagev1.StorageClass)
if !ok {
utilruntime.HandleErrorWithLogger(logger, nil, "Cannot convert to *storagev1.StorageClass", "obj", obj)
return
}
// CheckVolumeBindingPred fails if a pod has unbound immediate PVCs. If these
// PVCs specify a StorageClass name, creating StorageClass objects
// with late binding will cause the predicate to pass, so we need to move such
// pods to the active queue.
// We don't need to invalidate cached results because results are not
// cached for pods that have unbound immediate PVCs.
if sc.VolumeBindingMode == nil || *sc.VolumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer {
return
}
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil, obj, nil)
}
}
if at&fwk.Update != 0 {
evt := fwk.ClusterEvent{Resource: resource, ActionType: fwk.Update}
funcs.UpdateFunc = func(old, obj interface{}) {
start := time.Now()
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, old, obj, nil)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
if at&fwk.Delete != 0 {
evt := fwk.ClusterEvent{Resource: resource, ActionType: fwk.Delete}
funcs.DeleteFunc = func(obj interface{}) {
start := time.Now()
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, obj, nil, nil)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
return funcs
}
for gvk, at := range gvkMap {
switch gvk {
case fwk.Node, fwk.Pod:
// Do nothing.
case fwk.CSINode:
if handlerRegistration, err = informerFactory.Storage().V1().CSINodes().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.CSINode),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.CSIDriver:
if handlerRegistration, err = informerFactory.Storage().V1().CSIDrivers().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.CSIDriver),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.CSIStorageCapacity:
if handlerRegistration, err = informerFactory.Storage().V1().CSIStorageCapacities().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.CSIStorageCapacity),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.PersistentVolume:
// MaxPDVolumeCountPredicate: relevant because it relies on the counts of PVs.
//
// PvAdd: Pods created when there are no PVs available will be stuck in
// unschedulable queue. But unbound PVs created for static provisioning and
// delay binding storage class are skipped in PV controller dynamic
// provisioning and binding process, will not trigger events to schedule pod
// again. So we need to move pods to active queue on PV add for this
// scenario.
//
// PvUpdate: Scheduler.bindVolumesWorker may fail to update assumed pod volume
// bindings due to conflicts if PVs are updated by PV controller or other
// parties, then scheduler will add pod back to unschedulable queue. We
// need to move pods to active queue on PV update for this scenario.
if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumes().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.PersistentVolume),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.PersistentVolumeClaim:
// MaxPDVolumeCountPredicate: adding/updating a PVC will affect the PV counts once it is bound.
if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.PersistentVolumeClaim),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.ResourceClaim:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
handlerRegistration = resourceClaimCache.AddEventHandler(
buildEvtResHandler(at, fwk.ResourceClaim),
)
handlers = append(handlers, handlerRegistration)
}
case fwk.ResourceSlice:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = resourceSliceTracker.AddEventHandler(
buildEvtResHandler(at, fwk.ResourceSlice),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
}
case fwk.DeviceClass:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1().DeviceClasses().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.DeviceClass),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
}
case fwk.StorageClass:
if handlerRegistration, err = informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.StorageClass),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case fwk.VolumeAttachment:
if handlerRegistration, err = informerFactory.Storage().V1().VolumeAttachments().Informer().AddEventHandler(
buildEvtResHandler(at, fwk.VolumeAttachment),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
default:
// Tests may not instantiate dynInformerFactory.
if dynInformerFactory == nil {
continue
}
// The GVK is expected to have at least three dot-separated sections:
// <kind in plural>.<version>.<group>
// Valid examples:
// - foos.v1.example.com
// - bars.v1beta1.a.b.c
// Invalid examples:
// - foos.v1 (2 sections)
// - foo.v1.example.com (the first section should be plural)
if strings.Count(string(gvk), ".") < 2 {
utilruntime.HandleErrorWithLogger(logger, nil, "Incorrect event registration", "gvk", gvk)
continue
}
// Fall back to try dynamic informers.
gvr, _ := schema.ParseResourceArg(string(gvk))
dynInformer := dynInformerFactory.ForResource(*gvr).Informer()
if handlerRegistration, err = dynInformer.AddEventHandler(
buildEvtResHandler(at, gvk),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
}
}
sched.registeredHandlers = handlers
return nil
}
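// Example (illustrative sketch, assuming the informer factories and trackers are
// already constructed): routing a custom resource's events through the
// dynamic-informer fallback above. "foos.v1.example.com" is a hypothetical CRD
// in the <plural>.<version>.<group> form the default case expects:
//
//	gvkMap := map[fwk.EventResource]fwk.ActionType{
//		fwk.EventResource("foos.v1.example.com"): fwk.Add | fwk.Update,
//	}
//	err := addAllEventHandlers(sched, informerFactory, dynInformerFactory,
//		resourceClaimCache, resourceSliceTracker, gvkMap)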
func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
// QueueingHint (QHint) was created with the motivation of replacing this preCheck,
// which assumes that the scheduler only has in-tree plugins and is therefore problematic for extensibility.
// Here we skip the preCheck when QHint is enabled, and we will eventually remove the preCheck once QHint graduates.
return nil
}
// Note: the following checks don't take preemption into consideration; in very rare
// cases (e.g., node resizing), "pod" may still fail a check but preemption would help. We deliberately
// chose to ignore those cases, as unschedulable pods will be requeued eventually.
return func(pod *v1.Pod) bool {
admissionResults := AdmissionCheck(pod, nodeInfo, false)
if len(admissionResults) != 0 {
return false
}
_, isUntolerated := corev1helpers.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc())
return !isUntolerated
}
}
// AdmissionCheck calls the filtering logic of noderesources/nodeports/nodeAffinity/nodename
// and returns the failure reasons. It's used in the kubelet (pkg/kubelet/lifecycle/predicate.go) and the scheduler.
// It returns the first failure if `includeAllFailures` is set to false; otherwise it
// returns all failures.
func AdmissionCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo, includeAllFailures bool) []AdmissionResult {
var admissionResults []AdmissionResult
insufficientResources := noderesources.Fits(pod, nodeInfo, noderesources.ResourceRequestsOptions{
EnablePodLevelResources: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
EnableDRAExtendedResource: utilfeature.DefaultFeatureGate.Enabled(features.DRAExtendedResource),
})
if len(insufficientResources) != 0 {
for i := range insufficientResources {
admissionResults = append(admissionResults, AdmissionResult{InsufficientResource: &insufficientResources[i]})
}
if !includeAllFailures {
return admissionResults
}
}
if matches, _ := corev1nodeaffinity.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node()); !matches {
admissionResults = append(admissionResults, AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod})
if !includeAllFailures {
return admissionResults
}
}
if !nodename.Fits(pod, nodeInfo) {
admissionResults = append(admissionResults, AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason})
if !includeAllFailures {
return admissionResults
}
}
if !nodeports.Fits(pod, nodeInfo) {
admissionResults = append(admissionResults, AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason})
if !includeAllFailures {
return admissionResults
}
}
return admissionResults
}
// AdmissionResult describes the reason why Scheduler can't admit the pod.
// If the reason is a resource fit one, then AdmissionResult.InsufficientResource includes the details.
type AdmissionResult struct {
Name string
Reason string
InsufficientResource *noderesources.InsufficientResource
}
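// Example (illustrative sketch; the logger and the InsufficientResource field
// layout are assumptions): collecting every admission failure for diagnostics
// rather than stopping at the first one:
//
//	for _, r := range AdmissionCheck(pod, nodeInfo, true) {
//		if r.InsufficientResource != nil {
//			logger.Info("Insufficient resource", "resource", r.InsufficientResource.ResourceName)
//			continue
//		}
//		logger.Info("Admission failure", "plugin", r.Name, "reason", r.Reason)
//	}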
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
v1 "k8s.io/api/core/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
const (
// DefaultExtenderTimeout defines the default extender timeout.
DefaultExtenderTimeout = 5 * time.Second
)
// HTTPExtender implements the Extender interface.
type HTTPExtender struct {
extenderURL string
preemptVerb string
filterVerb string
prioritizeVerb string
bindVerb string
weight int64
client *http.Client
nodeCacheCapable bool
managedResources sets.Set[string]
ignorable bool
}
func makeTransport(config *schedulerapi.Extender) (http.RoundTripper, error) {
var cfg restclient.Config
if config.TLSConfig != nil {
cfg.TLSClientConfig.Insecure = config.TLSConfig.Insecure
cfg.TLSClientConfig.ServerName = config.TLSConfig.ServerName
cfg.TLSClientConfig.CertFile = config.TLSConfig.CertFile
cfg.TLSClientConfig.KeyFile = config.TLSConfig.KeyFile
cfg.TLSClientConfig.CAFile = config.TLSConfig.CAFile
cfg.TLSClientConfig.CertData = config.TLSConfig.CertData
cfg.TLSClientConfig.KeyData = config.TLSConfig.KeyData
cfg.TLSClientConfig.CAData = config.TLSConfig.CAData
}
if config.EnableHTTPS {
hasCA := len(cfg.CAFile) > 0 || len(cfg.CAData) > 0
if !hasCA {
cfg.Insecure = true
}
}
tlsConfig, err := restclient.TLSConfigFor(&cfg)
if err != nil {
return nil, err
}
if tlsConfig != nil {
return utilnet.SetTransportDefaults(&http.Transport{
TLSClientConfig: tlsConfig,
}), nil
}
return utilnet.SetTransportDefaults(&http.Transport{}), nil
}
// NewHTTPExtender creates an HTTPExtender object.
func NewHTTPExtender(config *schedulerapi.Extender) (fwk.Extender, error) {
if config.HTTPTimeout.Duration.Nanoseconds() == 0 {
config.HTTPTimeout.Duration = DefaultExtenderTimeout
}
transport, err := makeTransport(config)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: config.HTTPTimeout.Duration,
}
managedResources := sets.New[string]()
for _, r := range config.ManagedResources {
managedResources.Insert(string(r.Name))
}
return &HTTPExtender{
extenderURL: config.URLPrefix,
preemptVerb: config.PreemptVerb,
filterVerb: config.FilterVerb,
prioritizeVerb: config.PrioritizeVerb,
bindVerb: config.BindVerb,
weight: config.Weight,
client: client,
nodeCacheCapable: config.NodeCacheCapable,
managedResources: managedResources,
ignorable: config.Ignorable,
}, nil
}
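// Example (illustrative sketch): a minimal extender configuration. send below
// builds request URLs as <URLPrefix>/<verb>, so this extender would be called
// at http://127.0.0.1:8888/scheduler/filter; the address is hypothetical:
//
//	ext, err := NewHTTPExtender(&schedulerapi.Extender{
//		URLPrefix:  "http://127.0.0.1:8888/scheduler",
//		FilterVerb: "filter",
//		Weight:     1,
//	})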
// Name returns extenderURL to identify the extender.
func (h *HTTPExtender) Name() string {
return h.extenderURL
}
// IsIgnorable returns true if scheduling should not fail when this extender
// is unavailable.
func (h *HTTPExtender) IsIgnorable() bool {
return h.ignorable
}
// SupportsPreemption returns true if an extender supports preemption,
// i.e. it has the preempt verb defined.
func (h *HTTPExtender) SupportsPreemption() bool {
return len(h.preemptVerb) > 0
}
// ProcessPreemption returns filtered candidate nodes and victims after running preemption logic in extender.
func (h *HTTPExtender) ProcessPreemption(
pod *v1.Pod,
nodeNameToVictims map[string]*extenderv1.Victims,
nodeInfos fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
var (
result extenderv1.ExtenderPreemptionResult
args *extenderv1.ExtenderPreemptionArgs
)
if !h.SupportsPreemption() {
return nil, fmt.Errorf("preempt verb is not defined for extender %v but run into ProcessPreemption", h.extenderURL)
}
if h.nodeCacheCapable {
// If extender has cached node info, pass NodeNameToMetaVictims in args.
nodeNameToMetaVictims := convertToMetaVictims(nodeNameToVictims)
args = &extenderv1.ExtenderPreemptionArgs{
Pod: pod,
NodeNameToMetaVictims: nodeNameToMetaVictims,
}
} else {
args = &extenderv1.ExtenderPreemptionArgs{
Pod: pod,
NodeNameToVictims: nodeNameToVictims,
}
}
if err := h.send(h.preemptVerb, args, &result); err != nil {
return nil, err
}
// The extender always returns NodeNameToMetaVictims,
// so convert it to NodeNameToVictims using <nodeInfos>.
newNodeNameToVictims, err := h.convertToVictims(result.NodeNameToMetaVictims, nodeInfos)
if err != nil {
return nil, err
}
// Do not override <nodeNameToVictims>.
return newNodeNameToVictims, nil
}
// convertToVictims converts "nodeNameToMetaVictims" from object identifiers,
// such as UIDs and names, to object pointers.
func (h *HTTPExtender) convertToVictims(
nodeNameToMetaVictims map[string]*extenderv1.MetaVictims,
nodeInfos fwk.NodeInfoLister,
) (map[string]*extenderv1.Victims, error) {
nodeNameToVictims := map[string]*extenderv1.Victims{}
for nodeName, metaVictims := range nodeNameToMetaVictims {
nodeInfo, err := nodeInfos.Get(nodeName)
if err != nil {
return nil, err
}
victims := &extenderv1.Victims{
Pods: []*v1.Pod{},
NumPDBViolations: metaVictims.NumPDBViolations,
}
for _, metaPod := range metaVictims.Pods {
pod, err := h.convertPodUIDToPod(metaPod, nodeInfo)
if err != nil {
return nil, err
}
victims.Pods = append(victims.Pods, pod)
}
nodeNameToVictims[nodeName] = victims
}
return nodeNameToVictims, nil
}
// convertPodUIDToPod returns the v1.Pod object for the given MetaPod and node info.
// The v1.Pod object is restored from nodeInfo.GetPods().
// It returns an error if there's a cache inconsistency between the default scheduler
// and the extender, i.e. when the pod is not found in nodeInfo's pods.
func (h *HTTPExtender) convertPodUIDToPod(
metaPod *extenderv1.MetaPod,
nodeInfo fwk.NodeInfo) (*v1.Pod, error) {
for _, p := range nodeInfo.GetPods() {
if string(p.GetPod().UID) == metaPod.UID {
return p.GetPod(), nil
}
}
return nil, fmt.Errorf("extender: %v claims to preempt pod (UID: %v) on node: %v, but the pod is not found on that node",
h.extenderURL, metaPod, nodeInfo.Node().Name)
}
// convertToMetaVictims converts from struct type to meta types.
func convertToMetaVictims(
nodeNameToVictims map[string]*extenderv1.Victims,
) map[string]*extenderv1.MetaVictims {
nodeNameToMetaVictims := map[string]*extenderv1.MetaVictims{}
for node, victims := range nodeNameToVictims {
metaVictims := &extenderv1.MetaVictims{
Pods: []*extenderv1.MetaPod{},
NumPDBViolations: victims.NumPDBViolations,
}
for _, pod := range victims.Pods {
metaPod := &extenderv1.MetaPod{
UID: string(pod.UID),
}
metaVictims.Pods = append(metaVictims.Pods, metaPod)
}
nodeNameToMetaVictims[node] = metaVictims
}
return nodeNameToMetaVictims
}
// Filter filters nodes based on the extender-implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list; otherwise the function returns an error.
// The failedNodes and failedAndUnresolvableNodes maps optionally contain failed nodes and
// failure reasons; nodes in the latter are unresolvable.
func (h *HTTPExtender) Filter(
pod *v1.Pod,
nodes []fwk.NodeInfo,
) (filteredList []fwk.NodeInfo, failedNodes, failedAndUnresolvableNodes extenderv1.FailedNodesMap, err error) {
var (
result extenderv1.ExtenderFilterResult
nodeList *v1.NodeList
nodeNames *[]string
nodeResult []fwk.NodeInfo
args *extenderv1.ExtenderArgs
)
fromNodeName := make(map[string]fwk.NodeInfo)
for _, n := range nodes {
fromNodeName[n.Node().Name] = n
}
if h.filterVerb == "" {
return nodes, extenderv1.FailedNodesMap{}, extenderv1.FailedNodesMap{}, nil
}
if h.nodeCacheCapable {
nodeNameSlice := make([]string, 0, len(nodes))
for _, node := range nodes {
nodeNameSlice = append(nodeNameSlice, node.Node().Name)
}
nodeNames = &nodeNameSlice
} else {
nodeList = &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node.Node())
}
}
args = &extenderv1.ExtenderArgs{
Pod: pod,
Nodes: nodeList,
NodeNames: nodeNames,
}
if err := h.send(h.filterVerb, args, &result); err != nil {
return nil, nil, nil, err
}
if result.Error != "" {
return nil, nil, nil, errors.New(result.Error)
}
if h.nodeCacheCapable && result.NodeNames != nil {
nodeResult = make([]fwk.NodeInfo, len(*result.NodeNames))
for i, nodeName := range *result.NodeNames {
if n, ok := fromNodeName[nodeName]; ok {
nodeResult[i] = n
} else {
return nil, nil, nil, fmt.Errorf(
"extender %q claims a filtered node %q which is not found in the input node list",
h.extenderURL, nodeName)
}
}
} else if result.Nodes != nil {
nodeResult = make([]fwk.NodeInfo, len(result.Nodes.Items))
for i := range result.Nodes.Items {
nodeResult[i] = framework.NewNodeInfo()
nodeResult[i].SetNode(&result.Nodes.Items[i])
}
}
return nodeResult, result.FailedNodes, result.FailedAndUnresolvableNodes, nil
}
// Prioritize based on extender implemented priority functions. Weight*priority is added
// up for each such priority function. The returned score is added to the score computed
// by Kubernetes scheduler. The total score is used to do the host selection.
func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []fwk.NodeInfo) (*extenderv1.HostPriorityList, int64, error) {
var (
result extenderv1.HostPriorityList
nodeList *v1.NodeList
nodeNames *[]string
args *extenderv1.ExtenderArgs
)
if h.prioritizeVerb == "" {
result := extenderv1.HostPriorityList{}
for _, node := range nodes {
result = append(result, extenderv1.HostPriority{Host: node.Node().Name, Score: 0})
}
return &result, 0, nil
}
if h.nodeCacheCapable {
nodeNameSlice := make([]string, 0, len(nodes))
for _, node := range nodes {
nodeNameSlice = append(nodeNameSlice, node.Node().Name)
}
nodeNames = &nodeNameSlice
} else {
nodeList = &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node.Node())
}
}
args = &extenderv1.ExtenderArgs{
Pod: pod,
Nodes: nodeList,
NodeNames: nodeNames,
}
if err := h.send(h.prioritizeVerb, args, &result); err != nil {
return nil, 0, err
}
return &result, h.weight, nil
}
// Bind delegates the action of binding a pod to a node to the extender.
func (h *HTTPExtender) Bind(binding *v1.Binding) error {
var result extenderv1.ExtenderBindingResult
if !h.IsBinder() {
// This shouldn't happen as this extender wouldn't have become a Binder.
return fmt.Errorf("unexpected empty bindVerb in extender")
}
req := &extenderv1.ExtenderBindingArgs{
PodName: binding.Name,
PodNamespace: binding.Namespace,
PodUID: binding.UID,
Node: binding.Target.Name,
}
if err := h.send(h.bindVerb, req, &result); err != nil {
return err
}
if result.Error != "" {
return errors.New(result.Error)
}
return nil
}
// IsBinder returns whether this extender is configured for the Bind method.
func (h *HTTPExtender) IsBinder() bool {
return h.bindVerb != ""
}
// IsPrioritizer returns whether this extender is configured for the Prioritize method.
func (h *HTTPExtender) IsPrioritizer() bool {
return h.prioritizeVerb != ""
}
// IsFilter returns whether this extender is configured for the Filter method.
func (h *HTTPExtender) IsFilter() bool {
return h.filterVerb != ""
}
// send is a helper that POSTs JSON-encoded args to the extender at <extenderURL>/<action> and decodes the JSON response into result.
func (h *HTTPExtender) send(action string, args interface{}, result interface{}) error {
out, err := json.Marshal(args)
if err != nil {
return err
}
url := strings.TrimRight(h.extenderURL, "/") + "/" + action
req, err := http.NewRequest("POST", url, bytes.NewReader(out))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
resp, err := h.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed %v with extender at URL %v, code %v", action, url, resp.StatusCode)
}
return json.NewDecoder(resp.Body).Decode(result)
}
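// Example (illustrative sketch of the wire protocol send implements; the path
// and handler are hypothetical): an extender's filter endpoint decodes a
// JSON-encoded ExtenderArgs from the POST body and replies with an
// ExtenderFilterResult that admits every node it was sent:
//
//	http.HandleFunc("/scheduler/filter", func(w http.ResponseWriter, r *http.Request) {
//		var args extenderv1.ExtenderArgs
//		if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
//			http.Error(w, err.Error(), http.StatusBadRequest)
//			return
//		}
//		json.NewEncoder(w).Encode(&extenderv1.ExtenderFilterResult{
//			Nodes:     args.Nodes,
//			NodeNames: args.NodeNames,
//		})
//	})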
// IsInterested returns true if at least one extended resource requested by
// this pod is managed by this extender.
func (h *HTTPExtender) IsInterested(pod *v1.Pod) bool {
if h.managedResources.Len() == 0 {
return true
}
if h.hasManagedResources(pod.Spec.Containers) {
return true
}
if h.hasManagedResources(pod.Spec.InitContainers) {
return true
}
return false
}
func (h *HTTPExtender) hasManagedResources(containers []v1.Container) bool {
for i := range containers {
container := &containers[i]
for resourceName := range container.Resources.Requests {
if h.managedResources.Has(string(resourceName)) {
return true
}
}
for resourceName := range container.Resources.Limits {
if h.managedResources.Has(string(resourceName)) {
return true
}
}
}
return false
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apicalls
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
)
// PodBindingCall is used to bind the pod using the binding details.
type PodBindingCall struct {
binding *v1.Binding
}
func NewPodBindingCall(binding *v1.Binding) *PodBindingCall {
return &PodBindingCall{
binding: binding,
}
}
func (pbc *PodBindingCall) CallType() fwk.APICallType {
return PodBinding
}
func (pbc *PodBindingCall) UID() types.UID {
return pbc.binding.UID
}
func (pbc *PodBindingCall) Execute(ctx context.Context, client clientset.Interface) error {
logger := klog.FromContext(ctx)
logger.V(3).Info("Attempting to bind pod to node", "pod", klog.KObj(&pbc.binding.ObjectMeta), "node", pbc.binding.Target.Name)
return client.CoreV1().Pods(pbc.binding.Namespace).Bind(ctx, pbc.binding, metav1.CreateOptions{})
}
func (pbc *PodBindingCall) Sync(obj metav1.Object) (metav1.Object, error) {
// Don't need to store or update an object.
return obj, nil
}
func (pbc *PodBindingCall) Merge(oldCall fwk.APICall) error {
// Merge should just overwrite the previous call.
return nil
}
func (pbc *PodBindingCall) IsNoOp() bool {
return false
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apicalls
import (
"context"
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// PodStatusPatchCall is used to patch the pod status.
type PodStatusPatchCall struct {
lock sync.Mutex
// executed is set at the beginning of the call's Execute
// and is used by Sync to know if the podStatus should be updated.
executed bool
// podUID is the UID of the pod.
podUID types.UID
// podRef is a reference to the pod.
podRef klog.ObjectRef
// podStatus contains the actual status of the pod.
podStatus *v1.PodStatus
// newCondition is a condition to update.
newCondition *v1.PodCondition
// nominatingInfo is a nominating info to update.
nominatingInfo *fwk.NominatingInfo
}
func NewPodStatusPatchCall(pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) *PodStatusPatchCall {
return &PodStatusPatchCall{
podUID: pod.UID,
podRef: klog.KObj(pod),
podStatus: pod.Status.DeepCopy(),
newCondition: condition,
nominatingInfo: nominatingInfo,
}
}
func (psuc *PodStatusPatchCall) CallType() fwk.APICallType {
return PodStatusPatch
}
func (psuc *PodStatusPatchCall) UID() types.UID {
return psuc.podUID
}
// syncStatus syncs the given status with condition and nominatingInfo. It returns true if anything was actually updated.
func syncStatus(status *v1.PodStatus, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) bool {
nnnNeedsUpdate := nominatingInfo.Mode() == fwk.ModeOverride && status.NominatedNodeName != nominatingInfo.NominatedNodeName
if condition != nil {
if !podutil.UpdatePodCondition(status, condition) && !nnnNeedsUpdate {
return false
}
} else if !nnnNeedsUpdate {
return false
}
if nnnNeedsUpdate {
status.NominatedNodeName = nominatingInfo.NominatedNodeName
}
return true
}
func (psuc *PodStatusPatchCall) Execute(ctx context.Context, client clientset.Interface) error {
psuc.lock.Lock()
// The executed flag is set so that a concurrent Sync won't write podStatus and race with the reads below.
psuc.executed = true
condition := psuc.newCondition.DeepCopy()
podStatusCopy := psuc.podStatus.DeepCopy()
psuc.lock.Unlock()
logger := klog.FromContext(ctx)
if condition != nil {
logger.V(3).Info("Updating pod condition", "pod", psuc.podRef, "conditionType", condition.Type, "conditionStatus", condition.Status, "conditionReason", condition.Reason)
}
// Sync the status so that the condition and nominatingInfo are applied to podStatusCopy.
synced := syncStatus(podStatusCopy, condition, psuc.nominatingInfo)
if !synced {
logger.V(5).Info("Pod status patch call does not need to be executed because it has no effect", "pod", psuc.podRef)
return nil
}
// It's safe to run PatchPodStatus even on an outdated pod object.
err := util.PatchPodStatus(ctx, client, psuc.podRef.Name, psuc.podRef.Namespace, psuc.podStatus, podStatusCopy)
if err != nil {
logger.Error(err, "Failed to patch pod status", "pod", psuc.podRef)
return err
}
return nil
}
func (psuc *PodStatusPatchCall) Sync(obj metav1.Object) (metav1.Object, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return obj, fmt.Errorf("unexpected error: object of type %T is not of type *v1.Pod", obj)
}
psuc.lock.Lock()
if !psuc.executed {
// Set podStatus only if the call's execution hasn't started yet;
// otherwise the write would be irrelevant and might race.
psuc.podStatus = pod.Status.DeepCopy()
}
newCondition := psuc.newCondition.DeepCopy()
psuc.lock.Unlock()
podCopy := pod.DeepCopy()
// Sync the passed pod's status with the call's condition and nominatingInfo.
synced := syncStatus(&podCopy.Status, newCondition, psuc.nominatingInfo)
if !synced {
return pod, nil
}
return podCopy, nil
}
func (psuc *PodStatusPatchCall) Merge(oldCall fwk.APICall) error {
oldPsuc, ok := oldCall.(*PodStatusPatchCall)
if !ok {
return fmt.Errorf("unexpected error: call of type %T is not of type *PodStatusPatchCall", oldCall)
}
if psuc.nominatingInfo.Mode() == fwk.ModeNoop && oldPsuc.nominatingInfo.Mode() == fwk.ModeOverride {
// Set a nominatingInfo from an old call if the new one is no-op.
psuc.nominatingInfo = oldPsuc.nominatingInfo
}
if psuc.newCondition == nil && oldPsuc.newCondition != nil {
// Set a condition from an old call if the new one is nil.
psuc.newCondition = oldPsuc.newCondition
}
return nil
}
// conditionNeedsUpdate checks if the pod condition needs update.
func conditionNeedsUpdate(status *v1.PodStatus, condition *v1.PodCondition) bool {
// Try to find this pod condition.
_, oldCondition := podutil.GetPodCondition(status, condition.Type)
if oldCondition == nil {
return true
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(&oldCondition.LastProbeTime)
// Return true if any of the fields has changed.
return !isEqual
}
func (psuc *PodStatusPatchCall) IsNoOp() bool {
nnnNeedsUpdate := psuc.nominatingInfo.Mode() == fwk.ModeOverride && psuc.podStatus.NominatedNodeName != psuc.nominatingInfo.NominatedNodeName
if nnnNeedsUpdate {
return false
}
if psuc.newCondition == nil {
return true
}
return !conditionNeedsUpdate(psuc.podStatus, psuc.newCondition)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"sync"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
)
// CycleState stores data shared within one scheduling cycle.
// Note: CycleState uses a sync.Map as its backing storage because it is thread-safe and
// optimized for the "write once, read many times" scenario.
// That is the recommended pattern in all in-tree plugins: plugin-specific state is written
// once in PreFilter/PreScore and afterwards read many times in Filter/Score.
type CycleState struct {
// storage is keyed with StateKey, and valued with StateData.
storage sync.Map
// if recordPluginMetrics is true, metrics.PluginExecutionDuration will be recorded for this cycle.
recordPluginMetrics bool
// skipFilterPlugins are plugins that will be skipped in the Filter extension point.
skipFilterPlugins sets.Set[string]
// skipScorePlugins are plugins that will be skipped in the Score extension point.
skipScorePlugins sets.Set[string]
// skipPreBindPlugins are plugins that will be skipped in the PreBind extension point.
skipPreBindPlugins sets.Set[string]
}
// NewCycleState initializes a new CycleState and returns its pointer.
func NewCycleState() *CycleState {
return &CycleState{}
}
// ShouldRecordPluginMetrics returns whether metrics.PluginExecutionDuration metrics should be recorded.
func (c *CycleState) ShouldRecordPluginMetrics() bool {
if c == nil {
return false
}
return c.recordPluginMetrics
}
// SetRecordPluginMetrics sets recordPluginMetrics to the given value.
func (c *CycleState) SetRecordPluginMetrics(flag bool) {
if c == nil {
return
}
c.recordPluginMetrics = flag
}
func (c *CycleState) SetSkipFilterPlugins(plugins sets.Set[string]) {
c.skipFilterPlugins = plugins
}
func (c *CycleState) GetSkipFilterPlugins() sets.Set[string] {
return c.skipFilterPlugins
}
func (c *CycleState) SetSkipScorePlugins(plugins sets.Set[string]) {
c.skipScorePlugins = plugins
}
func (c *CycleState) GetSkipScorePlugins() sets.Set[string] {
return c.skipScorePlugins
}
func (c *CycleState) SetSkipPreBindPlugins(plugins sets.Set[string]) {
c.skipPreBindPlugins = plugins
}
func (c *CycleState) GetSkipPreBindPlugins() sets.Set[string] {
return c.skipPreBindPlugins
}
// Clone creates a copy of CycleState and returns its pointer. Clone returns
// nil if the CycleState being cloned is nil.
func (c *CycleState) Clone() fwk.CycleState {
if c == nil {
return nil
}
copy := NewCycleState()
// Deep-copy each stored value so that writes to the clone don't affect the original.
c.storage.Range(func(k, v interface{}) bool {
copy.storage.Store(k, v.(fwk.StateData).Clone())
return true
})
// The fields below are never mutated, so a shallow copy is sufficient.
copy.recordPluginMetrics = c.recordPluginMetrics
copy.skipFilterPlugins = c.skipFilterPlugins
copy.skipScorePlugins = c.skipScorePlugins
copy.skipPreBindPlugins = c.skipPreBindPlugins
return copy
}
// Read retrieves data with the given "key" from CycleState. If the key is not
// present, ErrNotFound is returned.
//
// See CycleState for notes on concurrency.
func (c *CycleState) Read(key fwk.StateKey) (fwk.StateData, error) {
if v, ok := c.storage.Load(key); ok {
return v.(fwk.StateData), nil
}
return nil, fwk.ErrNotFound
}
// Write stores the given "val" in CycleState with the given "key".
//
// See CycleState for notes on concurrency.
func (c *CycleState) Write(key fwk.StateKey, val fwk.StateData) {
c.storage.Store(key, val)
}
// Delete deletes data with the given key from CycleState.
//
// See CycleState for notes on concurrency.
func (c *CycleState) Delete(key fwk.StateKey) {
c.storage.Delete(key)
}
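// Example (illustrative sketch of the write-once/read-many pattern described
// on CycleState; the plugin state type and key are hypothetical):
//
//	type preFilterState struct{ matchingPods int }
//
//	func (s *preFilterState) Clone() fwk.StateData { c := *s; return &c }
//
//	const preFilterStateKey fwk.StateKey = "PreFilterExamplePlugin"
//
//	// In PreFilter:
//	state.Write(preFilterStateKey, &preFilterState{matchingPods: 3})
//	// In Filter:
//	if v, err := state.Read(preFilterStateKey); err == nil {
//		s := v.(*preFilterState)
//		_ = s.matchingPods
//	}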
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/resource"
"k8s.io/dynamic-resource-allocation/resourceclaim"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
)
// Special event labels.
const (
// ScheduleAttemptFailure is the event when a schedule attempt fails.
ScheduleAttemptFailure = "ScheduleAttemptFailure"
// BackoffComplete is the event when a pod finishes backoff.
BackoffComplete = "BackoffComplete"
// PopFromBackoffQ is the event when a pod is popped from backoffQ when activeQ is empty.
PopFromBackoffQ = "PopFromBackoffQ"
// ForceActivate is the event when a pod is moved from unschedulablePods/backoffQ
// to activeQ. Usually it's triggered by plugin implementations.
ForceActivate = "ForceActivate"
// UnschedulableTimeout is the event when a pod is moved from unschedulablePods
// due to the timeout specified at pod-max-in-unschedulable-pods-duration.
UnschedulableTimeout = "UnschedulableTimeout"
)
var (
// EventAssignedPodAdd is the event when an assigned pod is added.
EventAssignedPodAdd = fwk.ClusterEvent{Resource: assignedPod, ActionType: fwk.Add}
// EventAssignedPodUpdate is the event when an assigned pod is updated.
EventAssignedPodUpdate = fwk.ClusterEvent{Resource: assignedPod, ActionType: fwk.Update}
// EventAssignedPodDelete is the event when an assigned pod is deleted.
EventAssignedPodDelete = fwk.ClusterEvent{Resource: assignedPod, ActionType: fwk.Delete}
// EventUnscheduledPodAdd is the event when an unscheduled pod is added.
EventUnscheduledPodAdd = fwk.ClusterEvent{Resource: unschedulablePod, ActionType: fwk.Add}
// EventUnscheduledPodUpdate is the event when an unscheduled pod is updated.
EventUnscheduledPodUpdate = fwk.ClusterEvent{Resource: unschedulablePod, ActionType: fwk.Update}
// EventUnscheduledPodDelete is the event when an unscheduled pod is deleted.
EventUnscheduledPodDelete = fwk.ClusterEvent{Resource: unschedulablePod, ActionType: fwk.Delete}
// EventUnschedulableTimeout is the event when a pod stays in unschedulable for longer than timeout.
EventUnschedulableTimeout = fwk.ClusterEvent{Resource: fwk.WildCard, ActionType: fwk.All, CustomLabel: UnschedulableTimeout}
// EventForceActivate is the event when a pod is moved from unschedulablePods/backoffQ to activeQ.
EventForceActivate = fwk.ClusterEvent{Resource: fwk.WildCard, ActionType: fwk.All, CustomLabel: ForceActivate}
)
// PodSchedulingPropertiesChange interprets the update of a pod and returns the corresponding UpdatePodXYZ event(s).
// Once we have other pod update events, this function should be updated as well.
func PodSchedulingPropertiesChange(newPod *v1.Pod, oldPod *v1.Pod) (events []fwk.ClusterEvent) {
r := assignedPod
if newPod.Spec.NodeName == "" {
r = unschedulablePod
}
podChangeExtractors := []podChangeExtractor{
extractPodLabelsChange,
extractPodScaleDown,
extractPodSchedulingGateEliminatedChange,
extractPodTolerationChange,
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
podChangeExtractors = append(podChangeExtractors, extractPodGeneratedResourceClaimChange)
}
for _, fn := range podChangeExtractors {
if event := fn(newPod, oldPod); event != fwk.None {
events = append(events, fwk.ClusterEvent{Resource: r, ActionType: event})
}
}
if len(events) == 0 {
// When no specific event is found, we use the general Update action,
// which should only trigger plugins registering a general Pod/Update event.
events = append(events, fwk.ClusterEvent{Resource: r, ActionType: fwk.Update})
}
return
}
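// Example (illustrative sketch): a label-only update to a pod without a node
// assignment yields a single unschedulablePod/UpdatePodLabel event:
//
//	oldPod := pod.DeepCopy() // assumed: pod.Spec.NodeName == ""
//	newPod := pod.DeepCopy()
//	newPod.Labels = map[string]string{"tier": "web"}
//	events := PodSchedulingPropertiesChange(newPod, oldPod)
//	// events: [{Resource: unschedulablePod, ActionType: fwk.UpdatePodLabel}]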
type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType
// extractPodScaleDown interprets the update of a pod and returns the UpdatePodScaleDown event if any of the pod's resource requests are scaled down.
func extractPodScaleDown(newPod, oldPod *v1.Pod) fwk.ActionType {
opt := resource.PodResourcesOptions{
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
newPodRequests := resource.PodRequests(newPod, opt)
oldPodRequests := resource.PodRequests(oldPod, opt)
for rName, oldReq := range oldPodRequests {
newReq, ok := newPodRequests[rName]
if !ok {
// The resource request of rName is removed.
return fwk.UpdatePodScaleDown
}
if oldReq.MilliValue() > newReq.MilliValue() {
// The resource request of rName is scaled down.
return fwk.UpdatePodScaleDown
}
}
return fwk.None
}
func extractPodLabelsChange(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType {
if isLabelChanged(newPod.GetLabels(), oldPod.GetLabels()) {
return fwk.UpdatePodLabel
}
return fwk.None
}
func extractPodTolerationChange(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType {
if len(newPod.Spec.Tolerations) != len(oldPod.Spec.Tolerations) {
// A Pod got a new toleration.
// Due to API validation, users can add tolerations but cannot modify or remove them.
// So it's enough to check the length of the tolerations to notice the update,
// and any added toleration could make the Pod schedulable.
return fwk.UpdatePodToleration
}
return fwk.None
}
func extractPodSchedulingGateEliminatedChange(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType {
if len(newPod.Spec.SchedulingGates) == 0 && len(oldPod.Spec.SchedulingGates) != 0 {
// A scheduling gate on the pod is completely removed.
return fwk.UpdatePodSchedulingGatesEliminated
}
return fwk.None
}
func extractPodGeneratedResourceClaimChange(newPod *v1.Pod, oldPod *v1.Pod) fwk.ActionType {
if !resourceclaim.PodStatusEqual(newPod.Status.ResourceClaimStatuses, oldPod.Status.ResourceClaimStatuses) ||
!resourceclaim.PodExtendedStatusEqual(newPod.Status.ExtendedResourceClaimStatus, oldPod.Status.ExtendedResourceClaimStatus) {
return fwk.UpdatePodGeneratedResourceClaim
}
return fwk.None
}
// NodeSchedulingPropertiesChange interprets the update of a node and returns corresponding UpdateNodeXYZ event(s).
func NodeSchedulingPropertiesChange(newNode *v1.Node, oldNode *v1.Node) (events []fwk.ClusterEvent) {
nodeChangeExtractors := []nodeChangeExtractor{
extractNodeSpecUnschedulableChange,
extractNodeAllocatableChange,
extractNodeLabelsChange,
extractNodeTaintsChange,
extractNodeConditionsChange,
extractNodeAnnotationsChange,
}
for _, fn := range nodeChangeExtractors {
if event := fn(newNode, oldNode); event != fwk.None {
events = append(events, fwk.ClusterEvent{Resource: fwk.Node, ActionType: event})
}
}
return
}
type nodeChangeExtractor func(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType
func extractNodeAllocatableChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
if !equality.Semantic.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) {
return fwk.UpdateNodeAllocatable
}
return fwk.None
}
func extractNodeLabelsChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
if isLabelChanged(newNode.GetLabels(), oldNode.GetLabels()) {
return fwk.UpdateNodeLabel
}
return fwk.None
}
func isLabelChanged(newLabels map[string]string, oldLabels map[string]string) bool {
return !equality.Semantic.DeepEqual(newLabels, oldLabels)
}
func extractNodeTaintsChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
if !equality.Semantic.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints) {
return fwk.UpdateNodeTaint
}
return fwk.None
}
func extractNodeConditionsChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions))
for i := range conditions {
conditionStatuses[conditions[i].Type] = conditions[i].Status
}
return conditionStatuses
}
if !equality.Semantic.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions)) {
return fwk.UpdateNodeCondition
}
return fwk.None
}
func extractNodeSpecUnschedulableChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && !newNode.Spec.Unschedulable {
// TODO: create UpdateNodeSpecUnschedulable ActionType
return fwk.UpdateNodeTaint
}
return fwk.None
}
func extractNodeAnnotationsChange(newNode *v1.Node, oldNode *v1.Node) fwk.ActionType {
if !equality.Semantic.DeepEqual(oldNode.GetAnnotations(), newNode.GetAnnotations()) {
return fwk.UpdateNodeAnnotation
}
return fwk.None
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file defines the scheduling framework plugin interfaces.
package framework
import (
"context"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
// NodeToStatus contains the statuses of the Nodes where the incoming Pod was not schedulable.
type NodeToStatus struct {
// nodeToStatus contains specific statuses of the nodes.
nodeToStatus map[string]*fwk.Status
// absentNodesStatus defines a status for all nodes that are absent in nodeToStatus map.
// By default, all absent nodes are UnschedulableAndUnresolvable.
absentNodesStatus *fwk.Status
}
// NewDefaultNodeToStatus creates NodeToStatus without any node in the map.
// The absentNodesStatus is set by default to UnschedulableAndUnresolvable.
func NewDefaultNodeToStatus() *NodeToStatus {
return NewNodeToStatus(make(map[string]*fwk.Status), fwk.NewStatus(fwk.UnschedulableAndUnresolvable))
}
// NewNodeToStatus creates NodeToStatus initialized with given nodeToStatus and absentNodesStatus.
func NewNodeToStatus(nodeToStatus map[string]*fwk.Status, absentNodesStatus *fwk.Status) *NodeToStatus {
return &NodeToStatus{
nodeToStatus: nodeToStatus,
absentNodesStatus: absentNodesStatus,
}
}
// Get returns the status for given nodeName. If the node is not in the map, the absentNodesStatus is returned.
func (m *NodeToStatus) Get(nodeName string) *fwk.Status {
if status, ok := m.nodeToStatus[nodeName]; ok {
return status
}
return m.absentNodesStatus
}
// Set sets status for given nodeName.
func (m *NodeToStatus) Set(nodeName string, status *fwk.Status) {
m.nodeToStatus[nodeName] = status
}
// Len returns the length of the nodeToStatus map. It does not account for absent nodes.
func (m *NodeToStatus) Len() int {
return len(m.nodeToStatus)
}
// AbsentNodesStatus returns absentNodesStatus value.
func (m *NodeToStatus) AbsentNodesStatus() *fwk.Status {
return m.absentNodesStatus
}
// SetAbsentNodesStatus sets absentNodesStatus value.
func (m *NodeToStatus) SetAbsentNodesStatus(status *fwk.Status) {
m.absentNodesStatus = status
}
// ForEachExplicitNode runs fn for each node whose status is explicitly set.
// Important: it runs fn only for nodes with an explicitly registered status,
// and hence may not run fn for all existing nodes.
// For example, if PreFilter rejects all Nodes, the scheduler does NOT set a failure status on every Node,
// but records the failure as the absentNodesStatus.
// Callers should also read AbsentNodesStatus() and treat every node absent from the map as having that status.
func (m *NodeToStatus) ForEachExplicitNode(fn func(nodeName string, status *fwk.Status)) {
for nodeName, status := range m.nodeToStatus {
fn(nodeName, status)
}
}
// NodesForStatusCode returns a list of NodeInfos for the nodes that match a given status code.
// If the absentNodesStatus matches the code, all existing nodes are fetched using nodeLister
// and filtered using NodeToStatus.Get.
// If the absentNodesStatus doesn't match the code, nodeToStatus map is used to create a list of nodes
// and nodeLister.Get is used to obtain NodeInfo for each.
func (m *NodeToStatus) NodesForStatusCode(nodeLister fwk.NodeInfoLister, code fwk.Code) ([]fwk.NodeInfo, error) {
var resultNodes []fwk.NodeInfo
if m.AbsentNodesStatus().Code() == code {
allNodes, err := nodeLister.List()
if err != nil {
return nil, err
}
if m.Len() == 0 {
// All nodes are absent and status code is matching, so can return all nodes.
return allNodes, nil
}
// Find, among allNodes, the nodes that are absent from the map or whose explicit status matches the code.
for _, node := range allNodes {
nodeName := node.Node().Name
if status := m.Get(nodeName); status.Code() == code {
resultNodes = append(resultNodes, node)
}
}
return resultNodes, nil
}
m.ForEachExplicitNode(func(nodeName string, status *fwk.Status) {
if status.Code() == code {
if nodeInfo, err := nodeLister.Get(nodeName); err == nil {
resultNodes = append(resultNodes, nodeInfo)
}
}
})
return resultNodes, nil
}
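// Example (illustrative sketch): explicit statuses are recorded with Set, and
// Get falls back to the absent-nodes default for everything else:
//
//	m := NewDefaultNodeToStatus()
//	m.Set("node-a", fwk.NewStatus(fwk.Unschedulable, "too many pods"))
//	_ = m.Get("node-a") // the explicit Unschedulable status
//	_ = m.Get("node-b") // the UnschedulableAndUnresolvable default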
// PodsToActivateKey is a reserved state key for stashing pods.
// If the stashed pods are present in unschedulablePods or backoffQ, they will be
// activated (i.e., moved to activeQ) in two phases:
// - end of a scheduling cycle if it succeeds (will be cleared from `PodsToActivate` if activated)
// - end of a binding cycle if it succeeds
var PodsToActivateKey fwk.StateKey = "kubernetes.io/pods-to-activate"
// PodsToActivate stores pods to be activated.
type PodsToActivate struct {
sync.Mutex
// Map is keyed with namespaced pod name, and valued with the pod.
Map map[string]*v1.Pod
}
// Clone just returns the same state.
func (s *PodsToActivate) Clone() fwk.StateData {
return s
}
// NewPodsToActivate instantiates a PodsToActivate object.
func NewPodsToActivate() *PodsToActivate {
return &PodsToActivate{Map: make(map[string]*v1.Pod)}
}
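// A hedged sketch of how a plugin callback might stash a pod for activation;
// "state" (the fwk.CycleState of the cycle) and "pod" are hypothetical
// variables available at the call site:
//
//	if c, err := state.Read(PodsToActivateKey); err == nil {
//		if s, ok := c.(*PodsToActivate); ok {
//			s.Lock()
//			s.Map[pod.Namespace+"/"+pod.Name] = pod
//			s.Unlock()
//		}
//	}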
// Framework manages the set of plugins in use by the scheduling framework.
// Configured plugins are called at specified points in a scheduling context.
type Framework interface {
fwk.Handle
// PreEnqueuePlugins returns the registered preEnqueue plugins.
PreEnqueuePlugins() []fwk.PreEnqueuePlugin
// EnqueueExtensions returns the registered Enqueue extensions.
EnqueueExtensions() []fwk.EnqueueExtensions
// QueueSortFunc returns the function to sort pods in the scheduling queue.
QueueSortFunc() fwk.LessFunc
// RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns
// *fwk.Status and its code is set to non-success if any of the plugins returns
// anything but Success. If a non-success status is returned, then the scheduling
// cycle is aborted.
// It also returns a PreFilterResult, which may influence what or how many nodes to
// evaluate downstream.
// The third return value contains the names of the PreFilter plugins that rejected some or all Nodes with PreFilterResult.
// Note that it doesn't contain any plugin when a plugin rejects this Pod with a non-success status
// rather than with PreFilterResult.
RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*fwk.PreFilterResult, *fwk.Status, sets.Set[string])
// RunPostFilterPlugins runs the set of configured PostFilter plugins.
// PostFilter plugins can either be informational, in which case they should be configured
// to execute first and return Unschedulable status, or ones that try to change the
// cluster state to make the pod potentially schedulable in a future scheduling cycle.
RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status)
// RunPreBindPlugins runs the set of configured PreBind plugins. It returns
// *fwk.Status and its code is set to non-success if any of the plugins returns
// anything but Success. If the Status code is "Unschedulable", it is
// considered as a scheduling check failure, otherwise, it is considered as an
// internal error. In either case the pod is not going to be bound.
RunPreBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
// RunPreBindPreFlights runs the set of configured PreBindPreFlight functions from PreBind plugins.
// It returns immediately if any of the plugins returns a non-skip status.
RunPreBindPreFlights(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
// RunPostBindPlugins runs the set of configured PostBind plugins.
RunPostBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string)
// RunReservePluginsReserve runs the Reserve method of the set of
// configured Reserve plugins. If any of these calls returns an error, it
// does not continue running the remaining ones and returns the error. In
// such case, pod will not be scheduled.
RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
// RunReservePluginsUnreserve runs the Unreserve method of the set of
// configured Reserve plugins.
RunReservePluginsUnreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string)
// RunPermitPlugins runs the set of configured Permit plugins. If any of these
// plugins returns a status other than "Success" or "Wait", it does not continue
// running the remaining plugins and returns an error. Otherwise, if any of the
// plugins returns "Wait", then this function will create and add waiting pod
// to a map of currently waiting pods and return status with "Wait" code.
// Pod will remain waiting pod for the minimum duration returned by the Permit plugins.
RunPermitPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
// WillWaitOnPermit returns whether this pod will wait on permit by checking if the pod is a waiting pod.
WillWaitOnPermit(ctx context.Context, pod *v1.Pod) bool
// WaitOnPermit will block, if the pod is a waiting pod, until the waiting pod is rejected or allowed.
WaitOnPermit(ctx context.Context, pod *v1.Pod) *fwk.Status
// RunBindPlugins runs the set of configured Bind plugins. A Bind plugin may choose
// whether or not to handle the given Pod. If a Bind plugin chooses to skip the
// binding, it should return a "Skip" status (code=5). Otherwise, it should return "Error"
// or "Success". If none of the plugins handled binding, RunBindPlugins returns
// a "Skip" status (code=5).
RunBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status
// HasFilterPlugins returns true if at least one Filter plugin is defined.
HasFilterPlugins() bool
// HasPostFilterPlugins returns true if at least one PostFilter plugin is defined.
HasPostFilterPlugins() bool
// HasScorePlugins returns true if at least one Score plugin is defined.
HasScorePlugins() bool
// ListPlugins returns a map of extension point name to list of configured Plugins.
ListPlugins() *config.Plugins
// ProfileName returns the profile name associated with this profile.
ProfileName() string
// PercentageOfNodesToScore returns the percentageOfNodesToScore associated with this profile.
PercentageOfNodesToScore() *int32
// SetPodNominator sets the PodNominator
SetPodNominator(nominator fwk.PodNominator)
// SetPodActivator sets the PodActivator
SetPodActivator(activator fwk.PodActivator)
// SetAPICacher sets the APICacher
SetAPICacher(apiCacher fwk.APICacher)
// Close calls Close method of each plugin.
Close() error
}
func NewPostFilterResultWithNominatedNode(name string) *fwk.PostFilterResult {
return &fwk.PostFilterResult{
NominatingInfo: &fwk.NominatingInfo{
NominatedNodeName: name,
NominatingMode: fwk.ModeOverride,
},
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parallelize
import "context"
// ErrorChannel supports non-blocking send and receive operations to capture errors.
// A maximum of one error is kept in the channel and the rest of the errors sent
// are ignored, unless the existing error is received and the channel becomes empty
// again.
type ErrorChannel struct {
errCh chan error
}
// SendError sends an error without blocking the sender.
func (e *ErrorChannel) SendError(err error) {
select {
case e.errCh <- err:
default:
}
}
// SendErrorWithCancel sends an error without blocking the sender and calls
// cancel function.
func (e *ErrorChannel) SendErrorWithCancel(err error, cancel context.CancelFunc) {
e.SendError(err)
cancel()
}
// ReceiveError receives an error from channel without blocking on the receiver.
func (e *ErrorChannel) ReceiveError() error {
select {
case err := <-e.errCh:
return err
default:
return nil
}
}
// NewErrorChannel returns a new ErrorChannel.
func NewErrorChannel() *ErrorChannel {
return &ErrorChannel{
errCh: make(chan error, 1),
}
}
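// A minimal usage sketch, assuming a fan-out of worker goroutines; only the
// first error is retained, and later sends are dropped without blocking.
// items and process are hypothetical:
//
//	ctx, cancel := context.WithCancel(ctx)
//	defer cancel()
//	errCh := NewErrorChannel()
//	var wg sync.WaitGroup
//	for _, item := range items {
//		wg.Add(1)
//		go func(item string) {
//			defer wg.Done()
//			if err := process(ctx, item); err != nil {
//				errCh.SendErrorWithCancel(err, cancel) // also stops the other workers
//			}
//		}(item)
//	}
//	wg.Wait()
//	if err := errCh.ReceiveError(); err != nil {
//		return err
//	}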
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parallelize
import (
"context"
"math"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/metrics"
)
// DefaultParallelism is the default parallelism used in scheduler.
const DefaultParallelism int = 16
// Parallelizer implements k8s.io/kube-scheduler/framework.Parallelizer. It helps run scheduling operations in parallel chunks where possible, to improve performance and CPU utilization.
// It wraps the logic of k8s.io/client-go/util/workqueue to run operations on multiple workers.
type Parallelizer struct {
parallelism int
}
// NewParallelizer returns an object holding the parallelism (number of workers).
func NewParallelizer(p int) Parallelizer {
return Parallelizer{parallelism: p}
}
// chunkSizeFor returns a chunk size for the given number of items to use for
// parallel work. The size aims to produce good CPU utilization.
// It returns max(1, min(sqrt(n), n/parallelism+1)).
func chunkSizeFor(n, parallelism int) int {
s := int(math.Sqrt(float64(n)))
if r := n/parallelism + 1; s > r {
s = r
} else if s < 1 {
s = 1
}
return s
}
// numWorkersForChunkSize returns number of workers (goroutines)
// that will be created in workqueue.ParallelizeUntil
// for given parallelism, pieces and chunkSize values.
func numWorkersForChunkSize(parallelism, pieces, chunkSize int) int {
chunks := (pieces + chunkSize - 1) / chunkSize
if chunks < parallelism {
return chunks
}
return parallelism
}
// Until is a wrapper around workqueue.ParallelizeUntil for use in scheduling algorithms.
// The given operation name is recorded as a label on the goroutines metric.
func (p Parallelizer) Until(ctx context.Context, pieces int, doWorkPiece workqueue.DoWorkPieceFunc, operation string) {
chunkSize := chunkSizeFor(pieces, p.parallelism)
workers := numWorkersForChunkSize(p.parallelism, pieces, chunkSize)
goroutinesMetric := metrics.Goroutines.WithLabelValues(operation)
// Calling single Add with workers' count is more efficient than calling Inc or Dec per each work piece.
// This approach improves performance of some plugins (affinity, topology spreading) as well as preemption.
goroutinesMetric.Add(float64(workers))
defer goroutinesMetric.Add(float64(-workers))
workqueue.ParallelizeUntil(ctx, p.parallelism, pieces, doWorkPiece, workqueue.WithChunkSize(chunkSize))
}
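// A worked example of the chunking arithmetic above: with pieces=100 and
// parallelism=16, sqrt(100)=10 and 100/16+1=7, so chunkSize=7 and
// ceil(100/7)=15 workers are recorded in the goroutines metric.
// A hedged usage sketch; nodes, scoreNode, and the operation label are hypothetical:
//
//	p := NewParallelizer(DefaultParallelism)
//	scores := make([]int64, len(nodes))
//	p.Until(ctx, len(nodes), func(i int) {
//		scores[i] = scoreNode(nodes[i]) // each piece writes only its own slot, so no locking is needed
//	}, "exampleOperation")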
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultbinder
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// Name of the plugin used in the plugin registry and configurations.
const Name = names.DefaultBinder
// DefaultBinder binds pods to nodes using a k8s client.
type DefaultBinder struct {
handle fwk.Handle
}
var _ fwk.BindPlugin = &DefaultBinder{}
// New creates a DefaultBinder.
func New(_ context.Context, _ runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return &DefaultBinder{handle: handle}, nil
}
// Name returns the name of the plugin.
func (b DefaultBinder) Name() string {
return Name
}
// Bind binds pods to nodes using the k8s client.
func (b DefaultBinder) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status {
logger := klog.FromContext(ctx)
binding := &v1.Binding{
ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
Target: v1.ObjectReference{Kind: "Node", Name: nodeName},
}
if b.handle.APICacher() != nil {
// When API cacher is available, use it to bind the pod.
onFinish, err := b.handle.APICacher().BindPod(binding)
if err != nil {
return fwk.AsStatus(err)
}
err = b.handle.APICacher().WaitOnFinish(ctx, onFinish)
if err != nil {
return fwk.AsStatus(err)
}
return nil
}
logger.V(3).Info("Attempting to bind pod to node", "pod", klog.KObj(p), "node", klog.KRef("", nodeName))
err := b.handle.ClientSet().CoreV1().Pods(binding.Namespace).Bind(ctx, binding, metav1.CreateOptions{})
if err != nil {
return fwk.AsStatus(err)
}
return nil
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultpreemption
import (
"context"
"fmt"
"math/rand"
"sort"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
corelisters "k8s.io/client-go/listers/core/v1"
policylisters "k8s.io/client-go/listers/policy/v1"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/framework/preemption"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// Name of the plugin used in the plugin registry and configurations.
const Name = names.DefaultPreemption
// IsEligiblePodFunc is a function which may be assigned to the DefaultPreemption plugin.
// This may implement rules/filtering around preemption eligibility, which is in addition to
// the internal requirement that the victim pod have lower priority than the preemptor pod.
// Any customizations should always allow system services to preempt normal pods, to avoid
// problems if system pods are unable to find space.
type IsEligiblePodFunc func(nodeInfo fwk.NodeInfo, victim fwk.PodInfo, preemptor *v1.Pod) bool
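// A hypothetical customization sketch (the label name is made up): it spares
// labeled victims from preemption by ordinary pods while still letting
// system-critical preemptors (priority >= 2000000000) through:
//
//	pl.IsEligiblePod = func(nodeInfo fwk.NodeInfo, victim fwk.PodInfo, preemptor *v1.Pod) bool {
//		if preemptor.Spec.Priority != nil && *preemptor.Spec.Priority >= 2000000000 {
//			return true // always allow system services to find space
//		}
//		_, protected := victim.GetPod().Labels["example.com/preemption-protected"]
//		return !protected
//	}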
// MoreImportantPodFunc is a function which may be assigned to the DefaultPreemption plugin.
// Implementations should return true if the first pod is more important than the second pod
// and the second one should be considered for preemption before the first one.
// For performance reasons, the search for nodes eligible for preemption is done by omitting all
// eligible victims from a node then checking whether the preemptor fits on the node without them,
// before adding back victims (starting from the most important) that still fit with the preemptor.
// The default behavior is to not consider pod affinity between the preemptor and the victims,
// as affinity between pods that are eligible to preempt each other isn't recommended.
type MoreImportantPodFunc func(pod1, pod2 *v1.Pod) bool
// DefaultPreemption is a PostFilter plugin that implements the preemption logic.
type DefaultPreemption struct {
fh fwk.Handle
fts feature.Features
args config.DefaultPreemptionArgs
podLister corelisters.PodLister
pdbLister policylisters.PodDisruptionBudgetLister
Evaluator *preemption.Evaluator
// IsEligiblePod returns whether a victim pod is allowed to be preempted by a preemptor pod.
// This filtering is in addition to the internal requirement that the victim pod have lower
// priority than the preemptor pod. Any customizations should always allow system services
// to preempt normal pods, to avoid problems if system pods are unable to find space.
IsEligiblePod IsEligiblePodFunc
// MoreImportantPod is used to sort eligible victims in-place in descending order of
// importance, from highest to lowest. Pods with higher importance are less likely to be preempted.
// The default behavior is to order pods by descending priority, then descending runtime duration
// for pods with equal priority.
MoreImportantPod MoreImportantPodFunc
}
var _ fwk.PostFilterPlugin = &DefaultPreemption{}
var _ fwk.PreEnqueuePlugin = &DefaultPreemption{}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *DefaultPreemption) Name() string {
return Name
}
// New initializes a new plugin and returns it. The plugin type is retained to allow modification.
func New(_ context.Context, dpArgs runtime.Object, fh fwk.Handle, fts feature.Features) (*DefaultPreemption, error) {
args, ok := dpArgs.(*config.DefaultPreemptionArgs)
if !ok {
return nil, fmt.Errorf("got args of type %T, want *DefaultPreemptionArgs", dpArgs)
}
if err := validation.ValidateDefaultPreemptionArgs(nil, args); err != nil {
return nil, err
}
podLister := fh.SharedInformerFactory().Core().V1().Pods().Lister()
pdbLister := getPDBLister(fh.SharedInformerFactory())
pl := DefaultPreemption{
fh: fh,
fts: fts,
args: *args,
podLister: podLister,
pdbLister: pdbLister,
}
pl.Evaluator = preemption.NewEvaluator(Name, fh, &pl, fts.EnableAsyncPreemption)
// Default behavior: No additional filtering, beyond the internal requirement that the victim pod
// have lower priority than the preemptor pod.
pl.IsEligiblePod = func(nodeInfo fwk.NodeInfo, victim fwk.PodInfo, preemptor *v1.Pod) bool {
return true
}
// Default behavior: Sort by descending priority, then by descending runtime duration as secondary ordering.
pl.MoreImportantPod = util.MoreImportantPod
return &pl, nil
}
// PostFilter invoked at the postFilter extension point.
func (pl *DefaultPreemption) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
defer func() {
metrics.PreemptionAttempts.Inc()
}()
result, status := pl.Evaluator.Preempt(ctx, state, pod, m)
msg := status.Message()
if len(msg) > 0 {
return result, fwk.NewStatus(status.Code(), "preemption: "+msg)
}
return result, status
}
func (pl *DefaultPreemption) PreEnqueue(ctx context.Context, p *v1.Pod) *fwk.Status {
if !pl.fts.EnableAsyncPreemption {
return nil
}
if pl.Evaluator.IsPodRunningPreemption(p.GetUID()) {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "waiting for the preemption for this pod to be finished")
}
return nil
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *DefaultPreemption) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
if pl.fts.EnableAsyncPreemption {
return []fwk.ClusterEventWithHint{
// We need to register the event to tell the scheduling queue that the pod could be un-gated after some Pods' deletion.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Delete}, QueueingHintFn: pl.isPodSchedulableAfterPodDeletion},
}, nil
}
// When the async preemption is disabled, PreEnqueue always returns nil, and hence pods never get rejected by this plugin.
return nil, nil
}
// isPodSchedulableAfterPodDeletion returns the queueing hint for the pod after a pod deletion event,
// which is always Skip.
// The default preemption plugin is a bit tricky;
// the pods rejected by it are the ones that have run/are running the preemption asynchronously.
// Those pods should always have other plugins in pInfo.UnschedulablePlugins,
// whose failures will be resolved by the preemption.
// The reason why we return Skip here is that the preemption plugin should not decide when to requeue Pods;
// rather, those other plugins should be responsible for that.
func (pl *DefaultPreemption) isPodSchedulableAfterPodDeletion(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
return fwk.QueueSkip, nil
}
// calculateNumCandidates returns the number of candidates the FindCandidates
// method must produce from dry running based on the constraints given by
// <minCandidateNodesPercentage> and <minCandidateNodesAbsolute>. The number of
// candidates returned will never be greater than <numNodes>.
func (pl *DefaultPreemption) calculateNumCandidates(numNodes int32) int32 {
n := (numNodes * pl.args.MinCandidateNodesPercentage) / 100
if n < pl.args.MinCandidateNodesAbsolute {
n = pl.args.MinCandidateNodesAbsolute
}
if n > numNodes {
n = numNodes
}
return n
}
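// A worked example of the clamping above, assuming the default args
// MinCandidateNodesPercentage=10 and MinCandidateNodesAbsolute=100:
//
//	numNodes=5000 -> 5000*10/100 = 500 candidates
//	numNodes=500  -> 500*10/100 = 50, raised to the absolute minimum 100
//	numNodes=80   -> the minimum 100 is capped at numNodes, so 80 candidates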
// getOffsetRand is a dedicated random source for GetOffsetAndNumCandidates calls.
// It defaults to rand.Int31n, but is a package variable so it can be overridden to make unit tests deterministic.
var getOffsetRand = rand.Int31n
// GetOffsetAndNumCandidates chooses a random offset and calculates the number
// of candidates that should be shortlisted for dry running preemption.
func (pl *DefaultPreemption) GetOffsetAndNumCandidates(numNodes int32) (int32, int32) {
return getOffsetRand(numNodes), pl.calculateNumCandidates(numNodes)
}
// This function is not applicable for out-of-tree preemption plugins that exercise
// different preemption candidates on the same nominated node.
func (pl *DefaultPreemption) CandidatesToVictimsMap(candidates []preemption.Candidate) map[string]*extenderv1.Victims {
m := make(map[string]*extenderv1.Victims, len(candidates))
for _, c := range candidates {
m[c.Name()] = c.Victims()
}
return m
}
// SelectVictimsOnNode finds minimum set of pods on the given node that should be preempted in order to make enough room
// for "pod" to be scheduled.
func (pl *DefaultPreemption) SelectVictimsOnNode(
ctx context.Context,
state fwk.CycleState,
pod *v1.Pod,
nodeInfo fwk.NodeInfo,
pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *fwk.Status) {
logger := klog.FromContext(ctx)
var potentialVictims []fwk.PodInfo
removePod := func(rpi fwk.PodInfo) error {
if err := nodeInfo.RemovePod(logger, rpi.GetPod()); err != nil {
return err
}
status := pl.fh.RunPreFilterExtensionRemovePod(ctx, state, pod, rpi, nodeInfo)
if !status.IsSuccess() {
return status.AsError()
}
return nil
}
addPod := func(api fwk.PodInfo) error {
nodeInfo.AddPodInfo(api)
status := pl.fh.RunPreFilterExtensionAddPod(ctx, state, pod, api, nodeInfo)
if !status.IsSuccess() {
return status.AsError()
}
return nil
}
// As the first step, remove all pods eligible for preemption from the node and
// check if the given pod can be scheduled without them present.
for _, pi := range nodeInfo.GetPods() {
if pl.isPreemptionAllowed(nodeInfo, pi, pod) {
potentialVictims = append(potentialVictims, pi)
if err := removePod(pi); err != nil {
return nil, 0, fwk.AsStatus(err)
}
}
}
// No potential victims are found, and so we don't need to evaluate the node again since its state didn't change.
if len(potentialVictims) == 0 {
return nil, 0, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "No preemption victims found for incoming pod")
}
// If the new pod does not fit after removing all the eligible pods,
// we are almost done and this node is not suitable for preemption. The only
// condition that we could check is if the "pod" is failing to schedule due to
// inter-pod affinity to one or more victims, but we have decided not to
// support this case for performance reasons. Having affinity to lower
// importance (priority) pods is not a recommended configuration anyway.
if status := pl.fh.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo); !status.IsSuccess() {
return nil, 0, status
}
var victims []fwk.PodInfo
numViolatingVictim := 0
// Sort potentialVictims by descending importance, which ensures reprieve of
// higher importance pods first.
sort.Slice(potentialVictims, func(i, j int) bool {
return pl.MoreImportantPod(potentialVictims[i].GetPod(), potentialVictims[j].GetPod())
})
// Try to reprieve as many pods as possible. We first try to reprieve the PDB
// violating victims and then other non-violating ones. In both cases, we start
// from the highest importance victims.
violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims, pdbs)
reprievePod := func(pi fwk.PodInfo) (bool, error) {
if err := addPod(pi); err != nil {
return false, err
}
status := pl.fh.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo)
fits := status.IsSuccess()
if !fits {
if err := removePod(pi); err != nil {
return false, err
}
victims = append(victims, pi)
logger.V(5).Info("Pod is a potential preemption victim on node", "pod", klog.KObj(pi.GetPod()), "node", klog.KObj(nodeInfo.Node()))
}
return fits, nil
}
for _, p := range violatingVictims {
if fits, err := reprievePod(p); err != nil {
return nil, 0, fwk.AsStatus(err)
} else if !fits {
numViolatingVictim++
}
}
// Now we try to reprieve non-violating victims.
for _, p := range nonViolatingVictims {
if _, err := reprievePod(p); err != nil {
return nil, 0, fwk.AsStatus(err)
}
}
// Sort victims after reprieving pods to keep the pods in the victims sorted in order of importance from high to low.
if len(violatingVictims) != 0 && len(nonViolatingVictims) != 0 {
sort.Slice(victims, func(i, j int) bool { return pl.MoreImportantPod(victims[i].GetPod(), victims[j].GetPod()) })
}
var victimPods []*v1.Pod
for _, pi := range victims {
victimPods = append(victimPods, pi.GetPod())
}
return victimPods, numViolatingVictim, fwk.NewStatus(fwk.Success)
}
// PodEligibleToPreemptOthers returns one bool and one string. The bool
// indicates whether this pod should be considered for preempting other pods or
// not. The string includes the reason if this pod isn't eligible.
// There are several reasons:
// 1. The pod has a preemptionPolicy of Never.
// 2. The pod has already preempted other pods and the victims are in their graceful termination period.
// Currently we check the node that is nominated for this pod, and as long as there are
// terminating pods on this node, we don't attempt to preempt more pods.
func (pl *DefaultPreemption) PodEligibleToPreemptOthers(_ context.Context, pod *v1.Pod, nominatedNodeStatus *fwk.Status) (bool, string) {
if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
return false, "not eligible due to preemptionPolicy=Never."
}
nodeInfos := pl.fh.SnapshotSharedLister().NodeInfos()
nomNodeName := pod.Status.NominatedNodeName
if len(nomNodeName) > 0 {
// If the pod's nominated node is considered as UnschedulableAndUnresolvable by the filters,
// then the pod should be considered for preempting again.
if nominatedNodeStatus.Code() == fwk.UnschedulableAndUnresolvable {
return true, ""
}
if nodeInfo, _ := nodeInfos.Get(nomNodeName); nodeInfo != nil {
for _, p := range nodeInfo.GetPods() {
if pl.isPreemptionAllowed(nodeInfo, p, pod) && podTerminatingByPreemption(p.GetPod()) {
// There is a terminating pod on the nominated node.
return false, "not eligible due to a terminating pod on the nominated node."
}
}
}
}
return true, ""
}
// OrderedScoreFuncs returns a list of ordered score functions to select preferable node where victims will be preempted.
func (pl *DefaultPreemption) OrderedScoreFuncs(ctx context.Context, nodesToVictims map[string]*extenderv1.Victims) []func(node string) int64 {
return nil
}
// isPreemptionAllowed returns whether the victim residing on nodeInfo can be preempted by the preemptor
func (pl *DefaultPreemption) isPreemptionAllowed(nodeInfo fwk.NodeInfo, victim fwk.PodInfo, preemptor *v1.Pod) bool {
// The victim must have lower priority than the preemptor, in addition to any filtering implemented by IsEligiblePod
return corev1helpers.PodPriority(victim.GetPod()) < corev1helpers.PodPriority(preemptor) && pl.IsEligiblePod(nodeInfo, victim, preemptor)
}
// podTerminatingByPreemption returns true if the pod is in the termination state caused by scheduler preemption.
func podTerminatingByPreemption(p *v1.Pod) bool {
if p.DeletionTimestamp == nil {
return false
}
for _, condition := range p.Status.Conditions {
if condition.Type == v1.DisruptionTarget {
return condition.Status == v1.ConditionTrue && condition.Reason == v1.PodReasonPreemptionByScheduler
}
}
return false
}
// filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods"
// and "nonViolatingPods" based on whether their PDBs will be violated if they are
// preempted.
// This function is stable and does not change the order of received pods. So, if it
// receives a sorted list, grouping will preserve the order of the input list.
func filterPodsWithPDBViolation(podInfos []fwk.PodInfo, pdbs []*policy.PodDisruptionBudget) (violatingPodInfos, nonViolatingPodInfos []fwk.PodInfo) {
pdbsAllowed := make([]int32, len(pdbs))
for i, pdb := range pdbs {
pdbsAllowed[i] = pdb.Status.DisruptionsAllowed
}
for _, podInfo := range podInfos {
pod := podInfo.GetPod()
pdbForPodIsViolated := false
// A pod with no labels will not match any PDB. So, no need to check.
if len(pod.Labels) != 0 {
for i, pdb := range pdbs {
if pdb.Namespace != pod.Namespace {
continue
}
selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if err != nil {
// This object has an invalid selector; it does not match the pod
continue
}
// A PDB with a nil or empty selector matches nothing.
if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
continue
}
// Existing in DisruptedPods means it has been processed in API server,
// we don't treat it as a violating case.
if _, exist := pdb.Status.DisruptedPods[pod.Name]; exist {
continue
}
// Only decrement the matched pdb when it's not in its <DisruptedPods>;
// otherwise we may over-decrement the budget number.
pdbsAllowed[i]--
// We have found a matching PDB.
if pdbsAllowed[i] < 0 {
pdbForPodIsViolated = true
}
}
}
if pdbForPodIsViolated {
violatingPodInfos = append(violatingPodInfos, podInfo)
} else {
nonViolatingPodInfos = append(nonViolatingPodInfos, podInfo)
}
}
return violatingPodInfos, nonViolatingPodInfos
}
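// A worked sketch of the budget bookkeeping above: suppose a single PDB with
// DisruptionsAllowed=1 matches three of the (sorted) victims. The first match
// decrements pdbsAllowed to 0 and stays non-violating; the second and third
// drive it negative and are grouped as violating, preserving their input order.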
func getPDBLister(informerFactory informers.SharedInformerFactory) policylisters.PodDisruptionBudgetLister {
return informerFactory.Policy().V1().PodDisruptionBudgets().Lister()
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dynamicresources
import (
"sync"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/utils/ptr"
)
// foreachAllocatedDevice invokes the provided callback for each
// device in the claim's allocation result which was allocated
// exclusively for the claim.
//
// Devices allocated with admin access can be shared with other
// claims and are skipped without invoking the callback.
//
// foreachAllocatedDevice does nothing if the claim is not allocated.
func foreachAllocatedDevice(claim *resourceapi.ResourceClaim,
dedicatedDeviceCallback func(deviceID structured.DeviceID),
enabledConsumableCapacity bool,
sharedDeviceCallback func(structured.SharedDeviceID),
consumedCapacityCallback func(structured.DeviceConsumedCapacity)) {
if claim.Status.Allocation == nil {
return
}
for _, result := range claim.Status.Allocation.Devices.Results {
// Kubernetes 1.31 did not set this, 1.32 always does.
// Supporting 1.31 is not worth the additional code that
// would have to be written (= looking up in request) because
// it is extremely unlikely that there really is a result
// that still exists in a cluster from 1.31 where this matters.
if ptr.Deref(result.AdminAccess, false) {
// Is not considered as allocated.
continue
}
deviceID := structured.MakeDeviceID(result.Driver, result.Pool, result.Device)
// None of the users of this helper need to abort iterating,
// therefore aborting is not supported, as it would only add overhead.
// Execute sharedDeviceCallback and consumedCapacityCallback accordingly
// if the DRAConsumableCapacity feature is enabled.
if enabledConsumableCapacity {
shared := result.ShareID != nil
if shared {
sharedDeviceID := structured.MakeSharedDeviceID(deviceID, result.ShareID)
sharedDeviceCallback(sharedDeviceID)
if result.ConsumedCapacity != nil {
deviceConsumedCapacity := structured.NewDeviceConsumedCapacity(deviceID, result.ConsumedCapacity)
consumedCapacityCallback(deviceConsumedCapacity)
}
continue
}
}
// Otherwise, execute dedicatedDeviceCallback
dedicatedDeviceCallback(deviceID)
}
}
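// A hedged usage sketch collecting dedicated device IDs from a claim, with
// the shared-device and consumed-capacity callbacks as no-ops (this mirrors
// how the claim tracker's ListAllAllocatedDevices consumes the helper):
//
//	var ids []structured.DeviceID
//	foreachAllocatedDevice(claim,
//		func(id structured.DeviceID) { ids = append(ids, id) },
//		false, // DRAConsumableCapacity disabled in this sketch
//		func(structured.SharedDeviceID) {},
//		func(structured.DeviceConsumedCapacity) {})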
// allocatedDevices reacts to events in a cache and maintains a set of all allocated devices.
// This is cheaper than repeatedly calling List, making strings unique, and building the set
// each time PreFilter is called.
//
// All methods are thread-safe. Get returns a cloned set.
type allocatedDevices struct {
logger klog.Logger
mutex sync.RWMutex
ids sets.Set[structured.DeviceID]
shareIDs sets.Set[structured.SharedDeviceID]
capacities structured.ConsumedCapacityCollection
enabledConsumableCapacity bool
}
func newAllocatedDevices(logger klog.Logger) *allocatedDevices {
return &allocatedDevices{
logger: logger,
ids: sets.New[structured.DeviceID](),
shareIDs: sets.New[structured.SharedDeviceID](),
capacities: structured.NewConsumedCapacityCollection(),
enabledConsumableCapacity: utilfeature.DefaultFeatureGate.Enabled(features.DRAConsumableCapacity),
}
}
func (a *allocatedDevices) Get() sets.Set[structured.DeviceID] {
a.mutex.RLock()
defer a.mutex.RUnlock()
return a.ids.Clone()
}
func (a *allocatedDevices) Capacities() structured.ConsumedCapacityCollection {
a.mutex.RLock()
defer a.mutex.RUnlock()
return a.capacities.Clone()
}
func (a *allocatedDevices) handlers() cache.ResourceEventHandler {
return cache.ResourceEventHandlerFuncs{
AddFunc: a.onAdd,
UpdateFunc: a.onUpdate,
DeleteFunc: a.onDelete,
}
}
func (a *allocatedDevices) onAdd(obj any) {
claim, _, err := schedutil.As[*resourceapi.ResourceClaim](obj, nil)
if err != nil {
// Shouldn't happen.
a.logger.Error(err, "unexpected object in allocatedDevices.onAdd")
return
}
if claim.Status.Allocation != nil {
a.addDevices(claim)
}
}
func (a *allocatedDevices) onUpdate(oldObj, newObj any) {
originalClaim, modifiedClaim, err := schedutil.As[*resourceapi.ResourceClaim](oldObj, newObj)
if err != nil {
// Shouldn't happen.
a.logger.Error(err, "unexpected object in allocatedDevices.onUpdate")
return
}
switch {
case originalClaim.Status.Allocation == nil && modifiedClaim.Status.Allocation != nil:
a.addDevices(modifiedClaim)
case originalClaim.Status.Allocation != nil && modifiedClaim.Status.Allocation == nil:
a.removeDevices(originalClaim)
default:
// Nothing to do. Either both nil or both non-nil, in which case the content
// also must be the same (immutable!).
}
}
func (a *allocatedDevices) onDelete(obj any) {
claim, _, err := schedutil.As[*resourceapi.ResourceClaim](obj, nil)
if err != nil {
// Shouldn't happen.
a.logger.Error(err, "unexpected object in allocatedDevices.onDelete")
return
}
a.removeDevices(claim)
}
func (a *allocatedDevices) addDevices(claim *resourceapi.ResourceClaim) {
if claim.Status.Allocation == nil {
return
}
// Locking of the mutex gets minimized by pre-computing what needs to be done
// without holding the lock.
deviceIDs := make([]structured.DeviceID, 0, 20)
var shareIDs []structured.SharedDeviceID
var deviceCapacities []structured.DeviceConsumedCapacity
if a.enabledConsumableCapacity {
shareIDs = make([]structured.SharedDeviceID, 0, 20)
deviceCapacities = make([]structured.DeviceConsumedCapacity, 0, 20)
}
foreachAllocatedDevice(claim,
func(deviceID structured.DeviceID) {
a.logger.V(6).Info("Observed device allocation", "device", deviceID, "claim", klog.KObj(claim))
deviceIDs = append(deviceIDs, deviceID)
},
a.enabledConsumableCapacity,
func(sharedDeviceID structured.SharedDeviceID) {
a.logger.V(6).Info("Observed shared device allocation", "shared device", sharedDeviceID, "claim", klog.KObj(claim))
shareIDs = append(shareIDs, sharedDeviceID)
},
func(capacity structured.DeviceConsumedCapacity) {
a.logger.V(6).Info("Observed consumed capacity", "device", capacity.DeviceID, "consumed capacity", capacity.ConsumedCapacity, "claim", klog.KObj(claim))
deviceCapacities = append(deviceCapacities, capacity)
},
)
a.mutex.Lock()
defer a.mutex.Unlock()
for _, deviceID := range deviceIDs {
a.ids.Insert(deviceID)
}
for _, shareID := range shareIDs {
a.shareIDs.Insert(shareID)
}
for _, capacity := range deviceCapacities {
a.capacities.Insert(capacity)
}
}
func (a *allocatedDevices) removeDevices(claim *resourceapi.ResourceClaim) {
if claim.Status.Allocation == nil {
return
}
// Locking of the mutex gets minimized by pre-computing what needs to be done
// without holding the lock.
deviceIDs := make([]structured.DeviceID, 0, 20)
var shareIDs []structured.SharedDeviceID
var deviceCapacities []structured.DeviceConsumedCapacity
if a.enabledConsumableCapacity {
shareIDs = make([]structured.SharedDeviceID, 0, 20)
deviceCapacities = make([]structured.DeviceConsumedCapacity, 0, 20)
}
foreachAllocatedDevice(claim,
func(deviceID structured.DeviceID) {
a.logger.V(6).Info("Observed device deallocation", "device", deviceID, "claim", klog.KObj(claim))
deviceIDs = append(deviceIDs, deviceID)
},
a.enabledConsumableCapacity,
func(sharedDeviceID structured.SharedDeviceID) {
a.logger.V(6).Info("Observed shared device deallocation", "shared device", sharedDeviceID, "claim", klog.KObj(claim))
shareIDs = append(shareIDs, sharedDeviceID)
},
func(capacity structured.DeviceConsumedCapacity) {
a.logger.V(6).Info("Observed consumed capacity release", "device id", capacity.DeviceID, "consumed capacity", capacity.ConsumedCapacity, "claim", klog.KObj(claim))
deviceCapacities = append(deviceCapacities, capacity)
})
a.mutex.Lock()
defer a.mutex.Unlock()
for _, deviceID := range deviceIDs {
a.ids.Delete(deviceID)
}
for _, shareID := range shareIDs {
a.shareIDs.Delete(shareID)
}
for _, capacity := range deviceCapacities {
a.capacities.Remove(capacity)
}
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dynamicresources
import (
"fmt"
"iter"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/types"
)
// claimStore manages all ResourceClaims for the pod, whether they were
// created by the user directly (ResourceClaim) or indirectly (ResourceClaimTemplate)
// as well as the special ResourceClaim for extended resources.
//
// The zero value is usable and empty.
type claimStore struct {
// claims contains all user-owned claims, optionally followed by the special ResourceClaim for extended resources.
claims []*resourceapi.ResourceClaim
// numUserOwned is the number of claims without the special ResourceClaim.
numUserOwned int
// initialExtendedResourceClaimUID is the UID of the extended resource claim from the PreFilter phase.
// It differs from the UID set by the API server when the in-memory, temporary claim is written to the API server in the PreBind phase.
}
// newClaimStore stores the list of user-owned claims and the optional claim owned by the scheduler.
func newClaimStore(claims []*resourceapi.ResourceClaim, extendedResourceClaim *resourceapi.ResourceClaim) claimStore {
cs := claimStore{
claims: claims,
numUserOwned: len(claims),
initialExtendedResourceClaimUID: "",
}
if extendedResourceClaim != nil {
cs.claims = append(cs.claims, extendedResourceClaim)
cs.initialExtendedResourceClaimUID = extendedResourceClaim.UID
}
return cs
}
// empty returns true when there are no ResourceClaims.
func (cs *claimStore) empty() bool {
return len(cs.claims) == 0
}
// len returns number of all claims, whether they are owned by the user or the scheduler.
func (cs *claimStore) len() int {
return len(cs.claims)
}
// all returns an iterator for all claims, whether they are owned by the user
// or the scheduler. If there is a special ResourceClaim for extended resources,
// then it comes last.
func (cs *claimStore) all() iter.Seq2[int, *resourceapi.ResourceClaim] {
return func(yield func(int, *resourceapi.ResourceClaim) bool) {
for i, claim := range cs.claims {
if !yield(i, claim) {
return
}
}
}
}
// allUserClaims returns an iterator which excludes the special ResourceClaim for extended resources.
func (cs *claimStore) allUserClaims() iter.Seq2[int, *resourceapi.ResourceClaim] {
return func(yield func(int, *resourceapi.ResourceClaim) bool) {
for i, claim := range cs.claims[0:cs.numUserOwned] {
if !yield(i, claim) {
return
}
}
}
}
// toAllocate returns an iterator for all claims which have no allocation result.
// The index returned is the claim's original index in the underlying claim store; it
// may not be sequentially numbered (e.g. 0, 1, 2, ...).
func (cs *claimStore) toAllocate() iter.Seq2[int, *resourceapi.ResourceClaim] {
return func(yield func(int, *resourceapi.ResourceClaim) bool) {
for i, claim := range cs.claims {
if claim.Status.Allocation != nil {
continue
}
if !yield(i, claim) {
return
}
}
}
}
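// A hedged sketch: the iterators are range-over-func sequences (Go 1.23+),
// so callers can consume them directly; i is the claim's position in the
// underlying slice and may skip values for toAllocate:
//
//	for i, claim := range cs.toAllocate() {
//		// claim.Status.Allocation == nil here; i indexes cs.claims.
//		allocate(i, claim) // allocate is hypothetical
//	}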
// extendedResourceClaim returns the special ResourceClaim if there is one, otherwise nil.
// The ResourceClaim is read-only and must be cloned before modifying it.
func (cs *claimStore) extendedResourceClaim() *resourceapi.ResourceClaim {
if cs.numUserOwned == len(cs.claims) {
return nil
}
return cs.claims[cs.numUserOwned]
}
// noUserClaim returns true when there is no user claim.
func (cs *claimStore) noUserClaim() bool {
return cs.numUserOwned == 0
}
// get returns the claim at the input index
func (cs claimStore) get(i int) *resourceapi.ResourceClaim {
return cs.claims[i]
}
// set sets the input claim at the input index for the internal claims slice.
func (cs claimStore) set(i int, c *resourceapi.ResourceClaim) {
cs.claims[i] = c
}
// updateExtendedResourceClaim updates the input claim as extended resource
// claim in the internal claims slice.
// It returns error when there is no extended resource claim in the internal
// claims slice.
func (cs *claimStore) updateExtendedResourceClaim(c *resourceapi.ResourceClaim) error {
if cs.numUserOwned == len(cs.claims) {
return fmt.Errorf("no extended resource claim")
}
cs.claims[cs.numUserOwned] = c
return nil
}
// getInitialExtendedResourceClaimUID returns the UID of the claim in use until
// PreBind creates the ResourceClaim in the API server.
// It can only be called when the extended resource claim exists.
func (cs claimStore) getInitialExtendedResourceClaimUID() types.UID {
return cs.initialExtendedResourceClaimUID
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dynamicresources
import (
"context"
"fmt"
"sync"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
resourcelisters "k8s.io/client-go/listers/resource/v1"
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)
var _ fwk.SharedDRAManager = &DefaultDRAManager{}
// DefaultDRAManager is the default implementation of SharedDRAManager. It obtains the DRA objects
// from API informers, and uses an AssumeCache and a map of in-flight allocations in order
// to avoid race conditions when modifying ResourceClaims.
type DefaultDRAManager struct {
resourceClaimTracker *claimTracker
resourceSliceLister *resourceSliceLister
deviceClassLister *deviceClassLister
}
func NewDRAManager(ctx context.Context, claimsCache *assumecache.AssumeCache, resourceSliceTracker *resourceslicetracker.Tracker, informerFactory informers.SharedInformerFactory) *DefaultDRAManager {
logger := klog.FromContext(ctx)
manager := &DefaultDRAManager{
resourceClaimTracker: &claimTracker{
cache: claimsCache,
inFlightAllocations: &sync.Map{},
allocatedDevices: newAllocatedDevices(logger),
logger: logger,
},
resourceSliceLister: &resourceSliceLister{tracker: resourceSliceTracker},
deviceClassLister: &deviceClassLister{classLister: informerFactory.Resource().V1().DeviceClasses().Lister()},
}
// Reacting to events is more efficient than iterating over the list
// repeatedly in PreFilter.
manager.resourceClaimTracker.cache.AddEventHandler(manager.resourceClaimTracker.allocatedDevices.handlers())
return manager
}
func (s *DefaultDRAManager) ResourceClaims() fwk.ResourceClaimTracker {
return s.resourceClaimTracker
}
func (s *DefaultDRAManager) ResourceSlices() fwk.ResourceSliceLister {
return s.resourceSliceLister
}
func (s *DefaultDRAManager) DeviceClasses() fwk.DeviceClassLister {
return s.deviceClassLister
}
var _ fwk.ResourceSliceLister = &resourceSliceLister{}
type resourceSliceLister struct {
tracker *resourceslicetracker.Tracker
}
func (l *resourceSliceLister) ListWithDeviceTaintRules() ([]*resourceapi.ResourceSlice, error) {
return l.tracker.ListPatchedResourceSlices()
}
var _ fwk.DeviceClassLister = &deviceClassLister{}
type deviceClassLister struct {
classLister resourcelisters.DeviceClassLister
}
func (l *deviceClassLister) Get(className string) (*resourceapi.DeviceClass, error) {
return l.classLister.Get(className)
}
func (l *deviceClassLister) List() ([]*resourceapi.DeviceClass, error) {
return l.classLister.List(labels.Everything())
}
var _ fwk.ResourceClaimTracker = &claimTracker{}
type claimTracker struct {
// cache enables temporarily storing a newer claim object
// while the scheduler has allocated it and the corresponding object
// update from the apiserver has not been processed by the claim
// informer callbacks. Claims get added here in PreBind and removed by
// the informer callback (based on the "newer than" comparison in the
// assume cache).
//
// It uses cache.MetaNamespaceKeyFunc to generate object names, which
// therefore are "<namespace>/<name>".
//
// This is necessary to ensure that reconstructing the resource usage
// at the start of a pod scheduling cycle doesn't reuse the resources
// assigned to such a claim. Alternatively, claim allocation state
// could also get tracked across pod scheduling cycles, but that
// - adds complexity (need to carefully sync state with informer events
// for claims and ResourceSlices)
// - would make integration with cluster autoscaler harder because it would need
// to trigger informer callbacks.
cache *assumecache.AssumeCache
// inFlightAllocations is a map from claim UUIDs to claim objects for those claims
// for which allocation was triggered during a scheduling cycle and the
// corresponding claim status update call in PreBind has not been done
// yet. If another pod needs the claim, the pod is treated as "not
// schedulable yet". The cluster event for the claim status update will
// make it schedulable.
//
// This mechanism avoids the following problem:
// - Pod A triggers allocation for claim X.
// - Pod B shares access to that claim and gets scheduled because
// the claim is assumed to be allocated.
// - PreBind for pod B is called first, tries to update reservedFor and
// fails because the claim is not really allocated yet.
//
// We could avoid the ordering problem by allowing either pod A or pod B
// to set the allocation. But that is more complicated and leads to another
// problem:
// - Pod A and B get scheduled as above.
// - PreBind for pod A gets called first, then fails with a temporary API error.
// It removes the updated claim from the assume cache because of that.
// - PreBind for pod B gets called next and succeeds with adding the
// allocation and its own reservedFor entry.
// - The assume cache is now not reflecting that the claim is allocated,
// which could lead to reusing the same resource for some other claim.
//
// A sync.Map is used because in practice sharing of a claim between
// pods is expected to be rare compared to per-pod claim, so we end up
// hitting the "multiple goroutines read, write, and overwrite entries
// for disjoint sets of keys" case that sync.Map is optimized for.
inFlightAllocations *sync.Map
allocatedDevices *allocatedDevices
logger klog.Logger
}
func (c *claimTracker) ClaimHasPendingAllocation(claimUID types.UID) bool {
_, found := c.inFlightAllocations.Load(claimUID)
return found
}
func (c *claimTracker) SignalClaimPendingAllocation(claimUID types.UID, allocatedClaim *resourceapi.ResourceClaim) error {
c.inFlightAllocations.Store(claimUID, allocatedClaim)
// There's no reason to return an error in this implementation, but the error is helpful for other implementations.
// For example, implementations that have to deal with fake claims might want to return an error if the allocation
// is for an invalid claim.
return nil
}
func (c *claimTracker) RemoveClaimPendingAllocation(claimUID types.UID) (deleted bool) {
_, found := c.inFlightAllocations.LoadAndDelete(claimUID)
return found
}
func (c *claimTracker) Get(namespace, claimName string) (*resourceapi.ResourceClaim, error) {
obj, err := c.cache.Get(namespace + "/" + claimName)
if err != nil {
return nil, err
}
claim, ok := obj.(*resourceapi.ResourceClaim)
if !ok {
return nil, fmt.Errorf("unexpected object type %T for assumed object %s/%s", obj, namespace, claimName)
}
return claim, nil
}
func (c *claimTracker) List() ([]*resourceapi.ResourceClaim, error) {
var result []*resourceapi.ResourceClaim
// Probably not worth adding an index for?
objs := c.cache.List(nil)
for _, obj := range objs {
claim, ok := obj.(*resourceapi.ResourceClaim)
if ok {
result = append(result, claim)
}
}
return result, nil
}
func (c *claimTracker) ListAllAllocatedDevices() (sets.Set[structured.DeviceID], error) {
// Start with a fresh set that matches the current known state of the
// world according to the informers.
allocated := c.allocatedDevices.Get()
// Whatever is in flight also has to be checked.
c.inFlightAllocations.Range(func(key, value any) bool {
claim := value.(*resourceapi.ResourceClaim)
foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
c.logger.V(6).Info("Device is in flight for allocation", "device", deviceID, "claim", klog.KObj(claim))
allocated.Insert(deviceID)
}, false, func(structured.SharedDeviceID) {}, func(structured.DeviceConsumedCapacity) {})
return true
})
// There's no reason to return an error in this implementation, but the error might be helpful for other implementations.
return allocated, nil
}
func (c *claimTracker) GatherAllocatedState() (*structured.AllocatedState, error) {
// Start with a fresh set that matches the current known state of the
// world according to the informers.
allocated := c.allocatedDevices.Get()
allocatedSharedDeviceIDs := sets.New[structured.SharedDeviceID]()
aggregatedCapacity := c.allocatedDevices.Capacities()
enabledConsumableCapacity := utilfeature.DefaultFeatureGate.Enabled(features.DRAConsumableCapacity)
// Whatever is in flight also has to be checked.
c.inFlightAllocations.Range(func(key, value any) bool {
claim := value.(*resourceapi.ResourceClaim)
foreachAllocatedDevice(claim, func(deviceID structured.DeviceID) {
c.logger.V(6).Info("Device is in flight for allocation", "device", deviceID, "claim", klog.KObj(claim))
allocated.Insert(deviceID)
},
enabledConsumableCapacity,
func(sharedDeviceID structured.SharedDeviceID) {
c.logger.V(6).Info("Device is in flight for allocation", "shared device", sharedDeviceID, "claim", klog.KObj(claim))
allocatedSharedDeviceIDs.Insert(sharedDeviceID)
}, func(capacity structured.DeviceConsumedCapacity) {
c.logger.V(6).Info("Device is in flight for allocation", "consumed capacity", capacity, "claim", klog.KObj(claim))
aggregatedCapacity.Insert(capacity)
})
return true
})
// There's no reason to return an error in this implementation, but the error might be helpful for other implementations.
return &structured.AllocatedState{
AllocatedDevices: allocated,
AllocatedSharedDeviceIDs: allocatedSharedDeviceIDs,
AggregatedCapacity: aggregatedCapacity,
}, nil
}
func (c *claimTracker) AssumeClaimAfterAPICall(claim *resourceapi.ResourceClaim) error {
return c.cache.Assume(claim)
}
func (c *claimTracker) AssumedClaimRestore(namespace, claimName string) {
c.cache.Restore(namespace + "/" + claimName)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dynamicresources
import (
"context"
"errors"
"fmt"
"slices"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/dynamic-resource-allocation/cel"
"k8s.io/dynamic-resource-allocation/resourceclaim"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/extended"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/utils/ptr"
)
const (
// Name is the name of the plugin used in Registry and configurations.
Name = names.DynamicResources
stateKey fwk.StateKey = Name
// specialClaimInMemName is the name of the special resource claim that
// exists only in memory. The claim will get a generated name when it is
// written to API server.
//
// It's intentionally not a valid ResourceClaim name to avoid conflicts with
// some actual ResourceClaim in the apiserver.
specialClaimInMemName = "<extended-resources>"
// BindingTimeoutDefaultSeconds is the default timeout for waiting for
// BindingConditions to be ready.
BindingTimeoutDefaultSeconds = 600
// AssumeExtendedResourceTimeoutDefaultSeconds is the default timeout for waiting
// for the extended resource claim to be updated in assumed cache.
AssumeExtendedResourceTimeoutDefaultSeconds = 120
)
// The state is initialized in the PreFilter phase. Because we save the pointer in
// fwk.CycleState, in the later phases we don't need to call the Write method
// to update the value.
type stateData struct {
// A copy of all claims for the Pod (i.e. 1:1 match with
// pod.Spec.ResourceClaims), initially with the status from the start
// of the scheduling cycle. Each claim instance is read-only because it
// might come from the informer cache. The instances get replaced when
// the plugin itself successfully does an Update.
//
// When the DRAExtendedResource feature is enabled, the special ResourceClaim
// which represents extended resource requests is also stored here.
// The plugin code should treat this field as a black box and only
// access it via its methods, in particular:
// - The all method can be used to iterate over all claims, including
// the special one.
// - The allUserClaims method excludes the special one.
//
// Empty if the Pod has no claims and no special claim for extended
// resources backed by DRA, in which case the plugin has no work to do for
// the Pod.
claims claimStore
// draExtendedResource stores data for extended resources backed by DRA.
// It will remain empty when the DRAExtendedResource feature is disabled.
draExtendedResource draExtendedResource
// Allocator handles claims with structured parameters, which is all of them nowadays.
allocator structured.Allocator
// mutex must be locked while accessing any of the fields below.
mutex sync.Mutex
// The indices of all claims that:
// - are allocated
// - were not available on at least one node
//
// Set in parallel during Filter, so write access there must be
// protected by the mutex. Used by PostFilter.
unavailableClaims sets.Set[int]
// informationsForClaim has one entry for each claim in claims.
informationsForClaim []informationForClaim
// nodeAllocations caches the result of Filter for the nodes; its key is the node name.
nodeAllocations map[string]nodeAllocation
}
func (d *stateData) Clone() fwk.StateData {
return d
}
// draExtendedResource stores data for extended resources backed by DRA.
// It will remain empty when the DRAExtendedResource feature is disabled.
type draExtendedResource struct {
// The pod's scalar resource requests; may include extended resources backed by DRA.
podScalarResources map[v1.ResourceName]int64
// The mapping of extended resource to device class name
resourceToDeviceClass map[v1.ResourceName]string
}
type informationForClaim struct {
// Node selector based on the claim status if allocated.
availableOnNodes *nodeaffinity.NodeSelector
// Set by Reserve, published by PreBind, empty if nothing had to be allocated.
allocation *resourceapi.AllocationResult
}
// nodeAllocation holds the allocation results and the extended resource claim per node.
type nodeAllocation struct {
// allocationResults has the allocation results, matching the order of
// claims which had to be allocated.
allocationResults []resourceapi.AllocationResult
// extendedResourceClaim has the special claim for extended resource backed by DRA
// created during Filter for the nodes.
extendedResourceClaim *resourceapi.ResourceClaim
}
// DynamicResources is a plugin that ensures that ResourceClaims are allocated.
type DynamicResources struct {
enabled bool
enableAdminAccess bool
enablePrioritizedList bool
enableSchedulingQueueHint bool
enablePartitionableDevices bool
enableDeviceTaints bool
enableDeviceBindingConditions bool
enableDeviceStatus bool
enableExtendedResource bool
enableFilterTimeout bool
filterTimeout time.Duration
enableConsumableCapacity bool
fh fwk.Handle
clientset kubernetes.Interface
celCache *cel.Cache
draManager fwk.SharedDRAManager
}
// New initializes a new plugin and returns it.
func New(ctx context.Context, plArgs runtime.Object, fh fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if !fts.EnableDynamicResourceAllocation {
// Disabled, won't do anything.
return &DynamicResources{}, nil
}
args, ok := plArgs.(*config.DynamicResourcesArgs)
if !ok {
return nil, fmt.Errorf("got args of type %T, want *DynamicResourcesArgs", plArgs)
}
if err := validation.ValidateDynamicResourcesArgs(nil, args, fts); err != nil {
return nil, err
}
pl := &DynamicResources{
enabled: true,
enableAdminAccess: fts.EnableDRAAdminAccess,
enableDeviceTaints: fts.EnableDRADeviceTaints,
enablePrioritizedList: fts.EnableDRAPrioritizedList,
enableFilterTimeout: fts.EnableDRASchedulerFilterTimeout,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
enablePartitionableDevices: fts.EnablePartitionableDevices,
enableExtendedResource: fts.EnableDRAExtendedResource,
enableConsumableCapacity: fts.EnableConsumableCapacity,
filterTimeout: ptr.Deref(args.FilterTimeout, metav1.Duration{}).Duration,
enableDeviceBindingConditions: fts.EnableDRADeviceBindingConditions,
enableDeviceStatus: fts.EnableDRAResourceClaimDeviceStatus,
fh: fh,
clientset: fh.ClientSet(),
// This is a LRU cache for compiled CEL expressions. The most
// recent 10 of them get reused across different scheduling
// cycles.
celCache: cel.NewCache(10, cel.Features{EnableConsumableCapacity: fts.EnableConsumableCapacity}),
draManager: fh.SharedDRAManager(),
}
return pl, nil
}
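// A minimal sketch of constructing the plugin directly, e.g. in a test,
// assuming a framework handle fh and a context ctx are available
// (illustrative only; the scheduler wires this constructor up through its
// plugin registry):
//
//	fts := feature.Features{EnableDynamicResourceAllocation: true}
//	plugin, err := New(ctx, &config.DynamicResourcesArgs{}, fh, fts)
//	if err != nil {
//		// handle construction/validation error
//	}
//	pl := plugin.(*DynamicResources)
//	_ = pl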
var _ fwk.PreEnqueuePlugin = &DynamicResources{}
var _ fwk.PreFilterPlugin = &DynamicResources{}
var _ fwk.FilterPlugin = &DynamicResources{}
var _ fwk.PostFilterPlugin = &DynamicResources{}
var _ fwk.ReservePlugin = &DynamicResources{}
var _ fwk.EnqueueExtensions = &DynamicResources{}
var _ fwk.PreBindPlugin = &DynamicResources{}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *DynamicResources) Name() string {
return Name
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *DynamicResources) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
if !pl.enabled {
return nil, nil
}
// A resource might depend on node labels for topology filtering.
// A new or updated node may make pods schedulable.
//
// A note about UpdateNodeTaint event:
// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint | fwk.UpdateNodeAllocatable
if pl.enableSchedulingQueueHint {
// When QHint is enabled, the problematic preCheck is already removed, and we can remove UpdateNodeTaint.
nodeActionType = fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeAllocatable
}
events := []fwk.ClusterEventWithHint{
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
// Allocation is tracked in ResourceClaims, so any changes may make the pods schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.ResourceClaim, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterClaimChange},
// Adding the ResourceClaim name to the pod status makes pods waiting for their ResourceClaim schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.UpdatePodGeneratedResourceClaim}, QueueingHintFn: pl.isSchedulableAfterPodChange},
// A pod might be waiting for a class to get created or modified.
{Event: fwk.ClusterEvent{Resource: fwk.DeviceClass, ActionType: fwk.Add | fwk.Update}},
// Adding or updating a ResourceSlice might make a pod schedulable because new resources became available.
{Event: fwk.ClusterEvent{Resource: fwk.ResourceSlice, ActionType: fwk.Add | fwk.Update}},
}
return events, nil
}
// PreEnqueue checks if there are known reasons why a pod currently cannot be
// scheduled. When this fails, one of the registered events can trigger another
// attempt.
func (pl *DynamicResources) PreEnqueue(ctx context.Context, pod *v1.Pod) (status *fwk.Status) {
if !pl.enabled {
return nil
}
if err := pl.foreachPodResourceClaim(pod, nil); err != nil {
return statusUnschedulable(klog.FromContext(ctx), err.Error())
}
return nil
}
// isSchedulableAfterClaimChange is invoked for add and update claim events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable. It errs on the side of letting a pod scheduling attempt
// happen. The delete claim event will not invoke it, so newObj will never be nil.
func (pl *DynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalClaim, modifiedClaim, err := schedutil.As[*resourceapi.ResourceClaim](oldObj, newObj)
if err != nil {
// Shouldn't happen.
return fwk.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimChange: %w", err)
}
usesClaim := false
if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourceapi.ResourceClaim) {
if claim.UID == modifiedClaim.UID {
usesClaim = true
}
}); err != nil {
// This is not an unexpected error: we know that
// foreachPodResourceClaim only returns errors for "not
// schedulable".
if loggerV := logger.V(6); loggerV.Enabled() {
owner := metav1.GetControllerOf(modifiedClaim)
loggerV.Info("pod is not schedulable after resource claim change", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim), "claimOwner", owner, "reason", err.Error())
}
return fwk.QueueSkip, nil
}
if originalClaim != nil &&
originalClaim.Status.Allocation != nil &&
modifiedClaim.Status.Allocation == nil {
// A claim with structured parameters was deallocated. This might have made
// resources available for other pods.
logger.V(6).Info("claim with structured parameters got deallocated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
return fwk.Queue, nil
}
if !usesClaim {
// This was not the claim the pod was waiting for.
logger.V(6).Info("unrelated claim got modified", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
return fwk.QueueSkip, nil
}
if originalClaim == nil {
logger.V(5).Info("claim for pod got created", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
return fwk.Queue, nil
}
// Modifications may or may not be relevant. If the entire
// status is as before, then something else must have changed
// and we don't care. What happens in practice is that the
// resource driver adds the finalizer.
if apiequality.Semantic.DeepEqual(&originalClaim.Status, &modifiedClaim.Status) {
if loggerV := logger.V(7); loggerV.Enabled() {
// Log more information.
loggerV.Info("claim for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim), "diff", diff.Diff(originalClaim, modifiedClaim))
} else {
logger.V(6).Info("claim for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
}
return fwk.QueueSkip, nil
}
logger.V(5).Info("status of claim for pod got updated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
return fwk.Queue, nil
}
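// Summary of the queueing-hint decisions above, in evaluation order:
//
//	pod not schedulable per foreachPodResourceClaim -> QueueSkip
//	claim got deallocated                           -> Queue (freed devices may help)
//	pod does not use the modified claim             -> QueueSkip
//	claim for the pod got created                   -> Queue
//	claim status unchanged                          -> QueueSkip
//	claim status changed                            -> Queue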
// isSchedulableAfterPodChange is invoked for update pod events reported by
// an informer. It checks whether that change adds the ResourceClaim(s) that the
// pod has been waiting for.
func (pl *DynamicResources) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, modifiedPod, err := schedutil.As[*v1.Pod](nil, newObj)
if err != nil {
// Shouldn't happen.
return fwk.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimChange: %w", err)
}
if pod.UID != modifiedPod.UID {
logger.V(7).Info("pod is not schedulable after change in other pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
if err := pl.foreachPodResourceClaim(modifiedPod, nil); err != nil {
// This is not an unexpected error: we know that
// foreachPodResourceClaim only returns errors for "not
// schedulable".
logger.V(6).Info("pod is not schedulable after being updated", "pod", klog.KObj(pod))
return fwk.QueueSkip, nil
}
logger.V(5).Info("pod got updated and is schedulable", "pod", klog.KObj(pod))
return fwk.Queue, nil
}
// podResourceClaims returns the ResourceClaims for all pod.Spec.ResourceClaims.
func (pl *DynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourceapi.ResourceClaim, error) {
claims := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourceapi.ResourceClaim) {
// We store the pointer as returned by the lister. The
// assumption is that if a claim gets modified while our code
// runs, the cache will store a new pointer, not mutate the
// existing object that we point to here.
claims = append(claims, claim)
}); err != nil {
return nil, err
}
return claims, nil
}
// foreachPodResourceClaim checks that each ResourceClaim for the pod exists.
// It calls an optional handler for those claims that it finds.
func (pl *DynamicResources) foreachPodResourceClaim(pod *v1.Pod, cb func(podResourceName string, claim *resourceapi.ResourceClaim)) error {
for _, resource := range pod.Spec.ResourceClaims {
claimName, mustCheckOwner, err := resourceclaim.Name(pod, &resource)
if err != nil {
return err
}
// The claim name might be nil if no underlying resource claim
// was generated for the referenced claim. There are valid use
// cases when this might happen, so we simply skip it.
if claimName == nil {
continue
}
claim, err := pl.draManager.ResourceClaims().Get(pod.Namespace, *claimName)
if err != nil {
return err
}
if claim.DeletionTimestamp != nil {
return fmt.Errorf("resourceclaim %q is being deleted", claim.Name)
}
if mustCheckOwner {
if err := resourceclaim.IsForPod(pod, claim); err != nil {
return err
}
}
if cb != nil {
cb(resource.Name, claim)
}
}
return nil
}
// hasDeviceClassMappedExtendedResource returns true when the given resource list has an extended resource that has
// a mapping to a device class.
func hasDeviceClassMappedExtendedResource(reqs v1.ResourceList, deviceClassMapping map[v1.ResourceName]string) bool {
for rName, rValue := range reqs {
if rValue.IsZero() {
// We only care about the resources requested by the pod we are trying to schedule.
continue
}
if v1helper.IsExtendedResourceName(rName) {
_, ok := deviceClassMapping[rName]
if ok {
return true
}
}
}
return false
}
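// A hedged example of the check above (names and quantities are illustrative):
//
//	reqs := v1.ResourceList{
//		"example.com/gpu": resource.MustParse("2"),
//		v1.ResourceCPU:    resource.MustParse("500m"),
//	}
//	mapping := map[v1.ResourceName]string{"example.com/gpu": "gpu.example.com"}
//	hasDeviceClassMappedExtendedResource(reqs, mapping) // true: example.com/gpu maps to a device class
//	hasDeviceClassMappedExtendedResource(reqs, nil)     // false: no mapping exists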
// findExtendedResourceClaim looks for the extended resource claim, i.e., the claim with the special annotation
// set to "true" and with the pod as owner. It must be called with all ResourceClaims in the cluster.
// The returned ResourceClaim is read-only.
func findExtendedResourceClaim(pod *v1.Pod, resourceClaims []*resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
for _, c := range resourceClaims {
if c.Annotations[resourceapi.ExtendedResourceClaimAnnotation] == "true" {
for _, or := range c.OwnerReferences {
if or.Name == pod.Name && *or.Controller && or.UID == pod.UID {
return c
}
}
}
}
return nil
}
// preFilterExtendedResources checks if there is any extended resource in the
// pod requests that has a device class mapping, i.e., there is a device class
// that has spec.ExtendedResourceName or its implicit extended resource name
// matching the given extended resource in that pod requests.
//
// It looks for the special resource claim for the pod created in a prior scheduling
// cycle. If not found, it creates the special claim with no Requests in the Spec,
// with a temporary UID, and the specialClaimInMemName name.
// Either way, the special claim is stored in state.claims.
//
// In addition, draExtendedResource is also stored in the cycle state.
//
// It returns the special ResourceClaim and an error status. It returns nil for both
// if the feature is disabled or not required for the Pod.
func (pl *DynamicResources) preFilterExtendedResources(pod *v1.Pod, logger klog.Logger, s *stateData) (*resourceapi.ResourceClaim, *fwk.Status) {
if !pl.enableExtendedResource {
return nil, nil
}
deviceClassMapping, err := extended.DeviceClassMapping(pl.draManager)
if err != nil {
return nil, statusError(logger, err, "retrieving extended resource to DeviceClass mapping")
}
reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
hasExtendedResource := hasDeviceClassMappedExtendedResource(reqs, deviceClassMapping)
if !hasExtendedResource {
return nil, nil
}
s.draExtendedResource.resourceToDeviceClass = deviceClassMapping
r := framework.NewResource(reqs)
s.draExtendedResource.podScalarResources = r.ScalarResources
resourceClaims, err := pl.draManager.ResourceClaims().List()
if err != nil {
return nil, statusError(logger, err, "listing ResourceClaims")
}
// Check if the special resource claim has been created in a prior scheduling cycle.
//
// If it was already allocated earlier, that allocation might not be valid anymore.
// We could try to check that, but it depends on various factors that are difficult to
// cover (basically needs to replicate allocator logic) and if it turns out that the
// allocation is stale, we would have to schedule with those allocated devices not
// available for a new allocation. This situation should be rare (= binding failure),
// so we solve it via brute force:
// - Kick off deallocation in the background.
// - Mark the pod as unschedulable. Successful deallocation will make it schedulable again.
extendedResourceClaim := findExtendedResourceClaim(pod, resourceClaims)
if extendedResourceClaim == nil {
// Create one special claim for all extended resources backed by DRA in the Pod.
// Create the ResourceClaim with pod as owner, with a generated name that uses
// <pod name>-extended-resources- as base. The final name will get truncated if it
// would be too long.
extendedResourceClaim = &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: pod.Namespace,
Name: specialClaimInMemName,
// fake temporary UID for use in SignalClaimPendingAllocation
UID: types.UID(uuid.NewUUID()),
GenerateName: pod.Name + "-extended-resources-",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "Pod",
Name: pod.Name,
UID: pod.UID,
Controller: ptr.To(true),
BlockOwnerDeletion: ptr.To(true),
},
},
Annotations: map[string]string{
resourceapi.ExtendedResourceClaimAnnotation: "true",
},
},
Spec: resourceapi.ResourceClaimSpec{},
}
}
return extendedResourceClaim, nil
}
// PreFilter invoked at the prefilter extension point to check if pod has all
// immediate claims bound. UnschedulableAndUnresolvable is returned if
// the pod cannot be scheduled at the moment on any node.
func (pl *DynamicResources) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !pl.enabled {
return nil, fwk.NewStatus(fwk.Skip)
}
logger := klog.FromContext(ctx)
// If the pod does not reference any claim, we don't need to do
// anything for it. We just initialize an empty state to record that
// observation for the other functions. This gets updated below
// if we get that far.
s := &stateData{}
state.Write(stateKey, s)
userClaims, err := pl.podResourceClaims(pod)
if err != nil {
return nil, statusUnschedulable(logger, err.Error())
}
logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(userClaims))
extendedResourceClaim, status := pl.preFilterExtendedResources(pod, logger, s)
if status != nil {
return nil, status
}
claims := newClaimStore(userClaims, extendedResourceClaim)
// This check covers user and extended ResourceClaim.
if claims.empty() {
return nil, fwk.NewStatus(fwk.Skip)
}
// Counts all claims which the scheduler needs to allocate itself.
numClaimsToAllocate := 0
s.informationsForClaim = make([]informationForClaim, claims.len())
for index, claim := range claims.all() {
if claim.Status.Allocation != nil &&
!resourceclaim.CanBeReserved(claim) &&
!resourceclaim.IsReservedForPod(pod, claim) {
// Resource is in use. The pod has to wait.
return nil, statusUnschedulable(logger, "resourceclaim in use", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
}
if claim.Status.Allocation != nil {
if claim.Status.Allocation.NodeSelector != nil {
nodeSelector, err := nodeaffinity.NewNodeSelector(claim.Status.Allocation.NodeSelector)
if err != nil {
return nil, statusError(logger, err)
}
s.informationsForClaim[index].availableOnNodes = nodeSelector
}
} else {
numClaimsToAllocate++
// Allocation in flight? Better wait for that
// to finish; see the inFlightAllocations
// documentation for details.
if pl.draManager.ResourceClaims().ClaimHasPendingAllocation(claim.UID) {
return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s is in the process of being allocated", klog.KObj(claim)))
}
// Continue without validating the special claim for extended resources backed by DRA.
// For the claim template, it is not allocated yet at this point, and it does not have a spec.
// For the claim from a prior scheduling cycle, leave it to the Filter phase to validate.
if claim == extendedResourceClaim {
continue
}
// Check all requests and device classes. If a class
// does not exist, scheduling cannot proceed, no matter
// how the claim is being allocated.
//
// When using a control plane controller, a class might
// have a node filter. This is useful for trimming the
// initial set of potential nodes before we ask the
// driver(s) for information about the specific pod.
for _, request := range claim.Spec.Devices.Requests {
// The requirements differ depending on whether the request has a list of
// alternative subrequests defined in the firstAvailable field.
switch {
case request.Exactly != nil:
if status := pl.validateDeviceClass(logger, request.Exactly.DeviceClassName, request.Name); status != nil {
return nil, status
}
case len(request.FirstAvailable) > 0:
if !pl.enablePrioritizedList {
return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s, request %s: has subrequests, but the DRAPrioritizedList feature is disabled", klog.KObj(claim), request.Name))
}
for _, subRequest := range request.FirstAvailable {
qualRequestName := strings.Join([]string{request.Name, subRequest.Name}, "/")
if status := pl.validateDeviceClass(logger, subRequest.DeviceClassName, qualRequestName); status != nil {
return nil, status
}
}
default:
return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s, request %s: unknown request type", klog.KObj(claim), request.Name))
}
}
}
}
if numClaimsToAllocate > 0 {
if loggerV := logger.V(5); loggerV.Enabled() {
claimsToAllocate := make([]*resourceapi.ResourceClaim, 0, claims.len())
for _, claim := range claims.toAllocate() {
claimsToAllocate = append(claimsToAllocate, claim)
}
loggerV.Info("Preparing allocation with structured parameters", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claimsToAllocate))
}
// Doing this over and over again for each pod could be avoided
// by setting the allocator up once and then keeping it up-to-date
// as changes are observed.
//
// But that would cause problems for using the plugin in the
// Cluster Autoscaler. If this step here turns out to be
// expensive, we may have to maintain and update state more
// persistently.
//
// Claims (and thus their devices) are treated as "allocated" if they are in the assume cache
// or currently their allocation is in-flight. This does not change
// during filtering, so we can determine that once.
var allocatedState *structured.AllocatedState
if pl.enableConsumableCapacity {
allocatedState, err = pl.draManager.ResourceClaims().GatherAllocatedState()
if err != nil {
return nil, statusError(logger, err)
}
if allocatedState == nil {
return nil, statusError(logger, errors.New("nil allocated state"))
}
} else {
allocatedDevices, err := pl.draManager.ResourceClaims().ListAllAllocatedDevices()
if err != nil {
return nil, statusError(logger, err)
}
allocatedState = &structured.AllocatedState{
AllocatedDevices: allocatedDevices,
AllocatedSharedDeviceIDs: sets.New[structured.SharedDeviceID](),
AggregatedCapacity: structured.NewConsumedCapacityCollection(),
}
}
slices, err := pl.draManager.ResourceSlices().ListWithDeviceTaintRules()
if err != nil {
return nil, statusError(logger, err)
}
features := structured.Features{
AdminAccess: pl.enableAdminAccess,
PrioritizedList: pl.enablePrioritizedList,
PartitionableDevices: pl.enablePartitionableDevices,
DeviceTaints: pl.enableDeviceTaints,
DeviceBinding: pl.enableDeviceBindingConditions,
DeviceStatus: pl.enableDeviceStatus,
ConsumableCapacity: pl.enableConsumableCapacity,
}
allocator, err := structured.NewAllocator(ctx, features, *allocatedState, pl.draManager.DeviceClasses(), slices, pl.celCache)
if err != nil {
return nil, statusError(logger, err)
}
s.allocator = allocator
s.nodeAllocations = make(map[string]nodeAllocation)
}
s.claims = claims
return nil, nil
}
func (pl *DynamicResources) validateDeviceClass(logger klog.Logger, deviceClassName, requestName string) *fwk.Status {
if deviceClassName == "" {
return statusError(logger, fmt.Errorf("request %s: unsupported request type", requestName))
}
_, err := pl.draManager.DeviceClasses().Get(deviceClassName)
if err != nil {
// If the class cannot be retrieved, allocation cannot proceed.
if apierrors.IsNotFound(err) {
// Here we mark the pod as "unschedulable", so it'll sleep in
// the unschedulable queue until a DeviceClass event occurs.
return statusUnschedulable(logger, fmt.Sprintf("request %s: device class %s does not exist", requestName, deviceClassName))
}
// Any other lookup error is unexpected and aborts scheduling of the pod.
return statusError(logger, err)
}
return nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove. This plugin does not need them, so it returns nil.
func (pl *DynamicResources) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(stateKey)
if err != nil {
return nil, err
}
s, ok := state.(*stateData)
if !ok {
return nil, errors.New("unable to convert state into stateData")
}
return s, nil
}
// filterExtendedResources computes the special claim's Requests based on the
// node's Allocatable. It returns the special claim updated to match what needs
// to be allocated through DRA for the node or nil if nothing needs to be allocated.
//
// It returns an error when the pod's extended resource requests can neither be allocated
// from the node's Allocatable, nor matched to any device class's explicit or implicit
// ExtendedResourceName.
func (pl *DynamicResources) filterExtendedResources(state *stateData, pod *v1.Pod, nodeInfo fwk.NodeInfo, logger klog.Logger) (*resourceapi.ResourceClaim, *fwk.Status) {
extendedResourceClaim := state.claims.extendedResourceClaim()
if extendedResourceClaim == nil {
// Nothing to do.
return nil, nil
}
// The claim is from a prior scheduling cycle. Return unschedulable so that it can be
// deleted in the PostFilter phase and scheduling can be retried anew.
if extendedResourceClaim.Spec.Devices.Requests != nil {
return nil, statusUnschedulable(logger, "cannot schedule extended resource claim", "pod", klog.KObj(pod), "node", klog.KObj(nodeInfo.Node()), "claim", klog.KObj(extendedResourceClaim))
}
extendedResources := make(map[v1.ResourceName]int64)
hasExtendedResource := false
for rName, rQuant := range state.draExtendedResource.podScalarResources {
if !v1helper.IsExtendedResourceName(rName) {
continue
}
// Skip in case request quantity is zero
if rQuant == 0 {
continue
}
_, okScalar := nodeInfo.GetAllocatable().GetScalarResources()[rName]
_, okDynamic := state.draExtendedResource.resourceToDeviceClass[rName]
if okDynamic {
if okScalar {
// node provides the resource via device plugin
extendedResources[rName] = 0
} else {
// node needs to provide the resource via DRA
extendedResources[rName] = rQuant
hasExtendedResource = true
}
} else if !okScalar {
// The request is neither provided by a device plugin nor backed by DRA,
// hence the pod does not fit the node.
return nil, statusUnschedulable(logger, "cannot fit resource", "pod", klog.KObj(pod), "node", klog.KObj(nodeInfo.Node()), "resource", rName)
}
}
// No extended resources backed by DRA on this node.
// The pod may have extended resources, but they are all backed by device
// plugins, hence the noderesources plugin should have checked whether the
// node can fit the pod.
// The Filter phase of this plugin has nothing left to do.
if state.claims.noUserClaim() && !hasExtendedResource {
// It cannot be allocated when reaching here, as a claim from a prior scheduling cycle
// would have returned unschedulable earlier in this function.
return nil, nil
}
// Each node needs its own, potentially different variant of the claim.
nodeExtendedResourceClaim := extendedResourceClaim.DeepCopy()
nodeExtendedResourceClaim.Spec.Devices.Requests = createDeviceRequests(pod, extendedResources, state.draExtendedResource.resourceToDeviceClass)
if extendedResourceClaim.Status.Allocation != nil {
// If it is already allocated, then we cannot simply allocate it again.
//
// It cannot be allocated when reaching here, as a claim found from a prior scheduling cycle
// would have returned unschedulable earlier in this function.
return nil, nil
}
return nodeExtendedResourceClaim, nil
}
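// To make the okScalar/okDynamic decision above concrete, consider a pod
// requesting example.com/gpu (an illustrative name):
//
//	node advertises it via device plugin (okScalar)    -> request set to 0, left to noderesources
//	only a DeviceClass maps it (okDynamic, !okScalar)  -> quantity allocated via DRA on this node
//	neither device plugin nor DeviceClass provides it  -> pod does not fit this node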
// createDeviceRequests computes the special claim's Requests based on the pod's extended resources
// that are not satisfied by the node's Allocatable.
//
// The device request name has the format container-%d-request-%d:
// the first %d is the container's index in the pod's initContainers plus containers,
// and the second %d is the extended resource's index in that container's sorted resource requests.
func createDeviceRequests(pod *v1.Pod, extendedResources map[v1.ResourceName]int64, deviceClassMapping map[v1.ResourceName]string) []resourceapi.DeviceRequest {
var deviceRequests []resourceapi.DeviceRequest
// Create the extended resource claim's Requests by iterating over the
// containers, and the resources in the containers, creating one request
// per <container, extended resource> pair.
// Pod-level resources currently have only cpu and memory, so they are not
// considered here for now. If extended resources are added to pod-level
// resources in the future, they need to be supported separately.
containers := slices.Clone(pod.Spec.InitContainers)
containers = append(containers, pod.Spec.Containers...)
for r := range extendedResources {
for i, c := range containers {
creqs := c.Resources.Requests
if creqs == nil {
continue
}
var rQuant resource.Quantity
var ok bool
if rQuant, ok = creqs[r]; !ok {
continue
}
crq, ok := (&rQuant).AsInt64()
if !ok || crq == 0 {
continue
}
className, ok := deviceClassMapping[r]
// skip if the request does not map to a device class
if !ok || className == "" {
continue
}
keys := make([]string, 0, len(creqs))
for k := range creqs {
keys = append(keys, k.String())
}
// Resource requests in a container are a map; their names must
// be sorted to determine the resource's index order.
slice.SortStrings(keys)
ridx := 0
for j := range keys {
if keys[j] == r.String() {
ridx = j
break
}
}
// i is the index of the container in the list of initContainers + containers.
// ridx is the index of the extended resource request among all sorted requests in the container.
// crq is the quantity of the extended resource request.
deviceRequests = append(deviceRequests,
resourceapi.DeviceRequest{
Name: fmt.Sprintf("container-%d-request-%d", i, ridx), // need to be container name index - extended resource name index
Exactly: &resourceapi.ExactDeviceRequest{
DeviceClassName: className, // map extended resource name -> device class name
AllocationMode: resourceapi.DeviceAllocationModeExactCount,
Count: crq,
},
})
}
}
return deviceRequests
}
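// A hedged, illustrative example of the naming scheme above: a pod with one
// init container (index 0) and one app container (index 1) whose requests
// are cpu, memory and example.com/gpu: 2. The sorted request names in the
// app container are [cpu, example.com/gpu, memory], so the extended
// resource has index 1 and the generated request would be:
//
//	resourceapi.DeviceRequest{
//		Name: "container-1-request-1",
//		Exactly: &resourceapi.ExactDeviceRequest{
//			DeviceClassName: "gpu.example.com", // from the device class mapping
//			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
//			Count:           2,
//		},
//	}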
// Filter invoked at the filter extension point.
// It evaluates if a pod can fit due to the resources it requests,
// for both allocated and unallocated claims.
//
// For claims that are bound, it checks that the node affinity is
// satisfied by the given node.
//
// For claims that are unbound, it checks whether the claim might get allocated
// for the node.
func (pl *DynamicResources) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
if !pl.enabled {
return nil
}
state, err := getStateData(cs)
if err != nil {
return statusError(klog.FromContext(ctx), err)
}
if state.claims.empty() {
return nil
}
logger := klog.FromContext(ctx)
node := nodeInfo.Node()
nodeExtendedResourceClaim, status := pl.filterExtendedResources(state, pod, nodeInfo, logger)
if status != nil {
return status
}
// The pod has no user claims; it may have an extended resource claim that is satisfied by the device plugin.
// Then there is nothing left to do for this plugin.
if nodeExtendedResourceClaim == nil && state.claims.noUserClaim() {
return nil
}
var unavailableClaims []int
for index, claim := range state.claims.all() {
logger.V(10).Info("filtering based on resource claims of the pod", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim))
// This node selector only gets set if the claim is allocated.
if nodeSelector := state.informationsForClaim[index].availableOnNodes; nodeSelector != nil && !nodeSelector.Match(node) {
logger.V(5).Info("allocation's node selector does not match", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim))
unavailableClaims = append(unavailableClaims, index)
continue
}
if claim.Status.Allocation == nil {
// The claim is not allocated yet, don't have to check
// anything else.
continue
}
// The claim is allocated, check whether it is ready for binding.
if pl.enableDeviceBindingConditions && pl.enableDeviceStatus {
ready, err := pl.isClaimReadyForBinding(claim)
// If the claim is not ready yet (ready false, no error) and binding has timed out
// or binding has failed (err non-nil), then the scheduler should consider deallocating this
// claim in PostFilter to unblock trying other devices.
if err != nil || !ready && pl.isClaimTimeout(claim) {
unavailableClaims = append(unavailableClaims, index)
}
}
}
// Use allocator to check the node and cache the result in case that the node is picked.
var allocations []resourceapi.AllocationResult
if state.allocator != nil {
allocCtx := ctx
if loggerV := logger.V(5); loggerV.Enabled() {
allocCtx = klog.NewContext(allocCtx, klog.LoggerWithValues(logger, "node", klog.KObj(node)))
}
// Apply a timeout to the allocation, if enabled.
if pl.enableFilterTimeout && pl.filterTimeout > 0 {
c, cancel := context.WithTimeout(allocCtx, pl.filterTimeout)
defer cancel()
allocCtx = c
}
// Check which claims need to be allocated.
//
// This replaces the special ResourceClaim for extended resources with one
// matching the node.
claimsToAllocate := make([]*resourceapi.ResourceClaim, 0, state.claims.len())
extendedResourceClaim := state.claims.extendedResourceClaim()
for _, claim := range state.claims.toAllocate() {
if claim == extendedResourceClaim && nodeExtendedResourceClaim != nil {
claim = nodeExtendedResourceClaim
}
claimsToAllocate = append(claimsToAllocate, claim)
}
a, err := state.allocator.Allocate(allocCtx, node, claimsToAllocate)
switch {
case errors.Is(err, context.DeadlineExceeded):
return statusUnschedulable(logger, "timed out trying to allocate devices", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(claimsToAllocate))
case ctx.Err() != nil:
return statusUnschedulable(logger, fmt.Sprintf("asked by caller to stop allocating devices: %v", context.Cause(ctx)), "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(claimsToAllocate))
case err != nil:
// This should only fail if there is something wrong with the claim or class.
// Return an error to abort scheduling of it.
//
// This will cause retries. It would be slightly nicer to mark it as unschedulable
// *and* abort scheduling. Then only a cluster event for updating the claim or class
// with the broken CEL expression would trigger rescheduling.
//
// But we cannot do both. As this shouldn't occur often, aborting like this is
// better than the more complicated alternative (return Unschedulable here, remember
// the error, then raise it again later if needed).
return statusError(logger, err, "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(claimsToAllocate))
}
// Check for exact length just to be sure. In practice this is all-or-nothing.
if len(a) != len(claimsToAllocate) {
return statusUnschedulable(logger, "cannot allocate all claims", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaims", klog.KObjSlice(claimsToAllocate))
}
// Reserve uses this information.
allocations = a
}
// Store information in state while holding the mutex.
if state.allocator != nil || len(unavailableClaims) > 0 {
state.mutex.Lock()
defer state.mutex.Unlock()
}
if len(unavailableClaims) > 0 {
// Remember all unavailable claims. This might be observed
// concurrently, so we have to lock the state before writing.
if state.unavailableClaims == nil {
state.unavailableClaims = sets.New[int]()
}
for _, index := range unavailableClaims {
state.unavailableClaims.Insert(index)
}
return statusUnschedulable(logger, "resourceclaim not available on the node", "pod", klog.KObj(pod))
}
if state.allocator != nil {
state.nodeAllocations[node.Name] = nodeAllocation{
allocationResults: allocations,
extendedResourceClaim: nodeExtendedResourceClaim,
}
}
return nil
}
// isSpecialClaimName returns true when the name is specialClaimInMemName.
func isSpecialClaimName(name string) bool {
return name == specialClaimInMemName
}
// PostFilter checks whether there are allocated claims that could get
// deallocated to help get the Pod schedulable. If yes, it picks one and
// requests its deallocation. This only gets called when filtering found no
// suitable node.
func (pl *DynamicResources) PostFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
if !pl.enabled {
return nil, fwk.NewStatus(fwk.Unschedulable, "plugin disabled")
}
logger := klog.FromContext(ctx)
state, err := getStateData(cs)
if err != nil {
return nil, statusError(logger, err)
}
// If a Pod doesn't have any resource claims attached to it, there is no need for further processing.
// Thus we provide a fast path for this case to avoid unnecessary computations.
if state.claims.empty() {
return nil, fwk.NewStatus(fwk.Unschedulable, "no new claims to deallocate")
}
extendedResourceClaim := state.claims.extendedResourceClaim()
// Iterating over a map is random. This is intentional here: we want to
// pick one claim randomly because there is no better heuristic.
for index := range state.unavailableClaims {
claim := state.claims.get(index)
if claim == extendedResourceClaim {
if extendedResourceClaim != nil && !isSpecialClaimName(extendedResourceClaim.Name) {
// Handled below.
break
}
continue
}
if len(claim.Status.ReservedFor) == 0 ||
len(claim.Status.ReservedFor) == 1 && claim.Status.ReservedFor[0].UID == pod.UID {
claim := claim.DeepCopy()
claim.Status.ReservedFor = nil
claim.Status.Allocation = nil
claim.Status.Devices = nil
logger.V(5).Info("Deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
if _, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
return nil, statusError(logger, err)
}
return nil, fwk.NewStatus(fwk.Unschedulable, "deallocation of ResourceClaim completed")
}
}
if extendedResourceClaim != nil && !isSpecialClaimName(extendedResourceClaim.Name) {
// If the special resource claim for extended resource backed by DRA
// is reserved or allocated at prior scheduling cycle, then it should be deleted.
extendedResourceClaim := extendedResourceClaim.DeepCopy()
if err := pl.deleteClaim(ctx, extendedResourceClaim, logger); err != nil {
return nil, statusError(logger, err)
}
return nil, fwk.NewStatus(fwk.Unschedulable, "deletion of ResourceClaim completed")
}
return nil, fwk.NewStatus(fwk.Unschedulable, "still not schedulable")
}
// Reserve reserves claims for the pod.
func (pl *DynamicResources) Reserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
if !pl.enabled {
return nil
}
state, err := getStateData(cs)
if err != nil {
return statusError(klog.FromContext(ctx), err)
}
if state.claims.empty() {
return nil
}
logger := klog.FromContext(ctx)
numClaimsWithAllocator := 0
for _, claim := range state.claims.all() {
if claim.Status.Allocation != nil {
// Allocated, but perhaps not reserved yet. We checked in PreFilter that
// the pod could reserve the claim. Instead of reserving here by
// updating the ResourceClaim status, we assume that reserving
// will work and only do it for real during binding. If it fails at
// that time, some other pod was faster and we have to try again.
continue
}
numClaimsWithAllocator++
}
if numClaimsWithAllocator == 0 {
// Nothing left to do.
return nil
}
extendedResourceClaim := state.claims.extendedResourceClaim()
numClaimsToAllocate := 0
needToAllocateUserClaims := false
for _, claim := range state.claims.toAllocate() {
numClaimsToAllocate++
if claim != extendedResourceClaim {
needToAllocateUserClaims = true
}
}
// Prepare allocation of claims handled by the scheduler.
if state.allocator != nil {
// Entries in these two slices match each other.
allocations, ok := state.nodeAllocations[nodeName]
if !ok || len(allocations.allocationResults) == 0 {
// This can happen only when claimsToAllocate has a single special claim template for extended resources backed by DRA,
// but it is satisfied by the node's device plugin, hence there is no DRA allocation.
if !needToAllocateUserClaims {
return nil
}
// We checked before that the node is suitable. This shouldn't have failed,
// so treat this as an error.
return statusError(logger, errors.New("claim allocation not found for node"))
}
// Sanity check: do we have results for all pending claims?
if len(allocations.allocationResults) != numClaimsToAllocate ||
len(allocations.allocationResults) != numClaimsWithAllocator {
return statusError(logger, fmt.Errorf("internal error, have %d allocations, %d claims to allocate, want %d claims", len(allocations.allocationResults), numClaimsToAllocate, numClaimsWithAllocator))
}
allocIndex := 0
for index, claim := range state.claims.toAllocate() {
// The index returned is the original index in the underlying claim store; it
// may not be sequentially numbered (e.g. 0, 1, 2, ...).
allocation := &allocations.allocationResults[allocIndex]
state.informationsForClaim[index].allocation = allocation
if claim == extendedResourceClaim {
// replace the special claim template for extended
// resource backed by DRA with the real instantiated claim.
claim = allocations.extendedResourceClaim
}
// Strictly speaking, we don't need to store the full modified object.
// The allocation would be enough. The full object is useful for
// debugging, testing and the allocator, so let's make it realistic.
claim = claim.DeepCopy()
if !slices.Contains(claim.Finalizers, resourceapi.Finalizer) {
claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer)
}
claim.Status.Allocation = allocation
err := pl.draManager.ResourceClaims().SignalClaimPendingAllocation(claim.UID, claim)
if err != nil {
return statusError(logger, fmt.Errorf("internal error, couldn't signal allocation for claim %s", claim.Name))
}
logger.V(5).Info("Reserved resource in allocation result", "claim", klog.KObj(claim), "allocation", klog.Format(allocation))
allocIndex++
}
}
return nil
}
// Unreserve clears the ReservedFor field for all claims.
// It's idempotent, and does nothing if no state is found for the given pod.
func (pl *DynamicResources) Unreserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) {
if !pl.enabled {
return
}
state, err := getStateData(cs)
if err != nil {
return
}
if state.claims.empty() {
return
}
logger := klog.FromContext(ctx)
// We process user claims here first; the extended resource claim, if any, is handled below.
for _, claim := range state.claims.allUserClaims() {
// If allocation was in-flight, then it's not anymore and we need to revert the
// claim object in the assume cache to what it was before.
if deleted := pl.draManager.ResourceClaims().RemoveClaimPendingAllocation(claim.UID); deleted {
pl.draManager.ResourceClaims().AssumedClaimRestore(claim.Namespace, claim.Name)
}
if claim.Status.Allocation != nil &&
resourceclaim.IsReservedForPod(pod, claim) {
// Remove pod from ReservedFor. A strategic-merge-patch is used
// because that allows removing an individual entry without having
// the latest ResourceClaim.
patch := fmt.Sprintf(`{"metadata": {"uid": %q}, "status": { "reservedFor": [ {"$patch": "delete", "uid": %q} ] }}`,
claim.UID,
pod.UID,
)
logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim), "pod", klog.KObj(pod))
claim, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status")
if err != nil {
// We will get here again when pod scheduling is retried.
logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim))
}
}
}
extendedResourceClaim := state.claims.extendedResourceClaim()
if extendedResourceClaim == nil {
// there is no extended resource claim
return
}
if deleted := pl.draManager.ResourceClaims().RemoveClaimPendingAllocation(state.claims.getInitialExtendedResourceClaimUID()); deleted {
pl.draManager.ResourceClaims().AssumedClaimRestore(extendedResourceClaim.Namespace, extendedResourceClaim.Name)
}
if isSpecialClaimName(extendedResourceClaim.Name) {
// The in-memory temporary extended resource claim does not need to be deleted.
return
}
logger.V(5).Info("delete extended resource backed by DRA", "resourceclaim", klog.KObj(extendedResourceClaim), "pod", klog.KObj(pod), "claim.UID", extendedResourceClaim.UID)
extendedResourceClaim = extendedResourceClaim.DeepCopy()
if err := pl.deleteClaim(ctx, extendedResourceClaim, logger); err != nil {
logger.Error(err, "delete", "resourceclaim", klog.KObj(extendedResourceClaim))
}
}
// deleteClaim deletes the claim after removing its finalizer, if there is one.
func (pl *DynamicResources) deleteClaim(ctx context.Context, claim *resourceapi.ResourceClaim, logger klog.Logger) error {
refreshClaim := false
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if refreshClaim {
updatedClaim, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("get resourceclaim %s/%s: %w", claim.Namespace, claim.Name, err)
}
claim = updatedClaim
} else {
refreshClaim = true
}
// Remove the finalizer to unblock removal first.
builtinControllerFinalizer := slices.Index(claim.Finalizers, resourceapi.Finalizer)
if builtinControllerFinalizer >= 0 {
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
}
_, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("update resourceclaim %s/%s: %w", claim.Namespace, claim.Name, err)
}
return nil
})
if retryErr != nil {
return retryErr
}
logger.V(5).Info("Delete", "resourceclaim", klog.KObj(claim))
err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if err != nil {
return err
}
return nil
}
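// The two-step shape above matters: deleting first would leave the claim in
// a terminating state until some other party removed resourceapi.Finalizer.
// A hedged sketch of the general conflict-retry pattern used here:
//
//	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
//		// Re-fetch the object on retries, mutate it, then Update.
//		// Returning nil ends the loop; conflict errors trigger a retry.
//		return nil
//	})
//	_ = retryErr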
// PreBind gets called in a separate goroutine after it has been determined
// that the pod should get bound to this node. Because Reserve did not actually
// reserve claims, we need to do it now. For claims with the builtin controller,
// we also handle the allocation.
//
// If anything fails, we return an error and
// the pod will have to go into the backoff queue. The scheduler will call
// Unreserve as part of the error handling.
func (pl *DynamicResources) PreBind(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !pl.enabled {
return nil
}
state, err := getStateData(cs)
if err != nil {
return statusError(klog.FromContext(ctx), err)
}
if state.claims.empty() {
return nil
}
logger := klog.FromContext(ctx)
for index, claim := range state.claims.all() {
if !resourceclaim.IsReservedForPod(pod, claim) {
claim, err := pl.bindClaim(ctx, state, index, pod, nodeName)
if err != nil {
return statusError(logger, err)
}
// Updated here so that Unreserve can work with the patched claim.
state.claims.set(index, claim)
}
}
if !pl.enableDeviceBindingConditions || !pl.enableDeviceStatus {
// If we don't have binding conditions, we can return early.
// The claim is now reserved for the pod and the scheduler can proceed with binding.
return nil
}
// We need to check if the device is attached to the node.
needToWait := hasBindingConditions(state)
// If no device needs to be prepared, we can return early.
if !needToWait {
return nil
}
// We need to wait for the device to be attached to the node.
pl.fh.EventRecorder().Eventf(pod, nil, v1.EventTypeNormal, "BindingConditionsPending", "Scheduling", "waiting for binding conditions for device on node %s", nodeName)
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Duration(BindingTimeoutDefaultSeconds)*time.Second, true,
func(ctx context.Context) (bool, error) {
return pl.isPodReadyForBinding(state)
})
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
err = errors.New("device binding timeout")
}
// Returning an error here causes another scheduling attempt.
// In that next attempt, PreFilter will detect the timeout or
// error and try to recover.
return statusError(logger, err)
}
// If we get here, we know that reserving the claim for
// the pod worked and we can proceed with binding it.
return nil
}
// PreBindPreFlight is called before PreBind and determines whether PreBind is going to do something for this pod.
// It just checks state.claims to determine whether there are any claims, and hence whether the plugin has to handle them at PreBind.
func (pl *DynamicResources) PreBindPreFlight(ctx context.Context, cs fwk.CycleState, p *v1.Pod, nodeName string) *fwk.Status {
if !pl.enabled {
return fwk.NewStatus(fwk.Skip)
}
state, err := getStateData(cs)
if err != nil {
return statusError(klog.FromContext(ctx), err)
}
if state.claims.empty() {
return fwk.NewStatus(fwk.Skip)
}
return nil
}
// createRequestMappings creates the requestMappings for the special extended resource claim.
// For each device request in the claim, it finds the container name and
// the extended resource name in that container matching the device request.
// The device request name has the format container-%d-request-%d:
// the first %d is the container's index in the pod's initContainers plus containers,
// and the second %d is the extended resource's index in that container's sorted resource requests.
func createRequestMappings(claim *resourceapi.ResourceClaim, pod *v1.Pod) []v1.ContainerExtendedResourceRequest {
var cer []v1.ContainerExtendedResourceRequest
deviceReqNames := make([]string, 0, len(claim.Spec.Devices.Requests))
for _, r := range claim.Spec.Devices.Requests {
deviceReqNames = append(deviceReqNames, r.Name)
}
// Pod-level resources currently have only cpu and memory, so they are not
// considered here for now. If extended resources are added to pod-level
// resources in the future, they need to be supported separately.
containers := slices.Clone(pod.Spec.InitContainers)
containers = append(containers, pod.Spec.Containers...)
for i, c := range containers {
creqs := c.Resources.Requests
keys := make([]string, 0, len(creqs))
for k := range creqs {
keys = append(keys, k.String())
}
// Resource requests in a container are a map; their names must
// be sorted to determine the resource's index order.
slice.SortStrings(keys)
for rName := range creqs {
ridx := 0
for j := range keys {
if keys[j] == rName.String() {
ridx = j
break
}
}
for _, devReqName := range deviceReqNames {
// During the Filter phase, the device request name is set to
// container-<container index>-request-<extended resource index>.
if fmt.Sprintf("container-%d-request-%d", i, ridx) == devReqName {
cer = append(cer,
v1.ContainerExtendedResourceRequest{
ContainerName: c.Name,
ResourceName: rName.String(),
RequestName: devReqName,
})
}
}
}
}
return cer
}
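// Continuing the illustrative example from createDeviceRequests: for the
// same pod and a claim request named "container-1-request-1", the function
// above recovers the container/resource pair and would produce:
//
//	v1.ContainerExtendedResourceRequest{
//		ContainerName: "app", // hypothetical container name
//		ResourceName:  "example.com/gpu",
//		RequestName:   "container-1-request-1",
//	}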
// bindClaim gets called by PreBind for each claim which is not reserved for the pod yet.
// It might not even be allocated. bindClaim then ensures that the allocation
// and reservation are recorded. This finishes the work started in Reserve.
func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, index int, pod *v1.Pod, nodeName string) (*resourceapi.ResourceClaim, error) {
logger := klog.FromContext(ctx)
claim := state.claims.get(index)
allocation := state.informationsForClaim[index].allocation
isExtendedResourceClaim := false
if claim == state.claims.extendedResourceClaim() {
// extended resource requests satisfied by device plugin
if allocation == nil && claim.Spec.Devices.Requests == nil {
return claim, nil
}
isExtendedResourceClaim = true
}
claimUIDs := []types.UID{claim.UID}
resourceClaimModified := false
defer func() {
// The scheduler was handling allocation. Now that has
// completed, either successfully or with a failure.
if resourceClaimModified {
if isExtendedResourceClaim {
// Unlike other claims, the extended resource claim is created in the API server below.
// AssumeClaimAfterAPICall returns ErrNotFound when the informer update has not reached the assume cache yet.
// Hence we must poll and wait for it.
pollErr := wait.PollUntilContextTimeout(ctx, 1*time.Second, time.Duration(AssumeExtendedResourceTimeoutDefaultSeconds)*time.Second, true,
func(ctx context.Context) (bool, error) {
if err := pl.draManager.ResourceClaims().AssumeClaimAfterAPICall(claim); err != nil {
if errors.Is(err, assumecache.ErrNotFound) {
return false, nil
}
logger.V(5).Info("Claim not stored in assume cache", "claim", klog.KObj(claim), "err", err)
return false, err
}
return true, nil
})
if pollErr != nil {
logger.V(5).Info("Claim not stored in assume cache after retries", "claim", klog.KObj(claim), "err", pollErr)
}
} else {
// This can fail, but only for reasons that are okay (concurrent delete or update).
// Shouldn't happen in this case.
if err := pl.draManager.ResourceClaims().AssumeClaimAfterAPICall(claim); err != nil {
logger.V(5).Info("Claim not stored in assume cache", "err", err)
}
}
}
if allocation != nil {
for _, claimUID := range claimUIDs {
pl.draManager.ResourceClaims().RemoveClaimPendingAllocation(claimUID)
}
}
}()
// Create the special claim for extended resource backed by DRA
if isExtendedResourceClaim && isSpecialClaimName(claim.Name) {
logger.V(5).Info("preparing to create claim for extended resources", "pod", klog.KObj(pod), "node", nodeName, "resourceclaim", klog.Format(claim))
// Replace claim template with instantiated claim for the node.
if nodeAllocation, ok := state.nodeAllocations[nodeName]; ok && nodeAllocation.extendedResourceClaim != nil {
claim = nodeAllocation.extendedResourceClaim.DeepCopy()
} else {
return nil, fmt.Errorf("extended resource claim not found for node %s", nodeName)
}
logger.V(5).Info("create claim for extended resources", "pod", klog.KObj(pod), "node", nodeName, "resourceclaim", klog.Format(claim))
// Clear fields which must not or cannot be set during creation.
claim.Status.Allocation = nil
claim.Name = ""
claim.UID = ""
var err error
claim, err = pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("create claim for extended resources %v: %w", klog.KObj(claim), err)
}
resourceClaimModified = true
logger.V(5).Info("created claim for extended resources", "pod", klog.KObj(pod), "node", nodeName, "resourceclaim", klog.Format(claim))
// Track the actual extended ResourceClaim from now.
// Relevant if we need to delete again in Unreserve.
if err := state.claims.updateExtendedResourceClaim(claim); err != nil {
return nil, fmt.Errorf("internal error: update extended ResourceClaim: %w", err)
}
}
logger.V(5).Info("preparing claim status update", "claim", klog.KObj(state.claims.get(index)), "allocation", klog.Format(allocation))
// We may run into a ResourceVersion conflict because there may be some
// benign concurrent changes. In that case we get the latest claim and
// try again.
refreshClaim := false
claim = claim.DeepCopy()
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if refreshClaim {
updatedClaim, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("get updated claim %s after conflict: %w", klog.KObj(claim), err)
}
logger.V(5).Info("retrying update after conflict", "claim", klog.KObj(claim))
claim = updatedClaim
} else {
// All future retries must get a new claim first.
refreshClaim = true
}
if claim.DeletionTimestamp != nil {
return fmt.Errorf("claim %s got deleted in the meantime", klog.KObj(claim))
}
// Do we need to store an allocation result from Reserve?
if allocation != nil {
if claim.Status.Allocation != nil {
return fmt.Errorf("claim %s got allocated elsewhere in the meantime", klog.KObj(claim))
}
// The finalizer needs to be added in a normal update.
// If we were interrupted in the past, it might already be set and we simply continue.
if !slices.Contains(claim.Finalizers, resourceapi.Finalizer) {
claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer)
updatedClaim, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("add finalizer to claim %s: %w", klog.KObj(claim), err)
}
claim = updatedClaim
}
claim.Status.Allocation = allocation
}
// We can simply try to add the pod here without checking
// preconditions. The apiserver will tell us with a
// non-conflict error if this isn't possible.
claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: pod.Name, UID: pod.UID})
if pl.enableDeviceBindingConditions && pl.enableDeviceStatus && claim.Status.Allocation.AllocationTimestamp == nil {
claim.Status.Allocation.AllocationTimestamp = &metav1.Time{Time: time.Now()}
}
updatedClaim, err := pl.clientset.ResourceV1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
if allocation != nil {
return fmt.Errorf("add allocation and reservation to claim %s: %w", klog.KObj(claim), err)
}
return fmt.Errorf("add reservation to claim %s: %w", klog.KObj(claim), err)
}
claim = updatedClaim
resourceClaimModified = true
return nil
})
if retryErr != nil {
return nil, retryErr
}
logger.V(5).Info("reserved", "pod", klog.KObj(pod), "node", nodeName, "resourceclaim", klog.Format(claim))
// Patch the pod status with the new information about the generated
// special resource claim.
if isExtendedResourceClaim {
cer := createRequestMappings(claim, pod)
podStatusCopy := pod.Status.DeepCopy()
podStatusCopy.ExtendedResourceClaimStatus = &v1.PodExtendedResourceClaimStatus{
RequestMappings: cer,
ResourceClaimName: claim.Name,
}
err := schedutil.PatchPodStatus(ctx, pl.clientset, pod.Name, pod.Namespace, &pod.Status, podStatusCopy)
if err != nil {
return nil, fmt.Errorf("update pod %s/%s ExtendedResourceClaimStatus: %w", pod.Namespace, pod.Name, err)
}
}
return claim, nil
}
// isClaimReadyForBinding checks whether a given resource claim is
// ready for binding.
// It returns an error if any binding failure condition is true.
// It returns true if (and only if) all binding conditions are true
// and no binding failure conditions are true,
// which includes the case that there are no binding conditions.
func (pl *DynamicResources) isClaimReadyForBinding(claim *resourceapi.ResourceClaim) (bool, error) {
if claim.Status.Allocation == nil {
return false, nil
}
for _, deviceRequest := range claim.Status.Allocation.Devices.Results {
if len(deviceRequest.BindingConditions) == 0 {
continue
}
deviceStatus := getAllocatedDeviceStatus(claim, &deviceRequest)
if deviceStatus == nil {
return false, nil
}
for _, cond := range deviceRequest.BindingFailureConditions {
failedCond := apimeta.FindStatusCondition(deviceStatus.Conditions, cond)
if failedCond != nil && failedCond.Status == metav1.ConditionTrue {
return false, fmt.Errorf("claim %s binding failed: reason=%s, message=%q",
claim.Name,
failedCond.Reason,
failedCond.Message)
}
}
for _, cond := range deviceRequest.BindingConditions {
if !apimeta.IsStatusConditionTrue(deviceStatus.Conditions, cond) {
return false, nil
}
}
}
return true, nil
}
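// Illustration (hypothetical condition names): a device allocated with
// BindingConditions {"dra.example.com/attached"} is ready only once the
// matching AllocatedDeviceStatus carries that condition with status True,
// while a True entry from its BindingFailureConditions surfaces as an error:
//
//	ready, err := pl.isClaimReadyForBinding(claim)
//	switch {
//	case err != nil: // a binding failure condition is true
//	case ready: // all binding conditions are true; binding may proceed
//	default: // conditions not yet reported; keep waiting
//	}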
// isClaimTimeout checks whether a given resource claim has
// reached the binding timeout.
// It returns true if (and only if) the binding timeout has been reached.
func (pl *DynamicResources) isClaimTimeout(claim *resourceapi.ResourceClaim) bool {
if !pl.enableDeviceBindingConditions || !pl.enableDeviceStatus {
return false
}
if claim.Status.Allocation == nil || claim.Status.Allocation.AllocationTimestamp == nil {
return false
}
// check if the binding timeout is reached
for _, deviceRequest := range claim.Status.Allocation.Devices.Results {
if deviceRequest.BindingConditions == nil {
continue
}
if claim.Status.Allocation.AllocationTimestamp.Add(time.Duration(BindingTimeoutDefaultSeconds) * time.Second).Before(time.Now()) {
return true
}
}
return false
}
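// Sketch of the timeout arithmetic (assuming BindingTimeoutDefaultSeconds
// is 600): a claim with binding conditions allocated at 10:00:00 times out
// once the wall clock passes 10:10:00:
//
//	deadline := claim.Status.Allocation.AllocationTimestamp.Add(
//		time.Duration(BindingTimeoutDefaultSeconds) * time.Second)
//	timedOut := deadline.Before(time.Now())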
// isPodReadyForBinding checks the binding status of devices within the given state claims.
// It returns true if (and only if) all binding conditions are true,
// and no binding failure conditions are true,
// which includes the case when there are no binding conditions.
// It returns an error if any binding failure condition is set.
func (pl *DynamicResources) isPodReadyForBinding(state *stateData) (bool, error) {
for claimIndex, claim := range state.claims.all() {
claim, err := pl.draManager.ResourceClaims().Get(claim.Namespace, claim.Name)
if err != nil {
return false, err
}
state.claims.set(claimIndex, claim)
ready, err := pl.isClaimReadyForBinding(claim)
if err != nil {
return false, err
}
if !ready {
if pl.isClaimTimeout(claim) {
return false, fmt.Errorf("claim %s binding timeout", claim.Name)
}
return false, nil
}
}
return true, nil
}
// hasBindingConditions checks whether any of the claims in the state
// has binding conditions.
// It returns true if at least one claim has binding conditions.
// It returns false if no claim has binding conditions.
func hasBindingConditions(state *stateData) bool {
for _, claim := range state.claims.all() {
if claim.Status.Allocation == nil {
continue
}
for _, device := range claim.Status.Allocation.Devices.Results {
if len(device.BindingConditions) > 0 {
return true
}
}
}
return false
}
// statusUnschedulable ensures that there is a log message associated with the
// line where the status originated.
func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) *fwk.Status {
if loggerV := logger.V(5); loggerV.Enabled() {
helper, loggerV := loggerV.WithCallStackHelper()
helper()
kv = append(kv, "reason", reason)
// nolint: logcheck // warns because it cannot check key/values
loggerV.Info("pod unschedulable", kv...)
}
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, reason)
}
// statusError ensures that there is a log message associated with the
// line where the error originated.
func statusError(logger klog.Logger, err error, kv ...interface{}) *fwk.Status {
if loggerV := logger.V(5); loggerV.Enabled() {
helper, loggerV := loggerV.WithCallStackHelper()
helper()
// nolint: logcheck // warns because it cannot check key/values
loggerV.Error(err, "dynamic resource plugin failed", kv...)
}
return fwk.AsStatus(err)
}
func getAllocatedDeviceStatus(claim *resourceapi.ResourceClaim, deviceRequest *resourceapi.DeviceRequestAllocationResult) *resourceapi.AllocatedDeviceStatus {
for _, device := range claim.Status.Devices {
if deviceRequest.Device == device.Device && deviceRequest.Driver == device.Driver && deviceRequest.Pool == device.Pool {
return &device
}
}
return nil
}
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package extended
import (
v1 "k8s.io/api/core/v1"
"k8s.io/api/resource/v1beta1"
fwk "k8s.io/kube-scheduler/framework"
)
func DeviceClassMapping(draManager fwk.SharedDRAManager) (map[v1.ResourceName]string, error) {
classes, err := draManager.DeviceClasses().List()
if err != nil {
return nil, err
}
extendedResources := make(map[v1.ResourceName]string, len(classes))
for _, c := range classes {
if c.Spec.ExtendedResourceName == nil {
extendedResources[v1.ResourceName(v1beta1.ResourceDeviceClassPrefix+c.Name)] = c.Name
} else {
extendedResources[v1.ResourceName(*c.Spec.ExtendedResourceName)] = c.Name
}
}
return extendedResources, nil
}
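// Example mapping (hypothetical class names): a DeviceClass "gpu.example.com"
// with ExtendedResourceName "example.com/gpu", plus a class "foo" without
// one, would produce:
//
//	example.com/gpu -> gpu.example.com
//	deviceclass.resource.kubernetes.io/foo -> foo (implicit key, assuming
//	ResourceDeviceClassPrefix resolves to "deviceclass.resource.kubernetes.io/")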
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package feature
import (
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/pkg/features"
)
// Features carries feature gate values used by various plugins.
// This struct allows us to break the dependency of the plugins on
// the internal k8s features pkg.
type Features struct {
EnableDRAExtendedResource bool
EnableDRAPrioritizedList bool
EnableDRAAdminAccess bool
EnableDRADeviceTaints bool
EnableDRADeviceBindingConditions bool
EnableDRAResourceClaimDeviceStatus bool
EnableDRASchedulerFilterTimeout bool
EnableDynamicResourceAllocation bool
EnableVolumeAttributesClass bool
EnableCSIMigrationPortworx bool
EnableNodeInclusionPolicyInPodTopologySpread bool
EnableMatchLabelKeysInPodTopologySpread bool
EnableInPlacePodVerticalScaling bool
EnableSidecarContainers bool
EnableSchedulingQueueHint bool
EnableAsyncPreemption bool
EnablePodLevelResources bool
EnablePartitionableDevices bool
EnableStorageCapacityScoring bool
EnableConsumableCapacity bool
}
// NewSchedulerFeaturesFromGates copies the current state of the feature gates into the struct.
func NewSchedulerFeaturesFromGates(featureGate featuregate.FeatureGate) Features {
return Features{
EnableDRAExtendedResource: featureGate.Enabled(features.DRAExtendedResource),
EnableDRAPrioritizedList: featureGate.Enabled(features.DRAPrioritizedList),
EnableDRAAdminAccess: featureGate.Enabled(features.DRAAdminAccess),
EnableConsumableCapacity: featureGate.Enabled(features.DRAConsumableCapacity),
EnableDRADeviceTaints: featureGate.Enabled(features.DRADeviceTaints),
EnableDRASchedulerFilterTimeout: featureGate.Enabled(features.DRASchedulerFilterTimeout),
EnableDRAResourceClaimDeviceStatus: featureGate.Enabled(features.DRAResourceClaimDeviceStatus),
EnableDRADeviceBindingConditions: featureGate.Enabled(features.DRADeviceBindingConditions),
EnableDynamicResourceAllocation: featureGate.Enabled(features.DynamicResourceAllocation),
EnableVolumeAttributesClass: featureGate.Enabled(features.VolumeAttributesClass),
EnableCSIMigrationPortworx: featureGate.Enabled(features.CSIMigrationPortworx),
EnableNodeInclusionPolicyInPodTopologySpread: featureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
EnableMatchLabelKeysInPodTopologySpread: featureGate.Enabled(features.MatchLabelKeysInPodTopologySpread),
EnableInPlacePodVerticalScaling: featureGate.Enabled(features.InPlacePodVerticalScaling),
EnableSidecarContainers: featureGate.Enabled(features.SidecarContainers),
EnableSchedulingQueueHint: featureGate.Enabled(features.SchedulerQueueingHints),
EnableAsyncPreemption: featureGate.Enabled(features.SchedulerAsyncPreemption),
EnablePodLevelResources: featureGate.Enabled(features.PodLevelResources),
EnablePartitionableDevices: featureGate.Enabled(features.DRAPartitionableDevices),
EnableStorageCapacityScoring: featureGate.Enabled(features.StorageCapacityScoring),
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
fwk "k8s.io/kube-scheduler/framework"
)
// DefaultNormalizeScore generates a Normalize Score function that can normalize the
// scores from [0, max(scores)] to [0, maxPriority]. If reverse is set to true, it
// reverses the scores by subtracting it from maxPriority.
// Note: The input scores are always assumed to be non-negative integers.
func DefaultNormalizeScore(maxPriority int64, reverse bool, scores fwk.NodeScoreList) *fwk.Status {
var maxCount int64
for i := range scores {
if scores[i].Score > maxCount {
maxCount = scores[i].Score
}
}
if maxCount == 0 {
if reverse {
for i := range scores {
scores[i].Score = maxPriority
}
}
return nil
}
for i := range scores {
score := scores[i].Score
score = maxPriority * score / maxCount
if reverse {
score = maxPriority - score
}
scores[i].Score = score
}
return nil
}
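// Worked example (hypothetical scores): with maxPriority=100 and raw scores
// [20, 40, 80], maxCount is 80 and the normalized scores become
// [100*20/80, 100*40/80, 100*80/80] = [25, 50, 100]; with reverse=true they
// become [75, 50, 0]:
//
//	scores := fwk.NodeScoreList{{Name: "a", Score: 20}, {Name: "b", Score: 40}, {Name: "c", Score: 80}}
//	_ = DefaultNormalizeScore(100, false, scores) // scores: 25, 50, 100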
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
// FunctionShape represents a collection of FunctionShapePoint.
type FunctionShape []FunctionShapePoint
// FunctionShapePoint represents a shape point.
type FunctionShapePoint struct {
// Utilization is the function argument.
Utilization int64
// Score is the function value.
Score int64
}
// BuildBrokenLinearFunction creates a function which is built using linear segments. Segments are defined via the shape array.
// Shape[i].Utilization represents points on the "Utilization" axis where adjacent segments meet.
// Shape[i].Score represents the function values at those meeting points.
//
// The function f(p) is defined as:
//
// shape[0].Score for p <= shape[0].Utilization
// shape[n-1].Score for p > shape[n-1].Utilization
//
// and linear between consecutive points (shape[i-1].Utilization < p <= shape[i].Utilization).
func BuildBrokenLinearFunction(shape FunctionShape) func(int64) int64 {
return func(p int64) int64 {
for i := 0; i < len(shape); i++ {
if p <= shape[i].Utilization {
if i == 0 {
return shape[0].Score
}
return shape[i-1].Score + (shape[i].Score-shape[i-1].Score)*(p-shape[i-1].Utilization)/(shape[i].Utilization-shape[i-1].Utilization)
}
}
return shape[len(shape)-1].Score
}
}
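// Worked example (hypothetical shape): with points (0, 10) and (100, 0) the
// returned function interpolates linearly between the points and clamps
// outside them:
//
//	f := BuildBrokenLinearFunction(FunctionShape{
//		{Utilization: 0, Score: 10},
//		{Utilization: 100, Score: 0},
//	})
//	_ = f(-5)  // 10 (clamped at the first point)
//	_ = f(50)  // 10 + (0-10)*(50-0)/(100-0) = 5
//	_ = f(150) // 0 (clamped at the last point)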
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)
var (
rcKind = v1.SchemeGroupVersion.WithKind("ReplicationController")
rsKind = appsv1.SchemeGroupVersion.WithKind("ReplicaSet")
ssKind = appsv1.SchemeGroupVersion.WithKind("StatefulSet")
)
// DefaultSelector returns a selector deduced from the Services, Replication
// Controllers, Replica Sets, and Stateful Sets matching the given pod.
func DefaultSelector(
pod *v1.Pod,
sl corelisters.ServiceLister,
cl corelisters.ReplicationControllerLister,
rsl appslisters.ReplicaSetLister,
ssl appslisters.StatefulSetLister,
) labels.Selector {
labelSet := make(labels.Set)
// Since services, RCs, RSs and SSs match the pod, they won't have conflicting
// labels. Merging is safe.
if services, err := GetPodServices(sl, pod); err == nil {
for _, service := range services {
labelSet = labels.Merge(labelSet, service.Spec.Selector)
}
}
selector := labelSet.AsSelector()
owner := metav1.GetControllerOfNoCopy(pod)
if owner == nil {
return selector
}
gv, err := schema.ParseGroupVersion(owner.APIVersion)
if err != nil {
return selector
}
gvk := gv.WithKind(owner.Kind)
switch gvk {
case rcKind:
if rc, err := cl.ReplicationControllers(pod.Namespace).Get(owner.Name); err == nil {
labelSet = labels.Merge(labelSet, rc.Spec.Selector)
selector = labelSet.AsSelector()
}
case rsKind:
if rs, err := rsl.ReplicaSets(pod.Namespace).Get(owner.Name); err == nil {
if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil {
if r, ok := other.Requirements(); ok {
selector = selector.Add(r...)
}
}
}
case ssKind:
if ss, err := ssl.StatefulSets(pod.Namespace).Get(owner.Name); err == nil {
if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil {
if r, ok := other.Requirements(); ok {
selector = selector.Add(r...)
}
}
}
default:
// Not owned by a supported controller.
}
return selector
}
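// Illustrative outcome (hypothetical objects): for a pod selected by a
// Service with selector {app: web} and owned by a ReplicaSet whose selector
// requires {app: web, tier: frontend}, the returned selector requires all of
// app=web and tier=frontend; with no supported owner, only the
// service-derived labels remain:
//
//	sel := DefaultSelector(pod, serviceLister, rcLister, rsLister, ssLister)
//	_ = sel.Matches(labels.Set{"app": "web", "tier": "frontend"}) // true in this scenario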
// GetPodServices gets the services that have the selector that match the labels on the given pod.
func GetPodServices(sl corelisters.ServiceLister, pod *v1.Pod) ([]*v1.Service, error) {
allServices, err := sl.Services(pod.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
var services []*v1.Service
for i := range allServices {
service := allServices[i]
if service.Spec.Selector == nil {
// services with nil selectors match nothing, not everything.
continue
}
selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
if selector.Matches(labels.Set(pod.Labels)) {
services = append(services, service)
}
}
return services, nil
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helper
import v1 "k8s.io/api/core/v1"
// DoNotScheduleTaintsFilterFunc returns the filter function that can
// filter out the node taints that reject scheduling Pod on a Node.
func DoNotScheduleTaintsFilterFunc() func(t *v1.Taint) bool {
return func(t *v1.Taint) bool {
// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
}
}
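// Example (hypothetical taint): the returned filter keeps NoSchedule and
// NoExecute taints and drops PreferNoSchedule ones:
//
//	filter := DoNotScheduleTaintsFilterFunc()
//	_ = filter(&v1.Taint{Key: "k", Effect: v1.TaintEffectNoSchedule})       // true
//	_ = filter(&v1.Taint{Key: "k", Effect: v1.TaintEffectPreferNoSchedule}) // false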
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package imagelocality
import (
"context"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// The two thresholds are used as bounds for the image score range. They correspond to a reasonable size range for
// container images compressed and stored in registries; the 90th percentile of images on DockerHub falls into this range.
const (
mb int64 = 1024 * 1024
minThreshold int64 = 23 * mb
maxContainerThreshold int64 = 1000 * mb
)
// ImageLocality is a score plugin that favors nodes that already have requested pod container's images.
type ImageLocality struct {
handle fwk.Handle
}
var _ fwk.ScorePlugin = &ImageLocality{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.ImageLocality
// Name returns name of the plugin. It is used in logs, etc.
func (pl *ImageLocality) Name() string {
return Name
}
// Score invoked at the score extension point.
func (pl *ImageLocality) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
nodeInfos, err := pl.handle.SnapshotSharedLister().NodeInfos().List()
if err != nil {
return 0, fwk.AsStatus(err)
}
totalNumNodes := len(nodeInfos)
imageScores := sumImageScores(nodeInfo, pod, totalNumNodes)
score := calculatePriority(imageScores, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
return score, nil
}
// ScoreExtensions of the Score plugin.
func (pl *ImageLocality) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, h fwk.Handle) (fwk.Plugin, error) {
return &ImageLocality{handle: h}, nil
}
// calculatePriority returns the priority of a node. Given the sumScores of requested images on the node, the node's
// priority is obtained by scaling the maximum priority value with a ratio proportional to the sumScores.
func calculatePriority(sumScores int64, numContainers int) int64 {
maxThreshold := maxContainerThreshold * int64(numContainers)
if sumScores < minThreshold {
sumScores = minThreshold
} else if sumScores > maxThreshold {
sumScores = maxThreshold
}
return fwk.MaxNodeScore * (sumScores - minThreshold) / (maxThreshold - minThreshold)
}
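// Worked example (hypothetical sizes): for a pod with one container whose
// image contributes sumScores of 500*mb, maxThreshold is 1000*mb, so with
// fwk.MaxNodeScore = 100 the result is 100*(500-23)/(1000-23) = 48; sums
// below 23*mb clamp to 0 and sums above 1000*mb clamp to 100:
//
//	_ = calculatePriority(500*mb, 1) // 48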
// sumImageScores returns the sum of image scores of all the pod's containers whose images are already present on the node.
// Each image receives a raw score equal to its size, scaled by scaledImageScore. The raw scores are later used to calculate
// the final score.
func sumImageScores(nodeInfo fwk.NodeInfo, pod *v1.Pod, totalNumNodes int) int64 {
var sum int64
for _, container := range pod.Spec.InitContainers {
if state, ok := nodeInfo.GetImageStates()[normalizedImageName(container.Image)]; ok {
sum += scaledImageScore(state, totalNumNodes)
}
}
for _, container := range pod.Spec.Containers {
if state, ok := nodeInfo.GetImageStates()[normalizedImageName(container.Image)]; ok {
sum += scaledImageScore(state, totalNumNodes)
}
}
return sum
}
// scaledImageScore returns an adaptively scaled score for the given state of an image.
// The size of the image is used as the base score, scaled by a factor which considers how many nodes the image has "spread" to.
// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods getting assigned to the same or
// a few nodes due to image locality.
func scaledImageScore(imageState *fwk.ImageStateSummary, totalNumNodes int) int64 {
spread := float64(imageState.NumNodes) / float64(totalNumNodes)
return int64(float64(imageState.Size) * spread)
}
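// Worked example (hypothetical numbers): a 300MB image present on 3 of 10
// nodes gets spread = 3/10 and a scaled score of 90MB worth of bytes:
//
//	state := &fwk.ImageStateSummary{Size: 300 * mb, NumNodes: 3}
//	_ = scaledImageScore(state, 10) // 90 * mb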
// normalizedImageName returns the CRI compliant name for a given image.
// TODO: cover the corner cases of missed matches, e.g.,
// 1. Using Docker as runtime and docker.io/library/test:tag in pod spec, but only test:tag will be present in node status
// 2. Using the implicit registry, i.e., test:tag or library/test:tag in pod spec but only docker.io/library/test:tag
// in node status; note that if users consistently use one registry format, this should not happen.
func normalizedImageName(name string) string {
if strings.LastIndex(name, ":") <= strings.LastIndex(name, "/") {
name = name + ":latest"
}
return name
}
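// Examples of the normalization (hypothetical image names):
//
//	normalizedImageName("nginx")             // "nginx:latest" (no tag)
//	normalizedImageName("nginx:1.25")        // unchanged, tag present
//	normalizedImageName("registry:5000/app") // "registry:5000/app:latest" (the ":" belongs to the port)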
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interpodaffinity
import (
"context"
"fmt"
"sync/atomic"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
const (
// preFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data for Filtering.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + Name
// ErrReasonExistingAntiAffinityRulesNotMatch is used for ExistingPodsAntiAffinityRulesNotMatch predicate error.
ErrReasonExistingAntiAffinityRulesNotMatch = "node(s) didn't satisfy existing pods anti-affinity rules"
// ErrReasonAffinityRulesNotMatch is used for PodAffinityRulesNotMatch predicate error.
ErrReasonAffinityRulesNotMatch = "node(s) didn't match pod affinity rules"
// ErrReasonAntiAffinityRulesNotMatch is used for PodAntiAffinityRulesNotMatch predicate error.
ErrReasonAntiAffinityRulesNotMatch = "node(s) didn't match pod anti-affinity rules"
)
// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
// A map of topology pairs to the number of existing pods that have anti-affinity terms that match the "pod".
existingAntiAffinityCounts topologyToMatchedTermCount
// A map of topology pairs to the number of existing pods that match the affinity terms of the "pod".
affinityCounts topologyToMatchedTermCount
// A map of topology pairs to the number of existing pods that match the anti-affinity terms of the "pod".
antiAffinityCounts topologyToMatchedTermCount
// podInfo of the incoming pod.
podInfo fwk.PodInfo
// A copy of the incoming pod's namespace labels.
namespaceLabels labels.Set
}
// Clone the prefilter state.
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
copy := preFilterState{}
copy.affinityCounts = s.affinityCounts.clone()
copy.antiAffinityCounts = s.antiAffinityCounts.clone()
copy.existingAntiAffinityCounts = s.existingAntiAffinityCounts.clone()
// No need to deep copy the podInfo because it shouldn't change.
copy.podInfo = s.podInfo
copy.namespaceLabels = s.namespaceLabels
return &copy
}
// updateWithPod updates the preFilterState counters with the (anti)affinity matches for the given podInfo.
func (s *preFilterState) updateWithPod(pInfo fwk.PodInfo, node *v1.Node, multiplier int64) {
if s == nil {
return
}
s.existingAntiAffinityCounts.updateWithAntiAffinityTerms(pInfo.GetRequiredAntiAffinityTerms(), s.podInfo.GetPod(), s.namespaceLabels, node, multiplier)
s.affinityCounts.updateWithAffinityTerms(s.podInfo.GetRequiredAffinityTerms(), pInfo.GetPod(), node, multiplier)
// The incoming pod's terms have the namespaceSelector merged into the namespaces, and so
// here we don't lookup the updated pod's namespace labels, hence passing nil for nsLabels.
s.antiAffinityCounts.updateWithAntiAffinityTerms(s.podInfo.GetRequiredAntiAffinityTerms(), pInfo.GetPod(), nil, node, multiplier)
}
type topologyPair struct {
key string
value string
}
type topologyToMatchedTermCount map[topologyPair]int64
func (m topologyToMatchedTermCount) merge(toMerge topologyToMatchedTermCount) {
for pair, count := range toMerge {
m[pair] += count
}
}
func (m topologyToMatchedTermCount) mergeWithList(toMerge topologyToMatchedTermCountList) {
for _, tmtc := range toMerge {
m[tmtc.topologyPair] += tmtc.count
}
}
func (m topologyToMatchedTermCount) clone() topologyToMatchedTermCount {
copy := make(topologyToMatchedTermCount, len(m))
copy.merge(m)
return copy
}
func (m topologyToMatchedTermCount) update(node *v1.Node, tk string, value int64) {
if tv, ok := node.Labels[tk]; ok {
pair := topologyPair{key: tk, value: tv}
m[pair] += value
// value could be negative, hence we delete the entry if it is down to zero.
if m[pair] == 0 {
delete(m, pair)
}
}
}
// updates the topologyToMatchedTermCount map with the specified value
// for each affinity term if "targetPod" matches ALL terms.
func (m topologyToMatchedTermCount) updateWithAffinityTerms(
terms []fwk.AffinityTerm, pod *v1.Pod, node *v1.Node, value int64) {
if podMatchesAllAffinityTerms(terms, pod) {
for _, t := range terms {
m.update(node, t.TopologyKey, value)
}
}
}
// updates the topologyToMatchedTermCount map with the specified value
// for each anti-affinity term that matches the target pod.
func (m topologyToMatchedTermCount) updateWithAntiAffinityTerms(terms []fwk.AffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, value int64) {
// Check anti-affinity terms.
for _, t := range terms {
if t.Matches(pod, nsLabels) {
m.update(node, t.TopologyKey, value)
}
}
}
// topologyToMatchedTermCountList is a slice equivalent of the topologyToMatchedTermCount map.
// Using a slice improves the performance of PreFilter,
// mainly because iterating over a slice during merging is faster than iterating over a map.
type topologyToMatchedTermCountList []topologyPairCount
type topologyPairCount struct {
topologyPair topologyPair
count int64
}
func (m *topologyToMatchedTermCountList) append(node *v1.Node, tk string, value int64) {
if tv, ok := node.Labels[tk]; ok {
pair := topologyPair{key: tk, value: tv}
*m = append(*m, topologyPairCount{
topologyPair: pair,
count: value,
})
}
}
// appends the specified value to the topologyToMatchedTermCountList
// for each affinity term if "targetPod" matches ALL terms.
func (m *topologyToMatchedTermCountList) appendWithAffinityTerms(
terms []fwk.AffinityTerm, pod *v1.Pod, node *v1.Node, value int64) {
if podMatchesAllAffinityTerms(terms, pod) {
for _, t := range terms {
m.append(node, t.TopologyKey, value)
}
}
}
// appends the specified value to the topologyToMatchedTermCountList
// for each anti-affinity term that matches the target pod.
func (m *topologyToMatchedTermCountList) appendWithAntiAffinityTerms(terms []fwk.AffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, value int64) {
// Check anti-affinity terms.
for _, t := range terms {
if t.Matches(pod, nsLabels) {
m.append(node, t.TopologyKey, value)
}
}
}
// returns true IFF the given pod matches all the given terms.
func podMatchesAllAffinityTerms(terms []fwk.AffinityTerm, pod *v1.Pod) bool {
if len(terms) == 0 {
return false
}
for _, t := range terms {
// The incoming pod NamespaceSelector was merged into the Namespaces set, and so
// we are not explicitly passing in namespace labels.
if !t.Matches(pod, nil) {
return false
}
}
return true
}
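// Example (hypothetical terms): with one term selecting {app: web} and
// another selecting {tier: frontend}, a pod labeled only {app: web} does not
// match, while a pod labeled with both does. An empty terms slice
// deliberately yields false rather than vacuous truth.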
// calculates the following for each existing pod on each node:
// 1. Whether it has PodAntiAffinity
// 2. Whether any AntiAffinityTerm matches the incoming pod
func (pl *InterPodAffinity) getExistingAntiAffinityCounts(ctx context.Context, pod *v1.Pod, nsLabels labels.Set, nodes []fwk.NodeInfo) topologyToMatchedTermCount {
antiAffinityCountsList := make([]topologyToMatchedTermCountList, len(nodes))
index := int32(-1)
processNode := func(i int) {
nodeInfo := nodes[i]
node := nodeInfo.Node()
antiAffinityCounts := make(topologyToMatchedTermCountList, 0)
for _, existingPod := range nodeInfo.GetPodsWithRequiredAntiAffinity() {
antiAffinityCounts.appendWithAntiAffinityTerms(existingPod.GetRequiredAntiAffinityTerms(), pod, nsLabels, node, 1)
}
if len(antiAffinityCounts) != 0 {
antiAffinityCountsList[atomic.AddInt32(&index, 1)] = antiAffinityCounts
}
}
pl.parallelizer.Until(ctx, len(nodes), processNode, pl.Name())
result := make(topologyToMatchedTermCount)
// Traditional for loop is slightly faster in this case than its "for range" equivalent.
for i := 0; i <= int(index); i++ {
result.mergeWithList(antiAffinityCountsList[i])
}
return result
}
// finds existing Pods that match the (anti)affinity terms of the incoming pod.
// It returns a topologyToMatchedTermCount that is checked later by the affinity
// predicate. With this topologyToMatchedTermCount available, the affinity predicate does not
// need to check all the pods in the cluster.
func (pl *InterPodAffinity) getIncomingAffinityAntiAffinityCounts(ctx context.Context, podInfo fwk.PodInfo, allNodes []fwk.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount) {
affinityCounts := make(topologyToMatchedTermCount)
antiAffinityCounts := make(topologyToMatchedTermCount)
if len(podInfo.GetRequiredAffinityTerms()) == 0 && len(podInfo.GetRequiredAntiAffinityTerms()) == 0 {
return affinityCounts, antiAffinityCounts
}
affinityCountsList := make([]topologyToMatchedTermCountList, len(allNodes))
antiAffinityCountsList := make([]topologyToMatchedTermCountList, len(allNodes))
index := int32(-1)
processNode := func(i int) {
nodeInfo := allNodes[i]
node := nodeInfo.Node()
affinity := make(topologyToMatchedTermCountList, 0)
antiAffinity := make(topologyToMatchedTermCountList, 0)
for _, existingPod := range nodeInfo.GetPods() {
affinity.appendWithAffinityTerms(podInfo.GetRequiredAffinityTerms(), existingPod.GetPod(), node, 1)
// The incoming pod's terms have the namespaceSelector merged into the namespaces, and so
// here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels.
antiAffinity.appendWithAntiAffinityTerms(podInfo.GetRequiredAntiAffinityTerms(), existingPod.GetPod(), nil, node, 1)
}
if len(affinity) > 0 || len(antiAffinity) > 0 {
k := atomic.AddInt32(&index, 1)
affinityCountsList[k] = affinity
antiAffinityCountsList[k] = antiAffinity
}
}
pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name())
for i := 0; i <= int(index); i++ {
affinityCounts.mergeWithList(affinityCountsList[i])
antiAffinityCounts.mergeWithList(antiAffinityCountsList[i])
}
return affinityCounts, antiAffinityCounts
}
// PreFilter invoked at the prefilter extension point.
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, allNodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
var nodesWithRequiredAntiAffinityPods []fwk.NodeInfo
var err error
if nodesWithRequiredAntiAffinityPods, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil {
return nil, fwk.AsStatus(fmt.Errorf("failed to list NodeInfos with pods with affinity: %w", err))
}
s := &preFilterState{}
if s.podInfo, err = framework.NewPodInfo(pod); err != nil {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, fmt.Sprintf("parsing pod: %+v", err))
}
for i := range s.podInfo.GetRequiredAffinityTerms() {
if err := pl.mergeAffinityTermNamespacesIfNotEmpty(s.podInfo.GetRequiredAffinityTerms()[i]); err != nil {
return nil, fwk.AsStatus(err)
}
}
for i := range s.podInfo.GetRequiredAntiAffinityTerms() {
if err := pl.mergeAffinityTermNamespacesIfNotEmpty(s.podInfo.GetRequiredAntiAffinityTerms()[i]); err != nil {
return nil, fwk.AsStatus(err)
}
}
logger := klog.FromContext(ctx)
s.namespaceLabels = GetNamespaceLabelsSnapshot(logger, pod.Namespace, pl.nsLister)
s.existingAntiAffinityCounts = pl.getExistingAntiAffinityCounts(ctx, pod, s.namespaceLabels, nodesWithRequiredAntiAffinityPods)
s.affinityCounts, s.antiAffinityCounts = pl.getIncomingAffinityAntiAffinityCounts(ctx, s.podInfo, allNodes)
if len(s.existingAntiAffinityCounts) == 0 && len(s.podInfo.GetRequiredAffinityTerms()) == 0 && len(s.podInfo.GetRequiredAntiAffinityTerms()) == 0 {
return nil, fwk.NewStatus(fwk.Skip)
}
cycleState.Write(preFilterStateKey, s)
return nil, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *InterPodAffinity) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
// AddPod from pre-computed data in cycleState.
func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
state.updateWithPod(podInfoToAdd, nodeInfo.Node(), 1)
return nil
}
// RemovePod from pre-computed data in cycleState.
func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
state.updateWithPod(podInfoToRemove, nodeInfo.Node(), -1)
return nil
}
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
return nil, fmt.Errorf("error reading %q from cycleState: %w", preFilterStateKey, err)
}
s, ok := c.(*preFilterState)
if !ok {
return nil, fmt.Errorf("%+v convert to interpodaffinity.state error", c)
}
return s, nil
}
// Checks if scheduling the pod onto this node would break any anti-affinity
// terms indicated by the existing pods.
func satisfyExistingPodsAntiAffinity(state *preFilterState, nodeInfo fwk.NodeInfo) bool {
if len(state.existingAntiAffinityCounts) > 0 {
// Iterate over topology pairs to get any of the pods being affected by
// the scheduled pod anti-affinity terms
for topologyKey, topologyValue := range nodeInfo.Node().Labels {
tp := topologyPair{key: topologyKey, value: topologyValue}
if state.existingAntiAffinityCounts[tp] > 0 {
return false
}
}
}
return true
}
// Checks if the node satisfies the incoming pod's anti-affinity rules.
func satisfyPodAntiAffinity(state *preFilterState, nodeInfo fwk.NodeInfo) bool {
if len(state.antiAffinityCounts) > 0 {
for _, term := range state.podInfo.GetRequiredAntiAffinityTerms() {
if topologyValue, ok := nodeInfo.Node().Labels[term.TopologyKey]; ok {
tp := topologyPair{key: term.TopologyKey, value: topologyValue}
if state.antiAffinityCounts[tp] > 0 {
return false
}
}
}
}
return true
}
// Checks if the node satisfies the incoming pod's affinity rules.
func satisfyPodAffinity(state *preFilterState, nodeInfo fwk.NodeInfo) bool {
podsExist := true
for _, term := range state.podInfo.GetRequiredAffinityTerms() {
if topologyValue, ok := nodeInfo.Node().Labels[term.TopologyKey]; ok {
tp := topologyPair{key: term.TopologyKey, value: topologyValue}
if state.affinityCounts[tp] <= 0 {
podsExist = false
}
} else {
// All topology labels must exist on the node.
return false
}
}
if !podsExist {
// This pod may be the first pod in a series of pods that have affinity to themselves. In order
// not to leave such pods in pending state forever, we check that if no other pod
// in the cluster matches the namespace and selector of this pod, the pod matches
// its own terms, and the node has all the requested topologies, then we allow the pod
// to pass the affinity check.
if len(state.affinityCounts) == 0 && podMatchesAllAffinityTerms(state.podInfo.GetRequiredAffinityTerms(), state.podInfo.GetPod()) {
return true
}
return false
}
return true
}
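// Example of the self-affinity escape hatch (hypothetical pod): the first
// replica of a workload whose affinity selects its own labels finds no
// matching pod anywhere (state.affinityCounts is empty), yet matches its own
// terms, so it may pass the affinity check on any node that carries the
// requested topology keys instead of staying Pending forever.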
// Filter invoked at the filter extension point.
// It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
if !satisfyPodAffinity(state, nodeInfo) {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonAffinityRulesNotMatch)
}
if !satisfyPodAntiAffinity(state, nodeInfo) {
return fwk.NewStatus(fwk.Unschedulable, ErrReasonAntiAffinityRulesNotMatch)
}
if !satisfyExistingPodsAntiAffinity(state, nodeInfo) {
return fwk.NewStatus(fwk.Unschedulable, ErrReasonExistingAntiAffinityRulesNotMatch)
}
return nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interpodaffinity
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.InterPodAffinity
var _ fwk.PreFilterPlugin = &InterPodAffinity{}
var _ fwk.FilterPlugin = &InterPodAffinity{}
var _ fwk.PreScorePlugin = &InterPodAffinity{}
var _ fwk.ScorePlugin = &InterPodAffinity{}
var _ fwk.EnqueueExtensions = &InterPodAffinity{}
// InterPodAffinity is a plugin that checks inter pod affinity
type InterPodAffinity struct {
parallelizer fwk.Parallelizer
args config.InterPodAffinityArgs
sharedLister fwk.SharedLister
nsLister listersv1.NamespaceLister
enableSchedulingQueueHint bool
}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *InterPodAffinity) Name() string {
return Name
}
// EventsToRegister returns the possible events that may make a failed Pod
// schedulable
func (pl *InterPodAffinity) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A note about UpdateNodeTaint event:
// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint
if pl.enableSchedulingQueueHint {
// When QueueingHint is enabled, we don't use preCheck and we don't need to register UpdateNodeTaint event.
nodeActionType = fwk.Add | fwk.UpdateNodeLabel
}
return []fwk.ClusterEventWithHint{
// All ActionType includes the following events:
// - Delete. An unschedulable Pod may fail due to violating an existing Pod's anti-affinity constraints,
// deleting an existing Pod may make it schedulable.
// - UpdatePodLabel. Updating on an existing Pod's labels (e.g., removal) may make
// an unschedulable Pod schedulable.
// - Add. An unschedulable Pod may fail due to violating pod-affinity constraints,
// adding an assigned Pod may make it schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Add | fwk.UpdatePodLabel | fwk.Delete}, QueueingHintFn: pl.isSchedulableAfterPodChange},
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
}, nil
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
if err := validation.ValidateInterPodAffinityArgs(nil, &args); err != nil {
return nil, err
}
pl := &InterPodAffinity{
parallelizer: h.Parallelizer(),
args: args,
sharedLister: h.SnapshotSharedLister(),
nsLister: h.SharedInformerFactory().Core().V1().Namespaces().Lister(),
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}
return pl, nil
}
func getArgs(obj runtime.Object) (config.InterPodAffinityArgs, error) {
ptr, ok := obj.(*config.InterPodAffinityArgs)
if !ok {
return config.InterPodAffinityArgs{}, fmt.Errorf("want args to be of type InterPodAffinityArgs, got %T", obj)
}
return *ptr, nil
}
// Updates Namespaces with the set of namespaces identified by NamespaceSelector.
// If successful, NamespaceSelector is set to nil.
// The assumption is that the term is for an incoming pod, in which case
// namespaceSelector is either unrolled into Namespaces (and so the selector
// is set to Nothing()) or is Empty(), which means match everything. Therefore,
// when matching against this term, there is no need to look up the existing
// pod's namespace labels to match them against the term's namespaceSelector explicitly.
func (pl *InterPodAffinity) mergeAffinityTermNamespacesIfNotEmpty(at fwk.AffinityTerm) error {
if at.NamespaceSelector.Empty() {
return nil
}
ns, err := pl.nsLister.List(at.NamespaceSelector)
if err != nil {
return err
}
for _, n := range ns {
at.Namespaces.Insert(n.Name)
}
at.NamespaceSelector = labels.Nothing()
return nil
}
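// Sketch (hypothetical selector): a term whose NamespaceSelector is
// "team=storage" and which matches namespaces ns1 and ns2 ends up with
// Namespaces = {ns1, ns2} and NamespaceSelector = labels.Nothing(), so later
// matching never needs to consult namespace labels for this term again.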
// GetNamespaceLabelsSnapshot returns a snapshot of the labels associated with
// the namespace.
func GetNamespaceLabelsSnapshot(logger klog.Logger, ns string, nsLister listersv1.NamespaceLister) (nsLabels labels.Set) {
podNS, err := nsLister.Get(ns)
if err == nil {
// Create and return snapshot of the labels.
return labels.Merge(podNS.Labels, nil)
}
logger.V(3).Info("getting namespace, assuming empty set of namespace labels", "namespace", ns, "err", err)
return
}
func (pl *InterPodAffinity) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalPod, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if (modifiedPod != nil && modifiedPod.Spec.NodeName == "") || (originalPod != nil && originalPod.Spec.NodeName == "") {
logger.V(5).Info("the added/updated/deleted pod is unscheduled, so it doesn't make the target pod schedulable",
"pod", klog.KObj(pod), "originalPod", klog.KObj(originalPod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
terms, err := fwk.GetAffinityTerms(pod, fwk.GetPodAffinityTerms(pod.Spec.Affinity))
if err != nil {
return fwk.Queue, err
}
antiTerms, err := fwk.GetAffinityTerms(pod, fwk.GetPodAntiAffinityTerms(pod.Spec.Affinity))
if err != nil {
return fwk.Queue, err
}
// Pod is updated. Return Queue when the updated pod now matches the target pod's affinity or no longer matches its anti-affinity.
// Note that we don't need to check each affinity term individually when the Pod has more than one,
// because the current PodAffinity looks for a **single** existing pod that can satisfy **all** the terms of inter-pod affinity of an incoming pod.
if modifiedPod != nil && originalPod != nil {
if !podMatchesAllAffinityTerms(terms, originalPod) && podMatchesAllAffinityTerms(terms, modifiedPod) {
logger.V(5).Info("a scheduled pod was updated to match the target pod's affinity, and the pod may be schedulable now",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
if podMatchesAllAffinityTerms(antiTerms, originalPod) && !podMatchesAllAffinityTerms(antiTerms, modifiedPod) {
logger.V(5).Info("a scheduled pod was updated not to match the target pod's anti affinity, and the pod may be schedulable now",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
logger.V(5).Info("a scheduled pod was updated but it doesn't match the target pod's affinity or does match the target pod's anti-affinity",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
// Pod is added. Return Queue when the added pod matches the target pod's affinity.
if modifiedPod != nil {
if podMatchesAllAffinityTerms(terms, modifiedPod) {
logger.V(5).Info("a scheduled pod was added and it matches the target pod's affinity",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
logger.V(5).Info("a scheduled pod was added and it doesn't match the target pod's affinity",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
// Pod is deleted. Return Queue when the deleted pod matched the target pod's anti-affinity.
if !podMatchesAllAffinityTerms(antiTerms, originalPod) {
logger.V(5).Info("a scheduled pod was deleted but it doesn't match the target pod's anti-affinity",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
logger.V(5).Info("a scheduled pod was deleted and it matches the target pod's anti-affinity. The pod may be schedulable now",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
func (pl *InterPodAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
terms, err := fwk.GetAffinityTerms(pod, fwk.GetPodAffinityTerms(pod.Spec.Affinity))
if err != nil {
return fwk.Queue, err
}
// When queuing this Pod:
// - 1. A new node is added with the pod affinity topologyKey, the pod may become schedulable.
// - 2. The original node does not have the pod affinity topologyKey but the modified node does, the pod may become schedulable.
// - 3. Both the original and modified nodes have the pod affinity topologyKey and they differ, the pod may become schedulable.
for _, term := range terms {
if originalNode == nil {
if _, ok := modifiedNode.Labels[term.TopologyKey]; ok {
// Case 1: A new node is added with the pod affinity topologyKey.
logger.V(5).Info("A node with a matched pod affinity topologyKey was added and it may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
continue
}
originalTopologyValue, originalHasKey := originalNode.Labels[term.TopologyKey]
modifiedTopologyValue, modifiedHasKey := modifiedNode.Labels[term.TopologyKey]
if !originalHasKey && modifiedHasKey {
// Case 2: Original node does not have the pod affinity topologyKey, but the modified node does.
logger.V(5).Info("A node got updated to have the topology key of pod affinity, which may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
if originalHasKey && modifiedHasKey && (originalTopologyValue != modifiedTopologyValue) {
// Case 3: Both nodes have the pod affinity topologyKey, but the values differ.
logger.V(5).Info("A node is moved to a different domain of pod affinity, which may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
}
antiTerms, err := fwk.GetAffinityTerms(pod, fwk.GetPodAntiAffinityTerms(pod.Spec.Affinity))
if err != nil {
return fwk.Queue, err
}
// When queuing this Pod:
// - 1. A new node is added, the pod may become schedulable.
// - 2. The original node has the pod anti-affinity topologyKey but the modified node does not, the pod may become schedulable.
// - 3. Both the original and modified nodes have the pod anti-affinity topologyKey and they differ, the pod may become schedulable.
for _, term := range antiTerms {
if originalNode == nil {
// Case 1: A new node is added.
// We always requeue the Pod with anti-affinity because:
// - the node without the topology key is always allowed to have a Pod with anti-affinity.
// - the addition of a node with the topology key makes Pods schedulable only when the topology it joins doesn't have any Pods that the Pod hates.
// But, it's out-of-scope of this QHint to check which Pods are in the topology this Node is in.
logger.V(5).Info("A node was added and it may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
originalTopologyValue, originalHasKey := originalNode.Labels[term.TopologyKey]
modifiedTopologyValue, modifiedHasKey := modifiedNode.Labels[term.TopologyKey]
if originalHasKey && !modifiedHasKey {
// Case 2: The original node has the pod anti-affinity topologyKey but the modified node does not.
// Note that we don't need to check the opposite case (!originalHasKey && modifiedHasKey)
// because the node without the topology label can always accept pods with pod anti-affinity.
logger.V(5).Info("A node got updated to not have the topology key of pod anti-affinity, which may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
if originalHasKey && modifiedHasKey && (originalTopologyValue != modifiedTopologyValue) {
// Case 3: Both nodes have the pod anti-affinity topologyKey, but the values differ.
logger.V(5).Info("A node is moved to a different domain of pod anti-affinity, which may make the pod schedulable",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
}
logger.V(5).Info("a node is added/updated but doesn't have any topologyKey which matches pod affinity/anti-affinity",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interpodaffinity
import (
"context"
"fmt"
"math"
"sync/atomic"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// preScoreStateKey is the key in CycleState to InterPodAffinity pre-computed data for Scoring.
const preScoreStateKey = "PreScore" + Name
type scoreMap map[string]map[string]int64
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
topologyScore scoreMap
podInfo fwk.PodInfo
// A copy of the incoming pod's namespace labels.
namespaceLabels labels.Set
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() fwk.StateData {
return s
}
func (m scoreMap) processTerm(term *fwk.AffinityTerm, weight int32, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, multiplier int32) {
if term.Matches(pod, nsLabels) {
if tpValue, tpValueExist := node.Labels[term.TopologyKey]; tpValueExist {
if m[term.TopologyKey] == nil {
m[term.TopologyKey] = make(map[string]int64)
}
m[term.TopologyKey][tpValue] += int64(weight * multiplier)
}
}
}
func (m scoreMap) processTerms(terms []fwk.WeightedAffinityTerm, pod *v1.Pod, nsLabels labels.Set, node *v1.Node, multiplier int32) {
for _, term := range terms {
m.processTerm(&term.AffinityTerm, term.Weight, pod, nsLabels, node, multiplier)
}
}
func (m scoreMap) append(other scoreMap) {
for topology, oScores := range other {
scores := m[topology]
if scores == nil {
m[topology] = oScores
continue
}
for k, v := range oScores {
scores[k] += v
}
}
}
func (pl *InterPodAffinity) processExistingPod(
state *preScoreState,
existingPod fwk.PodInfo,
existingPodNodeInfo fwk.NodeInfo,
incomingPod *v1.Pod,
topoScore scoreMap,
) {
existingPodNode := existingPodNodeInfo.Node()
if len(existingPodNode.Labels) == 0 {
return
}
// For every soft pod affinity term of <pod>, if <existingPod> matches the term,
// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
// Note that the incoming pod's terms have the namespaceSelector merged into the namespaces, and so
// here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels.
topoScore.processTerms(state.podInfo.GetPreferredAffinityTerms(), existingPod.GetPod(), nil, existingPodNode, 1)
// For every soft pod anti-affinity term of <pod>, if <existingPod> matches the term,
// decrement <p.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
// Note that the incoming pod's terms have the namespaceSelector merged into the namespaces, and so
// here we don't lookup the existing pod's namespace labels, hence passing nil for nsLabels.
topoScore.processTerms(state.podInfo.GetPreferredAntiAffinityTerms(), existingPod.GetPod(), nil, existingPodNode, -1)
// For every hard pod affinity term of <existingPod>, if <pod> matches the term,
// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the constant <args.hardPodAffinityWeight>
if pl.args.HardPodAffinityWeight > 0 && len(existingPodNode.Labels) != 0 {
for _, t := range existingPod.GetRequiredAffinityTerms() {
topoScore.processTerm(&t, pl.args.HardPodAffinityWeight, incomingPod, state.namespaceLabels, existingPodNode, 1)
}
}
// For every soft pod affinity term of <existingPod>, if <pod> matches the term,
// increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
topoScore.processTerms(existingPod.GetPreferredAffinityTerms(), incomingPod, state.namespaceLabels, existingPodNode, 1)
// For every soft pod anti-affinity term of <existingPod>, if <pod> matches the term,
// decrement <p.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
topoScore.processTerms(existingPod.GetPreferredAntiAffinityTerms(), incomingPod, state.namespaceLabels, existingPodNode, -1)
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *InterPodAffinity) PreScore(
pCtx context.Context,
cycleState fwk.CycleState,
pod *v1.Pod,
nodes []fwk.NodeInfo,
) *fwk.Status {
if pl.sharedLister == nil {
return fwk.NewStatus(fwk.Error, "empty shared lister in InterPodAffinity PreScore")
}
affinity := pod.Spec.Affinity
hasPreferredAffinityConstraints := affinity != nil && affinity.PodAffinity != nil && len(affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0
hasPreferredAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0
hasConstraints := hasPreferredAffinityConstraints || hasPreferredAntiAffinityConstraints
// Optionally ignore calculating preferences of existing pods' affinity rules
// if the incoming pod has no inter-pod affinities.
if pl.args.IgnorePreferredTermsOfExistingPods && !hasConstraints {
return fwk.NewStatus(fwk.Skip)
}
// Unless the pod being scheduled has preferred affinity terms, we only
// need to process nodes hosting pods with affinity.
var allNodes []fwk.NodeInfo
var err error
if hasConstraints {
allNodes, err = pl.sharedLister.NodeInfos().List()
if err != nil {
return fwk.AsStatus(fmt.Errorf("failed to get all nodes from shared lister: %w", err))
}
} else {
allNodes, err = pl.sharedLister.NodeInfos().HavePodsWithAffinityList()
if err != nil {
return fwk.AsStatus(fmt.Errorf("failed to get pods with affinity list: %w", err))
}
}
state := &preScoreState{
topologyScore: make(map[string]map[string]int64),
}
if state.podInfo, err = framework.NewPodInfo(pod); err != nil {
// Ideally we never reach here, because errors will be caught by PreFilter
return fwk.AsStatus(fmt.Errorf("failed to parse pod: %w", err))
}
for i := range state.podInfo.GetPreferredAffinityTerms() {
if err := pl.mergeAffinityTermNamespacesIfNotEmpty(state.podInfo.GetPreferredAffinityTerms()[i].AffinityTerm); err != nil {
return fwk.AsStatus(fmt.Errorf("updating PreferredAffinityTerms: %w", err))
}
}
for i := range state.podInfo.GetPreferredAntiAffinityTerms() {
if err := pl.mergeAffinityTermNamespacesIfNotEmpty(state.podInfo.GetPreferredAntiAffinityTerms()[i].AffinityTerm); err != nil {
return fwk.AsStatus(fmt.Errorf("updating PreferredAntiAffinityTerms: %w", err))
}
}
logger := klog.FromContext(pCtx)
state.namespaceLabels = GetNamespaceLabelsSnapshot(logger, pod.Namespace, pl.nsLister)
topoScores := make([]scoreMap, len(allNodes))
index := int32(-1)
processNode := func(i int) {
nodeInfo := allNodes[i]
// Unless the pod being scheduled has preferred affinity terms, we only
// need to process pods with affinity in the node.
podsToProcess := nodeInfo.GetPodsWithAffinity()
if hasConstraints {
// We need to process all the pods.
podsToProcess = nodeInfo.GetPods()
}
topoScore := make(scoreMap)
for _, existingPod := range podsToProcess {
pl.processExistingPod(state, existingPod, nodeInfo, pod, topoScore)
}
if len(topoScore) > 0 {
topoScores[atomic.AddInt32(&index, 1)] = topoScore
}
}
pl.parallelizer.Until(pCtx, len(allNodes), processNode, pl.Name())
if index == -1 {
return fwk.NewStatus(fwk.Skip)
}
for i := 0; i <= int(index); i++ {
state.topologyScore.append(topoScores[i])
}
cycleState.Write(preScoreStateKey, state)
return nil
}
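// The loop above parallelizes per-node work and then merges serially. A
// self-contained sketch of that accumulation pattern (hedged; a plain
// goroutine fan-out stands in for pl.parallelizer, and computeTopoScore is
// a hypothetical per-node worker):
//
//	results := make([]scoreMap, numNodes)
//	index := int32(-1)
//	var wg sync.WaitGroup
//	for i := 0; i < numNodes; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			if topo := computeTopoScore(i); len(topo) > 0 {
//				// atomic.AddInt32 hands each non-empty result a unique slot.
//				results[atomic.AddInt32(&index, 1)] = topo
//			}
//		}(i)
//	}
//	wg.Wait()
//	// Only slots 0..index are populated; merge them with scoreMap.append.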
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err)
}
s, ok := c.(*preScoreState)
if !ok {
return nil, fmt.Errorf("%+v convert to interpodaffinity.preScoreState error", c)
}
return s, nil
}
// Score is invoked at the Score extension point.
// The "score" returned by this function is the sum of the weights read from cycleState whose
// topologyKey matches the node's labels; it is normalized later.
// Note: the returned "score" is positive for pod-affinity, and negative for pod-antiaffinity.
func (pl *InterPodAffinity) Score(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(cycleState)
if err != nil {
return 0, fwk.AsStatus(err)
}
var score int64
for tpKey, tpValues := range s.topologyScore {
if v, exist := node.Labels[tpKey]; exist {
score += tpValues[v]
}
}
return score, nil
}
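// Worked example (illustrative values, not from the source): if PreScore
// produced topologyScore = {"zone": {"zone-a": 5, "zone-b": -2}}, a node
// labeled zone=zone-b scores -2, a node labeled zone=zone-a scores 5, and a
// node without the "zone" label contributes 0 for that key.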
// NormalizeScore normalizes the score for each filteredNode.
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
if len(s.topologyScore) == 0 {
return nil
}
var minCount int64 = math.MaxInt64
var maxCount int64 = math.MinInt64
for i := range scores {
score := scores[i].Score
if score > maxCount {
maxCount = score
}
if score < minCount {
minCount = score
}
}
maxMinDiff := maxCount - minCount
for i := range scores {
fScore := float64(0)
if maxMinDiff > 0 {
fScore = float64(fwk.MaxNodeScore) * (float64(scores[i].Score-minCount) / float64(maxMinDiff))
}
scores[i].Score = int64(fScore)
}
return nil
}
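// Worked example (illustrative): with raw scores [-10, 0, 30] and
// fwk.MaxNodeScore of 100, minCount=-10, maxCount=30, maxMinDiff=40, so:
//
//	node1: 100 * (-10 - (-10)) / 40 = 0
//	node2: 100 * (  0 - (-10)) / 40 = 25
//	node3: 100 * ( 30 - (-10)) / 40 = 100
//
// When all raw scores are equal, maxMinDiff is 0 and every node scores 0.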
// ScoreExtensions of the Score plugin.
func (pl *InterPodAffinity) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeaffinity
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// NodeAffinity is a plugin that checks if a pod's node selector matches the node's labels.
type NodeAffinity struct {
handle fwk.Handle
addedNodeSelector *nodeaffinity.NodeSelector
addedPrefSchedTerms *nodeaffinity.PreferredSchedulingTerms
enableSchedulingQueueHint bool
}
var _ fwk.PreFilterPlugin = &NodeAffinity{}
var _ fwk.FilterPlugin = &NodeAffinity{}
var _ fwk.PreScorePlugin = &NodeAffinity{}
var _ fwk.ScorePlugin = &NodeAffinity{}
var _ fwk.EnqueueExtensions = &NodeAffinity{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.NodeAffinity
// preScoreStateKey is the key in CycleState to NodeAffinity pre-computed data for Scoring.
preScoreStateKey = "PreScore" + Name
// preFilterStateKey is the key in CycleState to NodeAffinity pre-compute data for Filtering.
preFilterStateKey = "PreFilter" + Name
// ErrReasonPod is the reason for Pod's node affinity/selector not matching.
ErrReasonPod = "node(s) didn't match Pod's node affinity/selector"
// errReasonEnforced is the reason for added node affinity not matching.
errReasonEnforced = "node(s) didn't match scheduler-enforced node affinity"
// errReasonConflict is the reason for pod's conflicting affinity rules.
errReasonConflict = "pod affinity terms conflict"
)
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeAffinity) Name() string {
return Name
}
type preFilterState struct {
requiredNodeSelectorAndAffinity nodeaffinity.RequiredNodeAffinity
}
// Clone just returns the same state because it is not affected by pod additions or deletions.
func (s *preFilterState) Clone() fwk.StateData {
return s
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *NodeAffinity) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A note about UpdateNodeTaint event:
// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint
if pl.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled, and hence we can use UpdateNodeLabel instead of Update.
nodeActionType = fwk.Add | fwk.UpdateNodeLabel
}
return []fwk.ClusterEventWithHint{
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
}, nil
}
// isSchedulableAfterNodeChange is invoked whenever a node is changed. It checks whether
// that change made a previously unschedulable pod schedulable.
func (pl *NodeAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(modifiedNode) {
logger.V(4).Info("added or modified node didn't match scheduler-enforced node affinity and this event won't make the Pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
isMatched, err := requiredNodeAffinity.Match(modifiedNode)
if err != nil {
return fwk.Queue, err
}
if !isMatched {
logger.V(5).Info("node was created or updated, but the pod's NodeAffinity doesn't match", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
// Since the node was added and it matches the pod's affinity criteria, we can unblock it.
if originalNode == nil {
logger.V(5).Info("node was created, and matches with the pod's NodeAffinity", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
// At this point we know the operation is an update, so we can narrow the criteria down to
// unmatch -> match transitions only (a necessary affinity label was added to the node in this case).
wasMatched, err := requiredNodeAffinity.Match(originalNode)
if err != nil {
return fwk.Queue, err
}
if wasMatched {
logger.V(5).Info("node updated, but the pod's NodeAffinity hasn't changed", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
logger.V(5).Info("node was updated and the pod's NodeAffinity changed to matched", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
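// Summary of the decisions above (illustrative), keyed on the old/new
// affinity match results:
//
//	node added,   matches           -> Queue
//	node updated, unmatch -> match  -> Queue
//	node updated, match -> match    -> QueueSkip (outcome unchanged)
//	any event,    doesn't match     -> QueueSkip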
// PreFilter builds and writes cycle state used by Filter.
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
affinity := pod.Spec.Affinity
noNodeAffinity := (affinity == nil ||
affinity.NodeAffinity == nil ||
affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil)
if noNodeAffinity && pl.addedNodeSelector == nil && pod.Spec.NodeSelector == nil {
// NodeAffinity Filter has nothing to do with the Pod.
return nil, fwk.NewStatus(fwk.Skip)
}
state := &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)}
cycleState.Write(preFilterStateKey, state)
if noNodeAffinity || len(affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 {
return nil, nil
}
// Check if there is affinity to a specific node and return it.
terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
var nodeNames sets.Set[string]
for _, t := range terms {
var termNodeNames sets.Set[string]
for _, r := range t.MatchFields {
if r.Key == metav1.ObjectNameField && r.Operator == v1.NodeSelectorOpIn {
// The requirements represent ANDed constraints, and so we need to
// find the intersection of nodes.
s := sets.New(r.Values...)
if termNodeNames == nil {
termNodeNames = s
} else {
termNodeNames = termNodeNames.Intersection(s)
}
}
}
if termNodeNames == nil {
// If this term has no node.Name field affinity,
// then all nodes are eligible because the terms are ORed.
return nil, nil
}
nodeNames = nodeNames.Union(termNodeNames)
}
// If nodeNames is not nil but its length is 0, each term has conflicting affinity to node.Name;
// therefore, the pod will not match any node.
if nodeNames != nil && len(nodeNames) == 0 {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, errReasonConflict)
} else if len(nodeNames) > 0 {
return &fwk.PreFilterResult{NodeNames: nodeNames}, nil
}
return nil, nil
}
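// Worked example (illustrative node names): terms are ORed while matchFields
// requirements within a single term are ANDed, so
//
//	term1: metadata.name In [n1, n2] AND metadata.name In [n2, n3] -> {n2} (intersection)
//	term2: metadata.name In [n4]                                   -> {n4}
//	result: PreFilterResult{NodeNames: {n2, n4}}                   (union)
//
// A term without any metadata.name matchFields widens the result to all
// nodes, and an all-empty union yields UnschedulableAndUnresolvable.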
// PreFilterExtensions not necessary for this plugin as state doesn't depend on pod additions or deletions.
func (pl *NodeAffinity) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
// Filter checks if the Node matches the Pod .spec.affinity.nodeAffinity and
// the plugin's added affinity.
func (pl *NodeAffinity) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
node := nodeInfo.Node()
if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(node) {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, errReasonEnforced)
}
s, err := getPreFilterState(state)
if err != nil {
// Fallback to calculate requiredNodeSelector and requiredNodeAffinity
// here when PreFilter is disabled.
s = &preFilterState{requiredNodeSelectorAndAffinity: nodeaffinity.GetRequiredNodeAffinity(pod)}
}
// Ignore parsing errors for backwards compatibility.
match, _ := s.requiredNodeSelectorAndAffinity.Match(node)
if !match {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonPod)
}
return nil
}
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
preferredNodeAffinity *nodeaffinity.PreferredSchedulingTerms
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
preferredNodeAffinity, err := getPodPreferredNodeAffinity(pod)
if err != nil {
return fwk.AsStatus(err)
}
if preferredNodeAffinity == nil && pl.addedPrefSchedTerms == nil {
// NodeAffinity Score has nothing to do with the Pod.
return fwk.NewStatus(fwk.Skip)
}
state := &preScoreState{
preferredNodeAffinity: preferredNodeAffinity,
}
cycleState.Write(preScoreStateKey, state)
return nil
}
// Score returns the sum of the weights of the terms that match the Node.
// Terms come from the Pod .spec.affinity.nodeAffinity and from the plugin's
// default affinity.
func (pl *NodeAffinity) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
node := nodeInfo.Node()
var count int64
if pl.addedPrefSchedTerms != nil {
count += pl.addedPrefSchedTerms.Score(node)
}
s, err := getPreScoreState(state)
if err != nil {
// Fallback to calculate preferredNodeAffinity here when PreScore is disabled.
preferredNodeAffinity, err := getPodPreferredNodeAffinity(pod)
if err != nil {
return 0, fwk.AsStatus(err)
}
s = &preScoreState{
preferredNodeAffinity: preferredNodeAffinity,
}
}
if s.preferredNodeAffinity != nil {
count += s.preferredNodeAffinity.Score(node)
}
return count, nil
}
// NormalizeScore invoked after scoring all nodes.
func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(fwk.MaxNodeScore, false, scores)
}
// ScoreExtensions of the Score plugin.
func (pl *NodeAffinity) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
pl := &NodeAffinity{
handle: h,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}
if args.AddedAffinity != nil {
if ns := args.AddedAffinity.RequiredDuringSchedulingIgnoredDuringExecution; ns != nil {
pl.addedNodeSelector, err = nodeaffinity.NewNodeSelector(ns)
if err != nil {
return nil, fmt.Errorf("parsing addedAffinity.requiredDuringSchedulingIgnoredDuringExecution: %w", err)
}
}
// TODO: parse requiredDuringSchedulingRequiredDuringExecution when it gets added to the API.
if terms := args.AddedAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(terms) != 0 {
pl.addedPrefSchedTerms, err = nodeaffinity.NewPreferredSchedulingTerms(terms)
if err != nil {
return nil, fmt.Errorf("parsing addedAffinity.preferredDuringSchedulingIgnoredDuringExecution: %w", err)
}
}
}
return pl, nil
}
func getArgs(obj runtime.Object) (config.NodeAffinityArgs, error) {
ptr, ok := obj.(*config.NodeAffinityArgs)
if !ok {
return config.NodeAffinityArgs{}, fmt.Errorf("args are not of type NodeAffinityArgs, got %T", obj)
}
return *ptr, validation.ValidateNodeAffinityArgs(nil, ptr)
}
func getPodPreferredNodeAffinity(pod *v1.Pod) (*nodeaffinity.PreferredSchedulingTerms, error) {
affinity := pod.Spec.Affinity
if affinity != nil && affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
return nodeaffinity.NewPreferredSchedulingTerms(affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)
}
return nil, nil
}
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err)
}
s, ok := c.(*preScoreState)
if !ok {
return nil, fmt.Errorf("invalid PreScore state, got type %T", c)
}
return s, nil
}
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %v", preFilterStateKey, err)
}
s, ok := c.(*preFilterState)
if !ok {
return nil, fmt.Errorf("invalid PreFilter state, got type %T", c)
}
return s, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodename
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// NodeName is a plugin that checks if a pod spec node name matches the current node.
type NodeName struct {
enableSchedulingQueueHint bool
}
var _ fwk.FilterPlugin = &NodeName{}
var _ fwk.EnqueueExtensions = &NodeName{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.NodeName
// ErrReason returned when node name doesn't match.
ErrReason = "node(s) didn't match the requested node name"
)
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *NodeName) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A note about UpdateNodeTaint/UpdateNodeLabel event:
// Ideally, it's supposed to register only Add because any Node update event will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel
if pl.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled, and hence Update event isn't necessary.
nodeActionType = fwk.Add
}
return []fwk.ClusterEventWithHint{
// We don't need the QueueingHintFn here because the scheduling of Pods will always be retried with backoff when this Event happens.
// (the same as Queue)
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
}, nil
}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeName) Name() string {
return Name
}
// Filter invoked at the filter extension point.
func (pl *NodeName) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
if !Fits(pod, nodeInfo) {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReason)
}
return nil
}
// Fits actually checks if the pod fits the node.
func Fits(pod *v1.Pod, nodeInfo fwk.NodeInfo) bool {
return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name
}
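// Illustrative truth table for Fits (not part of the original source):
//
//	pod.Spec.NodeName == ""        -> true (the pod can go to any node)
//	pod.Spec.NodeName == node.Name -> true
//	pod.Spec.NodeName != node.Name -> false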
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodeName{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeports
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
type NodePorts struct {
enableSchedulingQueueHint bool
}
var _ fwk.PreFilterPlugin = &NodePorts{}
var _ fwk.FilterPlugin = &NodePorts{}
var _ fwk.EnqueueExtensions = &NodePorts{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.NodePorts
// preFilterStateKey is the key in CycleState to NodePorts pre-computed data.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + Name
// ErrReason when node ports aren't available.
ErrReason = "node(s) didn't have free ports for the requested pod ports"
)
type preFilterState []v1.ContainerPort
// Clone the prefilter state.
func (s preFilterState) Clone() fwk.StateData {
// The state is not impacted by adding/removing existing pods, hence we don't need to make a deep copy.
return s
}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodePorts) Name() string {
return Name
}
// PreFilter invoked at the prefilter extension point.
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
s := util.GetHostPorts(pod)
// Skip if a pod has no ports.
if len(s) == 0 {
return nil, fwk.NewStatus(fwk.Skip)
}
cycleState.Write(preFilterStateKey, preFilterState(s))
return nil, nil
}
// PreFilterExtensions do not exist for this plugin.
func (pl *NodePorts) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
func getPreFilterState(cycleState fwk.CycleState) (preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err)
}
s, ok := c.(preFilterState)
if !ok {
return nil, fmt.Errorf("%+v convert to nodeports.preFilterState error", c)
}
return s, nil
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *NodePorts) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A note about UpdateNodeTaint/UpdateNodeLabel event:
// Ideally, it's supposed to register only Add, because a Node update event can never free up ports for the Pod.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel
if pl.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled, and hence Update event isn't necessary.
nodeActionType = fwk.Add
}
return []fwk.ClusterEventWithHint{
// Due to immutable fields `spec.containers[*].ports`, pod update events are ignored.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted},
// We don't need the QueueingHintFn here because the scheduling of Pods will always be retried with backoff when this Event happens.
// (the same as Queue)
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
}, nil
}
// isSchedulableAfterPodDeleted is invoked whenever a pod is deleted. It checks whether
// that deletion made a previously unschedulable pod schedulable.
func (pl *NodePorts) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
deletedPod, _, err := util.As[*v1.Pod](oldObj, nil)
if err != nil {
return fwk.Queue, err
}
// If the deleted pod is unscheduled, it doesn't make the target pod schedulable.
if deletedPod.Spec.NodeName == "" && deletedPod.Status.NominatedNodeName == "" {
logger.V(4).Info("the deleted pod is unscheduled and it doesn't make the target pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.QueueSkip, nil
}
// If the deleted pod doesn't use any host ports, it doesn't make the target pod schedulable.
ports := util.GetHostPorts(deletedPod)
if len(ports) == 0 {
return fwk.QueueSkip, nil
}
// Check whether `pod` and the deleted pod have any host port(s) in common;
// if they don't, deleting that pod couldn't make `pod` schedulable.
portsInUse := make(fwk.HostPortInfo, len(ports))
for _, p := range ports {
portsInUse.Add(p.HostIP, string(p.Protocol), p.HostPort)
}
if fitsPorts(util.GetHostPorts(pod), portsInUse) {
logger.V(4).Info("the deleted pod and the target pod don't have any common port(s), returning QueueSkip as deleting this Pod won't make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.QueueSkip, nil
}
logger.V(4).Info("the deleted pod and the target pod have any common port(s), returning Queue as deleting this Pod may make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.Queue, nil
}
// Filter invoked at the filter extension point.
func (pl *NodePorts) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
wantPorts, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
fits := fitsPorts(wantPorts, nodeInfo.GetUsedPorts())
if !fits {
return fwk.NewStatus(fwk.Unschedulable, ErrReason)
}
return nil
}
// Fits checks if the pod has any ports conflicting with nodeInfo's ports.
// It returns true if there are no conflicts (meaning the pod fits the node), otherwise false.
func Fits(pod *v1.Pod, nodeInfo fwk.NodeInfo) bool {
return fitsPorts(util.GetHostPorts(pod), nodeInfo.GetUsedPorts())
}
func fitsPorts(wantPorts []v1.ContainerPort, portsInUse fwk.HostPortInfo) bool {
// try to see whether portsInUse and wantPorts will conflict or not
for _, cp := range wantPorts {
if portsInUse.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
return false
}
}
return true
}
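// A minimal sketch (hedged; the addresses and port are illustrative) of the
// conflict check performed by fitsPorts:
//
//	used := make(fwk.HostPortInfo)
//	used.Add("0.0.0.0", "TCP", 8080) // a scheduled pod already holds 8080/TCP on the wildcard IP
//	want := []v1.ContainerPort{{HostIP: "127.0.0.1", Protocol: v1.ProtocolTCP, HostPort: 8080}}
//	fitsPorts(want, used) // false: the wildcard address conflicts with any specific IP
//
// HostPortInfo treats 0.0.0.0 as overlapping every host IP on the same
// protocol/port pair, which is why the request above is rejected.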
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodePorts{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"fmt"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction
// of capacity, and prioritizes the host based on how close the two metrics are to each other.
type BalancedAllocation struct {
handle fwk.Handle
resourceAllocationScorer
}
var _ fwk.PreScorePlugin = &BalancedAllocation{}
var _ fwk.ScorePlugin = &BalancedAllocation{}
// BalancedAllocationName is the name of the plugin used in the plugin registry and configurations.
const (
BalancedAllocationName = names.NodeResourcesBalancedAllocation
// balancedAllocationPreScoreStateKey is the key in CycleState to NodeResourcesBalancedAllocation pre-computed data for Scoring.
balancedAllocationPreScoreStateKey = "PreScore" + BalancedAllocationName
)
// balancedAllocationPreScoreState computed at PreScore and used at Score.
type balancedAllocationPreScoreState struct {
// podRequests have the same order as the resources defined in NodeResourcesBalancedAllocationArgs.Resources;
// the same applies to other places where we store such a list.
podRequests []int64
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *balancedAllocationPreScoreState) Clone() fwk.StateData {
return s
}
// PreScore calculates the incoming pod's resource requests and writes them to the cycle state for use at Score.
func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
podRequests := ba.calculatePodResourceRequestList(pod, ba.resources)
if ba.isBestEffortPod(podRequests) {
// Skip BalancedAllocation scoring for best-effort pods to
// prevent a large number of pods from being scheduled to the same node.
// See https://github.com/kubernetes/kubernetes/issues/129138 for details.
return fwk.NewStatus(fwk.Skip)
}
state := &balancedAllocationPreScoreState{
podRequests: podRequests,
}
cycleState.Write(balancedAllocationPreScoreStateKey, state)
return nil
}
func getBalancedAllocationPreScoreState(cycleState fwk.CycleState) (*balancedAllocationPreScoreState, error) {
c, err := cycleState.Read(balancedAllocationPreScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", balancedAllocationPreScoreStateKey, err)
}
s, ok := c.(*balancedAllocationPreScoreState)
if !ok {
return nil, fmt.Errorf("invalid PreScore state, got type %T", c)
}
return s, nil
}
// Name returns name of the plugin. It is used in logs, etc.
func (ba *BalancedAllocation) Name() string {
return BalancedAllocationName
}
// Score invoked at the score extension point.
func (ba *BalancedAllocation) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
s, err := getBalancedAllocationPreScoreState(state)
if err != nil {
s = &balancedAllocationPreScoreState{podRequests: ba.calculatePodResourceRequestList(pod, ba.resources)}
if ba.isBestEffortPod(s.podRequests) {
return 0, nil
}
}
// ba.score favors nodes with balanced resource usage rate.
// It calculates the standard deviation for those resources and prioritizes the node based on how close the usage of those resources is to each other.
// Detail: score = (1 - std) * MaxNodeScore, where std is the square root of Σ((fraction(i)-mean)^2)/len(resources)
// The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
return ba.score(ctx, pod, nodeInfo, s.podRequests)
}
// ScoreExtensions of the Score plugin.
func (ba *BalancedAllocation) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// NewBalancedAllocation initializes a new plugin and returns it.
func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := baArgs.(*config.NodeResourcesBalancedAllocationArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type NodeResourcesBalancedAllocationArgs, got %T", baArgs)
}
if err := validation.ValidateNodeResourcesBalancedAllocationArgs(nil, args); err != nil {
return nil, err
}
return &BalancedAllocation{
handle: h,
resourceAllocationScorer: resourceAllocationScorer{
Name: BalancedAllocationName,
enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
enablePodLevelResources: fts.EnablePodLevelResources,
scorer: balancedResourceScorer,
useRequested: true,
resources: args.Resources,
},
}, nil
}
func balancedResourceScorer(requested, allocable []int64) int64 {
var resourceToFractions []float64
var totalFraction float64
for i := range requested {
if allocable[i] == 0 {
continue
}
fraction := float64(requested[i]) / float64(allocable[i])
if fraction > 1 {
fraction = 1
}
totalFraction += fraction
resourceToFractions = append(resourceToFractions, fraction)
}
std := 0.0
// In most cases resources are limited to cpu and memory, where the std can be simplified to std := |fraction1-fraction2|/2.
// For len(fractions) > 2, calculate std with the well-known formula: the square root of Σ((fraction(i)-mean)^2)/len(fractions).
// Otherwise, setting the std to zero is enough.
if len(resourceToFractions) == 2 {
std = math.Abs((resourceToFractions[0] - resourceToFractions[1]) / 2)
} else if len(resourceToFractions) > 2 {
mean := totalFraction / float64(len(resourceToFractions))
var sum float64
for _, fraction := range resourceToFractions {
sum = sum + (fraction-mean)*(fraction-mean)
}
std = math.Sqrt(sum / float64(len(resourceToFractions)))
}
// STD (standard deviation) is always non-negative. 1-std makes the score higher for the node with the least deviation, and
// multiplying it by `MaxNodeScore` provides the scaling factor needed.
return int64((1 - std) * float64(fwk.MaxNodeScore))
}
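// Worked example (illustrative fractions): with cpu and memory fractions
// 0.5 and 0.7,
//
//	std   = |0.5 - 0.7| / 2 = 0.1
//	score = (1 - 0.1) * 100 = 90
//
// With three fractions 0.2, 0.4 and 0.6: mean = 0.4,
// std = sqrt(((0.2-0.4)^2 + 0 + (0.6-0.4)^2)/3) ≈ 0.163, score = 83.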
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/extended"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
var _ fwk.PreFilterPlugin = &Fit{}
var _ fwk.FilterPlugin = &Fit{}
var _ fwk.EnqueueExtensions = &Fit{}
var _ fwk.PreScorePlugin = &Fit{}
var _ fwk.ScorePlugin = &Fit{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.NodeResourcesFit
// preFilterStateKey is the key in CycleState to NodeResourcesFit pre-computed data.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + Name
// preScoreStateKey is the key in CycleState to NodeResourcesFit pre-computed data for Scoring.
preScoreStateKey = "PreScore" + Name
)
// nodeResourceStrategyTypeMap maps strategy to scorer implementation
var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
config.LeastAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
resources := args.ScoringStrategy.Resources
return &resourceAllocationScorer{
Name: string(config.LeastAllocated),
scorer: leastResourceScorer(resources),
resources: resources,
}
},
config.MostAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
resources := args.ScoringStrategy.Resources
return &resourceAllocationScorer{
Name: string(config.MostAllocated),
scorer: mostResourceScorer(resources),
resources: resources,
}
},
config.RequestedToCapacityRatio: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
resources := args.ScoringStrategy.Resources
return &resourceAllocationScorer{
Name: string(config.RequestedToCapacityRatio),
scorer: requestedToCapacityRatioScorer(resources, args.ScoringStrategy.RequestedToCapacityRatio.Shape),
resources: resources,
}
},
}
// Fit is a plugin that checks if a node has sufficient resources.
type Fit struct {
ignoredResources sets.Set[string]
ignoredResourceGroups sets.Set[string]
enableInPlacePodVerticalScaling bool
enableSidecarContainers bool
enableSchedulingQueueHint bool
enablePodLevelResources bool
enableDRAExtendedResource bool
handle fwk.Handle
resourceAllocationScorer
}
// ScoreExtensions of the Score plugin.
func (f *Fit) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
framework.Resource
// resourceToDeviceClass holds the mapping of extended resource to device class name.
resourceToDeviceClass map[v1.ResourceName]string
}
// Clone the prefilter state.
func (s *preFilterState) Clone() fwk.StateData {
return s
}
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
// podRequests have the same order as the resources defined in NodeResourcesFitArgs.Resources;
// the same applies to other places where we store such a list.
podRequests []int64
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// PreScore calculates the incoming pod's resource requests and writes them to the cycle state for use at Score.
func (f *Fit) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
state := &preScoreState{
podRequests: f.calculatePodResourceRequestList(pod, f.resources),
}
cycleState.Write(preScoreStateKey, state)
return nil
}
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err)
}
s, ok := c.(*preScoreState)
if !ok {
return nil, fmt.Errorf("invalid PreScore state, got type %T", c)
}
return s, nil
}
// Name returns name of the plugin. It is used in logs, etc.
func (f *Fit) Name() string {
return Name
}
// NewFit initializes a new plugin and returns it.
func NewFit(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := plArgs.(*config.NodeResourcesFitArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs)
}
if err := validation.ValidateNodeResourcesFitArgs(nil, args); err != nil {
return nil, err
}
if args.ScoringStrategy == nil {
return nil, fmt.Errorf("scoring strategy not specified")
}
strategy := args.ScoringStrategy.Type
scorePlugin, exists := nodeResourceStrategyTypeMap[strategy]
if !exists {
return nil, fmt.Errorf("scoring strategy %s is not supported", strategy)
}
return &Fit{
ignoredResources: sets.New(args.IgnoredResources...),
ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...),
enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
enableSidecarContainers: fts.EnableSidecarContainers,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
handle: h,
enablePodLevelResources: fts.EnablePodLevelResources,
enableDRAExtendedResource: fts.EnableDRAExtendedResource,
resourceAllocationScorer: *scorePlugin(args),
}, nil
}
type ResourceRequestsOptions struct {
EnablePodLevelResources bool
EnableDRAExtendedResource bool
}
// computePodResourceRequest returns a framework.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
//
// # The resources defined for Overhead should be added to the calculated Resource request sum
//
// Example:
//
// Pod:
//
// InitContainers
// IC1:
// CPU: 2
// Memory: 1G
// IC2:
// CPU: 2
// Memory: 3G
// Containers
// C1:
// CPU: 2
// Memory: 1G
// C2:
// CPU: 1
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
// TODO(ndixita): modify computePodResourceRequest to accept opts of type
// ResourceRequestsOptions as the second parameter.
func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFilterState {
// The pod hasn't been scheduled yet, so we don't need to worry about InPlacePodVerticalScaling.
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !opts.EnablePodLevelResources,
})
result := &preFilterState{}
result.SetMaxResource(reqs)
return result
}
// withDeviceClass adds resource to device class mapping to preFilterState.
func withDeviceClass(result *preFilterState, draManager fwk.SharedDRAManager) *fwk.Status {
hasExtendedResource := false
for rName, rQuant := range result.ScalarResources {
// Skip in case request quantity is zero
if rQuant == 0 {
continue
}
if v1helper.IsExtendedResourceName(rName) {
hasExtendedResource = true
break
}
}
if hasExtendedResource {
resourceToDeviceClass, err := extended.DeviceClassMapping(draManager)
if err != nil {
return fwk.AsStatus(err)
}
result.resourceToDeviceClass = resourceToDeviceClass
if len(resourceToDeviceClass) == 0 {
// ensure it is empty map, not nil.
result.resourceToDeviceClass = make(map[v1.ResourceName]string, 0)
}
}
return nil
}
// PreFilter invoked at the prefilter extension point.
func (f *Fit) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !f.enableSidecarContainers && hasRestartableInitContainer(pod) {
// The scheduler will calculate the resource usage of a Pod containing
// restartable init containers to be equal to or higher than what the
// kubelet will require to run it, so there will be no overbooking. However, to
// avoid an inconsistency in resource calculation between the scheduler
// and older (before v1.28) kubelets, make the Pod unschedulable.
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled")
}
result := computePodResourceRequest(pod, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources})
if f.enableDRAExtendedResource {
if err := withDeviceClass(result, f.handle.SharedDRAManager()); err != nil {
return nil, err
}
}
cycleState.Write(preFilterStateKey, result)
return nil, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (f *Fit) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
return nil, fmt.Errorf("error reading %q from cycleState: %w", preFilterStateKey, err)
}
s, ok := c.(*preFilterState)
if !ok {
return nil, fmt.Errorf("%+v convert to NodeResourcesFit.preFilterState error", c)
}
return s, nil
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (f *Fit) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
podActionType := fwk.Delete
if f.enableInPlacePodVerticalScaling {
// If InPlacePodVerticalScaling (KEP 1287) is enabled, then UpdatePodScaleDown event should be registered
// for this plugin since a Pod update may free up resources that make other Pods schedulable.
podActionType |= fwk.UpdatePodScaleDown
}
// A note about UpdateNodeTaint/UpdateNodeLabel event:
// Ideally, it's supposed to register only Add | UpdateNodeAllocatable, because only a resource update could change this plugin's result.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeAllocatable | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel
if f.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled, and hence Update event isn't necessary.
nodeActionType = fwk.Add | fwk.UpdateNodeAllocatable
}
return []fwk.ClusterEventWithHint{
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodEvent},
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}, QueueingHintFn: f.isSchedulableAfterNodeChange},
}, nil
}
// isSchedulableAfterPodEvent is invoked whenever a pod is deleted or scaled down. It checks whether
// that change made a previously unschedulable pod schedulable.
func (f *Fit) isSchedulableAfterPodEvent(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalPod, modifiedPod, err := schedutil.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if modifiedPod == nil {
if originalPod.Spec.NodeName == "" && originalPod.Status.NominatedNodeName == "" {
logger.V(5).Info("the deleted pod was unscheduled and it wouldn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
return fwk.QueueSkip, nil
}
// any deletion event to a scheduled pod could make the unscheduled pod schedulable.
logger.V(5).Info("another scheduled pod was deleted, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
return fwk.Queue, nil
}
if !f.enableInPlacePodVerticalScaling {
// If InPlacePodVerticalScaling (KEP 1287) is disabled, the pod scale down event cannot free up any resources.
logger.V(5).Info("another pod was modified, but InPlacePodVerticalScaling is disabled, so it doesn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
if !f.isSchedulableAfterPodScaleDown(pod, originalPod, modifiedPod) {
if loggerV := logger.V(10); loggerV.Enabled() {
// Log more information.
loggerV.Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", diff.Diff(originalPod, modifiedPod))
} else {
logger.V(5).Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
}
return fwk.QueueSkip, nil
}
logger.V(5).Info("another scheduled pod or the target pod itself got scaled down, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
// isSchedulableAfterPodScaleDown checks whether the scale down event may make the target pod schedulable. Specifically:
// - Returns true when the update event is for the target pod itself.
// - Returns true when the update event shows a scheduled pod's resource request that the target pod also requests got reduced.
func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool {
if modifiedPod.UID == targetPod.UID {
// If the scaling down event is for targetPod, it would make targetPod schedulable.
return true
}
if modifiedPod.Spec.NodeName == "" {
// If the update event is for an unscheduled Pod,
// it wouldn't make targetPod schedulable.
return false
}
// the other pod was scheduled, so modification or deletion may free up some resources.
originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
// check whether the resource request of the modified pod is less than the original pod.
podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
for rName, rValue := range podRequests {
if rValue.IsZero() {
// We only care about the resources requested by the pod we are trying to schedule.
continue
}
switch rName {
case v1.ResourceCPU:
if originalMaxResourceReq.MilliCPU > modifiedMaxResourceReq.MilliCPU {
return true
}
case v1.ResourceMemory:
if originalMaxResourceReq.Memory > modifiedMaxResourceReq.Memory {
return true
}
case v1.ResourceEphemeralStorage:
if originalMaxResourceReq.EphemeralStorage > modifiedMaxResourceReq.EphemeralStorage {
return true
}
default:
if schedutil.IsScalarResourceName(rName) && originalMaxResourceReq.ScalarResources[rName] > modifiedMaxResourceReq.ScalarResources[rName] {
return true
}
}
}
return false
}
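// Worked example (illustrative): the target pod requests cpu=500m. A
// scheduled pod's cpu request drops from 2 to 1 (memory unchanged), so
// originalMaxResourceReq.MilliCPU (2000) > modifiedMaxResourceReq.MilliCPU
// (1000) and the function returns true. Had only memory shrunk, the
// cpu-only target pod would get false, since it doesn't request memory.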
// isSchedulableAfterNodeChange is invoked whenever a node is added or changed. It checks whether
// that change could make a previously unschedulable pod schedulable.
func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := schedutil.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
// Skip queueing, since the pod won't fit into the modified node anyway.
if !isFit(pod, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources, EnableDRAExtendedResource: f.enableDRAExtendedResource}) {
logger.V(5).Info("node was created or updated, but it doesn't have enough resource(s) to accommodate this pod", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
// The pod would fit, and this is a Node add event, so unblock scheduling.
if originalNode == nil {
logger.V(5).Info("node was added and it might fit the pod's resource requests", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
// The pod will fit, but since there was no increase in available resources, the change won't make the pod schedulable.
if !haveAnyRequestedResourcesIncreased(pod, originalNode, modifiedNode, ResourceRequestsOptions{EnablePodLevelResources: f.enablePodLevelResources, EnableDRAExtendedResource: f.enableDRAExtendedResource}) {
logger.V(5).Info("node was updated, but haven't changed the pod's resource requestments fit assessment", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
logger.V(5).Info("node was updated, and may now fit the pod's resource requests", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
// haveAnyRequestedResourcesIncreased returns true if any of the resources requested by the pod have increased or if allowed pod number increased.
func haveAnyRequestedResourcesIncreased(pod *v1.Pod, originalNode, modifiedNode *v1.Node, opts ResourceRequestsOptions) bool {
podRequest := computePodResourceRequest(pod, opts)
originalNodeInfo := framework.NewNodeInfo()
originalNodeInfo.SetNode(originalNode)
modifiedNodeInfo := framework.NewNodeInfo()
modifiedNodeInfo.SetNode(modifiedNode)
if modifiedNodeInfo.Allocatable.GetAllowedPodNumber() > originalNodeInfo.Allocatable.GetAllowedPodNumber() {
return true
}
if podRequest.MilliCPU == 0 &&
podRequest.Memory == 0 &&
podRequest.EphemeralStorage == 0 &&
len(podRequest.ScalarResources) == 0 {
return false
}
if (podRequest.MilliCPU > 0 && modifiedNodeInfo.Allocatable.GetMilliCPU() > originalNodeInfo.Allocatable.GetMilliCPU()) ||
(podRequest.Memory > 0 && modifiedNodeInfo.Allocatable.GetMemory() > originalNodeInfo.Allocatable.GetMemory()) ||
(podRequest.EphemeralStorage > 0 && modifiedNodeInfo.Allocatable.GetEphemeralStorage() > originalNodeInfo.Allocatable.GetEphemeralStorage()) {
return true
}
for rName, rQuant := range podRequest.ScalarResources {
// Skip in case request quantity is zero
if rQuant == 0 {
continue
}
if modifiedNodeInfo.Allocatable.GetScalarResources()[rName] > originalNodeInfo.Allocatable.GetScalarResources()[rName] {
return true
}
if opts.EnableDRAExtendedResource {
_, okScalar := modifiedNodeInfo.GetAllocatable().GetScalarResources()[rName]
_, okDynamic := podRequest.resourceToDeviceClass[rName]
if (okDynamic || podRequest.resourceToDeviceClass == nil) && !okScalar {
// The extended resource request matches a device class (or no device class
// mapping was provided) and it is not in the node's Allocatable (i.e. it is
// not provided by the node's device plugin), so leave it to the
// dynamicresources plugin to evaluate whether it can be satisfied by DRA resources.
return true
}
}
}
return false
}
// isFit checks if the pod fits the node. If the node is nil, it returns false.
// It constructs a fake NodeInfo object for the node and checks if the pod fits the node.
func isFit(pod *v1.Pod, node *v1.Node, opts ResourceRequestsOptions) bool {
if node == nil {
return false
}
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(node)
return len(Fits(pod, nodeInfo, opts)) == 0
}
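// A minimal usage sketch of isFit (hedged; the quantities are illustrative,
// and apiresource stands for k8s.io/apimachinery/pkg/api/resource):
//
//	node := &v1.Node{Status: v1.NodeStatus{Allocatable: v1.ResourceList{
//		v1.ResourceCPU:  apiresource.MustParse("2"),
//		v1.ResourcePods: apiresource.MustParse("10"),
//	}}}
//	// A pod requesting 1 CPU fits; one requesting 4 CPUs does not.
//	isFit(podRequestingCPU("1"), node, ResourceRequestsOptions{}) // true
//	isFit(podRequestingCPU("4"), node, ResourceRequestsOptions{}) // false
//
// where podRequestingCPU is a hypothetical helper that builds a pod whose
// single container requests the given cpu quantity.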
// Filter invoked at the filter extension point.
// Checks if a node has sufficient resources, such as cpu, memory, gpu, and opaque int resources, to run a pod.
// It collects the list of insufficient resources; if that list is empty, the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
insufficientResources := fitsRequest(s, nodeInfo, f.ignoredResources, f.ignoredResourceGroups, ResourceRequestsOptions{
EnablePodLevelResources: f.enablePodLevelResources,
EnableDRAExtendedResource: f.enableDRAExtendedResource})
if len(insufficientResources) != 0 {
// We will keep all failure reasons.
failureReasons := make([]string, 0, len(insufficientResources))
statusCode := fwk.Unschedulable
for i := range insufficientResources {
failureReasons = append(failureReasons, insufficientResources[i].Reason)
if insufficientResources[i].Unresolvable {
statusCode = fwk.UnschedulableAndUnresolvable
}
}
return fwk.NewStatus(statusCode, failureReasons...)
}
return nil
}
func hasRestartableInitContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.InitContainers {
if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
return true
}
}
return false
}
// InsufficientResource describes what kind of resource limit is hit and caused the pod to not fit the node.
type InsufficientResource struct {
ResourceName v1.ResourceName
// We explicitly have a parameter for reason to avoid formatting a message on the fly
// for common resources, which is expensive for cluster autoscaler simulations.
Reason string
Requested int64
Used int64
Capacity int64
// Unresolvable indicates whether this node could become schedulable for the pod by preemption,
// which is determined by comparing the node's size and the pod's request.
Unresolvable bool
}
// Fits checks if the node has enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo fwk.NodeInfo, opts ResourceRequestsOptions) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod, opts), nodeInfo, nil, nil, opts)
}
func fitsRequest(podRequest *preFilterState, nodeInfo fwk.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.Set[string], opts ResourceRequestsOptions) []InsufficientResource {
insufficientResources := make([]InsufficientResource, 0, 4)
allowedPodNumber := nodeInfo.GetAllocatable().GetAllowedPodNumber()
if len(nodeInfo.GetPods())+1 > allowedPodNumber {
insufficientResources = append(insufficientResources, InsufficientResource{
ResourceName: v1.ResourcePods,
Reason: "Too many pods",
Requested: 1,
Used: int64(len(nodeInfo.GetPods())),
Capacity: int64(allowedPodNumber),
})
}
if podRequest.MilliCPU == 0 &&
podRequest.Memory == 0 &&
podRequest.EphemeralStorage == 0 &&
len(podRequest.ScalarResources) == 0 {
return insufficientResources
}
if podRequest.MilliCPU > 0 && podRequest.MilliCPU > (nodeInfo.GetAllocatable().GetMilliCPU()-nodeInfo.GetRequested().GetMilliCPU()) {
insufficientResources = append(insufficientResources, InsufficientResource{
ResourceName: v1.ResourceCPU,
Reason: "Insufficient cpu",
Requested: podRequest.MilliCPU,
Used: nodeInfo.GetRequested().GetMilliCPU(),
Capacity: nodeInfo.GetAllocatable().GetMilliCPU(),
Unresolvable: podRequest.MilliCPU > nodeInfo.GetAllocatable().GetMilliCPU(),
})
}
if podRequest.Memory > 0 && podRequest.Memory > (nodeInfo.GetAllocatable().GetMemory()-nodeInfo.GetRequested().GetMemory()) {
insufficientResources = append(insufficientResources, InsufficientResource{
ResourceName: v1.ResourceMemory,
Reason: "Insufficient memory",
Requested: podRequest.Memory,
Used: nodeInfo.GetRequested().GetMemory(),
Capacity: nodeInfo.GetAllocatable().GetMemory(),
Unresolvable: podRequest.Memory > nodeInfo.GetAllocatable().GetMemory(),
})
}
if podRequest.EphemeralStorage > 0 &&
podRequest.EphemeralStorage > (nodeInfo.GetAllocatable().GetEphemeralStorage()-nodeInfo.GetRequested().GetEphemeralStorage()) {
insufficientResources = append(insufficientResources, InsufficientResource{
ResourceName: v1.ResourceEphemeralStorage,
Reason: "Insufficient ephemeral-storage",
Requested: podRequest.EphemeralStorage,
Used: nodeInfo.GetRequested().GetEphemeralStorage(),
Capacity: nodeInfo.GetAllocatable().GetEphemeralStorage(),
Unresolvable: podRequest.EphemeralStorage > nodeInfo.GetAllocatable().GetEphemeralStorage(),
})
}
for rName, rQuant := range podRequest.ScalarResources {
// Skip in case request quantity is zero
if rQuant == 0 {
continue
}
if v1helper.IsExtendedResourceName(rName) {
// If this resource is one of the extended resources that should be ignored, we will skip checking it.
// rName is guaranteed to have a slash due to API validation.
var rNamePrefix string
if ignoredResourceGroups.Len() > 0 {
rNamePrefix = strings.Split(string(rName), "/")[0]
}
if ignoredExtendedResources.Has(string(rName)) || ignoredResourceGroups.Has(rNamePrefix) {
continue
}
}
if opts.EnableDRAExtendedResource {
_, okScalar := nodeInfo.GetAllocatable().GetScalarResources()[rName]
_, okDynamic := podRequest.resourceToDeviceClass[rName]
if (okDynamic || podRequest.resourceToDeviceClass == nil) && !okScalar {
// The extended resource request matches a device class, or no device class
// mapping was provided, and the resource is not in the node's Allocatable
// (i.e. it is not provided by the node's device plugin), so leave it to the
// dynamicresources plugin to evaluate whether it can be satisfied by DRA
// resources.
continue
}
}
if rQuant > (nodeInfo.GetAllocatable().GetScalarResources()[rName] - nodeInfo.GetRequested().GetScalarResources()[rName]) {
insufficientResources = append(insufficientResources, InsufficientResource{
ResourceName: rName,
Reason: fmt.Sprintf("Insufficient %v", rName),
Requested: podRequest.ScalarResources[rName],
Used: nodeInfo.GetRequested().GetScalarResources()[rName],
Capacity: nodeInfo.GetAllocatable().GetScalarResources()[rName],
Unresolvable: rQuant > nodeInfo.GetAllocatable().GetScalarResources()[rName],
})
}
}
return insufficientResources
}
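// Illustrative usage sketch (not part of the upstream plugin): how a caller,
// e.g. a cluster-autoscaler-style simulation, might invoke Fits and inspect
// the returned insufficient resources. The pod and node are assumed to be
// supplied by the caller.
func exampleFitsUsage(pod *v1.Pod, node *v1.Node) {
	nodeInfo := framework.NewNodeInfo()
	nodeInfo.SetNode(node)
	for _, r := range Fits(pod, nodeInfo, ResourceRequestsOptions{}) {
		// Reason is precomputed for common resources to keep simulations cheap.
		fmt.Printf("%v: requested=%d used=%d capacity=%d unresolvable=%v\n",
			r.Reason, r.Requested, r.Used, r.Capacity, r.Unresolvable)
	}
}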
// Score invoked at the Score extension point.
func (f *Fit) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
s, err := getPreScoreState(state)
if err != nil {
s = &preScoreState{
podRequests: f.calculatePodResourceRequestList(pod, f.resources),
}
}
return f.score(ctx, pod, nodeInfo, s.podRequests)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
// leastResourceScorer favors nodes with fewer requested resources.
// It calculates the percentage of memory, CPU and other resources requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu((capacity-requested)*MaxNodeScore*cpuWeight/capacity) + memory((capacity-requested)*MaxNodeScore*memoryWeight/capacity) + ...)/weightSum
func leastResourceScorer(resources []config.ResourceSpec) func([]int64, []int64) int64 {
return func(requested, allocable []int64) int64 {
var nodeScore, weightSum int64
for i := range requested {
if allocable[i] == 0 {
continue
}
weight := resources[i].Weight
resourceScore := leastRequestedScore(requested[i], allocable[i])
nodeScore += resourceScore * weight
weightSum += weight
}
if weightSum == 0 {
return 0
}
return nodeScore / weightSum
}
}
// The unused capacity is calculated on a scale of 0-MaxNodeScore,
// 0 being the lowest priority and `MaxNodeScore` being the highest.
// The more unused resources there are, the higher the score.
func leastRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
return 0
}
return ((capacity - requested) * fwk.MaxNodeScore) / capacity
}
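// Worked example (illustrative sketch, not part of the upstream file): the
// least-requested math with fwk.MaxNodeScore = 100.
func exampleLeastRequestedScore() {
	_ = leastRequestedScore(250, 1000)  // ((1000-250)*100)/1000 = 75
	_ = leastRequestedScore(1000, 1000) // 0: node fully requested
	_ = leastRequestedScore(0, 0)       // 0: zero capacity is guarded against
}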
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
// mostResourceScorer favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu(MaxNodeScore * requested * cpuWeight / capacity) + memory(MaxNodeScore * requested * memoryWeight / capacity) + ...) / weightSum
func mostResourceScorer(resources []config.ResourceSpec) func(requested, allocable []int64) int64 {
return func(requested, allocable []int64) int64 {
var nodeScore, weightSum int64
for i := range requested {
if allocable[i] == 0 {
continue
}
weight := resources[i].Weight
resourceScore := mostRequestedScore(requested[i], allocable[i])
nodeScore += resourceScore * weight
weightSum += weight
}
if weightSum == 0 {
return 0
}
return nodeScore / weightSum
}
}
// The used capacity is calculated on a scale of 0-MaxNodeScore (MaxNodeScore is
// a constant with value 100).
// 0 being the lowest priority and 100 being the highest.
// The more resources are used, the higher the score. This function
// is almost a reversed version of noderesources.leastRequestedScore.
func mostRequestedScore(requested, capacity int64) int64 {
if capacity == 0 {
return 0
}
if requested > capacity {
// `requested` might be greater than `capacity` because pods with no
// requests get minimum values.
requested = capacity
}
return (requested * fwk.MaxNodeScore) / capacity
}
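// Worked example (illustrative sketch): mostRequestedScore mirrors
// leastRequestedScore to reward bin packing instead of spreading.
func exampleMostRequestedScore() {
	_ = mostRequestedScore(250, 1000)  // (250*100)/1000 = 25
	_ = mostRequestedScore(1500, 1000) // requested clamped to capacity -> 100
}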
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"math"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
)
const maxUtilization = 100
// buildRequestedToCapacityRatioScorerFunction allows users to apply bin packing
// on core resources like CPU, Memory as well as extended resources like accelerators.
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.FunctionShape, resources []config.ResourceSpec) func([]int64, []int64) int64 {
rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape)
resourceScoringFunction := func(requested, capacity int64) int64 {
if capacity == 0 || requested > capacity {
return rawScoringFunction(maxUtilization)
}
return rawScoringFunction(requested * maxUtilization / capacity)
}
return func(requested, allocable []int64) int64 {
var nodeScore, weightSum int64
for i := range requested {
if allocable[i] == 0 {
continue
}
weight := resources[i].Weight
resourceScore := resourceScoringFunction(requested[i], allocable[i])
if resourceScore > 0 {
nodeScore += resourceScore * weight
weightSum += weight
}
}
if weightSum == 0 {
return 0
}
return int64(math.Round(float64(nodeScore) / float64(weightSum)))
}
}
func requestedToCapacityRatioScorer(resources []config.ResourceSpec, shape []config.UtilizationShapePoint) func([]int64, []int64) int64 {
shapes := make([]helper.FunctionShapePoint, 0, len(shape))
for _, point := range shape {
shapes = append(shapes, helper.FunctionShapePoint{
Utilization: int64(point.Utilization),
// MaxCustomPriorityScore may diverge from the max score used in the scheduler and defined by MaxNodeScore,
// therefore we need to scale the score returned by requested to capacity ratio to the score range
// used by the scheduler.
Score: int64(point.Score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
return buildRequestedToCapacityRatioScorerFunction(shapes, resources)
}
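// Illustrative sketch (hypothetical shape values): a two-point shape that
// rewards high utilization. Scores are given on the 0..MaxCustomPriorityScore
// scale and rescaled to MaxNodeScore by requestedToCapacityRatioScorer.
func exampleBinPackingScorer() func([]int64, []int64) int64 {
	shape := []config.UtilizationShapePoint{
		{Utilization: 0, Score: 0},    // empty node -> lowest score
		{Utilization: 100, Score: 10}, // fully utilized -> highest score
	}
	resources := []config.ResourceSpec{
		{Name: "cpu", Weight: 1},
		{Name: "memory", Weight: 1},
	}
	return requestedToCapacityRatioScorer(resources, shape)
}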
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
resourcehelper "k8s.io/component-helpers/resource"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
// scorer is a decorator for resourceAllocationScorer
type scorer func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer
// resourceAllocationScorer contains information to calculate resource allocation score.
type resourceAllocationScorer struct {
Name string
enableInPlacePodVerticalScaling bool
enablePodLevelResources bool
// used to decide whether to use Requested or NonZeroRequested for
// cpu and memory.
useRequested bool
scorer func(requested, allocable []int64) int64
resources []config.ResourceSpec
}
// score will use `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score(
ctx context.Context,
pod *v1.Pod,
nodeInfo fwk.NodeInfo,
podRequests []int64) (int64, *fwk.Status) {
logger := klog.FromContext(ctx)
node := nodeInfo.Node()
// resources not set; nothing to score.
if len(r.resources) == 0 {
return 0, fwk.NewStatus(fwk.Error, "resources not found")
}
requested := make([]int64, len(r.resources))
allocatable := make([]int64, len(r.resources))
for i := range r.resources {
alloc, req := r.calculateResourceAllocatableRequest(logger, nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i])
// Only fill the extended resource entry when it's non-zero.
if alloc == 0 {
continue
}
allocatable[i] = alloc
requested[i] = req
}
score := r.scorer(requested, allocatable)
if loggerV := logger.V(10); loggerV.Enabled() { // Serializing these maps is costly.
loggerV.Info("Listed internal info for allocatable resources, requested resources and score", "pod",
klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
)
}
return score, nil
}
// calculateResourceAllocatableRequest returns 2 parameters:
// - 1st param: quantity of allocatable resource on the node.
// - 2nd param: aggregated quantity of requested resource on the node.
// Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned.
func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger klog.Logger, nodeInfo fwk.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) {
requested := nodeInfo.GetNonZeroRequested()
if r.useRequested {
requested = nodeInfo.GetRequested()
}
// If it's an extended resource and the pod doesn't request it, we return (0, 0)
// as a signal to bypass scoring on this resource.
if podRequest == 0 && schedutil.IsScalarResourceName(resource) {
return 0, 0
}
switch resource {
case v1.ResourceCPU:
return nodeInfo.GetAllocatable().GetMilliCPU(), (requested.GetMilliCPU() + podRequest)
case v1.ResourceMemory:
return nodeInfo.GetAllocatable().GetMemory(), (requested.GetMemory() + podRequest)
case v1.ResourceEphemeralStorage:
return nodeInfo.GetAllocatable().GetEphemeralStorage(), (nodeInfo.GetRequested().GetEphemeralStorage() + podRequest)
default:
if _, exists := nodeInfo.GetAllocatable().GetScalarResources()[resource]; exists {
return nodeInfo.GetAllocatable().GetScalarResources()[resource], (nodeInfo.GetRequested().GetScalarResources()[resource] + podRequest)
}
}
logger.V(10).Info("Requested resource is omitted for node score calculation", "resourceName", resource)
return 0, 0
}
// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod,
// the Overhead is added to the result.
func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 {
opts := resourcehelper.PodResourcesOptions{
UseStatusResources: r.enableInPlacePodVerticalScaling,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !r.enablePodLevelResources,
}
if !r.useRequested {
opts.NonMissingContainerRequests = v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
}
}
requests := resourcehelper.PodRequests(pod, opts)
quantity := requests[resourceName]
if resourceName == v1.ResourceCPU {
return quantity.MilliValue()
}
return quantity.Value()
}
func (r *resourceAllocationScorer) calculatePodResourceRequestList(pod *v1.Pod, resources []config.ResourceSpec) []int64 {
podRequests := make([]int64, len(resources))
for i := range resources {
podRequests[i] = r.calculatePodResourceRequest(pod, v1.ResourceName(resources[i].Name))
}
return podRequests
}
func (r *resourceAllocationScorer) isBestEffortPod(podRequests []int64) bool {
for _, request := range podRequests {
if request != 0 {
return false
}
}
return true
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources
import (
"github.com/google/go-cmp/cmp/cmpopts" //nolint:depguard
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
var (
ignoreBadValueDetail = cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail")
defaultResources = []config.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
{Name: string(v1.ResourceMemory), Weight: 1},
}
extendedRes = "abc.com/xyz"
extendedResourceSet = []config.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
{Name: string(v1.ResourceMemory), Weight: 1},
{Name: extendedRes, Weight: 1},
}
)
func makeNode(node string, milliCPU, memory int64, extendedResource map[string]int64) *v1.Node {
resourceList := make(map[v1.ResourceName]resource.Quantity)
for res, quantity := range extendedResource {
resourceList[v1.ResourceName(res)] = *resource.NewQuantity(quantity, resource.DecimalSI)
}
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: resourceList,
Allocatable: resourceList,
},
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeunschedulable
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// NodeUnschedulable plugin filters nodes that set node.Spec.Unschedulable=true unless
// the pod tolerates {key=node.kubernetes.io/unschedulable, effect:NoSchedule} taint.
type NodeUnschedulable struct {
enableSchedulingQueueHint bool
}
var _ fwk.FilterPlugin = &NodeUnschedulable{}
var _ fwk.EnqueueExtensions = &NodeUnschedulable{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.NodeUnschedulable
const (
// ErrReasonUnknownCondition is used for NodeUnknownCondition predicate error.
ErrReasonUnknownCondition = "node(s) had unknown conditions"
// ErrReasonUnschedulable is used for NodeUnschedulable predicate error.
ErrReasonUnschedulable = "node(s) were unschedulable"
)
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *NodeUnschedulable) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
if !pl.enableSchedulingQueueHint {
return []fwk.ClusterEventWithHint{
// A note about UpdateNodeLabel event:
// Ideally, it's supposed to register only Add | UpdateNodeTaint because UpdateNodeLabel will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
}, nil
}
return []fwk.ClusterEventWithHint{
// When QueueingHint is enabled, we don't use preCheck and we don't need to register UpdateNodeLabel event.
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add | fwk.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
// When the QueueingHint feature is enabled,
// the scheduling queue uses Pod/Update Queueing Hint
// to determine whether a Pod's update makes the Pod schedulable or not.
// https://github.com/kubernetes/kubernetes/pull/122234
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.UpdatePodToleration}, QueueingHintFn: pl.isSchedulableAfterPodTolerationChange},
}, nil
}
// isSchedulableAfterPodTolerationChange is invoked whenever a pod's tolerations change.
func (pl *NodeUnschedulable) isSchedulableAfterPodTolerationChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if pod.UID == modifiedPod.UID {
// Note: we don't need to check whether the old Pod's tolerations tolerated the taint because:
// - Tolerations can be added, but can't be modified nor removed.
// - If the Pod already had the toleration, it wouldn't have been rejected by this plugin in the first place.
// Meaning, this Pod has been rejected by this plugin, and hence it shouldn't have the toleration yet.
if v1helper.TolerationsTolerateTaint(modifiedPod.Spec.Tolerations, &v1.Taint{
Key: v1.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
}) {
// This update makes the pod tolerate the unschedulable taint.
logger.V(5).Info("a new toleration is added for the unschedulable Pod, and it may make it schedulable", "pod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
logger.V(5).Info("a new toleration is added for the unschedulable Pod, but it's an unrelated toleration", "pod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
logger.V(5).Info("a new toleration is added for a Pod, but it's an unrelated Pod and wouldn't change the TaintToleration plugin's decision", "pod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
// isSchedulableAfterNodeChange is invoked for all node events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable.
func (pl *NodeUnschedulable) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
// We queue this Pod when -
// 1. the node is updated from unschedulable to schedulable.
// 2. the node is added and is schedulable.
if (originalNode != nil && originalNode.Spec.Unschedulable && !modifiedNode.Spec.Unschedulable) ||
(originalNode == nil && !modifiedNode.Spec.Unschedulable) {
logger.V(5).Info("node was created or updated, pod may be schedulable now", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
logger.V(5).Info("node was created or updated, but it doesn't make this pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeUnschedulable) Name() string {
return Name
}
// Filter invoked at the filter extension point.
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
node := nodeInfo.Node()
if !node.Spec.Unschedulable {
return nil
}
// If the pod tolerates the unschedulable taint, it also tolerates `node.Spec.Unschedulable`.
podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{
Key: v1.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
})
if !podToleratesUnschedulable {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonUnschedulable)
}
return nil
}
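// Illustrative sketch (not part of the upstream file): the toleration a pod
// needs in order to pass this plugin's Filter on a cordoned
// (Spec.Unschedulable=true) node.
var exampleUnschedulableToleration = v1.Toleration{
	Key:      v1.TaintNodeUnschedulable,
	Operator: v1.TolerationOpExists,
	Effect:   v1.TaintEffectNoSchedule,
}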
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &NodeUnschedulable{enableSchedulingQueueHint: fts.EnableSchedulingQueueHint}, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodevolumelimits
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/rand"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
ephemeral "k8s.io/component-helpers/storage/ephemeral"
storagehelpers "k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
const (
// ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
)
// InTreeToCSITranslator contains methods required to check migratable status
// and perform translations from in-tree PVs to CSI.
type InTreeToCSITranslator interface {
IsPVMigratable(pv *v1.PersistentVolume) bool
IsInlineMigratable(vol *v1.Volume) bool
IsMigratableIntreePluginByName(inTreePluginName string) bool
GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
GetCSINameFromInTreeName(pluginName string) (string, error)
TranslateInTreePVToCSI(logger klog.Logger, pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
TranslateInTreeInlineVolumeToCSI(logger klog.Logger, volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
}
// CSILimits is a plugin that checks node volume limits.
type CSILimits struct {
csiNodeLister storagelisters.CSINodeLister
pvLister corelisters.PersistentVolumeLister
pvcLister corelisters.PersistentVolumeClaimLister
scLister storagelisters.StorageClassLister
vaLister storagelisters.VolumeAttachmentLister
enableCSIMigrationPortworx bool
randomVolumeIDPrefix string
translator InTreeToCSITranslator
}
var _ fwk.PreFilterPlugin = &CSILimits{}
var _ fwk.FilterPlugin = &CSILimits{}
var _ fwk.EnqueueExtensions = &CSILimits{}
// CSIName is the name of the plugin used in the plugin registry and configurations.
const CSIName = names.NodeVolumeLimits
// Name returns name of the plugin. It is used in logs, etc.
func (pl *CSILimits) Name() string {
return CSIName
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *CSILimits) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
return []fwk.ClusterEventWithHint{
// We don't register any `QueueingHintFn` intentionally
// because any new CSINode could make pods that were rejected by CSI volumes schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.CSINode, ActionType: fwk.Add}},
{Event: fwk.ClusterEvent{Resource: fwk.CSINode, ActionType: fwk.Update}, QueueingHintFn: pl.isSchedulableAfterCSINodeUpdated},
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted},
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.Add}, QueueingHintFn: pl.isSchedulableAfterPVCAdded},
{Event: fwk.ClusterEvent{Resource: fwk.VolumeAttachment, ActionType: fwk.Delete}, QueueingHintFn: pl.isSchedulableAfterVolumeAttachmentDeleted},
}, nil
}
func (pl *CSILimits) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
deletedPod, _, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPodDeleted: %w", err)
}
if len(deletedPod.Spec.Volumes) == 0 {
return fwk.QueueSkip, nil
}
if deletedPod.Spec.NodeName == "" && deletedPod.Status.NominatedNodeName == "" {
return fwk.QueueSkip, nil
}
for _, vol := range deletedPod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(&vol) {
return fwk.Queue, nil
}
}
logger.V(5).Info("The deleted pod does not impact the scheduling of the unscheduled pod", "deletedPod", klog.KObj(pod), "pod", klog.KObj(deletedPod))
return fwk.QueueSkip, nil
}
func (pl *CSILimits) isSchedulableAfterPVCAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, addedPvc, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPVCAdded: %w", err)
}
if addedPvc.Namespace != pod.Namespace {
return fwk.QueueSkip, nil
}
for _, volumes := range pod.Spec.Volumes {
var pvcName string
switch {
case volumes.PersistentVolumeClaim != nil:
pvcName = volumes.PersistentVolumeClaim.ClaimName
case volumes.Ephemeral != nil:
pvcName = ephemeral.VolumeClaimName(pod, &volumes)
default:
// Volume is not using a PVC, ignore
continue
}
if pvcName == addedPvc.Name {
logger.V(5).Info("PVC that is referred from the pod was created, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(addedPvc))
return fwk.Queue, nil
}
}
logger.V(5).Info("PVC irrelevant to the Pod was created, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(addedPvc))
return fwk.QueueSkip, nil
}
func (pl *CSILimits) isSchedulableAfterVolumeAttachmentDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
deletedVolumeAttachment, _, err := util.As[*storagev1.VolumeAttachment](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterVolumeAttachmentDeleted: %w", err)
}
for _, vol := range pod.Spec.Volumes {
// Check if the pod volume uses a PVC
// If it does, return Queue
if vol.PersistentVolumeClaim != nil {
logger.V(5).Info("Pod volume uses PersistentVolumeClaim, which might make this pod schedulable due to VolumeAttachment deletion", "pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment), "volume", vol.Name)
return fwk.Queue, nil
}
if !pl.translator.IsInlineMigratable(&vol) {
continue
}
translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(logger, &vol, pod.Namespace)
if err != nil || translatedPV == nil {
return fwk.Queue, fmt.Errorf("converting volume(%s) from inline to csi: %w", vol.Name, err)
}
if translatedPV.Spec.CSI != nil && deletedVolumeAttachment.Spec.Attacher == translatedPV.Spec.CSI.Driver {
// deleted VolumeAttachment Attacher matches the translated PV CSI driver
logger.V(5).Info("Pod volume is an Inline Migratable volume that matches the CSI driver, which might make this pod schedulable due to VolumeAttachment deletion",
"pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment),
"volume", vol.Name, "csiDriver", translatedPV.Spec.CSI.Driver,
)
return fwk.Queue, nil
}
}
logger.V(5).Info("the VolumeAttachment deletion wouldn't make this pod schedulable because the pod has no volume related to a deleted VolumeAttachment",
"pod", klog.KObj(pod), "volumeAttachment", klog.KObj(deletedVolumeAttachment))
return fwk.QueueSkip, nil
}
func (pl *CSILimits) isSchedulableAfterCSINodeUpdated(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
oldCSINode, newCSINode, err := util.As[*storagev1.CSINode](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterCSINodeUpdated: %w", err)
}
oldLimits := make(map[string]int32)
for _, d := range oldCSINode.Spec.Drivers {
var count int32
if d.Allocatable != nil && d.Allocatable.Count != nil {
count = *d.Allocatable.Count
}
oldLimits[d.Name] = count
}
// Compare new driver limits vs. old. If limit increased, queue pod.
for _, d := range newCSINode.Spec.Drivers {
var oldLimit int32
if val, exists := oldLimits[d.Name]; exists {
oldLimit = val
}
newLimit := int32(0)
if d.Allocatable != nil && d.Allocatable.Count != nil {
newLimit = *d.Allocatable.Count
}
if newLimit > oldLimit {
logger.V(5).Info("CSINode driver limit increased, might make this pod schedulable",
"pod", klog.KObj(pod),
"driver", d.Name,
"oldLimit", oldLimit,
"newLimit", newLimit,
)
return fwk.Queue, nil
}
}
// If no driver limit was increased, skip queueing.
return fwk.QueueSkip, nil
}
// PreFilter invoked at the prefilter extension point
//
// If the pod doesn't have any of those types of volumes, we'll skip the Filter phase
func (pl *CSILimits) PreFilter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, _ []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(vol) {
return nil, nil
}
}
return nil, fwk.NewStatus(fwk.Skip)
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *CSILimits) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
// Filter invoked at the filter extension point.
func (pl *CSILimits) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
if len(pod.Spec.Volumes) == 0 {
return nil
}
node := nodeInfo.Node()
logger := klog.FromContext(ctx)
csiNode, err := pl.csiNodeLister.Get(node.Name)
if err != nil {
// TODO: return the error once CSINode is created by default (2 releases)
logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
}
// Count CSI volumes from the new pod
newVolumes := make(map[string]string)
if err := pl.filterAttachableVolumes(logger, pod, csiNode, true /* new pod */, newVolumes); err != nil {
if apierrors.IsNotFound(err) {
// PVC is not found. This Pod will never be schedulable until PVC is created.
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, err.Error())
}
return fwk.AsStatus(err)
}
// If the pod doesn't have any new CSI volumes, the predicate will always be true
if len(newVolumes) == 0 {
return nil
}
// If the node doesn't have volume limits, the predicate will always be true
nodeVolumeLimits := getVolumeLimits(csiNode)
if len(nodeVolumeLimits) == 0 {
return nil
}
// Count CSI volumes from existing pods
attachedVolumes := make(map[string]string)
for _, existingPod := range nodeInfo.GetPods() {
if err := pl.filterAttachableVolumes(logger, existingPod.GetPod(), csiNode, false /* existing pod */, attachedVolumes); err != nil {
return fwk.AsStatus(err)
}
}
attachedVolumeCount := map[string]int{}
for volumeUniqueName, driverName := range attachedVolumes {
// Don't count a single volume used by multiple pods more than once
delete(newVolumes, volumeUniqueName)
attachedVolumeCount[driverName]++
}
// Count CSI volumes from VolumeAttachments
volumeAttachments, err := pl.getNodeVolumeAttachmentInfo(logger, node.Name)
if err != nil {
return fwk.AsStatus(err)
}
for volumeUniqueName, driverName := range volumeAttachments {
// Avoid double-counting volumes already used by existing pods
if _, exists := attachedVolumes[volumeUniqueName]; !exists {
attachedVolumeCount[driverName]++
}
}
// Count the new volumes count per driver
newVolumeCount := map[string]int{}
for _, driverName := range newVolumes {
newVolumeCount[driverName]++
}
for driverName, count := range newVolumeCount {
maxVolumeLimit, ok := nodeVolumeLimits[driverName]
if ok {
currentVolumeCount := attachedVolumeCount[driverName]
logger.V(5).Info("Found plugin volume limits", "node", node.Name, "driverName", driverName,
"maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count,
"pod", klog.KObj(pod))
if currentVolumeCount+count > int(maxVolumeLimit) {
return fwk.NewStatus(fwk.Unschedulable, ErrReasonMaxVolumeCountExceeded)
}
}
}
return nil
}
// filterAttachableVolumes filters the attachable volumes from the pod and adds them to the result map.
// The result map is a map of volumeUniqueName to driver name. The volumeUniqueName is a unique name for
// the volume in the format of "driverName/volumeHandle". And driver name is the CSI driver name.
func (pl *CSILimits) filterAttachableVolumes(
logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error {
for _, vol := range pod.Spec.Volumes {
pvcName := ""
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
// Normal CSI volume can only be used through PVC
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
// Generic ephemeral inline volumes also use a PVC,
// just with a computed name and certain ownership.
// That is checked below once the pvc object is
// retrieved.
pvcName = ephemeral.VolumeClaimName(pod, &vol)
isEphemeral = true
default:
// Inline volumes do not have a PVC.
// Need to check if CSI migration is enabled for this inline volume.
// - If the volume is migratable and CSI migration is enabled, it needs to be
//   counted as well.
// - If the volume is not migratable, it will be counted by the non-CSI filter.
if err := pl.checkAttachableInlineVolume(logger, &vol, csiNode, pod, result); err != nil {
return err
}
continue
}
if pvcName == "" {
return fmt.Errorf("PersistentVolumeClaim had no name")
}
pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
if err != nil {
if newPod {
// The PVC is required to proceed with
// scheduling of a new pod because it cannot
// run without it. Bail out immediately.
return fmt.Errorf("looking up PVC %s/%s: %w", pod.Namespace, pvcName, err)
}
// If the PVC is invalid, we don't count the volume because
// there's no guarantee that it belongs to the running predicate.
logger.V(5).Info("Unable to look up PVC info", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName))
continue
}
// The PVC for an ephemeral volume must be owned by the pod.
if isEphemeral {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return err
}
}
driverName, volumeHandle := pl.getCSIDriverInfo(logger, csiNode, pvc)
if driverName == "" || volumeHandle == "" {
logger.V(5).Info("Could not find a CSI driver name or volume handle, not counting volume")
continue
}
volumeUniqueName := getVolumeUniqueName(driverName, volumeHandle)
result[volumeUniqueName] = driverName
}
return nil
}
// checkAttachableInlineVolume takes an inline volume and adds it to the result map if the
// volume is migratable and CSI migration for this plugin has been enabled.
func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Volume, csiNode *storagev1.CSINode,
pod *v1.Pod, result map[string]string) error {
if !pl.translator.IsInlineMigratable(vol) {
return nil
}
// Check if the intree provisioner CSI migration has been enabled.
inTreeProvisionerName, err := pl.translator.GetInTreePluginNameFromSpec(nil, vol)
if err != nil {
return fmt.Errorf("looking up provisioner name for volume %s: %w", vol.Name, err)
}
if !isCSIMigrationOn(csiNode, inTreeProvisionerName, pl.enableCSIMigrationPortworx) {
csiNodeName := ""
if csiNode != nil {
csiNodeName = csiNode.Name
}
logger.V(5).Info("CSI Migration is not enabled for provisioner", "provisioner", inTreeProvisionerName,
"pod", klog.KObj(pod), "csiNode", csiNodeName)
return nil
}
// Do translation for the in-tree volume.
translatedPV, err := pl.translator.TranslateInTreeInlineVolumeToCSI(logger, vol, pod.Namespace)
if err != nil || translatedPV == nil {
return fmt.Errorf("converting volume(%s) from inline to csi: %w", vol.Name, err)
}
driverName, err := pl.translator.GetCSINameFromInTreeName(inTreeProvisionerName)
if err != nil {
return fmt.Errorf("looking up CSI driver name for provisioner %s: %w", inTreeProvisionerName, err)
}
// TranslateInTreeInlineVolumeToCSI should translate inline volume to CSI. If it is not set,
// the volume does not support inline. Skip the count.
if translatedPV.Spec.PersistentVolumeSource.CSI == nil {
return nil
}
volumeUniqueName := getVolumeUniqueName(driverName, translatedPV.Spec.PersistentVolumeSource.CSI.VolumeHandle)
result[volumeUniqueName] = driverName
return nil
}
// getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC.
// If the PVC is from a migrated in-tree plugin, this function will return
// the information of the CSI driver that the plugin has been migrated to.
func (pl *CSILimits) getCSIDriverInfo(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
pvName := pvc.Spec.VolumeName
if pvName == "" {
logger.V(5).Info("Persistent volume had no name for claim", "PVC", klog.KObj(pvc))
return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc)
}
pv, err := pl.pvLister.Get(pvName)
if err != nil {
logger.V(5).Info("Unable to look up PV info for PVC and PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvName))
// If we can't fetch the PV associated with the PVC, maybe it got deleted
// or the PVC was prebound to a PV that hasn't been created yet.
// Fall back to using the StorageClass for volume counting.
return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc)
}
csiSource := pv.Spec.PersistentVolumeSource.CSI
if csiSource == nil {
// We make a fast path for non-CSI volumes that aren't migratable
if !pl.translator.IsPVMigratable(pv) {
return "", ""
}
pluginName, err := pl.translator.GetInTreePluginNameFromSpec(pv, nil)
if err != nil {
logger.V(5).Info("Unable to look up plugin name from PV spec", "err", err)
return "", ""
}
if !isCSIMigrationOn(csiNode, pluginName, pl.enableCSIMigrationPortworx) {
logger.V(5).Info("CSI Migration of plugin is not enabled", "plugin", pluginName)
return "", ""
}
csiPV, err := pl.translator.TranslateInTreePVToCSI(logger, pv)
if err != nil {
logger.V(5).Info("Unable to translate in-tree volume to CSI", "err", err)
return "", ""
}
if csiPV.Spec.PersistentVolumeSource.CSI == nil {
logger.V(5).Info("Unable to get a valid volume source for translated PV", "PV", pvName)
return "", ""
}
csiSource = csiPV.Spec.PersistentVolumeSource.CSI
}
return csiSource.Driver, csiSource.VolumeHandle
}
// getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass.
func (pl *CSILimits) getCSIDriverInfoFromSC(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
namespace := pvc.Namespace
pvcName := pvc.Name
scName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
// If StorageClass is not set or not found, then PVC must be using immediate binding mode
// and hence it must be bound before scheduling. So it is safe to not count it.
if scName == "" {
logger.V(5).Info("PVC has no StorageClass", "PVC", klog.KObj(pvc))
return "", ""
}
storageClass, err := pl.scLister.Get(scName)
if err != nil {
logger.V(5).Info("Could not get StorageClass for PVC", "PVC", klog.KObj(pvc), "err", err)
return "", ""
}
// We use a random prefix to avoid conflicts with volume IDs. If the PVC is bound during the execution of the
// predicate and there is another pod on the same node that uses the same volume, then we will overcount
// the volume and consider both volumes as different.
volumeHandle := fmt.Sprintf("%s-%s/%s", pl.randomVolumeIDPrefix, namespace, pvcName)
provisioner := storageClass.Provisioner
if pl.translator.IsMigratableIntreePluginByName(provisioner) {
if !isCSIMigrationOn(csiNode, provisioner, pl.enableCSIMigrationPortworx) {
logger.V(5).Info("CSI Migration of provisioner is not enabled", "provisioner", provisioner)
return "", ""
}
driverName, err := pl.translator.GetCSINameFromInTreeName(provisioner)
if err != nil {
logger.V(5).Info("Unable to look up driver name from provisioner name", "provisioner", provisioner, "err", err)
return "", ""
}
return driverName, volumeHandle
}
return provisioner, volumeHandle
}
// NewCSI initializes a new plugin and returns it.
func NewCSI(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
csiNodesLister := informerFactory.Storage().V1().CSINodes().Lister()
scLister := informerFactory.Storage().V1().StorageClasses().Lister()
vaLister := informerFactory.Storage().V1().VolumeAttachments().Lister()
csiTranslator := csitrans.New()
return &CSILimits{
csiNodeLister: csiNodesLister,
pvLister: pvLister,
pvcLister: pvcLister,
scLister: scLister,
vaLister: vaLister,
enableCSIMigrationPortworx: fts.EnableCSIMigrationPortworx,
randomVolumeIDPrefix: rand.String(32),
translator: csiTranslator,
}, nil
}
// getVolumeLimits reads the volume limits from CSINode object and returns a map of volume limits.
// The key is the driver name and the value is the maximum number of volumes that can be attached to the node.
// If a key is not found in the map, it means there is no limit for the driver on the node.
func getVolumeLimits(csiNode *storagev1.CSINode) map[string]int64 {
nodeVolumeLimits := make(map[string]int64)
if csiNode == nil {
return nodeVolumeLimits
}
for _, d := range csiNode.Spec.Drivers {
if d.Allocatable != nil && d.Allocatable.Count != nil {
nodeVolumeLimits[d.Name] = int64(*d.Allocatable.Count)
}
}
return nodeVolumeLimits
}
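// Illustrative sketch (hypothetical driver name and count): a CSINode whose
// driver reports Allocatable.Count = 25 yields {"ebs.csi.aws.com": 25};
// drivers reporting no Allocatable are simply absent, i.e. unlimited.
func exampleVolumeLimits() map[string]int64 {
	count := int32(25)
	csiNode := &storagev1.CSINode{
		Spec: storagev1.CSINodeSpec{
			Drivers: []storagev1.CSINodeDriver{{
				Name:        "ebs.csi.aws.com",
				Allocatable: &storagev1.VolumeNodeResources{Count: &count},
			}},
		},
	}
	return getVolumeLimits(csiNode)
}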
// getNodeVolumeAttachmentInfo returns a map of volumeID to driver name for the given node.
func (pl *CSILimits) getNodeVolumeAttachmentInfo(logger klog.Logger, nodeName string) (map[string]string, error) {
volumeAttachments := make(map[string]string)
vas, err := pl.vaLister.List(labels.Everything())
if err != nil {
return nil, err
}
for _, va := range vas {
if va.Spec.NodeName == nodeName {
if va.Spec.Attacher == "" {
logger.V(5).Info("VolumeAttachment has no attacher", "VolumeAttachment", klog.KObj(va))
continue
}
if va.Spec.Source.PersistentVolumeName == nil {
logger.V(5).Info("VolumeAttachment has no PV name", "VolumeAttachment", klog.KObj(va))
continue
}
pv, err := pl.pvLister.Get(*va.Spec.Source.PersistentVolumeName)
if err != nil {
logger.V(5).Info("Unable to get PV for VolumeAttachment", "VolumeAttachment", klog.KObj(va), "err", err)
continue
}
if pv.Spec.CSI == nil {
logger.V(5).Info("PV is not a CSI volume", "PV", klog.KObj(pv))
continue
}
volumeID := getVolumeUniqueName(va.Spec.Attacher, pv.Spec.CSI.VolumeHandle)
volumeAttachments[volumeID] = va.Spec.Attacher
}
}
return volumeAttachments, nil
}
func getVolumeUniqueName(driverName, volumeHandle string) string {
return fmt.Sprintf("%s/%s", driverName, volumeHandle)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodevolumelimits
import (
"strings"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/util/sets"
csilibplugins "k8s.io/csi-translation-lib/plugins"
)
// isCSIMigrationOn returns a boolean value indicating whether
// the CSI migration has been enabled for a particular storage plugin.
func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string, enableCSIMigrationPortworx bool) bool {
if csiNode == nil || len(pluginName) == 0 {
return false
}
// In-tree storage to CSI driver migration feature should be enabled,
// along with the plugin-specific one
switch pluginName {
case csilibplugins.AWSEBSInTreePluginName:
return true
case csilibplugins.PortworxVolumePluginName:
if !enableCSIMigrationPortworx {
return false
}
case csilibplugins.GCEPDInTreePluginName:
return true
case csilibplugins.AzureDiskInTreePluginName:
return true
case csilibplugins.CinderInTreePluginName:
return true
default:
return false
}
// The plugin name should be listed in the CSINode object annotation.
// This indicates that the plugin has been migrated to a CSI driver in the node.
csiNodeAnn := csiNode.GetAnnotations()
if csiNodeAnn == nil {
return false
}
var mpaSet sets.Set[string]
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.New[string]()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.New(tok...)
}
return mpaSet.Has(pluginName)
}
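// Illustrative sketch (not part of the upstream file): of the in-tree plugins
// handled above, only Portworx falls through to the annotation check; a
// CSINode annotated as below would make
// isCSIMigrationOn(n, csilibplugins.PortworxVolumePluginName, true) return true.
func exampleMigratedCSINode() *storagev1.CSINode {
	n := &storagev1.CSINode{}
	n.SetAnnotations(map[string]string{
		v1.MigratedPluginsAnnotationKey: csilibplugins.PortworxVolumePluginName,
	})
	return n
}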
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtopologyspread
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/utils/ptr"
)
// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint
// and where the selector is parsed.
// Fields are exported for comparison during testing.
type topologySpreadConstraint struct {
MaxSkew int32
TopologyKey string
Selector labels.Selector
MinDomains int32
NodeAffinityPolicy v1.NodeInclusionPolicy
NodeTaintsPolicy v1.NodeInclusionPolicy
}
func (tsc *topologySpreadConstraint) matchNodeInclusionPolicies(pod *v1.Pod, node *v1.Node, require nodeaffinity.RequiredNodeAffinity) bool {
if tsc.NodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
// We ignore parsing errors here for backwards compatibility.
if match, _ := require.Match(node); !match {
return false
}
}
if tsc.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc()); untolerated {
return false
}
}
return true
}
// buildDefaultConstraints builds the constraints for a pod using
// .DefaultConstraints and the selectors from the services, replication
// controllers, replica sets and stateful sets that match the pod.
func (pl *PodTopologySpread) buildDefaultConstraints(p *v1.Pod, action v1.UnsatisfiableConstraintAction) ([]topologySpreadConstraint, error) {
constraints, err := pl.filterTopologySpreadConstraints(pl.defaultConstraints, p.Labels, action)
if err != nil || len(constraints) == 0 {
return nil, err
}
selector := helper.DefaultSelector(p, pl.services, pl.replicationCtrls, pl.replicaSets, pl.statefulSets)
if selector.Empty() {
return nil, nil
}
for i := range constraints {
constraints[i].Selector = selector
}
return constraints, nil
}
// nodeLabelsMatchSpreadConstraints checks if ALL topology keys in spread Constraints are present in node labels.
func nodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []topologySpreadConstraint) bool {
for _, c := range constraints {
if _, ok := nodeLabels[c.TopologyKey]; !ok {
return false
}
}
return true
}
func (pl *PodTopologySpread) filterTopologySpreadConstraints(constraints []v1.TopologySpreadConstraint, podLabels map[string]string, action v1.UnsatisfiableConstraintAction) ([]topologySpreadConstraint, error) {
var result []topologySpreadConstraint
for _, c := range constraints {
if c.WhenUnsatisfiable == action {
selector, err := metav1.LabelSelectorAsSelector(c.LabelSelector)
if err != nil {
return nil, err
}
if pl.enableMatchLabelKeysInPodTopologySpread && len(c.MatchLabelKeys) > 0 {
matchLabels := make(labels.Set)
for _, labelKey := range c.MatchLabelKeys {
if value, ok := podLabels[labelKey]; ok {
matchLabels[labelKey] = value
}
}
if len(matchLabels) > 0 {
selector = mergeLabelSetWithSelector(matchLabels, selector)
}
}
tsc := topologySpreadConstraint{
MaxSkew: c.MaxSkew,
TopologyKey: c.TopologyKey,
Selector: selector,
MinDomains: ptr.Deref(c.MinDomains, 1), // If MinDomains is nil, we treat MinDomains as 1.
NodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
}
if pl.enableNodeInclusionPolicyInPodTopologySpread {
if c.NodeAffinityPolicy != nil {
tsc.NodeAffinityPolicy = *c.NodeAffinityPolicy
}
if c.NodeTaintsPolicy != nil {
tsc.NodeTaintsPolicy = *c.NodeTaintsPolicy
}
}
result = append(result, tsc)
}
}
return result, nil
}
func mergeLabelSetWithSelector(matchLabels labels.Set, s labels.Selector) labels.Selector {
mergedSelector := labels.SelectorFromSet(matchLabels)
requirements, ok := s.Requirements()
if !ok {
return s
}
for _, r := range requirements {
mergedSelector = mergedSelector.Add(r)
}
return mergedSelector
}
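// Illustrative sketch: merging matchLabelKeys-derived labels into a
// constraint selector. With pod labels {app: web} and a constraint selector
// "tier=frontend", the merged selector is equivalent to "app=web,tier=frontend".
func exampleMergeSelector() labels.Selector {
	s, _ := labels.Parse("tier=frontend")
	return mergeLabelSetWithSelector(labels.Set{"app": "web"}, s)
}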
func countPodsMatchSelector(podInfos []fwk.PodInfo, selector labels.Selector, ns string) int {
if selector.Empty() {
return 0
}
count := 0
for _, p := range podInfos {
// Bypass terminating Pod (see #87621).
if p.GetPod().DeletionTimestamp != nil || p.GetPod().Namespace != ns {
continue
}
if selector.Matches(labels.Set(p.GetPod().Labels)) {
count++
}
}
return count
}
// podLabelsMatchSpreadConstraints returns whether the given labels match the selector of any of the topologySpreadConstraints.
func podLabelsMatchSpreadConstraints(constraints []topologySpreadConstraint, labels labels.Set) bool {
for _, c := range constraints {
if c.Selector.Matches(labels) {
return true
}
}
return false
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtopologyspread
import (
"context"
"fmt"
"maps"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
)
const preFilterStateKey = "PreFilter" + Name
// preFilterState computed at PreFilter and used at Filter.
// It combines CriticalPaths and TpValueToMatchNum to represent:
// (1) critical paths where the fewest pods are matched on each spread constraint.
// (2) the number of pods matched on each spread constraint.
// A nil preFilterState denotes it's not set at all (in PreFilter phase);
// An empty preFilterState object denotes it's a legit state and is set in PreFilter phase.
// Fields are exported for comparison during testing.
type preFilterState struct {
Constraints []topologySpreadConstraint
// CriticalPaths is a slice indexed by constraint index.
// For each entry, we record 2 critical paths instead of all critical paths.
// CriticalPaths[i][0].MatchNum always holds the minimum matching number.
// CriticalPaths[i][1].MatchNum is always greater or equal to CriticalPaths[i][0].MatchNum, but
// it's not guaranteed to be the 2nd minimum match number.
CriticalPaths []*criticalPaths
// TpValueToMatchNum is a slice indexed by constraint index.
// Each entry is keyed with topology value, and valued with the number of matching pods.
TpValueToMatchNum []map[string]int
}
// minMatchNum returns the global minimum for the calculation of skew while taking MinDomains into account.
func (s *preFilterState) minMatchNum(constraintID int, minDomains int32) (int, error) {
paths := s.CriticalPaths[constraintID]
minMatchNum := paths[0].MatchNum
domainsNum := len(s.TpValueToMatchNum[constraintID])
if domainsNum < int(minDomains) {
// When the number of eligible domains with matching topology keys is less than `minDomains`,
// it treats "global minimum" as 0.
minMatchNum = 0
}
return minMatchNum, nil
}
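// Illustrative sketch (hypothetical numbers, not part of the plugin): with
// minDomains=3 but only two eligible domains recorded, the global minimum is
// treated as 0, which inflates the computed skew and blocks further imbalance.
func exampleMinMatchNum() {
	s := &preFilterState{
		CriticalPaths:     []*criticalPaths{newCriticalPaths()},
		TpValueToMatchNum: []map[string]int{{"zone-a": 2, "zone-b": 4}},
	}
	s.CriticalPaths[0].update("zone-a", 2)
	s.CriticalPaths[0].update("zone-b", 4)
	minNum, _ := s.minMatchNum(0, 3) // 2 domains < minDomains=3 -> treated as 0
	_ = minNum
}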
// Clone makes a copy of the given state.
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
copy := preFilterState{
// Constraints are shared because they don't change.
Constraints: s.Constraints,
CriticalPaths: make([]*criticalPaths, len(s.CriticalPaths)),
TpValueToMatchNum: make([]map[string]int, len(s.TpValueToMatchNum)),
}
for i, paths := range s.CriticalPaths {
copy.CriticalPaths[i] = &criticalPaths{paths[0], paths[1]}
}
for i, tpMap := range s.TpValueToMatchNum {
copy.TpValueToMatchNum[i] = maps.Clone(tpMap)
}
return &copy
}
// CAVEAT: the reason that `[2]criticalPath` can work is based on the implementation of current
// preemption algorithm, in particular the following 2 facts:
// Fact 1: we only preempt pods on the same node, instead of pods on multiple nodes.
// Fact 2: each node is evaluated on a separate copy of the preFilterState during its preemption cycle.
// If we plan to turn to a more complex algorithm like "arbitrary pods on multiple nodes", this
// structure needs to be revisited.
// Fields are exported for comparison during testing.
type criticalPaths [2]struct {
// TopologyValue denotes the topology value mapping to topology key.
TopologyValue string
// MatchNum denotes the number of matching pods.
MatchNum int
}
func newCriticalPaths() *criticalPaths {
return &criticalPaths{{MatchNum: math.MaxInt32}, {MatchNum: math.MaxInt32}}
}
func (p *criticalPaths) update(tpVal string, num int) {
// first verify if `tpVal` exists or not
i := -1
if tpVal == p[0].TopologyValue {
i = 0
} else if tpVal == p[1].TopologyValue {
i = 1
}
if i >= 0 {
// `tpVal` exists
p[i].MatchNum = num
if p[0].MatchNum > p[1].MatchNum {
// swap paths[0] and paths[1]
p[0], p[1] = p[1], p[0]
}
} else {
// `tpVal` doesn't exist
if num < p[0].MatchNum {
// update paths[1] with paths[0]
p[1] = p[0]
// update paths[0]
p[0].TopologyValue, p[0].MatchNum = tpVal, num
} else if num < p[1].MatchNum {
// update paths[1]
p[1].TopologyValue, p[1].MatchNum = tpVal, num
}
}
}
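// Illustrative sketch (hypothetical values, not part of the plugin): how the
// two-slot criticalPaths structure keeps the smallest match counts ordered.
func exampleCriticalPathsUpdate() {
	p := newCriticalPaths()
	p.update("zone-a", 3) // paths: [zone-a:3, "":MaxInt32]
	p.update("zone-b", 1) // paths: [zone-b:1, zone-a:3]
	p.update("zone-b", 5) // zone-b exists; slots swap to [zone-a:3, zone-b:5]
	_ = p[0].MatchNum     // global minimum used for the skew calculation: 3
}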
// PreFilter invoked at the prefilter extension point.
func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
s, err := pl.calPreFilterState(ctx, pod, nodes)
if err != nil {
return nil, fwk.AsStatus(err)
} else if s != nil && len(s.Constraints) == 0 {
return nil, fwk.NewStatus(fwk.Skip)
}
cycleState.Write(preFilterStateKey, s)
return nil, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *PodTopologySpread) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
// AddPod from pre-computed data in cycleState.
func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
pl.updateWithPod(s, podInfoToAdd.GetPod(), podToSchedule, nodeInfo.Node(), 1)
return nil
}
// RemovePod from pre-computed data in cycleState.
func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
pl.updateWithPod(s, podInfoToRemove.GetPod(), podToSchedule, nodeInfo.Node(), -1)
return nil
}
func (pl *PodTopologySpread) updateWithPod(s *preFilterState, updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int) {
if s == nil || updatedPod.Namespace != preemptorPod.Namespace || node == nil {
return
}
if !nodeLabelsMatchSpreadConstraints(node.Labels, s.Constraints) {
return
}
requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(preemptorPod)
if !pl.enableNodeInclusionPolicyInPodTopologySpread {
// The node should satisfy the incoming pod's NodeSelector/NodeAffinity;
// spreading is applied only to nodes that pass those filters.
// Ignore parsing errors for backwards compatibility.
if match, _ := requiredSchedulingTerm.Match(node); !match {
return
}
}
podLabelSet := labels.Set(updatedPod.Labels)
for i, constraint := range s.Constraints {
if !constraint.Selector.Matches(podLabelSet) {
continue
}
if pl.enableNodeInclusionPolicyInPodTopologySpread &&
!constraint.matchNodeInclusionPolicies(preemptorPod, node, requiredSchedulingTerm) {
continue
}
v := node.Labels[constraint.TopologyKey]
s.TpValueToMatchNum[i][v] += delta
s.CriticalPaths[i].update(v, s.TpValueToMatchNum[i][v])
}
}
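// For example (hypothetical preemption scenario): when the preemption logic
// simulates evicting a victim pod in zone-a via RemovePod, the delta of -1
// decrements TpValueToMatchNum[i]["zone-a"] and refreshes the critical paths,
// so the preemptor's subsequent Filter check sees the post-eviction spread.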
// getPreFilterState fetches a pre-computed preFilterState.
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
return nil, fmt.Errorf("reading %q from cycleState: %w", preFilterStateKey, err)
}
s, ok := c.(*preFilterState)
if !ok {
return nil, fmt.Errorf("%+v convert to podtopologyspread.preFilterState error", c)
}
return s, nil
}
type topologyCount struct {
topologyValue string
constraintID int
count int
}
// calPreFilterState computes preFilterState describing how pods are spread on topologies.
func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod, allNodes []fwk.NodeInfo) (*preFilterState, error) {
constraints, err := pl.getConstraints(pod)
if err != nil {
return nil, fmt.Errorf("get constraints from pod: %w", err)
}
if len(constraints) == 0 {
return &preFilterState{}, nil
}
s := preFilterState{
Constraints: constraints,
CriticalPaths: make([]*criticalPaths, len(constraints)),
TpValueToMatchNum: make([]map[string]int, len(constraints)),
}
for i := 0; i < len(constraints); i++ {
s.TpValueToMatchNum[i] = make(map[string]int, sizeHeuristic(len(allNodes), constraints[i]))
}
tpCountsByNode := make([][]topologyCount, len(allNodes))
requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
processNode := func(n int) {
nodeInfo := allNodes[n]
node := nodeInfo.Node()
if !pl.enableNodeInclusionPolicyInPodTopologySpread {
// The node should satisfy the incoming pod's NodeSelector/NodeAffinity;
// spreading is applied only to nodes that pass those filters.
// Ignore parsing errors for backwards compatibility.
if match, _ := requiredNodeAffinity.Match(node); !match {
return
}
}
// Ensure current node's labels contains all topologyKeys in 'Constraints'.
if !nodeLabelsMatchSpreadConstraints(node.Labels, constraints) {
return
}
tpCounts := make([]topologyCount, 0, len(constraints))
for i, c := range constraints {
if pl.enableNodeInclusionPolicyInPodTopologySpread &&
!c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) {
continue
}
value := node.Labels[c.TopologyKey]
count := countPodsMatchSelector(nodeInfo.GetPods(), c.Selector, pod.Namespace)
tpCounts = append(tpCounts, topologyCount{
topologyValue: value,
constraintID: i,
count: count,
})
}
tpCountsByNode[n] = tpCounts
}
pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name())
for _, tpCounts := range tpCountsByNode {
// tpCounts might not hold all the constraints, so index can't be used here as constraintID.
for _, tpCount := range tpCounts {
s.TpValueToMatchNum[tpCount.constraintID][tpCount.topologyValue] += tpCount.count
}
}
// calculate min match for each constraint and topology value
for i := 0; i < len(constraints); i++ {
s.CriticalPaths[i] = newCriticalPaths()
for value, num := range s.TpValueToMatchNum[i] {
s.CriticalPaths[i].update(value, num)
}
}
return &s, nil
}
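// Worked example (hypothetical cluster): one zone constraint, nodes n1/n2 in
// zone-a holding 2 and 1 matching pods, and n3 in zone-b holding 3. The
// parallel pass yields tpCounts of {zone-a:2}, {zone-a:1} and {zone-b:3};
// the merge loop produces TpValueToMatchNum[0] = {zone-a: 3, zone-b: 3}, and
// the critical paths record a global minimum of 3.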
// Filter invoked at the filter extension point.
func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
node := nodeInfo.Node()
s, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
// However, "empty" preFilterState is legit which tolerates every toSchedule Pod.
if len(s.Constraints) == 0 {
return nil
}
logger := klog.FromContext(ctx)
podLabelSet := labels.Set(pod.Labels)
for i, c := range s.Constraints {
tpKey := c.TopologyKey
tpVal, ok := node.Labels[tpKey]
if !ok {
logger.V(5).Info("Node doesn't have required topology label for spread constraint", "node", klog.KObj(node), "topologyKey", tpKey)
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonNodeLabelNotMatch)
}
// judging criteria:
// 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew'
minMatchNum, err := s.minMatchNum(i, c.MinDomains)
if err != nil {
logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.CriticalPaths[i])
continue
}
selfMatchNum := 0
if c.Selector.Matches(podLabelSet) {
selfMatchNum = 1
}
matchNum := s.TpValueToMatchNum[i][tpVal]
skew := matchNum + selfMatchNum - minMatchNum
if skew > int(c.MaxSkew) {
logger.V(5).Info("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", c.MaxSkew)
return fwk.NewStatus(fwk.Unschedulable, ErrReasonConstraintsNotMatch)
}
}
return nil
}
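// Worked example (hypothetical numbers) for the skew check above: with
// maxSkew=1, a candidate node whose zone already holds 3 matching pods
// (matchNum=3), an incoming pod that matches the selector (selfMatchNum=1),
// and a global minimum of 2, skew = 3 + 1 - 2 = 2 > 1, so the node is
// rejected as Unschedulable.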
func sizeHeuristic(nodes int, constraint topologySpreadConstraint) int {
if constraint.TopologyKey == v1.LabelHostname {
return nodes
}
return 0
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtopologyspread
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
const (
// ErrReasonConstraintsNotMatch is used for PodTopologySpread filter error.
ErrReasonConstraintsNotMatch = "node(s) didn't match pod topology spread constraints"
// ErrReasonNodeLabelNotMatch is used when the node doesn't hold the required label.
ErrReasonNodeLabelNotMatch = ErrReasonConstraintsNotMatch + " (missing required label)"
)
var systemDefaultConstraints = []v1.TopologySpreadConstraint{
{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.ScheduleAnyway,
MaxSkew: 3,
},
{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.ScheduleAnyway,
MaxSkew: 5,
},
}
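// For reference (sketch; selectors are derived at runtime from the pod's
// matching services/controllers): the system defaults above correspond to a
// pod spec of the form
//
//	topologySpreadConstraints:
//	- maxSkew: 3
//	  topologyKey: kubernetes.io/hostname
//	  whenUnsatisfiable: ScheduleAnyway
//	- maxSkew: 5
//	  topologyKey: topology.kubernetes.io/zone
//	  whenUnsatisfiable: ScheduleAnyway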
// PodTopologySpread is a plugin that ensures a pod's topologySpreadConstraints are satisfied.
type PodTopologySpread struct {
systemDefaulted bool
parallelizer fwk.Parallelizer
defaultConstraints []v1.TopologySpreadConstraint
sharedLister fwk.SharedLister
services corelisters.ServiceLister
replicationCtrls corelisters.ReplicationControllerLister
replicaSets appslisters.ReplicaSetLister
statefulSets appslisters.StatefulSetLister
enableNodeInclusionPolicyInPodTopologySpread bool
enableMatchLabelKeysInPodTopologySpread bool
enableSchedulingQueueHint bool
}
var _ fwk.PreFilterPlugin = &PodTopologySpread{}
var _ fwk.FilterPlugin = &PodTopologySpread{}
var _ fwk.PreScorePlugin = &PodTopologySpread{}
var _ fwk.ScorePlugin = &PodTopologySpread{}
var _ fwk.EnqueueExtensions = &PodTopologySpread{}
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.PodTopologySpread
// Name returns name of the plugin. It is used in logs, etc.
func (pl *PodTopologySpread) Name() string {
return Name
}
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
if err := validation.ValidatePodTopologySpreadArgs(nil, &args); err != nil {
return nil, err
}
pl := &PodTopologySpread{
parallelizer: h.Parallelizer(),
sharedLister: h.SnapshotSharedLister(),
defaultConstraints: args.DefaultConstraints,
enableNodeInclusionPolicyInPodTopologySpread: fts.EnableNodeInclusionPolicyInPodTopologySpread,
enableMatchLabelKeysInPodTopologySpread: fts.EnableMatchLabelKeysInPodTopologySpread,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}
if args.DefaultingType == config.SystemDefaulting {
pl.defaultConstraints = systemDefaultConstraints
pl.systemDefaulted = true
}
if len(pl.defaultConstraints) != 0 {
if h.SharedInformerFactory() == nil {
return nil, fmt.Errorf("SharedInformerFactory is nil")
}
pl.setListers(h.SharedInformerFactory())
}
return pl, nil
}
func getArgs(obj runtime.Object) (config.PodTopologySpreadArgs, error) {
ptr, ok := obj.(*config.PodTopologySpreadArgs)
if !ok {
return config.PodTopologySpreadArgs{}, fmt.Errorf("want args to be of type PodTopologySpreadArgs, got %T", obj)
}
return *ptr, nil
}
func (pl *PodTopologySpread) setListers(factory informers.SharedInformerFactory) {
pl.services = factory.Core().V1().Services().Lister()
pl.replicationCtrls = factory.Core().V1().ReplicationControllers().Lister()
pl.replicaSets = factory.Apps().V1().ReplicaSets().Lister()
pl.statefulSets = factory.Apps().V1().StatefulSets().Lister()
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *PodTopologySpread) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
podActionType := fwk.Add | fwk.UpdatePodLabel | fwk.Delete
if pl.enableSchedulingQueueHint {
// When the QueueingHint feature is enabled, the scheduling queue uses Pod/Update Queueing Hint
// to determine whether a Pod's update makes the Pod schedulable or not.
// https://github.com/kubernetes/kubernetes/pull/122234
// (If not, the scheduling queue always retries the unschedulable Pods when they're updated.)
//
// The Pod rejected by this plugin can be schedulable when the Pod has a spread constraint with NodeTaintsPolicy:Honor
// and has got a new toleration.
// So, we add UpdatePodToleration here only when QHint is enabled.
podActionType = fwk.Add | fwk.UpdatePodLabel | fwk.UpdatePodToleration | fwk.Delete
}
return []fwk.ClusterEventWithHint{
// All ActionType includes the following events:
// - Add. An unschedulable Pod may fail due to violating topology spread constraints,
// adding an assigned Pod may make it schedulable.
// - UpdatePodLabel. Updating an existing Pod's labels (e.g., removal) may make
// an unschedulable Pod schedulable.
// - Delete. An unschedulable Pod may fail due to violating an existing Pod's topology spread constraints,
// deleting an existing Pod may make it schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: podActionType}, QueueingHintFn: pl.isSchedulableAfterPodChange},
// A Node add|delete|update may change a topology key,
// which can make pods that are being scheduled schedulable or unschedulable.
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add | fwk.Delete | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
}, nil
}
// involvedInTopologySpreading returns true if the incomingPod is involved in the topology spreading of podWithSpreading.
func involvedInTopologySpreading(incomingPod, podWithSpreading *v1.Pod) bool {
return incomingPod.UID == podWithSpreading.UID ||
(incomingPod.Spec.NodeName != "" && incomingPod.Namespace == podWithSpreading.Namespace)
}
// hasConstraintWithNodeTaintsPolicyHonor returns true if any constraint has `NodeTaintsPolicy: Honor`.
func hasConstraintWithNodeTaintsPolicyHonor(constraints []topologySpreadConstraint) bool {
for _, c := range constraints {
if c.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
return true
}
}
return false
}
func (pl *PodTopologySpread) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalPod, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if (modifiedPod != nil && !involvedInTopologySpreading(modifiedPod, pod)) || (originalPod != nil && !involvedInTopologySpreading(originalPod, pod)) {
logger.V(5).Info("the added/updated/deleted pod is unscheduled or has different namespace with target pod, so it doesn't make the target pod schedulable",
"pod", klog.KObj(pod), "originalPod", klog.KObj(originalPod))
return fwk.QueueSkip, nil
}
constraints, err := pl.getConstraints(pod)
if err != nil {
return fwk.Queue, err
}
// Pod is modified. Return Queue when label(s) matching the topologySpread's selector are added, changed, or deleted.
if modifiedPod != nil && originalPod != nil {
if pod.UID == modifiedPod.UID && !equality.Semantic.DeepEqual(modifiedPod.Spec.Tolerations, originalPod.Spec.Tolerations) && hasConstraintWithNodeTaintsPolicyHonor(constraints) {
// If any constraint has `NodeTaintsPolicy: Honor`, we can return Queue when the target Pod has got a new toleration.
logger.V(5).Info("the unschedulable pod has got a new toleration, which could make it schedulable",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
if equality.Semantic.DeepEqual(modifiedPod.Labels, originalPod.Labels) {
logger.V(5).Info("the pod's update doesn't include the label update, which doesn't make the target pod schedulable",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
for _, c := range constraints {
if c.Selector.Matches(labels.Set(originalPod.Labels)) != c.Selector.Matches(labels.Set(modifiedPod.Labels)) {
// This modification makes this Pod match(or not match) with this constraint.
// Maybe now the scheduling result of topology spread gets changed by this change.
logger.V(5).Info("a scheduled pod's label was updated and it makes the updated pod match or unmatch the pod's topology spread constraints",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
}
// This modification of labels doesn't change whether this Pod would match selector or not in any constraints.
logger.V(5).Info("a scheduled pod's label was updated, but it's a change unrelated to the pod's topology spread constraints",
"pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
// Pod is added. Return Queue when the added Pod has a label that matches the topologySpread's selector.
if modifiedPod != nil {
if podLabelsMatchSpreadConstraints(constraints, modifiedPod.Labels) {
logger.V(5).Info("a scheduled pod was created and it matches with the pod's topology spread constraints",
"pod", klog.KObj(pod), "createdPod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
logger.V(5).Info("a scheduled pod was created, but it doesn't matches with the pod's topology spread constraints",
"pod", klog.KObj(pod), "createdPod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
// Pod is deleted. Return Queue when the deleted Pod has a label that matches the topologySpread's selector.
if podLabelsMatchSpreadConstraints(constraints, originalPod.Labels) {
logger.V(5).Info("a scheduled pod which matches with the pod's topology spread constraints was deleted, and the pod may be schedulable now",
"pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
return fwk.Queue, nil
}
logger.V(5).Info("a scheduled pod was deleted, but it's unrelated to the pod's topology spread constraints",
"pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
return fwk.QueueSkip, nil
}
// getConstraints extracts topologySpreadConstraint(s) from the Pod spec.
// If the Pod doesn't have any topologySpreadConstraint, it returns default constraints.
func (pl *PodTopologySpread) getConstraints(pod *v1.Pod) ([]topologySpreadConstraint, error) {
var constraints []topologySpreadConstraint
var err error
if len(pod.Spec.TopologySpreadConstraints) > 0 {
// We have feature gating in APIServer to strip the spec
// so don't need to re-check feature gate, just check length of Constraints.
constraints, err = pl.filterTopologySpreadConstraints(
pod.Spec.TopologySpreadConstraints,
pod.Labels,
v1.DoNotSchedule,
)
if err != nil {
return nil, fmt.Errorf("obtaining pod's hard topology spread constraints: %w", err)
}
} else {
constraints, err = pl.buildDefaultConstraints(pod, v1.DoNotSchedule)
if err != nil {
return nil, fmt.Errorf("setting default hard topology spread constraints: %w", err)
}
}
return constraints, nil
}
// isSchedulableAfterNodeChange returns Queue when node has topologyKey in its labels, else return QueueSkip.
func (pl *PodTopologySpread) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
constraints, err := pl.getConstraints(pod)
if err != nil {
return fwk.Queue, err
}
var originalNodeMatching, modifiedNodeMatching bool
if originalNode != nil {
originalNodeMatching = nodeLabelsMatchSpreadConstraints(originalNode.Labels, constraints)
}
if modifiedNode != nil {
modifiedNodeMatching = nodeLabelsMatchSpreadConstraints(modifiedNode.Labels, constraints)
}
// We return Queue in the following cases:
// 1. Node/UpdateNodeLabel:
// - The original node matched the pod's topology spread constraints, but the modified node does not.
// - The modified node matches the pod's topology spread constraints, but the original node does not.
// - The modified node matches the pod's topology spread constraints, and the original node and the modified node have different label values for any topologyKey.
// 2. Node/UpdateNodeTaint:
// - The modified node matches the pod's topology spread constraints, and the original node and the modified node have different taints.
// 3. Node/Add: The created node matches the pod's topology spread constraints.
// 4. Node/Delete: The original node matched the pod's topology spread constraints.
if originalNode != nil && modifiedNode != nil {
if originalNodeMatching != modifiedNodeMatching {
logger.V(5).Info("the node is updated and now pod topology spread constraints has changed, and the pod may be schedulable now",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode), "originalMatching", originalNodeMatching, "newMatching", modifiedNodeMatching)
return fwk.Queue, nil
}
if modifiedNodeMatching && (checkTopologyKeyLabelsChanged(originalNode.Labels, modifiedNode.Labels, constraints) || !equality.Semantic.DeepEqual(originalNode.Spec.Taints, modifiedNode.Spec.Taints)) {
logger.V(5).Info("the node is updated and now has different taints or labels, and the pod may be schedulable now",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
return fwk.QueueSkip, nil
}
if modifiedNode != nil {
if !modifiedNodeMatching {
logger.V(5).Info("the created node doesn't match pod topology spread constraints",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
logger.V(5).Info("the created node matches topology spread constraints, and the pod may be schedulable now",
"pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
if !originalNodeMatching {
logger.V(5).Info("the deleted node doesn't match pod topology spread constraints", "pod", klog.KObj(pod), "node", klog.KObj(originalNode))
return fwk.QueueSkip, nil
}
logger.V(5).Info("the deleted node matches topology spread constraints, and the pod may be schedulable now",
"pod", klog.KObj(pod), "node", klog.KObj(originalNode))
return fwk.Queue, nil
}
// checkTopologyKeyLabelsChanged checks if any of the labels specified as topologyKey in the constraints have changed.
func checkTopologyKeyLabelsChanged(originalLabels, modifiedLabels map[string]string, constraints []topologySpreadConstraint) bool {
for _, constraint := range constraints {
topologyKey := constraint.TopologyKey
if originalLabels[topologyKey] != modifiedLabels[topologyKey] {
return true
}
}
return false
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtopologyspread
import (
"context"
"fmt"
"math"
"sync/atomic"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
fwk "k8s.io/kube-scheduler/framework"
)
const preScoreStateKey = "PreScore" + Name
const invalidScore = -1
// preScoreState computed at PreScore and used at Score.
// Fields are exported for comparison during testing.
type preScoreState struct {
Constraints []topologySpreadConstraint
// IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey.
IgnoredNodes sets.Set[string]
// TopologyValueToPodCounts is a slice indexed by constraint index.
// Each entry is keyed with topology value, and valued with the number of matching pods.
TopologyValueToPodCounts []map[string]*int64
// TopologyNormalizingWeight is the weight we give to the counts per topology.
// This allows the pod counts of smaller topologies to not be watered down by
// bigger ones.
TopologyNormalizingWeight []float64
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// initPreScoreState iterates "filteredNodes" to filter out the nodes which
// don't have required topologyKey(s), and initialize:
// 1) s.TopologyPairToPodCounts: keyed with both eligible topology pair and node names.
// 2) s.IgnoredNodes: the set of nodes that shouldn't be scored.
// 3) s.TopologyNormalizingWeight: The weight to be given to each constraint based on the number of values in a topology.
func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, filteredNodes []fwk.NodeInfo, requireAllTopologies bool) error {
var err error
if len(pod.Spec.TopologySpreadConstraints) > 0 {
s.Constraints, err = pl.filterTopologySpreadConstraints(
pod.Spec.TopologySpreadConstraints,
pod.Labels,
v1.ScheduleAnyway,
)
if err != nil {
return fmt.Errorf("obtaining pod's soft topology spread constraints: %w", err)
}
} else {
s.Constraints, err = pl.buildDefaultConstraints(pod, v1.ScheduleAnyway)
if err != nil {
return fmt.Errorf("setting default soft topology spread constraints: %w", err)
}
}
if len(s.Constraints) == 0 {
return nil
}
s.TopologyValueToPodCounts = make([]map[string]*int64, len(s.Constraints))
for i := 0; i < len(s.Constraints); i++ {
s.TopologyValueToPodCounts[i] = make(map[string]*int64)
}
topoSize := make([]int, len(s.Constraints))
for _, node := range filteredNodes {
if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Node().Labels, s.Constraints) {
// Nodes which don't have all required topologyKeys present are ignored
// when scoring later.
s.IgnoredNodes.Insert(node.Node().Name)
continue
}
for i, constraint := range s.Constraints {
// per-node counts are calculated during Score.
if constraint.TopologyKey == v1.LabelHostname {
continue
}
value := node.Node().Labels[constraint.TopologyKey]
if s.TopologyValueToPodCounts[i][value] == nil {
s.TopologyValueToPodCounts[i][value] = new(int64)
topoSize[i]++
}
}
}
s.TopologyNormalizingWeight = make([]float64, len(s.Constraints))
for i, c := range s.Constraints {
sz := topoSize[i]
if c.TopologyKey == v1.LabelHostname {
sz = len(filteredNodes) - len(s.IgnoredNodes)
}
s.TopologyNormalizingWeight[i] = topologyNormalizingWeight(sz)
}
return nil
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *PodTopologySpread) PreScore(
ctx context.Context,
cycleState fwk.CycleState,
pod *v1.Pod,
filteredNodes []fwk.NodeInfo,
) *fwk.Status {
allNodes, err := pl.sharedLister.NodeInfos().List()
if err != nil {
return fwk.AsStatus(fmt.Errorf("getting all nodes: %w", err))
}
if len(allNodes) == 0 {
// No need to score.
return fwk.NewStatus(fwk.Skip)
}
state := &preScoreState{
IgnoredNodes: sets.New[string](),
}
// Only require that nodes have all the topology labels if using
// non-system-default spreading rules. This allows nodes that don't have a
// zone label to still have hostname spreading.
requireAllTopologies := len(pod.Spec.TopologySpreadConstraints) > 0 || !pl.systemDefaulted
err = pl.initPreScoreState(state, pod, filteredNodes, requireAllTopologies)
if err != nil {
return fwk.AsStatus(fmt.Errorf("calculating preScoreState: %w", err))
}
// Return Skip if the incoming pod doesn't have soft topology spread Constraints.
if len(state.Constraints) == 0 {
return fwk.NewStatus(fwk.Skip)
}
// Ignore parsing errors for backwards compatibility.
requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
processAllNode := func(n int) {
nodeInfo := allNodes[n]
node := nodeInfo.Node()
if !pl.enableNodeInclusionPolicyInPodTopologySpread {
// `node` should satisfy incoming pod's NodeSelector/NodeAffinity
if match, _ := requiredNodeAffinity.Match(node); !match {
return
}
}
// All topologyKeys need to be present in `node`
if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Labels, state.Constraints) {
return
}
for i, c := range state.Constraints {
if pl.enableNodeInclusionPolicyInPodTopologySpread &&
!c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) {
continue
}
value := node.Labels[c.TopologyKey]
// If current topology pair is not associated with any candidate node,
// continue to avoid unnecessary calculation.
// Per-node counts are also skipped, as they are done during Score.
tpCount := state.TopologyValueToPodCounts[i][value]
if tpCount == nil {
continue
}
count := countPodsMatchSelector(nodeInfo.GetPods(), c.Selector, pod.Namespace)
atomic.AddInt64(tpCount, int64(count))
}
}
pl.parallelizer.Until(ctx, len(allNodes), processAllNode, pl.Name())
cycleState.Write(preScoreStateKey, state)
return nil
}
// Score invoked at the Score extension point.
// The "score" returned in this function is the matching number of pods on the `nodeName`,
// it is normalized later.
func (pl *PodTopologySpread) Score(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(cycleState)
if err != nil {
return 0, fwk.AsStatus(err)
}
// Return if the node is not qualified.
if s.IgnoredNodes.Has(node.Name) {
return 0, nil
}
// For each present <pair>, current node gets a credit of <matchSum>.
// And we sum up <matchSum> and return it as this node's score.
var score float64
for i, c := range s.Constraints {
if tpVal, ok := node.Labels[c.TopologyKey]; ok {
var cnt int64
if c.TopologyKey == v1.LabelHostname {
cnt = int64(countPodsMatchSelector(nodeInfo.GetPods(), c.Selector, pod.Namespace))
} else {
cnt = *s.TopologyValueToPodCounts[i][tpVal]
}
score += scoreForCount(cnt, c.MaxSkew, s.TopologyNormalizingWeight[i])
}
}
return int64(math.Round(score)), nil
}
// NormalizeScore invoked after scoring all nodes.
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
if s == nil {
return nil
}
// Calculate <minScore> and <maxScore>
var minScore int64 = math.MaxInt64
var maxScore int64
for i, score := range scores {
// it's mandatory to check if <score.Name> is present in m.IgnoredNodes
if s.IgnoredNodes.Has(score.Name) {
scores[i].Score = invalidScore
continue
}
if score.Score < minScore {
minScore = score.Score
}
if score.Score > maxScore {
maxScore = score.Score
}
}
for i := range scores {
if scores[i].Score == invalidScore {
scores[i].Score = 0
continue
}
if maxScore == 0 {
scores[i].Score = fwk.MaxNodeScore
continue
}
s := scores[i].Score
scores[i].Score = fwk.MaxNodeScore * (maxScore + minScore - s) / maxScore
}
return nil
}
// ScoreExtensions of the Score plugin.
func (pl *PodTopologySpread) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("error reading %q from cycleState: %w", preScoreStateKey, err)
}
s, ok := c.(*preScoreState)
if !ok {
return nil, fmt.Errorf("%+v convert to podtopologyspread.preScoreState error", c)
}
return s, nil
}
// topologyNormalizingWeight calculates the weight for the topology, based on
// the number of values that exist for a topology.
// Since <size> is at least 1 (all nodes that passed the Filters are in the
// same topology), and k8s supports 5k nodes, the result is in the interval
// <1.09, 8.52>.
//
// Note: <size> could also be zero when no nodes have the required topologies,
// however we don't care about topology weight in this case as we return a 0
// score for all nodes.
func topologyNormalizingWeight(size int) float64 {
return math.Log(float64(size + 2))
}
// scoreForCount calculates the score based on number of matching pods in a
// topology domain, the constraint's maxSkew and the topology weight.
// `maxSkew-1` is added to the score so that differences between topology
// domains get watered down, controlling the tolerance of the score to skews.
func scoreForCount(cnt int64, maxSkew int32, tpWeight float64) float64 {
return float64(cnt)*tpWeight + float64(maxSkew-1)
}
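// Illustrative sketch (hypothetical numbers, not part of the plugin): how a
// raw Score value is assembled for a single constraint before normalization.
func exampleScoreForCount() {
	w := topologyNormalizingWeight(3) // 3 topology values -> log(5) ≈ 1.61
	raw := scoreForCount(4, 2, w)     // 4 matching pods, maxSkew=2
	_ = raw                           // 4*1.61 + (2-1) ≈ 7.44
}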
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queuesort
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = names.PrioritySort
// PrioritySort is a plugin that implements Priority based sorting.
type PrioritySort struct{}
var _ fwk.QueueSortPlugin = &PrioritySort{}
// Name returns name of the plugin.
func (pl *PrioritySort) Name() string {
return Name
}
// Less is the function used by the activeQ heap algorithm to sort pods.
// It sorts pods based on their priority. When priorities are equal, it uses
// PodQueueInfo.timestamp.
func (pl *PrioritySort) Less(pInfo1, pInfo2 fwk.QueuedPodInfo) bool {
p1 := corev1helpers.PodPriority(pInfo1.GetPodInfo().GetPod())
p2 := corev1helpers.PodPriority(pInfo2.GetPodInfo().GetPod())
return (p1 > p2) || (p1 == p2 && pInfo1.GetTimestamp().Before(pInfo2.GetTimestamp()))
}
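// For example (hypothetical values): a pod with priority 1000 enqueued at
// t=2s sorts ahead of a pod with priority 10 enqueued at t=1s; when two pods
// share a priority, the one with the earlier queue timestamp comes first.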
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle fwk.Handle) (fwk.Plugin, error) {
return &PrioritySort{}, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
// NewInTreeRegistry builds the registry with all the in-tree plugins.
// A scheduler that runs out of tree plugins can register additional plugins
// through the WithFrameworkOutOfTreeRegistry option.
func NewInTreeRegistry() runtime.Registry {
fts := plfeature.NewSchedulerFeaturesFromGates(feature.DefaultFeatureGate)
registry := runtime.Registry{
dynamicresources.Name: runtime.FactoryAdapter(fts, dynamicresources.New),
imagelocality.Name: imagelocality.New,
tainttoleration.Name: runtime.FactoryAdapter(fts, tainttoleration.New),
nodename.Name: runtime.FactoryAdapter(fts, nodename.New),
nodeports.Name: runtime.FactoryAdapter(fts, nodeports.New),
nodeaffinity.Name: runtime.FactoryAdapter(fts, nodeaffinity.New),
podtopologyspread.Name: runtime.FactoryAdapter(fts, podtopologyspread.New),
nodeunschedulable.Name: runtime.FactoryAdapter(fts, nodeunschedulable.New),
noderesources.Name: runtime.FactoryAdapter(fts, noderesources.NewFit),
noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation),
volumebinding.Name: runtime.FactoryAdapter(fts, volumebinding.New),
volumerestrictions.Name: runtime.FactoryAdapter(fts, volumerestrictions.New),
volumezone.Name: runtime.FactoryAdapter(fts, volumezone.New),
nodevolumelimits.CSIName: runtime.FactoryAdapter(fts, nodevolumelimits.NewCSI),
interpodaffinity.Name: runtime.FactoryAdapter(fts, interpodaffinity.New),
queuesort.Name: queuesort.New,
defaultbinder.Name: defaultbinder.New,
defaultpreemption.Name: runtime.FactoryAdapter(fts, defaultpreemption.New),
schedulinggates.Name: runtime.FactoryAdapter(fts, schedulinggates.New),
}
return registry
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedulinggates
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// Name of the plugin used in the plugin registry and configurations.
const Name = names.SchedulingGates
// SchedulingGates checks if a Pod carries .spec.schedulingGates.
type SchedulingGates struct {
enableSchedulingQueueHint bool
}
var _ fwk.PreEnqueuePlugin = &SchedulingGates{}
var _ fwk.EnqueueExtensions = &SchedulingGates{}
func (pl *SchedulingGates) Name() string {
return Name
}
func (pl *SchedulingGates) PreEnqueue(ctx context.Context, p *v1.Pod) *fwk.Status {
if len(p.Spec.SchedulingGates) == 0 {
return nil
}
gates := make([]string, 0, len(p.Spec.SchedulingGates))
for _, gate := range p.Spec.SchedulingGates {
gates = append(gates, gate.Name)
}
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, fmt.Sprintf("waiting for scheduling gates: %v", gates))
}
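// For example (hypothetical gate name): a Pod created with
//
//	spec:
//	  schedulingGates:
//	  - name: example.com/provisioning
//
// is rejected here with "waiting for scheduling gates: [example.com/provisioning]"
// and stays out of the scheduling cycle until that entry is removed from its spec.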
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *SchedulingGates) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
if !pl.enableSchedulingQueueHint {
return nil, nil
}
// When the QueueingHint feature is enabled,
// the scheduling queue uses Pod/Update Queueing Hint
// to determine whether a Pod's update makes the Pod schedulable or not.
// https://github.com/kubernetes/kubernetes/pull/122234
return []fwk.ClusterEventWithHint{
// Pods can become schedulable once their scheduling gates are removed.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.UpdatePodSchedulingGatesEliminated}, QueueingHintFn: pl.isSchedulableAfterUpdatePodSchedulingGatesEliminated},
}, nil
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, _ fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &SchedulingGates{
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
func (pl *SchedulingGates) isSchedulableAfterUpdatePodSchedulingGatesEliminated(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if modifiedPod.UID != pod.UID {
// If the update event is not for targetPod, it wouldn't make targetPod schedulable.
return fwk.QueueSkip, nil
}
return fwk.Queue, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tainttoleration
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
type TaintToleration struct {
handle fwk.Handle
enableSchedulingQueueHint bool
}
var _ fwk.FilterPlugin = &TaintToleration{}
var _ fwk.PreScorePlugin = &TaintToleration{}
var _ fwk.ScorePlugin = &TaintToleration{}
var _ fwk.EnqueueExtensions = &TaintToleration{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.TaintToleration
// preScoreStateKey is the key in CycleState to TaintToleration pre-computed data for Scoring.
preScoreStateKey = "PreScore" + Name
// ErrReasonNotMatch is the Filter reason status when not matching.
ErrReasonNotMatch = "node(s) had taints that the pod didn't tolerate"
)
// Name returns name of the plugin. It is used in logs, etc.
func (pl *TaintToleration) Name() string {
return Name
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *TaintToleration) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
if pl.enableSchedulingQueueHint {
return []fwk.ClusterEventWithHint{
// When the QueueingHint feature is enabled, preCheck is eliminated and we don't need additional UpdateNodeLabel.
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add | fwk.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
// When the QueueingHint feature is enabled,
// the scheduling queue uses Pod/Update Queueing Hint
// to determine whether a Pod's update makes the Pod schedulable or not.
// https://github.com/kubernetes/kubernetes/pull/122234
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.UpdatePodToleration}, QueueingHintFn: pl.isSchedulableAfterPodTolerationChange},
}, nil
}
return []fwk.ClusterEventWithHint{
// A note about UpdateNodeLabel event:
// Ideally, it's supposed to register only Add | UpdateNodeTaint because UpdateNodeLabel will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Add | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel}, QueueingHintFn: pl.isSchedulableAfterNodeChange},
// No need to register the Pod event; the update to the unschedulable Pods already triggers the scheduling retry when QHint is disabled.
}, nil
}
// isSchedulableAfterNodeChange is invoked for all node events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable.
func (pl *TaintToleration) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
wasUntolerated := true
if originalNode != nil {
_, wasUntolerated = v1helper.FindMatchingUntoleratedTaint(originalNode.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc())
}
_, isUntolerated := v1helper.FindMatchingUntoleratedTaint(modifiedNode.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc())
if wasUntolerated && !isUntolerated {
logger.V(5).Info("node was created or updated, and this may make the Pod rejected by TaintToleration plugin in the previous scheduling cycle schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.Queue, nil
}
logger.V(5).Info("node was created or updated, but it doesn't change the TaintToleration plugin's decision", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
return fwk.QueueSkip, nil
}
// Filter invoked at the filter extension point.
func (pl *TaintToleration) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
node := nodeInfo.Node()
taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc())
if !isUntolerated {
return nil
}
errReason := fmt.Sprintf("node(s) had untolerated taint {%s: %s}", taint.Key, taint.Value)
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, errReason)
}
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
tolerationsPreferNoSchedule []v1.Toleration
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// getAllTolerationPreferNoSchedule gets the list of all Tolerations with Effect PreferNoSchedule or with no effect.
func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
for _, toleration := range tolerations {
// Empty effect means all effects which includes PreferNoSchedule, so we need to collect it as well.
if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
tolerationList = append(tolerationList, toleration)
}
}
return
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *TaintToleration) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
state := &preScoreState{
tolerationsPreferNoSchedule: tolerationsPreferNoSchedule,
}
cycleState.Write(preScoreStateKey, state)
return nil
}
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err)
}
s, ok := c.(*preScoreState)
if !ok {
return nil, fmt.Errorf("%+v convert to tainttoleration.preScoreState error", c)
}
return s, nil
}
// countIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule.
func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
for _, taint := range taints {
// check only on taints that have effect PreferNoSchedule
if taint.Effect != v1.TaintEffectPreferNoSchedule {
continue
}
if !v1helper.TolerationsTolerateTaint(tolerations, &taint) {
intolerableTaints++
}
}
return
}
// Score invoked at the Score extension point.
func (pl *TaintToleration) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(state)
if err != nil {
return 0, fwk.AsStatus(err)
}
score := int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, s.tolerationsPreferNoSchedule))
return score, nil
}
// NormalizeScore invoked after scoring all nodes.
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
return helper.DefaultNormalizeScore(fwk.MaxNodeScore, true, scores)
}
// ScoreExtensions of the Score plugin.
func (pl *TaintToleration) ScoreExtensions() fwk.ScoreExtensions {
return pl
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, h fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
return &TaintToleration{
handle: h,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
// isSchedulableAfterPodTolerationChange is invoked whenever a pod's toleration changed.
func (pl *TaintToleration) isSchedulableAfterPodTolerationChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, modifiedPod, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if pod.UID == modifiedPod.UID {
// The updated Pod is the unschedulable Pod.
logger.V(5).Info("a new toleration is added for the unschedulable Pod, and it may make it schedulable", "pod", klog.KObj(modifiedPod))
return fwk.Queue, nil
}
logger.V(5).Info("a new toleration is added for a Pod, but it's an unrelated Pod and wouldn't change the TaintToleration plugin's decision", "pod", klog.KObj(modifiedPod))
return fwk.QueueSkip, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"fmt"
v1 "k8s.io/api/core/v1"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)
// PVAssumeCache is an AssumeCache for PersistentVolume objects.
type PVAssumeCache struct {
*assumecache.AssumeCache
logger klog.Logger
}
func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
if pv, ok := obj.(*v1.PersistentVolume); ok {
return []string{storagehelpers.GetPersistentVolumeClass(pv)}, nil
}
return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj)
}
// NewPVAssumeCache creates a PV assume cache.
func NewPVAssumeCache(logger klog.Logger, informer assumecache.Informer) *PVAssumeCache {
logger = klog.LoggerWithName(logger, "PV Cache")
return &PVAssumeCache{
AssumeCache: assumecache.NewAssumeCache(logger, informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc),
logger: logger,
}
}
func (c *PVAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
obj, err := c.Get(pvName)
if err != nil {
return nil, err
}
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}
}
return pv, nil
}
func (c *PVAssumeCache) GetAPIPV(pvName string) (*v1.PersistentVolume, error) {
obj, err := c.GetAPIObj(pvName)
if err != nil {
return nil, err
}
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}
}
return pv, nil
}
func (c *PVAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume {
objs := c.List(&v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
StorageClassName: storageClassName,
},
})
pvs := []*v1.PersistentVolume{}
for _, obj := range objs {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
c.logger.Error(&assumecache.WrongTypeError{TypeName: "v1.PersistentVolume", Object: obj}, "ListPVs")
continue
}
pvs = append(pvs, pv)
}
return pvs
}
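// Illustrative usage sketch (hypothetical logger and informer arguments, not
// part of the plugin): the typical read path through the PV assume cache.
func examplePVAssumeCacheUsage(logger klog.Logger, pvInformer assumecache.Informer) {
	cache := NewPVAssumeCache(logger, pvInformer)
	for _, pv := range cache.ListPVs("fast-ssd") {
		_ = pv.Name // PVs whose spec.storageClassName is "fast-ssd"
	}
}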
// PVCAssumeCache is an AssumeCache for PersistentVolumeClaim objects.
type PVCAssumeCache struct {
*assumecache.AssumeCache
logger klog.Logger
}
// NewPVCAssumeCache creates a PVC assume cache.
func NewPVCAssumeCache(logger klog.Logger, informer assumecache.Informer) *PVCAssumeCache {
logger = klog.LoggerWithName(logger, "PVC Cache")
return &PVCAssumeCache{
AssumeCache: assumecache.NewAssumeCache(logger, informer, "v1.PersistentVolumeClaim", "", nil),
logger: logger,
}
}
func (c *PVCAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
obj, err := c.Get(pvcKey)
if err != nil {
return nil, err
}
pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok {
return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolumeClaim", Object: obj}
}
return pvc, nil
}
func (c *PVCAssumeCache) GetAPIPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
obj, err := c.GetAPIObj(pvcKey)
if err != nil {
return nil, err
}
pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok {
return nil, &assumecache.WrongTypeError{TypeName: "v1.PersistentVolumeClaim", Object: obj}
}
return pvc, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
csiplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
)
// ConflictReason is used for the special strings which explain why
// volume binding is impossible for a node.
type ConflictReason string
// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
type ConflictReasons []ConflictReason
func (reasons ConflictReasons) Len() int { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int) { reasons[i], reasons[j] = reasons[j], reasons[i] }
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict ConflictReason = "node(s) didn't match PersistentVolume's node affinity"
// ErrReasonNotEnoughSpace is used when a pod cannot start on a node because not enough storage space is available.
ErrReasonNotEnoughSpace = "node(s) did not have enough free storage"
// ErrReasonPVNotExist is used when a pod has one or more PVC(s) bound to non-existent persistent volume(s).
ErrReasonPVNotExist = "node(s) unavailable due to one or more pvc(s) bound to non-existent pv(s)"
)
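// Because ConflictReasons implements sort.Interface (see above), callers can
// produce a deterministic failure message. A sketch (the reasons chosen are
// illustrative):
//
//	reasons := ConflictReasons{ErrReasonNodeConflict, ErrReasonBindConflict}
//	sort.Sort(reasons)
//	msgs := make([]string, 0, len(reasons))
//	for _, r := range reasons {
//		msgs = append(msgs, string(r))
//	}
//	status := strings.Join(msgs, "; ")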
// BindingInfo holds a binding between PV and PVC.
type BindingInfo struct {
// PVC that needs to be bound
pvc *v1.PersistentVolumeClaim
// Proposed PV to bind to this PVC
pv *v1.PersistentVolume
}
// StorageClassName returns the name of the storage class.
func (b *BindingInfo) StorageClassName() string {
return b.pv.Spec.StorageClassName
}
// StorageResource represents a storage resource, pairing the requested size with the available capacity.
type StorageResource struct {
Requested int64
Capacity int64
}
// StorageResource returns the requested and available storage resource for this binding.
func (b *BindingInfo) StorageResource() *StorageResource {
// both fields are mandatory
requestedQty := b.pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
capacityQty := b.pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
return &StorageResource{
Requested: requestedQty.Value(),
Capacity: capacityQty.Value(),
}
}
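// For example (a sketch; the quantities are illustrative): with a PVC
// requesting 5Gi bound to a 10Gi PV, both sides are reported in bytes.
//
//	requested := resource.MustParse("5Gi")  // from pvc.Spec.Resources.Requests
//	capacity := resource.MustParse("10Gi")  // from pv.Spec.Capacity
//	res := &StorageResource{Requested: requested.Value(), Capacity: capacity.Value()}
//	// res.Requested == 5<<30, res.Capacity == 10<<30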
// DynamicProvision represents a dynamically provisioned volume.
type DynamicProvision struct {
PVC *v1.PersistentVolumeClaim
NodeCapacity *storagev1.CSIStorageCapacity
}
// PodVolumes holds pod's volumes information used in volume scheduling.
type PodVolumes struct {
// StaticBindings are binding decisions for PVCs which can be bound to
// pre-provisioned static PVs.
StaticBindings []*BindingInfo
// DynamicProvisions are PVCs that require dynamic provisioning
DynamicProvisions []*DynamicProvision
}
// InTreeToCSITranslator contains methods required to check migratable status
// and perform translations from in-tree PVs to CSI.
type InTreeToCSITranslator interface {
IsPVMigratable(pv *v1.PersistentVolume) bool
GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
TranslateInTreePVToCSI(logger klog.Logger, pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
}
// SchedulerVolumeBinder is used by the scheduler VolumeBinding plugin to
// handle PVC/PV binding and dynamic provisioning. The binding decisions are
// integrated into the pod scheduling workflow so that the PV NodeAffinity is
// also considered along with the pod's other scheduling requirements.
//
// This integrates into the existing scheduler workflow as follows:
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
// a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
// here, pod volume information will be saved in current scheduling cycle state for later use.
// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
// c. Invokes all score plugins. Future/TBD
// d. Selects the best node for the Pod.
// e. Invokes all reserve plugins. AssumePodVolumes() is invoked here.
// i. If PVC binding is required, cache in-memory only:
// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
// * For dynamic provisioning: update PVC object with the node selected in d)
// * For the pod, which PVCs and PVs need API updates.
// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache.
// This is handled by the scheduler, not here.
// f. Asynchronously binds volumes and the pod in a separate goroutine
// i. BindPodVolumes() is called first in PreBind phase. It makes all the necessary API updates and waits for
// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
// back through the scheduler.
// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
// 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue
// while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
// node and returns pod's volumes information.
//
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC.
//
// It returns an error when something went wrong or a list of reasons why the node is
// (currently) not usable for the pod.
//
// If the CSIStorageCapacity feature is enabled, then it also checks for sufficient storage
// for volumes that still need to be created.
//
// This function is called by the scheduler VolumeBinding plugin and can be called in parallel.
FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
// that the PV is prebound to the PVC.
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns true if all volumes are fully bound
//
// This function is called serially.
AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error)
// RevertAssumedPodVolumes will revert assumed PV and PVC cache.
RevertAssumedPodVolumes(podVolumes *PodVolumes)
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
// 3. Wait for PVCs to be completely bound by the PV controller
//
// This function can be called in parallel.
BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error
}
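// The call order from the VolumeBinding plugin, as a hedged sketch (error
// handling and cycle-state plumbing omitted; pod, node, and ctx are assumed):
//
//	claims, err := binder.GetPodVolumeClaims(logger, pod)                        // PreFilter
//	podVolumes, reasons, err := binder.FindPodVolumes(logger, pod, claims, node) // Filter, per node
//	allBound, err := binder.AssumePodVolumes(logger, pod, node.Name, podVolumes) // Reserve
//	// On a later scheduling failure:
//	binder.RevertAssumedPodVolumes(podVolumes)
//	// Otherwise, asynchronously in PreBind:
//	err = binder.BindPodVolumes(ctx, pod, podVolumes)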
type PodVolumeClaims struct {
// boundClaims are the pod's bound PVCs.
boundClaims []*v1.PersistentVolumeClaim
// unboundClaimsDelayBinding are the pod's unbound PVCs with delayed binding (including provisioning).
unboundClaimsDelayBinding []*v1.PersistentVolumeClaim
// unboundClaimsImmediate are the pod's unbound PVCs with immediate binding (i.e., they are supposed to be bound already).
unboundClaimsImmediate []*v1.PersistentVolumeClaim
// unboundVolumesDelayBinding are PVs that belong to storage classes of the pod's unbound PVCs with delayed binding.
unboundVolumesDelayBinding map[string][]*v1.PersistentVolume
}
type volumeBinder struct {
kubeClient clientset.Interface
enableVolumeAttributesClass bool
enableCSIMigrationPortworx bool
classLister storagelisters.StorageClassLister
podLister corelisters.PodLister
nodeLister corelisters.NodeLister
csiNodeLister storagelisters.CSINodeLister
pvcCache *PVCAssumeCache
pvCache *PVAssumeCache
// Amount of time to wait for the bind operation to succeed
bindTimeout time.Duration
translator InTreeToCSITranslator
csiDriverLister storagelisters.CSIDriverLister
csiStorageCapacityLister storagelisters.CSIStorageCapacityLister
}
var _ SchedulerVolumeBinder = &volumeBinder{}
// CapacityCheck contains additional parameters for NewVolumeBinder that
// are only needed when checking volume sizes against available storage
// capacity is desired.
type CapacityCheck struct {
CSIDriverInformer storageinformers.CSIDriverInformer
CSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
//
// capacityCheck determines how storage capacity is checked (CSIStorageCapacity feature).
func NewVolumeBinder(
logger klog.Logger,
kubeClient clientset.Interface,
fts feature.Features,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
csiNodeInformer storageinformers.CSINodeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
pvInformer coreinformers.PersistentVolumeInformer,
storageClassInformer storageinformers.StorageClassInformer,
capacityCheck CapacityCheck,
bindTimeout time.Duration) SchedulerVolumeBinder {
b := &volumeBinder{
kubeClient: kubeClient,
enableVolumeAttributesClass: fts.EnableVolumeAttributesClass,
enableCSIMigrationPortworx: fts.EnableCSIMigrationPortworx,
podLister: podInformer.Lister(),
classLister: storageClassInformer.Lister(),
nodeLister: nodeInformer.Lister(),
csiNodeLister: csiNodeInformer.Lister(),
pvcCache: NewPVCAssumeCache(logger, pvcInformer.Informer()),
pvCache: NewPVAssumeCache(logger, pvInformer.Informer()),
bindTimeout: bindTimeout,
translator: csitrans.New(),
}
b.csiDriverLister = capacityCheck.CSIDriverInformer.Lister()
b.csiStorageCapacityLister = capacityCheck.CSIStorageCapacityInformer.Lister()
return b
}
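// A hedged construction sketch (the informerFactory, kubeClient, fts, and
// logger variables are assumed to exist in the caller):
//
//	binder := NewVolumeBinder(
//		logger, kubeClient, fts,
//		informerFactory.Core().V1().Pods(),
//		informerFactory.Core().V1().Nodes(),
//		informerFactory.Storage().V1().CSINodes(),
//		informerFactory.Core().V1().PersistentVolumeClaims(),
//		informerFactory.Core().V1().PersistentVolumes(),
//		informerFactory.Storage().V1().StorageClasses(),
//		CapacityCheck{
//			CSIDriverInformer:          informerFactory.Storage().V1().CSIDrivers(),
//			CSIStorageCapacityInformer: informerFactory.Storage().V1().CSIStorageCapacities(),
//		},
//		10*time.Minute, // bindTimeout
//	)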
// FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
// for the given pod and node. If the node does not fit, conflict reasons are
// returned.
func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
podVolumes = &PodVolumes{}
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
logger.V(5).Info("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node))
// Initialize to true for pods that don't have volumes. These
// booleans get translated into reason strings when the function
// returns without an error.
unboundVolumesSatisfied := true
boundVolumesSatisfied := true
sufficientStorage := true
boundPVsFound := true
defer func() {
if err != nil {
return
}
if !boundVolumesSatisfied {
reasons = append(reasons, ErrReasonNodeConflict)
}
if !unboundVolumesSatisfied {
reasons = append(reasons, ErrReasonBindConflict)
}
if !sufficientStorage {
reasons = append(reasons, ErrReasonNotEnoughSpace)
}
if !boundPVsFound {
reasons = append(reasons, ErrReasonPVNotExist)
}
}()
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc()
}
}()
var (
staticBindings []*BindingInfo
dynamicProvisions []*DynamicProvision
)
defer func() {
// Although we do not distinguish nil from empty in this function, for
// easier testing, we normalize empty to nil.
if len(staticBindings) == 0 {
staticBindings = nil
}
if len(dynamicProvisions) == 0 {
dynamicProvisions = nil
}
podVolumes.StaticBindings = staticBindings
podVolumes.DynamicProvisions = dynamicProvisions
}()
// Check PV node affinity on bound volumes
if len(podVolumeClaims.boundClaims) > 0 {
boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(logger, podVolumeClaims.boundClaims, node, pod)
if err != nil {
return
}
}
// Find matching volumes and node for unbound claims
if len(podVolumeClaims.unboundClaimsDelayBinding) > 0 {
var (
claimsToFindMatching []*v1.PersistentVolumeClaim
claimsToProvision []*v1.PersistentVolumeClaim
)
// Filter out claims to provision
for _, claim := range podVolumeClaims.unboundClaimsDelayBinding {
if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok {
if selectedNode != node.Name {
// Fast path, skip unmatched node.
unboundVolumesSatisfied = false
return
}
claimsToProvision = append(claimsToProvision, claim)
} else {
claimsToFindMatching = append(claimsToFindMatching, claim)
}
}
// Find matching volumes
if len(claimsToFindMatching) > 0 {
var unboundClaims []*v1.PersistentVolumeClaim
unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(logger, pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node)
if err != nil {
return
}
claimsToProvision = append(claimsToProvision, unboundClaims...)
}
// Check for claims to provision. This is the first time where we potentially
// find out that storage is not sufficient for the node.
if len(claimsToProvision) > 0 {
unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = b.checkVolumeProvisions(logger, pod, claimsToProvision, node)
if err != nil {
return
}
}
}
return
}
// convertDynamicProvisionsToPVCs converts a slice of *DynamicProvision to a
// slice of *v1.PersistentVolumeClaim.
func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
pvcs := make([]*v1.PersistentVolumeClaim, 0, len(dynamicProvisions))
for _, dynamicProvision := range dynamicProvisions {
pvcs = append(pvcs, dynamicProvision.PVC)
}
return pvcs
}
// AssumePodVolumes will take the matching PVs and the PVCs to provision from the pod's
// volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// 3. Update PodVolumes again with cached API updates for PVs and PVCs.
func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) {
logger.V(4).Info("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
}
}()
if allBound := b.arePodVolumesBound(logger, assumedPod); allBound {
logger.V(4).Info("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName))
return true, nil
}
// Assume PV
newBindings := []*BindingInfo{}
for _, binding := range podVolumes.StaticBindings {
newPV, dirty, err := volume.GetBindVolumeToClaim(binding.pv, binding.pvc)
logger.V(5).Info("AssumePodVolumes: GetBindVolumeToClaim",
"pod", klog.KObj(assumedPod),
"PV", klog.KObj(binding.pv),
"PVC", klog.KObj(binding.pvc),
"newPV", klog.KObj(newPV),
"dirty", dirty,
)
if err != nil {
logger.Error(err, "AssumePodVolumes: fail to GetBindVolumeToClaim")
b.revertAssumedPVs(newBindings)
return false, err
}
// TODO: can we assume every time?
if dirty {
err = b.pvCache.Assume(newPV)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, err
}
}
newBindings = append(newBindings, &BindingInfo{pv: newPV, pvc: binding.pvc})
}
// Assume PVCs
newProvisionedPVCs := []*DynamicProvision{}
for _, dynamicProvision := range podVolumes.DynamicProvisions {
// The claims from the method args may point to the watcher cache. We must not
// modify these; therefore, create a copy.
claimClone := dynamicProvision.PVC.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName)
err = b.pvcCache.Assume(claimClone)
if err != nil {
pvcs := convertDynamicProvisionsToPVCs(newProvisionedPVCs)
b.revertAssumedPVs(newBindings)
b.revertAssumedPVCs(pvcs)
return
}
newProvisionedPVCs = append(newProvisionedPVCs, &DynamicProvision{PVC: claimClone})
}
podVolumes.StaticBindings = newBindings
podVolumes.DynamicProvisions = newProvisionedPVCs
return
}
// RevertAssumedPodVolumes will revert assumed PV and PVC cache.
func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
pvcs := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
b.revertAssumedPVs(podVolumes.StaticBindings)
b.revertAssumedPVCs(pvcs)
}
// BindPodVolumes gets the cached bindings and PVCs to provision from the pod's volume information,
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) {
logger := klog.FromContext(ctx)
logger.V(4).Info("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName))
defer func() {
if err != nil {
metrics.VolumeSchedulingStageFailed.WithLabelValues("bind").Inc()
}
}()
bindings := podVolumes.StaticBindings
claimsToProvision := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
// Start API operations
err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision)
if err != nil {
return err
}
err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) {
return b.checkBindings(logger, assumedPod, bindings, claimsToProvision)
})
if err != nil {
return fmt.Errorf("binding volumes: %w", err)
}
return nil
}
func getPodName(pod *v1.Pod) string {
return pod.Namespace + "/" + pod.Name
}
func getPVCName(pvc *v1.PersistentVolumeClaim) string {
return pvc.Namespace + "/" + pvc.Name
}
// bindAPIUpdate makes the API update for those PVs/PVCs.
func (b *volumeBinder) bindAPIUpdate(ctx context.Context, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
logger := klog.FromContext(ctx)
podName := getPodName(pod)
if bindings == nil {
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
lastProcessedBinding := 0
lastProcessedProvisioning := 0
defer func() {
// only revert assumed cached updates for volumes we haven't successfully bound
if lastProcessedBinding < len(bindings) {
b.revertAssumedPVs(bindings[lastProcessedBinding:])
}
// only revert assumed cached updates for claims we haven't updated.
if lastProcessedProvisioning < len(claimsToProvision) {
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
}
}()
var (
binding *BindingInfo
i int
claim *v1.PersistentVolumeClaim
)
// Do the actual prebinding. Let the PV controller take care of the rest.
// There is no API rollback if the actual binding fails.
for _, binding = range bindings {
// TODO: does it hurt if we make an api call and nothing needs to be updated?
logger.V(5).Info("Updating PersistentVolume: binding to claim", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(ctx, binding.pv, metav1.UpdateOptions{})
if err != nil {
logger.V(4).Info("Updating PersistentVolume: binding to claim failed", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err)
return err
}
logger.V(2).Info("Updated PersistentVolume with claim. Waiting for binding to complete", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc))
// Save updated object from apiserver for later checking.
binding.pv = newPV
lastProcessedBinding++
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest.
// The PV controller is expected to signal back by removing the related annotations if the actual provisioning fails.
for i, claim = range claimsToProvision {
logger.V(5).Info("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim))
newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
logger.V(4).Info("Updating PersistentVolumeClaim: binding to volume failed", "PVC", klog.KObj(claim), "err", err)
return err
}
// Save updated object from apiserver for later checking.
claimsToProvision[i] = newClaim
lastProcessedProvisioning++
}
return nil
}
var (
versioner = storage.APIObjectVersioner{}
)
// checkBindings runs through all the PVCs in the Pod and checks:
// * if the PVC is fully bound
// * if there are any conditions that require binding to fail and be retried
//
// It returns true when all of the Pod's PVCs are fully bound, and an error if
// binding (and scheduling) needs to be retried.
// Note that it checks API objects, not the PV/PVC cache; the cache can be
// assumed again in the main scheduler loop, so we must check the latest state
// in the API server, which is shared with the PV controller and
// provisioners.
func (b *volumeBinder) checkBindings(logger klog.Logger, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
podName := getPodName(pod)
if bindings == nil {
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
node, err := b.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
return false, fmt.Errorf("failed to get node %q: %w", pod.Spec.NodeName, err)
}
csiNode, err := b.csiNodeLister.Get(node.Name)
if err != nil {
// TODO: return the error once CSINode is created by default
logger.V(4).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
}
// Check for any conditions that might require scheduling retry
// When pod is deleted, binding operation should be cancelled. There is no
// need to check PV/PVC bindings any more.
_, err = b.podLister.Pods(pod.Namespace).Get(pod.Name)
if err != nil {
if apierrors.IsNotFound(err) {
return false, fmt.Errorf("pod does not exist any more: %w", err)
}
logger.Error(err, "Failed to get pod from the lister", "pod", klog.KObj(pod))
}
for _, binding := range bindings {
pv, err := b.pvCache.GetAPIPV(binding.pv.Name)
if err != nil {
return false, fmt.Errorf("failed to check binding: %w", err)
}
pvc, err := b.pvcCache.GetAPIPVC(getPVCName(binding.pvc))
if err != nil {
return false, fmt.Errorf("failed to check binding: %w", err)
}
// Because we updated the PV in the apiserver, skip if the API object is older
// and wait for the new API object to propagate from the apiserver.
if versioner.CompareResourceVersion(binding.pv, pv) > 0 {
return false, nil
}
pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode)
if err != nil {
return false, fmt.Errorf("failed to translate pv to csi: %w", err)
}
// Check PV's node affinity (the node might not have the proper label)
if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil {
return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
}
// Check if pv.ClaimRef got dropped by unbindVolume()
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
}
// Check if pvc is fully bound
if !b.isPVCFullyBound(pvc) {
return false, nil
}
}
for _, claim := range claimsToProvision {
pvc, err := b.pvcCache.GetAPIPVC(getPVCName(claim))
if err != nil {
return false, fmt.Errorf("failed to check provisioning pvc: %w", err)
}
// Because we updated the PVC in the apiserver, skip if the API object is older
// and wait for the new API object to propagate from the apiserver.
if versioner.CompareResourceVersion(claim, pvc) > 0 {
return false, nil
}
// Check if selectedNode annotation is still set
if pvc.Annotations == nil {
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
}
selectedNode := pvc.Annotations[volume.AnnSelectedNode]
if selectedNode != pod.Spec.NodeName {
// If provisioner fails to provision a volume, selectedNode
// annotation will be removed to signal back to the scheduler to
// retry.
return false, fmt.Errorf("provisioning failed for PVC %q", pvc.Name)
}
// If the PVC is bound to a PV, check its node affinity
if pvc.Spec.VolumeName != "" {
pv, err := b.pvCache.GetAPIPV(pvc.Spec.VolumeName)
if err != nil {
if errors.Is(err, assumecache.ErrNotFound) {
// We tolerate a NotFound error here, because the PV may not be
// found yet due to API delay; we can check again next time.
// And if the PV does not exist because it was deleted, the PVC
// will be unbound eventually.
return false, nil
}
return false, fmt.Errorf("failed to get pv %q from cache: %w", pvc.Spec.VolumeName, err)
}
pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode)
if err != nil {
return false, err
}
if err := volume.CheckNodeAffinity(pv, node.Labels); err != nil {
return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %w", pv.Name, node.Name, err)
}
}
// Check if pvc is fully bound
if !b.isPVCFullyBound(pvc) {
return false, nil
}
}
// All pvs and pvcs that we operated on are bound
logger.V(2).Info("All PVCs for pod are bound", "pod", klog.KObj(pod))
return true, nil
}
func (b *volumeBinder) isVolumeBound(logger klog.Logger, pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) {
pvcName := ""
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
// Generic ephemeral inline volumes also use a PVC,
// just with a computed name, and...
pvcName = ephemeral.VolumeClaimName(pod, vol)
isEphemeral = true
default:
return true, nil, nil
}
bound, pvc, err = b.isPVCBound(logger, pod.Namespace, pvcName)
// ... the PVC must be owned by the pod.
if isEphemeral && err == nil && pvc != nil {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return false, nil, err
}
}
return
}
func (b *volumeBinder) isPVCBound(logger klog.Logger, namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: namespace,
},
}
pvcKey := getPVCName(claim)
pvc, err := b.pvcCache.GetPVC(pvcKey)
if err != nil || pvc == nil {
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err)
}
fullyBound := b.isPVCFullyBound(pvc)
if fullyBound {
logger.V(5).Info("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
} else {
if pvc.Spec.VolumeName != "" {
logger.V(5).Info("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName))
} else {
logger.V(5).Info("PVC is not bound", "PVC", klog.KObj(pvc))
}
}
return fullyBound, pvc, nil
}
func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool {
return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, volume.AnnBindCompleted)
}
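// For example, a PVC counts as fully bound only when both the volume name and
// the bind-completed annotation are set (a sketch; the names are illustrative):
//
//	pvc := &v1.PersistentVolumeClaim{
//		ObjectMeta: metav1.ObjectMeta{Name: "claim-1", Namespace: "default"},
//		Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "pv-1"},
//	}
//	metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
//	// b.isPVCFullyBound(pvc) is now true; without the annotation it is false.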
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(logger klog.Logger, pod *v1.Pod) bool {
for _, vol := range pod.Spec.Volumes {
if isBound, _, _ := b.isVolumeBound(logger, pod, &vol); !isBound {
// Pod has at least one PVC that needs binding
return false
}
}
return true
}
// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
func (b *volumeBinder) GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
podVolumeClaims = &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsImmediate: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
}
for _, vol := range pod.Spec.Volumes {
volumeBound, pvc, err := b.isVolumeBound(logger, pod, &vol)
if err != nil {
return podVolumeClaims, err
}
if pvc == nil {
continue
}
if volumeBound {
podVolumeClaims.boundClaims = append(podVolumeClaims.boundClaims, pvc)
} else {
delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister)
if err != nil {
return podVolumeClaims, err
}
// Prebound PVCs are treated as unbound with immediate binding
if delayBindingMode && pvc.Spec.VolumeName == "" {
// Scheduler path
podVolumeClaims.unboundClaimsDelayBinding = append(podVolumeClaims.unboundClaimsDelayBinding, pvc)
} else {
// !delayBindingMode || pvc.Spec.VolumeName != ""
// PVCs with immediate binding should have already been bound
podVolumeClaims.unboundClaimsImmediate = append(podVolumeClaims.unboundClaimsImmediate, pvc)
}
}
}
podVolumeClaims.unboundVolumesDelayBinding = map[string][]*v1.PersistentVolume{}
for _, pvc := range podVolumeClaims.unboundClaimsDelayBinding {
// Get storage class name from each PVC
storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
podVolumeClaims.unboundVolumesDelayBinding[storageClassName] = b.pvCache.ListPVs(storageClassName)
}
return podVolumeClaims, nil
}
func (b *volumeBinder) checkBoundClaims(logger klog.Logger, claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
csiNode, err := b.csiNodeLister.Get(node.Name)
if err != nil {
// TODO: return the error once CSINode is created by default
logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err)
}
for _, pvc := range claims {
pvName := pvc.Spec.VolumeName
pv, err := b.pvCache.GetPV(pvName)
if err != nil {
if errors.Is(err, assumecache.ErrNotFound) {
err = nil
}
return true, false, err
}
pv, err = b.tryTranslatePVToCSI(logger, pv, csiNode)
if err != nil {
return false, true, err
}
err = volume.CheckNodeAffinity(pv, node.Labels)
if err != nil {
logger.V(5).Info("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err)
return false, true, nil
}
logger.V(5).Info("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod))
}
logger.V(5).Info("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true, true, nil
}
// findMatchingVolumes tries to find matching volumes for the given claims,
// and returns the unbound claims for further provisioning.
func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
// Sort all the claims by increasing size request to get the smallest fits
sort.Sort(byPVCSize(claimsToBind))
chosenPVs := map[string]*v1.PersistentVolume{}
foundMatches = true
for _, pvc := range claimsToBind {
// Get storage class name from each PVC
storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
pvs := unboundVolumesDelayBinding[storageClassName]
// Find a matching PV
pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true, b.enableVolumeAttributesClass)
if err != nil {
return false, nil, nil, err
}
if pv == nil {
logger.V(5).Info("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node))
unboundClaims = append(unboundClaims, pvc)
foundMatches = false
continue
}
// matching PV needs to be excluded so we don't select it again
chosenPVs[pv.Name] = pv
bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc})
logger.V(5).Info("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod))
}
if foundMatches {
logger.V(5).Info("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
}
return
}
// checkVolumeProvisions checks the given unbound claims (the claims have gone through
// findMatchingVolumes and do not have matching volumes for binding), and returns true
// if all of the claims are eligible for dynamic provisioning.
func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*DynamicProvision, err error) {
dynamicProvisions = []*DynamicProvision{}
// We return early with dynamicProvisions == nil if a check
// fails or we encounter an error.
for _, claim := range claimsToProvision {
pvcName := getPVCName(claim)
className := volume.GetPersistentVolumeClaimClass(claim)
if className == "" {
return false, false, nil, fmt.Errorf("no class for claim %q", pvcName)
}
class, err := b.classLister.Get(className)
if err != nil {
return false, false, nil, fmt.Errorf("failed to find storage class %q", className)
}
provisioner := class.Provisioner
if provisioner == "" || provisioner == volume.NotSupportedProvisioner {
logger.V(5).Info("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim))
return false, true, nil, nil
}
// Check if the node can satisfy the topology requirement in the class
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
logger.V(5).Info("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim))
return false, true, nil, nil
}
// Check storage capacity.
sufficient, capacity, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
if err != nil {
return false, false, nil, err
}
if !sufficient {
// hasEnoughCapacity logs an explanation.
return true, false, nil, nil
}
dynamicProvisions = append(dynamicProvisions, &DynamicProvision{
PVC: claim,
NodeCapacity: capacity,
})
}
logger.V(5).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
return true, true, dynamicProvisions, nil
}
func (b *volumeBinder) revertAssumedPVs(bindings []*BindingInfo) {
for _, binding := range bindings {
b.pvCache.Restore(binding.pv.Name)
}
}
func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
for _, claim := range claims {
b.pvcCache.Restore(getPVCName(claim))
}
}
// hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
// that is accessible from the node. It also returns the matching CSIStorageCapacity object, based on the PVC's storage class.
func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, *storagev1.CSIStorageCapacity, error) {
quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
if !ok {
// No capacity to check for.
return true, nil, nil
}
// Only enabled for CSI drivers which opt into it.
driver, err := b.csiDriverLister.Get(provisioner)
if err != nil {
if apierrors.IsNotFound(err) {
// Either the provisioner is not a CSI driver or the driver does not
// opt into storage capacity scheduling. Either way, skip
// capacity checking.
return true, nil, nil
}
return false, nil, err
}
if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
return true, nil, nil
}
// Look for matching CSIStorageCapacity objects.
// TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
if err != nil {
return false, nil, err
}
sizeInBytes := quantity.Value()
for _, capacity := range capacities {
if capacity.StorageClassName == storageClass.Name &&
capacitySufficient(capacity, sizeInBytes) &&
b.nodeHasAccess(logger, node, capacity) {
// Enough capacity found.
return true, capacity, nil
}
}
// TODO (?): this doesn't give any information about which pools were considered and why
// they had to be rejected. Log that above? But that might be a lot of log output...
logger.V(5).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
return false, nil, nil
}
func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
limit := volumeLimit(capacity)
return limit != nil && limit.Value() >= sizeInBytes
}
func volumeLimit(capacity *storagev1.CSIStorageCapacity) *resource.Quantity {
if capacity.MaximumVolumeSize != nil {
// Prefer MaximumVolumeSize if available, it is more precise.
return capacity.MaximumVolumeSize
}
return capacity.Capacity
}
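// A worked sketch of the MaximumVolumeSize preference (the quantities are
// illustrative; ptr is k8s.io/utils/ptr, an assumed import for the sketch):
//
//	capacity := &storagev1.CSIStorageCapacity{
//		Capacity:          ptr.To(resource.MustParse("100Gi")),
//		MaximumVolumeSize: ptr.To(resource.MustParse("10Gi")),
//	}
//	// volumeLimit prefers MaximumVolumeSize (10Gi), so a 20Gi claim is
//	// rejected even though the pool reports 100Gi in total:
//	ok := capacitySufficient(capacity, resource.MustParse("20Gi").Value()) // false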
func (b *volumeBinder) nodeHasAccess(logger klog.Logger, node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool {
if capacity.NodeTopology == nil {
// Unavailable
return false
}
// Only matching by label is supported.
selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology)
if err != nil {
logger.Error(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology)
return false
}
return selector.Matches(labels.Set(node.Labels))
}
type byPVCSize []*v1.PersistentVolumeClaim
func (a byPVCSize) Len() int {
return len(a)
}
func (a byPVCSize) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a byPVCSize) Less(i, j int) bool {
iSize := a[i].Spec.Resources.Requests[v1.ResourceStorage]
jSize := a[j].Spec.Resources.Requests[v1.ResourceStorage]
// return true if iSize is less than jSize
return iSize.Cmp(jSize) == -1
}
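// A sorting sketch (the claim variables are illustrative): given claims
// requesting 10Gi, 1Gi, and 5Gi, the sort puts the smallest first so that
// findMatchingVolumes fits the smallest claims into PVs first.
//
//	claims := []*v1.PersistentVolumeClaim{big10Gi, small1Gi, mid5Gi}
//	sort.Sort(byPVCSize(claims))
//	// claims is now ordered: small1Gi, mid5Gi, big10Gi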
// isCSIMigrationOnForPlugin checks if CSI migration is enabled for a given plugin.
func isCSIMigrationOnForPlugin(pluginName string, enableCSIMigrationPortworx bool) bool {
switch pluginName {
case csiplugins.AWSEBSInTreePluginName:
return true
case csiplugins.GCEPDInTreePluginName:
return true
case csiplugins.AzureDiskInTreePluginName:
return true
case csiplugins.CinderInTreePluginName:
return true
case csiplugins.PortworxVolumePluginName:
return enableCSIMigrationPortworx
}
return false
}
// isPluginMigratedToCSIOnNode checks if an in-tree plugin has been migrated to a CSI driver on the node.
func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode) bool {
if csiNode == nil {
return false
}
csiNodeAnn := csiNode.GetAnnotations()
if csiNodeAnn == nil {
return false
}
var mpaSet sets.Set[string]
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.New[string]()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.New(tok...)
}
return mpaSet.Has(pluginName)
}
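// For example (a sketch; the annotation value is illustrative, and the plugin
// names are the csi-translation-lib constants):
//
//	csiNode.Annotations = map[string]string{
//		v1.MigratedPluginsAnnotationKey: "kubernetes.io/aws-ebs,kubernetes.io/gce-pd",
//	}
//	// isPluginMigratedToCSIOnNode("kubernetes.io/gce-pd", csiNode) == true
//	// isPluginMigratedToCSIOnNode("kubernetes.io/portworx-volume", csiNode) == false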
// tryTranslatePVToCSI will translate the in-tree PV to CSI if it meets the criteria. If not, it returns the unmodified in-tree PV.
func (b *volumeBinder) tryTranslatePVToCSI(logger klog.Logger, pv *v1.PersistentVolume, csiNode *storagev1.CSINode) (*v1.PersistentVolume, error) {
if !b.translator.IsPVMigratable(pv) {
return pv, nil
}
pluginName, err := b.translator.GetInTreePluginNameFromSpec(pv, nil)
if err != nil {
return nil, fmt.Errorf("could not get plugin name from pv: %v", err)
}
if !isCSIMigrationOnForPlugin(pluginName, b.enableCSIMigrationPortworx) {
return pv, nil
}
if !isPluginMigratedToCSIOnNode(pluginName, csiNode) {
return pv, nil
}
transPV, err := b.translator.TranslateInTreePVToCSI(logger, pv)
if err != nil {
return nil, fmt.Errorf("could not translate pv: %v", err)
}
return transPV, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
)
// FakeVolumeBinderConfig holds configurations for fake volume binder.
type FakeVolumeBinderConfig struct {
AllBound bool
FindReasons ConflictReasons
FindErr error
AssumeErr error
BindErr error
}
// NewFakeVolumeBinder creates a FakeVolumeBinder with the given configuration
// for testing topology-aware volume binding decisions.
func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder {
return &FakeVolumeBinder{
config: config,
}
}
// FakeVolumeBinder represents a fake volume binder for testing.
type FakeVolumeBinder struct {
config *FakeVolumeBinderConfig
AssumeCalled bool
BindCalled bool
}
var _ SchedulerVolumeBinder = &FakeVolumeBinder{}
// GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumeClaims.
func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
return &PodVolumeClaims{}, nil
}
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
return nil, b.config.FindReasons, b.config.FindErr
}
// AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.
func (b *FakeVolumeBinder) AssumePodVolumes(_ klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (bool, error) {
b.AssumeCalled = true
return b.config.AllBound, b.config.AssumeErr
}
// RevertAssumedPodVolumes implements SchedulerVolumeBinder.RevertAssumedPodVolumes
func (b *FakeVolumeBinder) RevertAssumedPodVolumes(_ *PodVolumes) {}
// BindPodVolumes implements SchedulerVolumeBinder.BindPodVolumes.
func (b *FakeVolumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error {
b.BindCalled = true
return b.config.BindErr
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
// VolumeSchedulerSubsystem - subsystem name used by scheduler
const VolumeSchedulerSubsystem = "scheduler_volume"
var (
// VolumeBindingRequestSchedulerBinderCache tracks the number of volume binder cache operations.
VolumeBindingRequestSchedulerBinderCache = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: VolumeSchedulerSubsystem,
Name: "binder_cache_requests_total",
Help: "Total number for request volume binding cache",
StabilityLevel: metrics.ALPHA,
},
[]string{"operation"},
)
// VolumeSchedulingStageFailed tracks the number of failed volume scheduling operations.
VolumeSchedulingStageFailed = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: VolumeSchedulerSubsystem,
Name: "scheduling_stage_error_total",
Help: "Volume scheduling stage error count",
StabilityLevel: metrics.ALPHA,
},
[]string{"operation"},
)
)
// RegisterVolumeSchedulingMetrics is used by the scheduler, because the volume binding cache is a library
// used within the scheduler process.
func RegisterVolumeSchedulingMetrics() {
legacyregistry.MustRegister(VolumeBindingRequestSchedulerBinderCache)
legacyregistry.MustRegister(VolumeSchedulingStageFailed)
}
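// A hedged usage sketch: register once at scheduler start-up, then record
// failures per stage (the stage labels shown are the ones the binder uses):
//
//	RegisterVolumeSchedulingMetrics()
//	VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc()
//	VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
//	VolumeSchedulingStageFailed.WithLabelValues("bind").Inc()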
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"math"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
)
// classResourceMap holds a map of storage class to resource.
type classResourceMap map[string]*StorageResource
// volumeCapacityScorer calculates the score based on class storage resource information.
type volumeCapacityScorer func(classResourceMap) int64
// buildScorerFunction builds volumeCapacityScorer from the scoring function shape.
func buildScorerFunction(scoringFunctionShape helper.FunctionShape) volumeCapacityScorer {
rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape)
f := func(requested, capacity int64) int64 {
if capacity == 0 || requested > capacity {
return rawScoringFunction(maxUtilization)
}
return rawScoringFunction(requested * maxUtilization / capacity)
}
return func(classResources classResourceMap) int64 {
var nodeScore int64
// in alpha stage, all classes have the same weight
weightSum := len(classResources)
if weightSum == 0 {
return 0
}
for _, resource := range classResources {
classScore := f(resource.Requested, resource.Capacity)
nodeScore += classScore
}
return int64(math.Round(float64(nodeScore) / float64(weightSum)))
}
}
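// A worked sketch, assuming a shape that maps 0% utilization to score 0 and
// 100% to 10 (the shape values are illustrative, not the plugin defaults):
//
//	scorer := buildScorerFunction(shape)
//	score := scorer(classResourceMap{
//		"fast": {Requested: 5, Capacity: 10},  // utilization 50  -> 5
//		"slow": {Requested: 10, Capacity: 10}, // utilization 100 -> 10
//	})
//	// score == round((5 + 10) / 2) == 8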
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-helpers/storage/volume"
"k8s.io/utils/ptr"
)
type nodeBuilder struct {
*v1.Node
}
func makeNode(name string) nodeBuilder {
return nodeBuilder{Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
v1.LabelHostname: name,
},
},
}}
}
func (nb nodeBuilder) withLabel(key, value string) nodeBuilder {
if nb.Node.ObjectMeta.Labels == nil {
nb.Node.ObjectMeta.Labels = map[string]string{}
}
nb.Node.ObjectMeta.Labels[key] = value
return nb
}
type pvBuilder struct {
*v1.PersistentVolume
}
func makePV(name, className string) pvBuilder {
return pvBuilder{PersistentVolume: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PersistentVolumeSpec{
StorageClassName: className,
},
}}
}
func (pvb pvBuilder) withNodeAffinity(keyValues map[string][]string) pvBuilder {
matchExpressions := make([]v1.NodeSelectorRequirement, 0)
for key, values := range keyValues {
matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
Key: key,
Operator: v1.NodeSelectorOpIn,
Values: values,
})
}
pvb.PersistentVolume.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: matchExpressions,
},
},
},
}
return pvb
}
func (pvb pvBuilder) withVersion(version string) pvBuilder {
pvb.PersistentVolume.ObjectMeta.ResourceVersion = version
return pvb
}
func (pvb pvBuilder) withCapacity(capacity resource.Quantity) pvBuilder {
pvb.PersistentVolume.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): capacity,
}
return pvb
}
func (pvb pvBuilder) withPhase(phase v1.PersistentVolumePhase) pvBuilder {
pvb.PersistentVolume.Status = v1.PersistentVolumeStatus{
Phase: phase,
}
return pvb
}
type pvcBuilder struct {
*v1.PersistentVolumeClaim
}
func makePVC(name string, storageClassName string) pvcBuilder {
return pvcBuilder{PersistentVolumeClaim: &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: ptr.To(storageClassName),
},
}}
}
func (pvcb pvcBuilder) withBoundPV(pvName string) pvcBuilder {
pvcb.PersistentVolumeClaim.Spec.VolumeName = pvName
metav1.SetMetaDataAnnotation(&pvcb.PersistentVolumeClaim.ObjectMeta, volume.AnnBindCompleted, "true")
return pvcb
}
func (pvcb pvcBuilder) withRequestStorage(request resource.Quantity) pvcBuilder {
pvcb.PersistentVolumeClaim.Spec.Resources = v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): request,
},
}
return pvcb
}
func (pvcb pvcBuilder) withPhase(phase v1.PersistentVolumeClaimPhase) pvcBuilder {
pvcb.PersistentVolumeClaim.Status = v1.PersistentVolumeClaimStatus{
Phase: phase,
}
return pvcb
}
type podBuilder struct {
*v1.Pod
}
func makePod(name string) podBuilder {
pb := podBuilder{Pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: v1.NamespaceDefault,
},
}}
pb.Pod.Spec.Volumes = make([]v1.Volume, 0)
return pb
}
func (pb podBuilder) withNodeName(name string) podBuilder {
pb.Pod.Spec.NodeName = name
return pb
}
func (pb podBuilder) withNamespace(name string) podBuilder {
pb.Pod.ObjectMeta.Namespace = name
return pb
}
func (pb podBuilder) withPVCVolume(pvcName, name string) podBuilder {
pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
})
return pb
}
func (pb podBuilder) withPVCSVolume(pvcs []*v1.PersistentVolumeClaim) podBuilder {
for i, pvc := range pvcs {
pb.withPVCVolume(pvc.Name, fmt.Sprintf("vol%v", i))
}
return pb
}
func (pb podBuilder) withEmptyDirVolume() podBuilder {
pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
})
return pb
}
func (pb podBuilder) withGenericEphemeralVolume(name string) podBuilder {
pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
})
return pb
}
func (pb podBuilder) withCSI(driver string) podBuilder {
pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
VolumeSource: v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: driver,
},
},
})
return pb
}
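// A hedged sketch of chaining these builders in a test (names illustrative):
//
//	pod := makePod("pod-a").
//		withNamespace("ns").
//		withNodeName("node-1").
//		withPVCVolume("pvc-a", "vol0").
//		Pod
//	pvc := makePVC("pvc-a", "fast-ssd").
//		withRequestStorage(resource.MustParse("5Gi")).
//		withBoundPV("pv-a").
//		PersistentVolumeClaim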
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumebinding
import (
"context"
"errors"
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
const (
stateKey fwk.StateKey = Name
maxUtilization = 100
)
// stateData is initialized in the PreFilter phase. Because we save a pointer to it in
// fwk.CycleState, later phases don't need to call the Write method
// to update the value.
type stateData struct {
allBound bool
// podVolumesByNode holds the pod's volume information found in the Filter
// phase for each node
// it's initialized in the PreFilter phase
podVolumesByNode map[string]*PodVolumes
podVolumeClaims *PodVolumeClaims
// hasStaticBindings declares whether the pod contains one or more StaticBindings.
// If not, VolumeBinding will skip the Score extension point.
hasStaticBindings bool
sync.Mutex
}
func (d *stateData) Clone() fwk.StateData {
return d
}
// VolumeBinding is a plugin that binds pod volumes in scheduling.
// In the Filter phase, pod binding cache is created for the pod and used in
// Reserve and PreBind phases.
type VolumeBinding struct {
Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister
classLister storagelisters.StorageClassLister
scorer volumeCapacityScorer
fts feature.Features
}
var _ fwk.PreFilterPlugin = &VolumeBinding{}
var _ fwk.FilterPlugin = &VolumeBinding{}
var _ fwk.ReservePlugin = &VolumeBinding{}
var _ fwk.PreBindPlugin = &VolumeBinding{}
var _ fwk.PreScorePlugin = &VolumeBinding{}
var _ fwk.ScorePlugin = &VolumeBinding{}
var _ fwk.EnqueueExtensions = &VolumeBinding{}
// Name is the name of the plugin used in Registry and configurations.
const Name = names.VolumeBinding
// Name returns name of the plugin. It is used in logs, etc.
func (pl *VolumeBinding) Name() string {
return Name
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *VolumeBinding) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// Pods may fail to find available PVs because the node labels do not
// match the storage class's allowed topologies or PV's node affinity.
// A new or updated node may make pods schedulable.
//
// A note about UpdateNodeTaint event:
// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint
if pl.fts.EnableSchedulingQueueHint {
// When scheduling queue hint is enabled, we don't use the problematic preCheck and don't need to register UpdateNodeTaint event.
nodeActionType = fwk.Add | fwk.UpdateNodeLabel
}
events := []fwk.ClusterEventWithHint{
// Pods may fail because of missing or mis-configured storage class
// (e.g., allowedTopologies, volumeBindingMode), and hence may become
// schedulable upon StorageClass Add or Update events.
{Event: fwk.ClusterEvent{Resource: fwk.StorageClass, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterStorageClassChange},
// We bind PVCs with PVs, so any changes may make the pods schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimChange},
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolume, ActionType: fwk.Add | fwk.Update}},
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
// We rely on CSINode to translate in-tree PVs to CSI.
// TODO: kube-scheduler will unregister the CSINode events once all the volume plugins have completed their CSI migration.
{Event: fwk.ClusterEvent{Resource: fwk.CSINode, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterCSINodeChange},
// When CSIStorageCapacity is enabled, pods may become schedulable
// on CSI driver & storage capacity changes.
{Event: fwk.ClusterEvent{Resource: fwk.CSIDriver, ActionType: fwk.Update}, QueueingHintFn: pl.isSchedulableAfterCSIDriverChange},
{Event: fwk.ClusterEvent{Resource: fwk.CSIStorageCapacity, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterCSIStorageCapacityChange},
}
return events, nil
}
func (pl *VolumeBinding) isSchedulableAfterCSINodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
if oldObj == nil {
logger.V(5).Info("CSINode creation could make the pod schedulable")
return fwk.Queue, nil
}
oldCSINode, modifiedCSINode, err := util.As[*storagev1.CSINode](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
logger = klog.LoggerWithValues(
logger,
"Pod", klog.KObj(pod),
"CSINode", klog.KObj(modifiedCSINode),
)
if oldCSINode.ObjectMeta.Annotations[v1.MigratedPluginsAnnotationKey] != modifiedCSINode.ObjectMeta.Annotations[v1.MigratedPluginsAnnotationKey] {
logger.V(5).Info("CSINode's migrated plugins annotation is updated and that may make the pod schedulable")
return fwk.Queue, nil
}
logger.V(5).Info("CISNode was created or updated but it doesn't make this pod schedulable")
return fwk.QueueSkip, nil
}
func (pl *VolumeBinding) isSchedulableAfterPersistentVolumeClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, newPVC, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
logger = klog.LoggerWithValues(
logger,
"Pod", klog.KObj(pod),
"PersistentVolumeClaim", klog.KObj(newPVC),
)
if pod.Namespace != newPVC.Namespace {
logger.V(5).Info("PersistentVolumeClaim was created or updated, but it doesn't make this pod schedulable because the PVC belongs to a different namespace")
return fwk.QueueSkip, nil
}
for _, vol := range pod.Spec.Volumes {
var pvcName string
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
pvcName = ephemeral.VolumeClaimName(pod, &vol)
default:
continue
}
if pvcName == newPVC.Name {
// Return Queue because, in this case,
// all PVC creations and almost all PVC updates could make the Pod schedulable.
logger.V(5).Info("PersistentVolumeClaim the pod requires was created or updated, potentially making the target Pod schedulable")
return fwk.Queue, nil
}
}
logger.V(5).Info("PersistentVolumeClaim was created or updated, but it doesn't make this pod schedulable")
return fwk.QueueSkip, nil
}
// isSchedulableAfterStorageClassChange checks whether a StorageClass event might make a Pod schedulable or not.
// Any StorageClass addition and a StorageClass update to allowedTopologies
// might make a Pod schedulable.
// Note that an update to volume binding mode is not allowed, so we don't have to consider it while examining the update event.
func (pl *VolumeBinding) isSchedulableAfterStorageClassChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
oldSC, newSC, err := util.As[*storagev1.StorageClass](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
logger = klog.LoggerWithValues(
logger,
"Pod", klog.KObj(pod),
"StorageClass", klog.KObj(newSC),
)
if oldSC == nil {
// No further filtering can be made for a creation event,
// and we just always return Queue.
logger.V(5).Info("A new StorageClass was created, which could make a Pod schedulable")
return fwk.Queue, nil
}
if !apiequality.Semantic.DeepEqual(newSC.AllowedTopologies, oldSC.AllowedTopologies) {
logger.V(5).Info("StorageClass got an update in AllowedTopologies", "AllowedTopologies", newSC.AllowedTopologies)
return fwk.Queue, nil
}
logger.V(5).Info("StorageClass was updated, but it doesn't make this pod schedulable")
return fwk.QueueSkip, nil
}
// isSchedulableAfterCSIStorageCapacityChange checks whether a CSIStorageCapacity event
// might make a Pod schedulable or not.
// Any CSIStorageCapacity addition and a CSIStorageCapacity update to the volume limit
// (calculated based on capacity and maximumVolumeSize) might make a Pod schedulable.
// Note that updates to nodeTopology and storageClassName are not allowed, so
// we don't have to consider them while examining the update event.
func (pl *VolumeBinding) isSchedulableAfterCSIStorageCapacityChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
oldCap, newCap, err := util.As[*storagev1.CSIStorageCapacity](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
if oldCap == nil {
logger.V(5).Info(
"A new CSIStorageCapacity was created, which could make a Pod schedulable",
"Pod", klog.KObj(pod),
"CSIStorageCapacity", klog.KObj(newCap),
)
return fwk.Queue, nil
}
oldLimit := volumeLimit(oldCap)
newLimit := volumeLimit(newCap)
logger = klog.LoggerWithValues(
logger,
"Pod", klog.KObj(pod),
"CSIStorageCapacity", klog.KObj(newCap),
"volumeLimit(new)", newLimit,
"volumeLimit(old)", oldLimit,
)
if newLimit != nil && (oldLimit == nil || newLimit.Value() > oldLimit.Value()) {
logger.V(5).Info("VolumeLimit was increased, which could make a Pod schedulable")
return fwk.Queue, nil
}
logger.V(5).Info("CSIStorageCapacity was updated, but it doesn't make this pod schedulable")
return fwk.QueueSkip, nil
}
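// A hedged sketch of how the limit above can be derived. volumeLimit itself is
// defined elsewhere in this package; this hypothetical helper only restates the
// assumption documented above: the limit is the reported capacity, replaced by
// maximumVolumeSize when the driver reports one.
func illustrativeVolumeLimit(capacity *storagev1.CSIStorageCapacity) *resource.Quantity {
if capacity.MaximumVolumeSize != nil {
// The driver can only create volumes up to MaximumVolumeSize, even if
// the total Capacity is larger.
return capacity.MaximumVolumeSize
}
return capacity.Capacity
}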
func (pl *VolumeBinding) isSchedulableAfterCSIDriverChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalCSIDriver, modifiedCSIDriver, err := util.As[*storagev1.CSIDriver](oldObj, newObj)
if err != nil {
return fwk.Queue, err
}
logger = klog.LoggerWithValues(
logger,
"Pod", klog.KObj(pod),
"CSIDriver", klog.KObj(modifiedCSIDriver),
)
for _, vol := range pod.Spec.Volumes {
if vol.CSI == nil || vol.CSI.Driver != modifiedCSIDriver.Name {
continue
}
if (originalCSIDriver.Spec.StorageCapacity != nil && *originalCSIDriver.Spec.StorageCapacity) &&
(modifiedCSIDriver.Spec.StorageCapacity == nil || !*modifiedCSIDriver.Spec.StorageCapacity) {
logger.V(5).Info("CSIDriver was updated and storage capacity got disabled, which may make the pod schedulable")
return fwk.Queue, nil
}
}
logger.V(5).Info("CSIDriver was created or updated but it doesn't make this pod schedulable")
return fwk.QueueSkip, nil
}
// podHasPVCs returns two values:
// - the first denotes whether the given "pod" defines any PVC.
// - the second returns an error if a requested PVC is illegal.
func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
hasPVC := false
for _, vol := range pod.Spec.Volumes {
var pvcName string
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
pvcName = ephemeral.VolumeClaimName(pod, &vol)
isEphemeral = true
default:
// Volume is not using a PVC, ignore
continue
}
hasPVC = true
pvc, err := pl.PVCLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
if err != nil {
// The error usually already has enough context ("persistentvolumeclaim "myclaim" not found"),
// but we can do better for generic ephemeral inline volumes, where that situation
// is normal right after a pod is created.
if isEphemeral && apierrors.IsNotFound(err) {
err = fmt.Errorf("waiting for ephemeral volume controller to create the persistentvolumeclaim %q", pvcName)
}
return hasPVC, err
}
if pvc.Status.Phase == v1.ClaimLost {
return hasPVC, fmt.Errorf("persistentvolumeclaim %q bound to non-existent persistentvolume %q", pvc.Name, pvc.Spec.VolumeName)
}
if pvc.DeletionTimestamp != nil {
return hasPVC, fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
}
if isEphemeral {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return hasPVC, err
}
}
}
return hasPVC, nil
}
// PreFilter is invoked at the prefilter extension point to check if the pod has all
// immediate PVCs bound. If not all immediate PVCs are bound, an
// UnschedulableAndUnresolvable status is returned.
func (pl *VolumeBinding) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
// If pod does not reference any PVC, we don't need to do anything.
if hasPVC, err := pl.podHasPVCs(pod); err != nil {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, err.Error())
} else if !hasPVC {
state.Write(stateKey, &stateData{})
return nil, fwk.NewStatus(fwk.Skip)
}
podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(logger, pod)
if err != nil {
return nil, fwk.AsStatus(err)
}
if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
// Return UnschedulableAndUnresolvable error if immediate claims are
// not bound. Pod will be moved to active/backoff queues once these
// claims are bound by PV controller.
status := fwk.NewStatus(fwk.UnschedulableAndUnresolvable)
status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
return nil, status
}
state.Write(stateKey, &stateData{
podVolumesByNode: make(map[string]*PodVolumes),
podVolumeClaims: &PodVolumeClaims{
boundClaims: podVolumeClaims.boundClaims,
unboundClaimsDelayBinding: podVolumeClaims.unboundClaimsDelayBinding,
unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
},
})
return nil, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove; this plugin doesn't need them, so it returns nil.
func (pl *VolumeBinding) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(stateKey)
if err != nil {
return nil, err
}
s, ok := state.(*stateData)
if !ok {
return nil, errors.New("unable to convert state into stateData")
}
return s, nil
}
// Filter invoked at the filter extension point.
// It evaluates if a pod can fit due to the volumes it requests,
// for both bound and unbound PVCs.
//
// For PVCs that are bound, it checks that the corresponding PV's node affinity is
// satisfied by the given node.
//
// For PVCs that are unbound, it tries to find available PVs that can satisfy the PVC requirements
// and that the PV node affinity is satisfied by the given node.
//
// If storage capacity tracking is enabled, then enough space has to be available
// for the node and volumes that still need to be created.
//
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV.
func (pl *VolumeBinding) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
logger := klog.FromContext(ctx)
node := nodeInfo.Node()
state, err := getStateData(cs)
if err != nil {
return fwk.AsStatus(err)
}
podVolumes, reasons, err := pl.Binder.FindPodVolumes(logger, pod, state.podVolumeClaims, node)
if err != nil {
return fwk.AsStatus(err)
}
if len(reasons) > 0 {
status := fwk.NewStatus(fwk.UnschedulableAndUnresolvable)
for _, reason := range reasons {
status.AppendReason(string(reason))
}
return status
}
// Multiple goroutines call `Filter` on different nodes simultaneously, and the `CycleState` may be duplicated, so we must use a local lock here.
state.Lock()
state.podVolumesByNode[node.Name] = podVolumes
state.hasStaticBindings = state.hasStaticBindings || (podVolumes != nil && len(podVolumes.StaticBindings) > 0)
state.Unlock()
return nil
}
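// Illustrative sketch (hypothetical helper): Filter runs concurrently for many
// nodes against the same *stateData, so the embedded mutex must guard the
// podVolumesByNode map; unguarded concurrent map writes would panic at runtime.
func illustrateConcurrentFilterWrites(nodeNames []string) {
state := &stateData{podVolumesByNode: make(map[string]*PodVolumes)}
var wg sync.WaitGroup
for _, name := range nodeNames {
wg.Add(1)
go func(n string) {
defer wg.Done()
state.Lock()
state.podVolumesByNode[n] = &PodVolumes{}
state.Unlock()
}(name)
}
wg.Wait()
}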
// PreScore is invoked at the preScore extension point. It checks whether volumeBinding can skip the Score extension point.
func (pl *VolumeBinding) PreScore(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
if pl.scorer == nil {
return fwk.NewStatus(fwk.Skip)
}
state, err := getStateData(cs)
if err != nil {
return fwk.AsStatus(err)
}
if state.hasStaticBindings || pl.fts.EnableStorageCapacityScoring {
return nil
}
return fwk.NewStatus(fwk.Skip)
}
// Score invoked at the score extension point.
func (pl *VolumeBinding) Score(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
if pl.scorer == nil {
return 0, nil
}
state, err := getStateData(cs)
if err != nil {
return 0, fwk.AsStatus(err)
}
nodeName := nodeInfo.Node().Name
podVolumes, ok := state.podVolumesByNode[nodeName]
if !ok {
return 0, nil
}
classResources := make(classResourceMap)
if len(podVolumes.StaticBindings) != 0 || !pl.fts.EnableStorageCapacityScoring {
// group static binding volumes by storage class
for _, staticBinding := range podVolumes.StaticBindings {
class := staticBinding.StorageClassName()
storageResource := staticBinding.StorageResource()
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
}
classResources[class].Requested += storageResource.Requested
classResources[class].Capacity += storageResource.Capacity
}
} else {
// group dynamic binding volumes by storage class
for _, provision := range podVolumes.DynamicProvisions {
if provision.NodeCapacity == nil {
continue
}
class := *provision.PVC.Spec.StorageClassName
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
}
// The following line cannot be +=. For example, if a Pod requests two 50GB volumes from
// a StorageClass with 100GB of capacity on a node, this part of the code will be executed twice.
// In that case, using += would incorrectly set classResources[class].Capacity to 200GB.
classResources[class].Capacity = provision.NodeCapacity.Capacity.Value()
requestedQty := provision.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
classResources[class].Requested += requestedQty.Value()
}
}
return pl.scorer(classResources), nil
}
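// A hedged sketch of what a volumeCapacityScorer can compute. The real scorer
// comes from buildScorerFunction and the configured shape points; this
// hypothetical variant simply averages per-class utilization, scaled to
// maxUtilization (100), to show the shape of the data the scorer receives.
func illustrativeScorer(resources classResourceMap) int64 {
var total, classes int64
for _, r := range resources {
if r.Capacity == 0 {
continue
}
total += maxUtilization * r.Requested / r.Capacity
classes++
}
if classes == 0 {
return 0
}
return total / classes
}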
// ScoreExtensions of the Score plugin.
func (pl *VolumeBinding) ScoreExtensions() fwk.ScoreExtensions {
return nil
}
// Reserve reserves volumes of pod and saves binding status in cycle state.
func (pl *VolumeBinding) Reserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
state, err := getStateData(cs)
if err != nil {
return fwk.AsStatus(err)
}
// we don't need to hold the lock as only one node will be reserved for the given pod
podVolumes, ok := state.podVolumesByNode[nodeName]
if ok {
allBound, err := pl.Binder.AssumePodVolumes(klog.FromContext(ctx), pod, nodeName, podVolumes)
if err != nil {
return fwk.AsStatus(err)
}
state.allBound = allBound
} else {
// may not exist if the pod does not reference any PVC
state.allBound = true
}
return nil
}
var errNoPodVolumeForNode = fmt.Errorf("no pod volume found for node")
// PreBindPreFlight is called before PreBind, and determines whether PreBind is going to do something for this pod, or not.
// It checks state.podVolumesByNode to determine whether there are any pod volumes for the node and hence the plugin has to handle them at PreBind.
func (pl *VolumeBinding) PreBindPreFlight(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
s, err := getStateData(state)
if err != nil {
return fwk.AsStatus(err)
}
if s.allBound {
// no need to bind volumes
return fwk.NewStatus(fwk.Skip)
}
if _, ok := s.podVolumesByNode[nodeName]; !ok {
return fwk.AsStatus(fmt.Errorf("%w %q", errNoPodVolumeForNode, nodeName))
}
return nil
}
// PreBind will make the API update with the assumed bindings and wait until
// the PV controller has completely finished the binding operation.
//
// If the binding fails, times out, or gets undone, an error is returned so that
// the pod can be retried for scheduling.
func (pl *VolumeBinding) PreBind(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
s, err := getStateData(cs)
if err != nil {
return fwk.AsStatus(err)
}
if s.allBound {
// no need to bind volumes
return nil
}
// we don't need to hold the lock as only one node will be pre-bound for the given pod
podVolumes, ok := s.podVolumesByNode[nodeName]
if !ok {
return fwk.AsStatus(fmt.Errorf("%w %q", errNoPodVolumeForNode, nodeName))
}
logger := klog.FromContext(ctx)
logger.V(5).Info("Trying to bind volumes for pod", "pod", klog.KObj(pod))
err = pl.Binder.BindPodVolumes(ctx, pod, podVolumes)
if err != nil {
logger.V(5).Info("Failed to bind volumes for pod", "pod", klog.KObj(pod), "err", err)
return fwk.AsStatus(err)
}
logger.V(5).Info("Success binding volumes for pod", "pod", klog.KObj(pod))
return nil
}
// Unreserve clears the assumed PV and PVC cache.
// It's idempotent, and does nothing if no cache is found for the given pod.
func (pl *VolumeBinding) Unreserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) {
s, err := getStateData(cs)
if err != nil {
return
}
// we don't need to hold the lock as only one node may be unreserved
podVolumes, ok := s.podVolumesByNode[nodeName]
if !ok {
return
}
pl.Binder.RevertAssumedPodVolumes(podVolumes)
}
// New initializes a new plugin and returns it.
func New(ctx context.Context, plArgs runtime.Object, fh fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
args, ok := plArgs.(*config.VolumeBindingArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs)
}
if err := validation.ValidateVolumeBindingArgsWithOptions(nil, args, validation.VolumeBindingArgsValidationOptions{
AllowStorageCapacityScoring: fts.EnableStorageCapacityScoring,
}); err != nil {
return nil, err
}
podInformer := fh.SharedInformerFactory().Core().V1().Pods()
nodeInformer := fh.SharedInformerFactory().Core().V1().Nodes()
pvcInformer := fh.SharedInformerFactory().Core().V1().PersistentVolumeClaims()
pvInformer := fh.SharedInformerFactory().Core().V1().PersistentVolumes()
storageClassInformer := fh.SharedInformerFactory().Storage().V1().StorageClasses()
csiNodeInformer := fh.SharedInformerFactory().Storage().V1().CSINodes()
capacityCheck := CapacityCheck{
CSIDriverInformer: fh.SharedInformerFactory().Storage().V1().CSIDrivers(),
CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1().CSIStorageCapacities(),
}
binder := NewVolumeBinder(klog.FromContext(ctx), fh.ClientSet(), fts, podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
// build score function
var scorer volumeCapacityScorer
if fts.EnableStorageCapacityScoring {
shape := make(helper.FunctionShape, 0, len(args.Shape))
for _, point := range args.Shape {
shape = append(shape, helper.FunctionShapePoint{
Utilization: int64(point.Utilization),
Score: int64(point.Score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore),
})
}
scorer = buildScorerFunction(shape)
}
return &VolumeBinding{
Binder: binder,
PVCLister: pvcInformer.Lister(),
classLister: storageClassInformer.Lister(),
scorer: scorer,
fts: fts,
}, nil
}
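// normalizeShapeScore is a hypothetical helper that restates the rescaling done
// in New above: user-facing scores in [0, config.MaxCustomPriorityScore] are
// mapped onto the framework's [0, fwk.MaxNodeScore] range. For example, with
// MaxNodeScore=100 and MaxCustomPriorityScore=10, a configured score of 5
// becomes 5 * (100 / 10) = 50.
func normalizeShapeScore(score int32) int64 {
return int64(score) * (fwk.MaxNodeScore / config.MaxCustomPriorityScore)
}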
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumerestrictions
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// VolumeRestrictions is a plugin that checks volume restrictions.
type VolumeRestrictions struct {
pvcLister corelisters.PersistentVolumeClaimLister
sharedLister fwk.SharedLister
enableSchedulingQueueHint bool
}
var _ fwk.PreFilterPlugin = &VolumeRestrictions{}
var _ fwk.FilterPlugin = &VolumeRestrictions{}
var _ fwk.EnqueueExtensions = &VolumeRestrictions{}
var _ fwk.StateData = &preFilterState{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.VolumeRestrictions
// preFilterStateKey is the key in CycleState to VolumeRestrictions pre-computed data for Filtering.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + Name
// ErrReasonDiskConflict is used for NoDiskConflict predicate error.
ErrReasonDiskConflict = "node(s) had no available disk"
// ErrReasonReadWriteOncePodConflict is used when a PVC with ReadWriteOncePod access mode is already in-use by another pod.
ErrReasonReadWriteOncePodConflict = "node(s) unavailable due to PersistentVolumeClaim with ReadWriteOncePod access mode already in-use by another pod"
)
// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
// Names of the pod's volumes using the ReadWriteOncePod access mode.
readWriteOncePodPVCs sets.Set[string]
// The number of references to these ReadWriteOncePod volumes by scheduled pods.
conflictingPVCRefCount int
}
func (s *preFilterState) updateWithPod(podInfo fwk.PodInfo, multiplier int) {
s.conflictingPVCRefCount += multiplier * s.conflictingPVCRefCountForPod(podInfo)
}
func (s *preFilterState) conflictingPVCRefCountForPod(podInfo fwk.PodInfo) int {
conflicts := 0
for _, volume := range podInfo.GetPod().Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue
}
if s.readWriteOncePodPVCs.Has(volume.PersistentVolumeClaim.ClaimName) {
conflicts += 1
}
}
return conflicts
}
// Clone the prefilter state.
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
return &preFilterState{
readWriteOncePodPVCs: s.readWriteOncePodPVCs,
conflictingPVCRefCount: s.conflictingPVCRefCount,
}
}
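// Hypothetical sketch: Clone copies the conflict counter but shares the PVC
// name set. Sharing the set is safe because it is never mutated after
// PreFilter, while the counter must be copied so that AddPod/RemovePod
// simulations on a cloned CycleState don't leak into the original.
func illustratePreFilterStateClone() (int, int) {
s := &preFilterState{
readWriteOncePodPVCs:   sets.New("claim-a"), // hypothetical claim name
conflictingPVCRefCount: 1,
}
c := s.Clone().(*preFilterState)
c.conflictingPVCRefCount++ // an AddPod-style update on the copy
return s.conflictingPVCRefCount, c.conflictingPVCRefCount // 1, 2
}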
// Name returns the name of the plugin. It is used in logs, etc.
func (pl *VolumeRestrictions) Name() string {
return Name
}
func isVolumeConflict(volume *v1.Volume, pod *v1.Pod) bool {
for _, existingVolume := range pod.Spec.Volumes {
// Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only.
if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil {
disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk
if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {
return true
}
}
if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil {
if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID {
return true
}
}
if volume.ISCSI != nil && existingVolume.ISCSI != nil {
iqn := volume.ISCSI.IQN
eiqn := existingVolume.ISCSI.IQN
// Two ISCSI volumes are the same if they share the same IQN. As ISCSI volumes are of type
// RWO or ROX, we can permit only one RW mount. The same ISCSI volume mounted by multiple Pods
// conflicts unless all pods mount it read-only.
if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
return true
}
}
if volume.RBD != nil && existingVolume.RBD != nil {
mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
// Two RBD images are the same if they share the same Ceph monitor, are in the same RADOS pool, and have the same image name.
// Only one read-write mount is permitted for the same RBD image.
// The same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only.
if haveOverlap(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) {
return true
}
}
}
return false
}
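// Illustrative sketch (hypothetical): two pods may mount the same GCE PD only
// if both mounts are read-only; as soon as one side mounts it read-write, the
// volumes conflict and the pods cannot share a node.
func illustrateGCEPDConflict() bool {
rw := v1.Volume{VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "disk-1", ReadOnly: false},
}}
existing := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "disk-1", ReadOnly: true},
},
}}}}
return isVolumeConflict(&rw, existing) // true: one side is read-write
}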
// haveOverlap searches two arrays and returns true if they have at least one common element; returns false otherwise.
func haveOverlap(a1, a2 []string) bool {
if len(a1) > len(a2) {
a1, a2 = a2, a1
}
m := sets.New(a1...)
for _, val := range a2 {
if m.Has(val) {
return true
}
}
return false
}
// needsRestrictionsCheck returns true if the volume is a conflict-checking target.
func needsRestrictionsCheck(v v1.Volume) bool {
return v.GCEPersistentDisk != nil || v.AWSElasticBlockStore != nil || v.RBD != nil || v.ISCSI != nil
}
// PreFilter computes and stores cycleState containing details for enforcing ReadWriteOncePod.
func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
needsCheck := false
for i := range pod.Spec.Volumes {
if needsRestrictionsCheck(pod.Spec.Volumes[i]) {
needsCheck = true
break
}
}
pvcs, err := pl.readWriteOncePodPVCsForPod(ctx, pod)
if err != nil {
if apierrors.IsNotFound(err) {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, err.Error())
}
return nil, fwk.AsStatus(err)
}
s, err := pl.calPreFilterState(ctx, pod, pvcs)
if err != nil {
return nil, fwk.AsStatus(err)
}
if !needsCheck && s.conflictingPVCRefCount == 0 {
return nil, fwk.NewStatus(fwk.Skip)
}
cycleState.Write(preFilterStateKey, s)
return nil, nil
}
// AddPod from pre-computed data in cycleState.
func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
state.updateWithPod(podInfoToAdd, 1)
return nil
}
// RemovePod from pre-computed data in cycleState.
func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
state.updateWithPod(podInfoToRemove, -1)
return nil
}
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
return nil, fmt.Errorf("cannot read %q from cycleState", preFilterStateKey)
}
s, ok := c.(*preFilterState)
if !ok {
return nil, fmt.Errorf("%+v convert to volumerestrictions.state error", c)
}
return s, nil
}
// calPreFilterState computes preFilterState describing which PVCs use ReadWriteOncePod
// and which pods in the cluster are in conflict.
func (pl *VolumeRestrictions) calPreFilterState(ctx context.Context, pod *v1.Pod, pvcs sets.Set[string]) (*preFilterState, error) {
conflictingPVCRefCount := 0
for pvc := range pvcs {
key := framework.GetNamespacedName(pod.Namespace, pvc)
if pl.sharedLister.StorageInfos().IsPVCUsedByPods(key) {
// There can be at most one pod using the ReadWriteOncePod PVC.
conflictingPVCRefCount += 1
}
}
return &preFilterState{
readWriteOncePodPVCs: pvcs,
conflictingPVCRefCount: conflictingPVCRefCount,
}, nil
}
func (pl *VolumeRestrictions) readWriteOncePodPVCsForPod(ctx context.Context, pod *v1.Pod) (sets.Set[string], error) {
pvcs := sets.New[string]()
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue
}
pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
if err != nil {
return nil, err
}
if !v1helper.ContainsAccessMode(pvc.Spec.AccessModes, v1.ReadWriteOncePod) {
continue
}
pvcs.Insert(pvc.Name)
}
return pvcs, nil
}
// satisfyVolumeConflicts checks if scheduling the pod onto this node would cause
// any conflicts with existing volumes.
func satisfyVolumeConflicts(pod *v1.Pod, nodeInfo fwk.NodeInfo) bool {
for i := range pod.Spec.Volumes {
v := pod.Spec.Volumes[i]
if !needsRestrictionsCheck(v) {
continue
}
for _, ev := range nodeInfo.GetPods() {
if isVolumeConflict(&v, ev.GetPod()) {
return false
}
}
}
return true
}
// satisfyReadWriteOncePod checks if scheduling the pod would cause any ReadWriteOncePod PVC access mode conflicts.
func satisfyReadWriteOncePod(ctx context.Context, state *preFilterState) *fwk.Status {
if state == nil {
return nil
}
if state.conflictingPVCRefCount > 0 {
return fwk.NewStatus(fwk.Unschedulable, ErrReasonReadWriteOncePodConflict)
}
return nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *VolumeRestrictions) PreFilterExtensions() fwk.PreFilterExtensions {
return pl
}
// Filter is invoked at the filter extension point.
// It evaluates if a pod can fit due to the volumes it requests, and those that
// are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
// can't be scheduled there.
// This is GCE, Amazon EBS, ISCSI and Ceph RBD specific for now:
// - GCE PD allows multiple mounts as long as they're all read-only
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids two pods sharing the same monitor, pool and image, unless both mount the image read-only
// - ISCSI forbids two pods sharing the same IQN, unless both mount the volume read-only
// If the pod uses PVCs with the ReadWriteOncePod access mode, it evaluates if
// these PVCs are already in-use and if preemption will help.
func (pl *VolumeRestrictions) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
if !satisfyVolumeConflicts(pod, nodeInfo) {
return fwk.NewStatus(fwk.Unschedulable, ErrReasonDiskConflict)
}
state, err := getPreFilterState(cycleState)
if err != nil {
return fwk.AsStatus(err)
}
return satisfyReadWriteOncePod(ctx, state)
}
// EventsToRegister returns the possible events that may make a Pod
// rejected by this plugin schedulable.
func (pl *VolumeRestrictions) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A note about UpdateNodeTaint/UpdateNodeLabel event:
// Ideally, it's supposed to register only Add because any Node update event will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeTaint | fwk.UpdateNodeLabel
if pl.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled, and hence Update event isn't necessary.
nodeActionType = fwk.Add
}
return []fwk.ClusterEventWithHint{
// Pods may fail to schedule because of volumes conflicting with other pods on the same node.
// Once running pods are deleted and their volumes have been released, the unschedulable pod will become schedulable.
// Because the `spec.volumes` field is immutable, pod update events are ignored.
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted},
// A new Node may make a pod schedulable.
// We intentionally don't set QueueingHint since all Node/Add events could make Pods schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
// Pods may fail to schedule because the PVC it uses has not yet been created.
// This PVC is required to exist to check its access modes.
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.Add},
QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimAdded},
}, nil
}
// isSchedulableAfterPersistentVolumeClaimAdded is invoked whenever a PersistentVolumeClaim is added. It checks whether
// that change made a previously unschedulable pod schedulable.
func (pl *VolumeRestrictions) isSchedulableAfterPersistentVolumeClaimAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, newPersistentVolumeClaim, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeClaimChange: %w", err)
}
if newPersistentVolumeClaim.Namespace != pod.Namespace {
return fwk.QueueSkip, nil
}
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim == nil {
continue
}
if volume.PersistentVolumeClaim.ClaimName == newPersistentVolumeClaim.Name {
logger.V(5).Info("PVC that is referred from the pod was created, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(newPersistentVolumeClaim))
return fwk.Queue, nil
}
}
logger.V(5).Info("PVC irrelevant to the Pod was created, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(newPersistentVolumeClaim))
return fwk.QueueSkip, nil
}
// isSchedulableAfterPodDeleted is invoked whenever a pod is deleted.
// It checks whether the deleted pod's volumes were conflicting with the volumes of the target pod.
func (pl *VolumeRestrictions) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
deletedPod, _, err := util.As[*v1.Pod](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPodDeleted: %w", err)
}
if deletedPod.Namespace != pod.Namespace {
return fwk.QueueSkip, nil
}
nodeInfo := framework.NewNodeInfo(deletedPod)
if !satisfyVolumeConflicts(pod, nodeInfo) {
logger.V(5).Info("Pod with the volume that the target pod requires was deleted, which might make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.Queue, nil
}
// Return Queue if a deleted pod uses the same PVC since the pod may be unschedulable due to the ReadWriteOncePod access mode of the PVC.
//
// For now, we don't actually fetch PVC and check the access mode because that operation could be expensive.
// Once the observability around QHint is established,
// we may want to do that depending on how much the operation would impact the QHint latency negatively.
// https://github.com/kubernetes/kubernetes/issues/124566
claims := sets.New[string]()
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
claims.Insert(volume.PersistentVolumeClaim.ClaimName)
}
}
for _, volume := range deletedPod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil && claims.Has(volume.PersistentVolumeClaim.ClaimName) {
logger.V(5).Info("Pod with the same PVC that the target pod requires was deleted, which might make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.Queue, nil
}
}
logger.V(5).Info("An irrelevant Pod was deleted, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
return fwk.QueueSkip, nil
}
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
sharedLister := handle.SnapshotSharedLister()
return &VolumeRestrictions{
pvcLister: pvcLister,
sharedLister: sharedLister,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumezone
import (
"context"
"errors"
"fmt"
"reflect"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// VolumeZone is a plugin that checks volume zone.
type VolumeZone struct {
pvLister corelisters.PersistentVolumeLister
pvcLister corelisters.PersistentVolumeClaimLister
scLister storagelisters.StorageClassLister
enableSchedulingQueueHint bool
}
var _ fwk.FilterPlugin = &VolumeZone{}
var _ fwk.PreFilterPlugin = &VolumeZone{}
var _ fwk.EnqueueExtensions = &VolumeZone{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.VolumeZone
preFilterStateKey fwk.StateKey = "PreFilter" + Name
// ErrReasonConflict is used for NoVolumeZoneConflict predicate error.
ErrReasonConflict = "node(s) had no available volume zone"
)
// pvTopology holds the values of a PV's topology label
type pvTopology struct {
pvName string
key string
values sets.Set[string]
}
// The state is initialized in the PreFilter phase. Because we save a pointer
// to it in fwk.CycleState, later phases don't need to call Write again to
// update the value.
type stateData struct {
// podPVTopologies holds the pv information we need
// it's initialized in the PreFilter phase
podPVTopologies []pvTopology
}
func (d *stateData) Clone() fwk.StateData {
return d
}
var topologyLabels = []string{
v1.LabelFailureDomainBetaZone,
v1.LabelFailureDomainBetaRegion,
v1.LabelTopologyZone,
v1.LabelTopologyRegion,
}
func translateToGALabel(label string) string {
if label == v1.LabelFailureDomainBetaRegion {
return v1.LabelTopologyRegion
}
if label == v1.LabelFailureDomainBetaZone {
return v1.LabelTopologyZone
}
return label
}
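// Hypothetical usage sketch: a PV labeled with the deprecated beta zone label
// can still match a node that only carries the GA topology label, because the
// lookup falls back through translateToGALabel (as Filter does below).
func illustrateBetaToGAFallback(nodeLabels map[string]string) (string, bool) {
key := v1.LabelFailureDomainBetaZone
if v, ok := nodeLabels[key]; ok {
return v, true
}
v, ok := nodeLabels[translateToGALabel(key)] // falls back to v1.LabelTopologyZone
return v, ok
}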
// Name returns the name of the plugin. It is used in logs, etc.
func (pl *VolumeZone) Name() string {
return Name
}
// PreFilter is invoked at the prefilter extension point.
//
// It finds the topology of the PersistentVolumes corresponding to the volumes a pod requests.
//
// Currently, this is only supported with PersistentVolumeClaims,
// and only looks for the bound PersistentVolume.
func (pl *VolumeZone) PreFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
podPVTopologies, status := pl.getPVbyPod(logger, pod)
if !status.IsSuccess() {
return nil, status
}
if len(podPVTopologies) == 0 {
return nil, fwk.NewStatus(fwk.Skip)
}
cs.Write(preFilterStateKey, &stateData{podPVTopologies: podPVTopologies})
return nil, nil
}
// getPVbyPod gets the pvTopology list for the PVs bound to a pod's PVCs.
func (pl *VolumeZone) getPVbyPod(logger klog.Logger, pod *v1.Pod) ([]pvTopology, *fwk.Status) {
podPVTopologies := make([]pvTopology, 0)
pvcNames := pl.getPersistentVolumeClaimNameFromPod(pod)
for _, pvcName := range pvcNames {
if pvcName == "" {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no name")
}
pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return nil, s
}
pvName := pvc.Spec.VolumeName
if pvName == "" {
scName := storagehelpers.GetPersistentVolumeClaimClass(pvc)
if len(scName) == 0 {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "PersistentVolumeClaim had no pv name and storageClass name")
}
class, err := pl.scLister.Get(scName)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return nil, s
}
if class.VolumeBindingMode == nil {
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, fmt.Sprintf("VolumeBindingMode not set for StorageClass %q", scName))
}
if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
// Skip unbound volumes
continue
}
return nil, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "PersistentVolume had no name")
}
pv, err := pl.pvLister.Get(pvName)
if s := getErrorAsStatus(err); !s.IsSuccess() {
return nil, s
}
podPVTopologies = append(podPVTopologies, pl.getPVTopologies(logger, pv)...)
}
return podPVTopologies, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove; this plugin doesn't need them, so it returns nil.
func (pl *VolumeZone) PreFilterExtensions() fwk.PreFilterExtensions {
return nil
}
// Filter invoked at the filter extension point.
//
// It evaluates if a pod can fit due to the volumes it requests, given
// that some volumes may have zone scheduling constraints. The requirement is that any
// volume zone-labels must match the equivalent zone-labels on the node. It is OK for
// the node to have more zone-label constraints (for example, a hypothetical replicated
// volume might allow region-wide access)
//
// Currently this is only supported with PersistentVolumeClaims, and looks to the labels
// only on the bound PersistentVolume.
//
// Working with volumes declared inline in the pod specification (i.e. not
// using a PersistentVolume) is likely to be harder, as it would require
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider. It seems that we are moving away
// from inline volume declarations anyway.
func (pl *VolumeZone) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
logger := klog.FromContext(ctx)
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
return nil
}
var podPVTopologies []pvTopology
state, err := getStateData(cs)
if err != nil {
// Fallback to calculate pv list here
var status *fwk.Status
podPVTopologies, status = pl.getPVbyPod(logger, pod)
if !status.IsSuccess() {
return status
}
} else {
podPVTopologies = state.podPVTopologies
}
node := nodeInfo.Node()
hasAnyNodeConstraint := false
for _, topologyLabel := range topologyLabels {
if _, ok := node.Labels[topologyLabel]; ok {
hasAnyNodeConstraint = true
break
}
}
if !hasAnyNodeConstraint {
// The node has no zone constraints, so we're OK to schedule.
// This is to handle a single-zone cluster scenario where the node may not have any topology labels.
return nil
}
for _, pvTopology := range podPVTopologies {
v, ok := node.Labels[pvTopology.key]
if !ok {
// If the PV's label key doesn't match directly, try matching the PV's beta label against the node's GA label.
v, ok = node.Labels[translateToGALabel(pvTopology.key)]
}
if !ok || !pvTopology.values.Has(v) {
logger.V(10).Info("Won't schedule pod onto node due to volume (mismatch on label key)", "pod", klog.KObj(pod), "node", klog.KObj(node), "PV", klog.KRef("", pvTopology.pvName), "PVLabelKey", pvTopology.key)
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, ErrReasonConflict)
}
}
return nil
}
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(preFilterStateKey)
if err != nil {
return nil, err
}
s, ok := state.(*stateData)
if !ok {
return nil, errors.New("unable to convert state into stateData")
}
return s, nil
}
func getErrorAsStatus(err error) *fwk.Status {
if err != nil {
if apierrors.IsNotFound(err) {
return fwk.NewStatus(fwk.UnschedulableAndUnresolvable, err.Error())
}
return fwk.AsStatus(err)
}
return nil
}
// EventsToRegister returns the possible events that may make a Pod
// rejected by this plugin schedulable.
func (pl *VolumeZone) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// A new node or updating a node's volume zone labels may make a pod schedulable.
// A note about UpdateNodeTaint event:
// Ideally, it's supposed to register only Add | UpdateNodeLabel because UpdateNodeTaint will never change the result from this plugin.
// But, we may miss Node/Add event due to preCheck, and we decided to register UpdateNodeTaint | UpdateNodeLabel for all plugins registering Node/Add.
// See: https://github.com/kubernetes/kubernetes/issues/109437
nodeActionType := fwk.Add | fwk.UpdateNodeLabel | fwk.UpdateNodeTaint
if pl.enableSchedulingQueueHint {
// preCheck is not used when QHint is enabled.
nodeActionType = fwk.Add | fwk.UpdateNodeLabel
}
return []fwk.ClusterEventWithHint{
// A new StorageClass with bind mode `VolumeBindingWaitForFirstConsumer` may make a pod schedulable.
// Because the `storageClass.volumeBindingMode` field is immutable, StorageClass update events are ignored.
{Event: fwk.ClusterEvent{Resource: fwk.StorageClass, ActionType: fwk.Add}, QueueingHintFn: pl.isSchedulableAfterStorageClassAdded},
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: nodeActionType}},
// A new pvc may make a pod schedulable.
// Also, if pvc's VolumeName is filled, that also could make a pod schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeClaimChange},
// A new pv or updating a pv's volume zone labels may make a pod schedulable.
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolume, ActionType: fwk.Add | fwk.Update}, QueueingHintFn: pl.isSchedulableAfterPersistentVolumeChange},
}, nil
}
// getPersistentVolumeClaimNameFromPod gets the names of the PVCs referenced by a pod.
func (pl *VolumeZone) getPersistentVolumeClaimNameFromPod(pod *v1.Pod) []string {
var pvcNames []string
for i := range pod.Spec.Volumes {
volume := pod.Spec.Volumes[i]
if volume.PersistentVolumeClaim == nil {
continue
}
pvcName := volume.PersistentVolumeClaim.ClaimName
pvcNames = append(pvcNames, pvcName)
}
return pvcNames
}
// isSchedulableAfterPersistentVolumeClaimChange is invoked whenever a PersistentVolumeClaim is added or updated.
// It checks whether the change to the PVC has made a previously unschedulable pod schedulable.
func (pl *VolumeZone) isSchedulableAfterPersistentVolumeClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, modifiedPVC, err := util.As[*v1.PersistentVolumeClaim](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeClaimChange: %w", err)
}
if pl.isPVCRequestedFromPod(logger, modifiedPVC, pod) {
logger.V(5).Info("PVC that is referred from the pod was created or updated, which might make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(modifiedPVC))
return fwk.Queue, nil
}
logger.V(5).Info("PVC irrelevant to the Pod was created or updated, which doesn't make this pod schedulable", "pod", klog.KObj(pod), "PVC", klog.KObj(modifiedPVC))
return fwk.QueueSkip, nil
}
// isPVCRequestedFromPod verifies whether the PVC is requested by a given Pod.
func (pl *VolumeZone) isPVCRequestedFromPod(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pod *v1.Pod) bool {
if (pvc == nil) || (pod.Namespace != pvc.Namespace) {
return false
}
pvcNames := pl.getPersistentVolumeClaimNameFromPod(pod)
for _, pvcName := range pvcNames {
if pvc.Name == pvcName {
logger.V(5).Info("PVC is referred from the pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
return true
}
}
logger.V(5).Info("PVC is not referred from the pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
return false
}
// isSchedulableAfterStorageClassAdded is invoked whenever a StorageClass is added.
// It checks whether the addition of the StorageClass has made a previously unschedulable pod schedulable.
// Only a new StorageClass with WaitForFirstConsumer can cause a pod to become schedulable.
func (pl *VolumeZone) isSchedulableAfterStorageClassAdded(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
_, addedStorageClass, err := util.As[*storage.StorageClass](nil, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterStorageClassAdded: %w", err)
}
if (addedStorageClass.VolumeBindingMode == nil) || (*addedStorageClass.VolumeBindingMode != storage.VolumeBindingWaitForFirstConsumer) {
logger.V(5).Info("StorageClass is created, but its VolumeBindingMode is not waitForFirstConsumer, which doesn't make the pod schedulable", "storageClass", klog.KObj(addedStorageClass), "pod", klog.KObj(pod))
return fwk.QueueSkip, nil
}
logger.V(5).Info("StorageClass with waitForFirstConsumer mode was created and it might make this pod schedulable", "pod", klog.KObj(pod), "StorageClass", klog.KObj(addedStorageClass))
return fwk.Queue, nil
}
// isSchedulableAfterPersistentVolumeChange is invoked whenever a PersistentVolume is added or updated.
// It checks whether the change to the PV has made a previously unschedulable pod schedulable.
// Changing the PV's topology labels could cause the pod to become schedulable.
func (pl *VolumeZone) isSchedulableAfterPersistentVolumeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (fwk.QueueingHint, error) {
originalPV, modifiedPV, err := util.As[*v1.PersistentVolume](oldObj, newObj)
if err != nil {
return fwk.Queue, fmt.Errorf("unexpected objects in isSchedulableAfterPersistentVolumeChange: %w", err)
}
if originalPV == nil {
logger.V(5).Info("PV is newly created, which might make the pod schedulable")
return fwk.Queue, nil
}
originalPVTopologies := pl.getPVTopologies(logger, originalPV)
modifiedPVTopologies := pl.getPVTopologies(logger, modifiedPV)
if !reflect.DeepEqual(originalPVTopologies, modifiedPVTopologies) {
logger.V(5).Info("PV's topology was updated, which might make the pod schedulable.", "pod", klog.KObj(pod), "PV", klog.KObj(modifiedPV))
return fwk.Queue, nil
}
logger.V(5).Info("PV was updated, but the topology is unchanged, which it doesn't make the pod schedulable", "pod", klog.KObj(pod), "PV", klog.KObj(modifiedPV))
return fwk.QueueSkip, nil
}
// getPVTopologies retrieves the pvTopology entries from a given PV and returns them as an array.
// This function doesn't check spec.nodeAffinity,
// because that field is read-only after creation and thus cannot be updated,
// and nodeAffinity is handled by the node affinity plugin.
func (pl *VolumeZone) getPVTopologies(logger klog.Logger, pv *v1.PersistentVolume) []pvTopology {
podPVTopologies := make([]pvTopology, 0)
for _, key := range topologyLabels {
if value, ok := pv.ObjectMeta.Labels[key]; ok {
labelZonesSet, err := volumehelpers.LabelZonesToSet(value)
if err != nil {
logger.V(5).Info("failed to parse PV's topology label, ignoring the label", "label", fmt.Sprintf("%s:%s", key, value), "err", err)
continue
}
podPVTopologies = append(podPVTopologies, pvTopology{
pvName: pv.Name,
key: key,
values: sets.Set[string](labelZonesSet),
})
}
}
return podPVTopologies
}
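// Hedged example (hypothetical PV name): multi-zone PVs carry all zones in a
// single label value separated by "__", which volumehelpers.LabelZonesToSet
// splits apart, so a PV labeled topology.kubernetes.io/zone=us-east-1a__us-east-1b
// yields one pvTopology whose values set contains both zones.
func illustrateMultiZonePVTopology(logger klog.Logger, pl *VolumeZone) []pvTopology {
pv := &v1.PersistentVolume{}
pv.Name = "example-pv"
pv.Labels = map[string]string{v1.LabelTopologyZone: "us-east-1a__us-east-1b"}
return pl.getPVTopologies(logger, pv)
}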
// New initializes a new plugin and returns it.
func New(_ context.Context, _ runtime.Object, handle fwk.Handle, fts feature.Features) (fwk.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
scLister := informerFactory.Storage().V1().StorageClasses().Lister()
return &VolumeZone{
pvLister: pvLister,
pvcLister: pvcLister,
scLister: scLister,
enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
}, nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package preemption
import (
"context"
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
policylisters "k8s.io/client-go/listers/policy/v1"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// Candidate represents a nominated node on which the preemptor can be scheduled,
// along with the list of victims that should be evicted for the preemptor to fit the node.
type Candidate interface {
// Victims wraps a list of to-be-preempted Pods and the number of PDB violations.
Victims() *extenderv1.Victims
// Name returns the target node name where the preemptor gets nominated to run.
Name() string
}
type candidate struct {
victims *extenderv1.Victims
name string
}
// Victims returns s.victims.
func (s *candidate) Victims() *extenderv1.Victims {
return s.victims
}
// Name returns s.name.
func (s *candidate) Name() string {
return s.name
}
type candidateList struct {
idx int32
items []Candidate
}
func newCandidateList(size int32) *candidateList {
return &candidateList{idx: -1, items: make([]Candidate, size)}
}
// add adds a new candidate to the internal array atomically.
func (cl *candidateList) add(c *candidate) {
if idx := atomic.AddInt32(&cl.idx, 1); idx < int32(len(cl.items)) {
cl.items[idx] = c
}
}
// size returns the number of candidates stored. Note that some add() operations
// might still be executing when this is called, so care must be taken to
// ensure that all add() operations complete before accessing the elements of
// the list.
func (cl *candidateList) size() int32 {
n := atomic.LoadInt32(&cl.idx) + 1
if n >= int32(len(cl.items)) {
n = int32(len(cl.items))
}
return n
}
// get returns the internal candidate array. This function is NOT atomic and
// assumes that all add() operations have been completed.
func (cl *candidateList) get() []Candidate {
return cl.items[:cl.size()]
}
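// Illustrative sketch (hypothetical helper): candidateList tolerates concurrent
// add calls because each call claims a slot with atomic.AddInt32, but get must
// only run after every add has finished, e.g. behind a WaitGroup as here.
func illustrateConcurrentCandidateAdds(names []string) []Candidate {
cl := newCandidateList(int32(len(names)))
var wg sync.WaitGroup
for _, name := range names {
wg.Add(1)
go func(n string) {
defer wg.Done()
cl.add(&candidate{name: n, victims: &extenderv1.Victims{}})
}(name)
}
wg.Wait() // all add() calls completed; get() is now safe
return cl.get()
}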
// Interface is expected to be implemented by the different preemption plugins, as those member
// methods might behave differently from the default preemption.
type Interface interface {
// GetOffsetAndNumCandidates chooses a random offset and calculates the number of candidates that should be
// shortlisted for dry running preemption.
GetOffsetAndNumCandidates(nodes int32) (int32, int32)
// CandidatesToVictimsMap builds a map from the target node to a list of to-be-preempted Pods and the number of PDB violations.
CandidatesToVictimsMap(candidates []Candidate) map[string]*extenderv1.Victims
// PodEligibleToPreemptOthers returns one bool and one string. The bool indicates whether this pod should be considered for
// preempting other pods or not. The string includes the reason if this pod isn't eligible.
PodEligibleToPreemptOthers(ctx context.Context, pod *v1.Pod, nominatedNodeStatus *fwk.Status) (bool, string)
// SelectVictimsOnNode finds minimum set of pods on the given node that should be preempted in order to make enough room
// for "pod" to be scheduled.
// Note that both `state` and `nodeInfo` are deep copied.
SelectVictimsOnNode(ctx context.Context, state fwk.CycleState,
pod *v1.Pod, nodeInfo fwk.NodeInfo, pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *fwk.Status)
// OrderedScoreFuncs returns a list of ordered score functions to select preferable node where victims will be preempted.
// The ordered score functions will be processed one by one iff we find more than one node with the highest score.
// Default score functions will be processed if nil returned here for backwards-compatibility.
OrderedScoreFuncs(ctx context.Context, nodesToVictims map[string]*extenderv1.Victims) []func(node string) int64
}
type Evaluator struct {
PluginName string
Handler fwk.Handle
PodLister corelisters.PodLister
PdbLister policylisters.PodDisruptionBudgetLister
enableAsyncPreemption bool
mu sync.RWMutex
// preempting is a set that records the pods that are currently triggering preemption asynchronously,
// which is used to prevent those pods from entering the scheduling cycle in the meantime.
preempting sets.Set[types.UID]
// PreemptPod is a function that actually makes API calls to preempt a specific Pod.
// This is exposed to be replaced during tests.
PreemptPod func(ctx context.Context, c Candidate, preemptor, victim *v1.Pod, pluginName string) error
Interface
}
func NewEvaluator(pluginName string, fh fwk.Handle, i Interface, enableAsyncPreemption bool) *Evaluator {
podLister := fh.SharedInformerFactory().Core().V1().Pods().Lister()
pdbLister := fh.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister()
ev := &Evaluator{
PluginName: pluginName,
Handler: fh,
PodLister: podLister,
PdbLister: pdbLister,
Interface: i,
enableAsyncPreemption: enableAsyncPreemption,
preempting: sets.New[types.UID](),
}
// PreemptPod actually makes API calls to preempt a specific Pod.
//
// We implement it here directly, rather than creating a separate method like ev.preemptPod(...)
// to prevent the misuse of the PreemptPod function.
ev.PreemptPod = func(ctx context.Context, c Candidate, preemptor, victim *v1.Pod, pluginName string) error {
logger := klog.FromContext(ctx)
// If the victim is a WaitingPod, send a reject message to the PermitPlugin.
// Otherwise we should delete the victim.
if waitingPod := ev.Handler.GetWaitingPod(victim.UID); waitingPod != nil {
waitingPod.Reject(pluginName, "preempted")
logger.V(2).Info("Preemptor pod rejected a waiting pod", "preemptor", klog.KObj(preemptor), "waitingPod", klog.KObj(victim), "node", c.Name())
} else {
condition := &v1.PodCondition{
Type: v1.DisruptionTarget,
ObservedGeneration: apipod.CalculatePodConditionObservedGeneration(&victim.Status, victim.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: v1.PodReasonPreemptionByScheduler,
Message: fmt.Sprintf("%s: preempting to accommodate a higher priority pod", preemptor.Spec.SchedulerName),
}
newStatus := victim.Status.DeepCopy()
updated := apipod.UpdatePodCondition(newStatus, condition)
if updated {
if err := util.PatchPodStatus(ctx, ev.Handler.ClientSet(), victim.Name, victim.Namespace, &victim.Status, newStatus); err != nil {
logger.Error(err, "Could not add DisruptionTarget condition due to preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(preemptor))
return err
}
}
if err := util.DeletePod(ctx, ev.Handler.ClientSet(), victim); err != nil {
if apierrors.IsNotFound(err) {
logger.V(2).Info("Victim Pod is already deleted", "preemptor", klog.KObj(preemptor), "victim", klog.KObj(victim), "node", c.Name())
} else {
logger.Error(err, "Tried to preempted pod", "pod", klog.KObj(victim), "preemptor", klog.KObj(preemptor))
}
return err
}
logger.V(2).Info("Preemptor Pod preempted victim Pod", "preemptor", klog.KObj(preemptor), "victim", klog.KObj(victim), "node", c.Name())
}
ev.Handler.EventRecorder().Eventf(victim, preemptor, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by pod %v on node %v", preemptor.UID, c.Name())
return nil
}
return ev
}
// IsPodRunningPreemption returns true if the pod is currently triggering preemption asynchronously.
func (ev *Evaluator) IsPodRunningPreemption(podUID types.UID) bool {
ev.mu.RLock()
defer ev.mu.RUnlock()
return ev.preempting.Has(podUID)
}
// Preempt returns a PostFilterResult carrying suggested nominatedNodeName, along with a Status.
// The semantics of the returned <PostFilterResult, Status> pair vary across scenarios:
//
// - <nil, Error>. This denotes it's a transient/rare error that may be self-healed in future cycles.
//
// - <nil, Unschedulable>. This status is mostly as expected like the preemptor is waiting for the
// victims to be fully terminated.
//
// - In both cases above, a nil PostFilterResult is returned to keep the pod's nominatedNodeName unchanged.
//
// - <non-nil PostFilterResult, Unschedulable>. It indicates the pod cannot be scheduled even with preemption.
// In this case, a non-nil PostFilterResult is returned and result.NominatingMode instructs how to deal with
// the nominatedNodeName.
//
// - <non-nil PostFilterResult, Success>. It's the regular happy path
// and the non-empty nominatedNodeName will be applied to the preemptor pod.
func (ev *Evaluator) Preempt(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
logger := klog.FromContext(ctx)
// 0) Fetch the latest version of <pod>.
// It's safe to directly fetch the pod here because the informer cache has already been
// initialized when creating the Scheduler object.
// However, tests may need to manually initialize the shared pod informer.
podNamespace, podName := pod.Namespace, pod.Name
pod, err := ev.PodLister.Pods(pod.Namespace).Get(pod.Name)
if err != nil {
logger.Error(err, "Could not get the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName))
return nil, fwk.AsStatus(err)
}
// 1) Ensure the preemptor is eligible to preempt other pods.
nominatedNodeStatus := m.Get(pod.Status.NominatedNodeName)
if ok, msg := ev.PodEligibleToPreemptOthers(ctx, pod, nominatedNodeStatus); !ok {
logger.V(5).Info("Pod is not eligible for preemption", "pod", klog.KObj(pod), "reason", msg)
return nil, fwk.NewStatus(fwk.Unschedulable, msg)
}
// 2) Find all preemption candidates.
allNodes, err := ev.Handler.SnapshotSharedLister().NodeInfos().List()
if err != nil {
return nil, fwk.AsStatus(err)
}
candidates, nodeToStatusMap, err := ev.findCandidates(ctx, state, allNodes, pod, m)
if err != nil && len(candidates) == 0 {
return nil, fwk.AsStatus(err)
}
// Return a FitError only when there are no candidates that fit the pod.
if len(candidates) == 0 {
logger.V(2).Info("No preemption candidate is found; preemption is not helpful for scheduling", "pod", klog.KObj(pod))
fitError := &framework.FitError{
Pod: pod,
NumAllNodes: len(allNodes),
Diagnosis: framework.Diagnosis{
NodeToStatus: nodeToStatusMap,
// Leave UnschedulablePlugins and PendingPlugins as nil, as they won't be used when moving Pods.
},
}
fitError.Diagnosis.NodeToStatus.SetAbsentNodesStatus(fwk.NewStatus(fwk.UnschedulableAndUnresolvable, "Preemption is not helpful for scheduling"))
// Specify nominatedNodeName to clear the pod's nominatedNodeName status, if applicable.
return framework.NewPostFilterResultWithNominatedNode(""), fwk.NewStatus(fwk.Unschedulable, fitError.Error())
}
// 3) Interact with registered Extenders to filter out some candidates if needed.
candidates, status := ev.callExtenders(logger, pod, candidates)
if !status.IsSuccess() {
return nil, status
}
// 4) Find the best candidate.
bestCandidate := ev.SelectCandidate(ctx, candidates)
if bestCandidate == nil || len(bestCandidate.Name()) == 0 {
return nil, fwk.NewStatus(fwk.Unschedulable, "no candidate node for preemption")
}
logger.V(2).Info("the target node for the preemption is determined", "node", bestCandidate.Name(), "pod", klog.KObj(pod))
// 5) Perform preparation work before nominating the selected candidate.
if ev.enableAsyncPreemption {
ev.prepareCandidateAsync(bestCandidate, pod, ev.PluginName)
} else {
if status := ev.prepareCandidate(ctx, bestCandidate, pod, ev.PluginName); !status.IsSuccess() {
return nil, status
}
}
return framework.NewPostFilterResultWithNominatedNode(bestCandidate.Name()), fwk.NewStatus(fwk.Success)
}
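// Illustrative sketch (not part of this package): a PostFilter plugin can
// delegate to the Evaluator and pass the result through unchanged, since the
// <PostFilterResult, Status> pairs documented above are exactly what the
// framework expects from RunPostFilterPlugins. The receiver and field names
// below are assumptions for illustration:
//
//	func (pl *examplePreemptionPlugin) PostFilter(ctx context.Context, state fwk.CycleState,
//		pod *v1.Pod, m fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
//		return pl.evaluator.Preempt(ctx, state, pod, m)
//	}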
// findCandidates calculates a slice of preemption candidates.
// Executing any of the candidates would make the given <pod> schedulable.
func (ev *Evaluator) findCandidates(ctx context.Context, state fwk.CycleState, allNodes []fwk.NodeInfo, pod *v1.Pod, m fwk.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
if len(allNodes) == 0 {
return nil, nil, errors.New("no nodes available")
}
logger := klog.FromContext(ctx)
// Get a list of nodes with failed predicates (Unschedulable) that may be satisfied by removing pods from the node.
potentialNodes, err := m.NodesForStatusCode(ev.Handler.SnapshotSharedLister().NodeInfos(), fwk.Unschedulable)
if err != nil {
return nil, nil, err
}
if len(potentialNodes) == 0 {
logger.V(3).Info("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod))
return nil, framework.NewDefaultNodeToStatus(), nil
}
pdbs, err := getPodDisruptionBudgets(ev.PdbLister)
if err != nil {
return nil, nil, err
}
offset, candidatesNum := ev.GetOffsetAndNumCandidates(int32(len(potentialNodes)))
return ev.DryRunPreemption(ctx, state, pod, potentialNodes, pdbs, offset, candidatesNum)
}
// callExtenders calls the given <extenders> to select the list of feasible candidates.
// We will only check <candidates> with extenders that support preemption.
// Extenders which do not support preemption may later prevent the preemptor from being scheduled on the nominated
// node. In that case, the scheduler will find a different host for the preemptor in subsequent scheduling cycles.
func (ev *Evaluator) callExtenders(logger klog.Logger, pod *v1.Pod, candidates []Candidate) ([]Candidate, *fwk.Status) {
extenders := ev.Handler.Extenders()
nodeLister := ev.Handler.SnapshotSharedLister().NodeInfos()
if len(extenders) == 0 {
return candidates, nil
}
// Migrate the candidate slice to victimsMap to adapt to the Extender interface.
// This is only applicable for candidate slices whose nominated node names are unique.
victimsMap := ev.CandidatesToVictimsMap(candidates)
if len(victimsMap) == 0 {
return candidates, nil
}
for _, extender := range extenders {
if !extender.SupportsPreemption() || !extender.IsInterested(pod) {
continue
}
nodeNameToVictims, err := extender.ProcessPreemption(pod, victimsMap, nodeLister)
if err != nil {
if extender.IsIgnorable() {
logger.V(2).Info("Skipped extender as it returned error and has ignorable flag set",
"extender", extender.Name(), "err", err)
continue
}
return nil, fwk.AsStatus(err)
}
// Check if the returned victims are valid.
for nodeName, victims := range nodeNameToVictims {
if victims == nil || len(victims.Pods) == 0 {
if extender.IsIgnorable() {
delete(nodeNameToVictims, nodeName)
logger.V(2).Info("Ignored node for which the extender didn't report victims", "node", klog.KRef("", nodeName), "extender", extender.Name())
continue
}
return nil, fwk.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName))
}
}
// Replace victimsMap with the new result after preemption, so the
// remaining extenders can continue to use it as a parameter.
victimsMap = nodeNameToVictims
// If the node list becomes empty, no preemption can happen regardless of other extenders.
if len(victimsMap) == 0 {
break
}
}
var newCandidates []Candidate
for nodeName := range victimsMap {
newCandidates = append(newCandidates, &candidate{
victims: victimsMap[nodeName],
name: nodeName,
})
}
return newCandidates, nil
}
// SelectCandidate chooses the best-fit candidate from the given <candidates> and returns it.
// NOTE: This method is exported for easier testing in default preemption.
func (ev *Evaluator) SelectCandidate(ctx context.Context, candidates []Candidate) Candidate {
logger := klog.FromContext(ctx)
if len(candidates) == 0 {
return nil
}
if len(candidates) == 1 {
return candidates[0]
}
victimsMap := ev.CandidatesToVictimsMap(candidates)
scoreFuncs := ev.OrderedScoreFuncs(ctx, victimsMap)
candidateNode := pickOneNodeForPreemption(logger, victimsMap, scoreFuncs)
// Same as CandidatesToVictimsMap, this logic is not applicable to out-of-tree
// preemption plugins that exercise different candidates on the same nominated node.
if victims := victimsMap[candidateNode]; victims != nil {
return &candidate{
victims: victims,
name: candidateNode,
}
}
// We shouldn't reach here.
utilruntime.HandleErrorWithContext(ctx, nil, "Unexpected case no candidate was selected", "candidates", candidates)
// To not break the whole flow, return the first candidate.
return candidates[0]
}
// prepareCandidate does some preparation work before nominating the selected candidate:
// - Evict the victim pods
// - Reject the victim pods if they are in waitingPod map
// - Clear the low-priority pods' nominatedNodeName status if needed
func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.Pod, pluginName string) *fwk.Status {
fh := ev.Handler
cs := ev.Handler.ClientSet()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
logger := klog.FromContext(ctx)
errCh := parallelize.NewErrorChannel()
fh.Parallelizer().Until(ctx, len(c.Victims().Pods), func(index int) {
victimPod := c.Victims().Pods[index]
if victimPod.DeletionTimestamp != nil {
// If the victim Pod is already being deleted, we don't have to make another deletion api call.
logger.V(2).Info("Victim Pod is already deleted, skipping the API call for it", "preemptor", klog.KObj(pod), "node", c.Name(), "victim", klog.KObj(victimPod))
return
}
if err := ev.PreemptPod(ctx, c, pod, victimPod, pluginName); err != nil && !apierrors.IsNotFound(err) {
errCh.SendErrorWithCancel(err, cancel)
}
}, ev.PluginName)
if err := errCh.ReceiveError(); err != nil {
return fwk.AsStatus(err)
}
metrics.PreemptionVictims.Observe(float64(len(c.Victims().Pods)))
// Lower priority pods nominated to run on this node may no longer fit on
// this node, so we should remove their nomination. Removing their
// nomination updates these pods and moves them to the active queue,
// letting the scheduler find another place for them.
nominatedPods := getLowerPriorityNominatedPods(logger, fh, pod, c.Name())
if err := clearNominatedNodeName(ctx, cs, ev.Handler.APICacher(), nominatedPods...); err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Cannot clear 'NominatedNodeName' field")
// We do not return as this error is not critical.
}
return nil
}
// clearNominatedNodeName internally submits a patch request to the API server
// to set each pod's Status.NominatedNodeName to "".
func clearNominatedNodeName(ctx context.Context, cs clientset.Interface, apiCacher fwk.APICacher, pods ...*v1.Pod) utilerrors.Aggregate {
var errs []error
for _, p := range pods {
if apiCacher != nil {
// When API cacher is available, use it to clear the NominatedNodeName.
_, err := apiCacher.PatchPodStatus(p, nil, &fwk.NominatingInfo{NominatedNodeName: "", NominatingMode: fwk.ModeOverride})
if err != nil {
errs = append(errs, err)
}
} else {
if len(p.Status.NominatedNodeName) == 0 {
continue
}
podStatusCopy := p.Status.DeepCopy()
podStatusCopy.NominatedNodeName = ""
if err := util.PatchPodStatus(ctx, cs, p.Name, p.Namespace, &p.Status, podStatusCopy); err != nil {
errs = append(errs, err)
}
}
}
return utilerrors.NewAggregate(errs)
}
// prepareCandidateAsync triggers a goroutine for some preparation work:
// - Evict the victim pods
// - Reject the victim pods if they are in waitingPod map
// - Clear the low-priority pods' nominatedNodeName status if needed
// The Pod won't be retried until the goroutine triggered here completes.
//
// See http://kep.k8s.io/4832 for how the async preemption works.
func (ev *Evaluator) prepareCandidateAsync(c Candidate, pod *v1.Pod, pluginName string) {
metrics.PreemptionVictims.Observe(float64(len(c.Victims().Pods)))
// Intentionally create a new context rather than using the ctx from the scheduling cycle,
// because this process could continue even after this scheduling cycle finishes.
ctx, cancel := context.WithCancel(context.Background())
logger := klog.FromContext(ctx)
victimPods := make([]*v1.Pod, 0, len(c.Victims().Pods))
for _, victim := range c.Victims().Pods {
if victim.DeletionTimestamp != nil {
// If the victim Pod is already being deleted, we don't have to make another deletion api call.
logger.V(2).Info("Victim Pod is already deleted, skipping the API call for it", "preemptor", klog.KObj(pod), "node", c.Name(), "victim", klog.KObj(victim))
continue
}
victimPods = append(victimPods, victim)
}
if len(victimPods) == 0 {
cancel()
return
}
errCh := parallelize.NewErrorChannel()
// allPodsAlreadyDeleted tracks whether every victim pod turned out to be already deleted before the API call was made.
var allPodsAlreadyDeleted atomic.Bool
allPodsAlreadyDeleted.Store(true)
preemptPod := func(index int) {
victim := victimPods[index]
err := ev.PreemptPod(ctx, c, pod, victim, pluginName)
switch {
case err != nil && !apierrors.IsNotFound(err):
// We don't have to handle NotFound error here, because it means the victim Pod is already deleted, and the preemption didn't have to remove it.
errCh.SendErrorWithCancel(err, cancel)
case err == nil:
allPodsAlreadyDeleted.Store(false)
}
}
ev.mu.Lock()
ev.preempting.Insert(pod.UID)
ev.mu.Unlock()
go func() {
startTime := time.Now()
result := metrics.GoroutineResultSuccess
defer metrics.PreemptionGoroutinesDuration.WithLabelValues(result).Observe(metrics.SinceInSeconds(startTime))
defer metrics.PreemptionGoroutinesExecutionTotal.WithLabelValues(result).Inc()
defer func() {
// When API call isn't successful, the Pod may get stuck in the unschedulable pod pool in the worst case.
// So, we should move the Pod to the activeQ.
if result == metrics.GoroutineResultError ||
// When all pods are already deleted (which is very rare, but could happen in theory),
// we must activate the preemptor Pod because it might have missed the Pod/delete events that would requeue it.
allPodsAlreadyDeleted.Load() {
ev.Handler.Activate(logger, map[string]*v1.Pod{pod.Name: pod})
}
}()
defer cancel()
logger.V(2).Info("Start the preemption asynchronously", "preemptor", klog.KObj(pod), "node", c.Name(), "numVictims", len(c.Victims().Pods), "numVictimsToDelete", len(victimPods))
// Lower priority pods nominated to run on this node may no longer fit on
// this node, so we should remove their nomination. Removing their
// nomination updates these pods and moves them to the active queue,
// letting the scheduler find another place for them.
nominatedPods := getLowerPriorityNominatedPods(logger, ev.Handler, pod, c.Name())
if err := clearNominatedNodeName(ctx, ev.Handler.ClientSet(), ev.Handler.APICacher(), nominatedPods...); err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Cannot clear 'NominatedNodeName' field from lower priority pods on the same target node", "node", c.Name())
result = metrics.GoroutineResultError
// We do not return as this error is not critical.
}
if len(victimPods) > 1 {
// We can evict all victims in parallel except the last one.
// We have to remove the preemptor from the preempting map before the last victim is evicted
// because, otherwise, the scheduling queue might be notified of the pod removal before
// we remove the preemptor from the preempting map,
// and the preemptor could end up stuck in the unschedulable pod pool
// because all the pod removal events would be ignored.
ev.Handler.Parallelizer().Until(ctx, len(victimPods)-1, preemptPod, ev.PluginName)
if err := errCh.ReceiveError(); err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Error occurred during async preemption")
result = metrics.GoroutineResultError
}
}
ev.mu.Lock()
delete(ev.preempting, pod.UID)
ev.mu.Unlock()
err := ev.PreemptPod(ctx, c, pod, victimPods[len(victimPods)-1], pluginName)
switch {
case err != nil && !apierrors.IsNotFound(err):
// We don't have to handle NotFound error here, because it means the victim Pod is already deleted, and the preemption didn't have to remove it.
utilruntime.HandleErrorWithContext(ctx, err, "Error occurred during async preemption")
result = metrics.GoroutineResultError
case err == nil:
allPodsAlreadyDeleted.Store(false)
}
logger.V(2).Info("Async Preemption finished completely", "preemptor", klog.KObj(pod), "node", c.Name(), "result", result)
}()
}
func getPodDisruptionBudgets(pdbLister policylisters.PodDisruptionBudgetLister) ([]*policy.PodDisruptionBudget, error) {
if pdbLister != nil {
return pdbLister.List(labels.Everything())
}
return nil, nil
}
// pickOneNodeForPreemption chooses one node among the given nodes.
// It assumes pods in each map entry are ordered by decreasing priority.
// If scoreFuncs is not empty, it picks a node based on the scores scoreFuncs returns.
// If scoreFuncs is empty, it picks a node based on the following criteria:
// 1. A node with the minimum number of PDB violations.
// 2. A node with the minimum highest-priority victim.
// 3. Ties are broken by the sum of priorities of all victims.
// 4. If there are still ties, the node with the minimum number of victims is picked.
// 5. If there are still ties, the node with the latest start time of all highest-priority victims is picked.
// 6. If there are still ties, the first such node is picked (sort of randomly).
func pickOneNodeForPreemption(logger klog.Logger, nodesToVictims map[string]*extenderv1.Victims, scoreFuncs []func(node string) int64) string {
if len(nodesToVictims) == 0 {
return ""
}
allCandidates := make([]string, 0, len(nodesToVictims))
for node := range nodesToVictims {
allCandidates = append(allCandidates, node)
}
if len(scoreFuncs) == 0 {
minNumPDBViolatingScoreFunc := func(node string) int64 {
// The smaller the NumPDBViolations, the higher the score.
return -nodesToVictims[node].NumPDBViolations
}
minHighestPriorityScoreFunc := func(node string) int64 {
// highestPodPriority is the highest priority among the victims on this node.
highestPodPriority := corev1helpers.PodPriority(nodesToVictims[node].Pods[0])
// The smaller the highestPodPriority, the higher the score.
return -int64(highestPodPriority)
}
minSumPrioritiesScoreFunc := func(node string) int64 {
var sumPriorities int64
for _, pod := range nodesToVictims[node].Pods {
// We add MaxInt32+1 to all priorities to make all of them >= 0. This is
// needed so that a node with a few pods with negative priority is not
// picked over a node with a smaller number of pods with the same negative
// priority (and similar scenarios).
sumPriorities += int64(corev1helpers.PodPriority(pod)) + int64(math.MaxInt32+1)
}
// The smaller the sumPriorities, the higher the score.
return -sumPriorities
}
minNumPodsScoreFunc := func(node string) int64 {
// The smaller the length of pods, the higher the score.
return -int64(len(nodesToVictims[node].Pods))
}
latestStartTimeScoreFunc := func(node string) int64 {
// Get the earliest start time of all pods on the current node.
earliestStartTimeOnNode := util.GetEarliestPodStartTime(nodesToVictims[node])
if earliestStartTimeOnNode == nil {
utilruntime.HandleErrorWithLogger(logger, nil, "Unexpected nil earliestStartTime for node", "node", node)
return int64(math.MinInt64)
}
// The bigger the earliestStartTimeOnNode, the higher the score.
return earliestStartTimeOnNode.UnixNano()
}
// Each scoreFunc scores the nodes according to specific rules and keeps the name of the node
// with the highest score. If and only if the scoreFunc has more than one node with the highest
// score, we will execute the other scoreFunc in order of precedence.
scoreFuncs = []func(string) int64{
// A node with a minimum number of PDB is preferable.
minNumPDBViolatingScoreFunc,
// A node with a minimum highest priority victim is preferable.
minHighestPriorityScoreFunc,
// A node with the smallest sum of priorities is preferable.
minSumPrioritiesScoreFunc,
// A node with the minimum number of pods is preferable.
minNumPodsScoreFunc,
// A node with the latest start time of all highest priority victims is preferable.
latestStartTimeScoreFunc,
// If there are still ties, then the first Node in the list is selected.
}
}
for _, f := range scoreFuncs {
selectedNodes := []string{}
maxScore := int64(math.MinInt64)
for _, node := range allCandidates {
score := f(node)
if score > maxScore {
maxScore = score
selectedNodes = []string{}
}
if score == maxScore {
selectedNodes = append(selectedNodes, node)
}
}
if len(selectedNodes) == 1 {
return selectedNodes[0]
}
allCandidates = selectedNodes
}
return allCandidates[0]
}
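// Worked example (assumed values, for illustration): suppose node-a's victims
// have priorities {100, 50} with one PDB violation, and node-b's victims have
// priorities {80, 80, 80} with none. Criterion 1 (fewest PDB violations)
// already picks node-b, so the later tie-breakers never run. If both nodes
// had zero violations, criterion 2 would compare the highest victim priority
// (100 vs 80) and still pick node-b.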
// getLowerPriorityNominatedPods returns pods whose priority is smaller than the
// priority of the given "pod" and are nominated to run on the given node.
// Note: We could possibly check if the nominated lower priority pods still fit
// and return those that no longer fit, but that would require lots of
// manipulation of NodeInfo and PreFilter state per nominated pod. It may not be
// worth the complexity, especially because we generally expect to have a very
// small number of nominated pods per node.
func getLowerPriorityNominatedPods(logger klog.Logger, pn fwk.PodNominator, pod *v1.Pod, nodeName string) []*v1.Pod {
podInfos := pn.NominatedPodsForNode(nodeName)
if len(podInfos) == 0 {
return nil
}
var lowerPriorityPods []*v1.Pod
podPriority := corev1helpers.PodPriority(pod)
for _, pi := range podInfos {
if corev1helpers.PodPriority(pi.GetPod()) < podPriority {
lowerPriorityPods = append(lowerPriorityPods, pi.GetPod())
}
}
return lowerPriorityPods
}
// DryRunPreemption simulates preemption logic on <potentialNodes> in parallel, and
// returns preemption candidates and a map indicating filtered node statuses.
// The number of candidates depends on the constraints defined in the plugin's args. In the returned list of
// candidates, ones that do not violate PDB are preferred over ones that do.
// NOTE: This method is exported for easier testing in default preemption.
func (ev *Evaluator) DryRunPreemption(ctx context.Context, state fwk.CycleState, pod *v1.Pod, potentialNodes []fwk.NodeInfo,
pdbs []*policy.PodDisruptionBudget, offset int32, candidatesNum int32) ([]Candidate, *framework.NodeToStatus, error) {
fh := ev.Handler
nonViolatingCandidates := newCandidateList(candidatesNum)
violatingCandidates := newCandidateList(candidatesNum)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
nodeStatuses := framework.NewDefaultNodeToStatus()
logger := klog.FromContext(ctx)
logger.V(5).Info("Dry run the preemption", "potentialNodesNumber", len(potentialNodes), "pdbsNumber", len(pdbs), "offset", offset, "candidatesNumber", candidatesNum)
var statusesLock sync.Mutex
var errs []error
checkNode := func(i int) {
nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Snapshot()
logger.V(5).Info("Check the potential node for preemption", "node", nodeInfoCopy.Node().Name)
stateCopy := state.Clone()
pods, numPDBViolations, status := ev.SelectVictimsOnNode(ctx, stateCopy, pod, nodeInfoCopy, pdbs)
if status.IsSuccess() && len(pods) != 0 {
victims := extenderv1.Victims{
Pods: pods,
NumPDBViolations: int64(numPDBViolations),
}
c := &candidate{
victims: &victims,
name: nodeInfoCopy.Node().Name,
}
if numPDBViolations == 0 {
nonViolatingCandidates.add(c)
} else {
violatingCandidates.add(c)
}
nvcSize, vcSize := nonViolatingCandidates.size(), violatingCandidates.size()
if nvcSize > 0 && nvcSize+vcSize >= candidatesNum {
cancel()
}
return
}
if status.IsSuccess() && len(pods) == 0 {
status = fwk.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeInfoCopy.Node().Name))
}
statusesLock.Lock()
if status.Code() == fwk.Error {
errs = append(errs, status.AsError())
}
nodeStatuses.Set(nodeInfoCopy.Node().Name, status)
statusesLock.Unlock()
}
fh.Parallelizer().Until(ctx, len(potentialNodes), checkNode, ev.PluginName)
return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses, utilerrors.NewAggregate(errs)
}
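// Illustrative note (not part of this package): checkNode starts at a random
// offset and wraps around, so for offset=3 and five potential nodes the visit
// order of indexes is 3, 4, 0, 1, 2:
//
//	for i := 0; i < 5; i++ {
//		fmt.Print((3+i)%5, " ") // prints: 3 4 0 1 2
//	}
//
// Randomizing the starting point spreads dry-run preemption across nodes
// between scheduling cycles instead of always probing the same ones first.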
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"context"
"errors"
"fmt"
"io"
"reflect"
"sort"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/events"
"k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/api_dispatcher"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/util/slice"
)
const (
// Specifies the maximum timeout a permit plugin can return.
maxTimeout = 15 * time.Minute
)
// frameworkImpl is the component responsible for initializing and running scheduler
// plugins.
type frameworkImpl struct {
registry Registry
snapshotSharedLister fwk.SharedLister
waitingPods *waitingPodsMap
scorePluginWeight map[string]int
preEnqueuePlugins []fwk.PreEnqueuePlugin
enqueueExtensions []fwk.EnqueueExtensions
queueSortPlugins []fwk.QueueSortPlugin
preFilterPlugins []fwk.PreFilterPlugin
filterPlugins []fwk.FilterPlugin
postFilterPlugins []fwk.PostFilterPlugin
preScorePlugins []fwk.PreScorePlugin
scorePlugins []fwk.ScorePlugin
reservePlugins []fwk.ReservePlugin
preBindPlugins []fwk.PreBindPlugin
bindPlugins []fwk.BindPlugin
postBindPlugins []fwk.PostBindPlugin
permitPlugins []fwk.PermitPlugin
// pluginsMap contains all plugins, by name.
pluginsMap map[string]fwk.Plugin
clientSet clientset.Interface
kubeConfig *restclient.Config
eventRecorder events.EventRecorder
informerFactory informers.SharedInformerFactory
sharedDRAManager fwk.SharedDRAManager
logger klog.Logger
metricsRecorder *metrics.MetricAsyncRecorder
profileName string
percentageOfNodesToScore *int32
extenders []fwk.Extender
fwk.PodNominator
fwk.PodActivator
apiDispatcher *apidispatcher.APIDispatcher
apiCacher fwk.APICacher
parallelizer fwk.Parallelizer
}
// extensionPoint encapsulates desired and applied set of plugins at a specific extension
// point. This is used to simplify iterating over all extension points supported by the
// frameworkImpl.
type extensionPoint struct {
// the set of plugins to be configured at this extension point.
plugins *config.PluginSet
// a pointer to the slice storing plugins implementations that will run at this
// extension point.
slicePtr interface{}
}
func (f *frameworkImpl) getExtensionPoints(plugins *config.Plugins) []extensionPoint {
return []extensionPoint{
{&plugins.PreFilter, &f.preFilterPlugins},
{&plugins.Filter, &f.filterPlugins},
{&plugins.PostFilter, &f.postFilterPlugins},
{&plugins.Reserve, &f.reservePlugins},
{&plugins.PreScore, &f.preScorePlugins},
{&plugins.Score, &f.scorePlugins},
{&plugins.PreBind, &f.preBindPlugins},
{&plugins.Bind, &f.bindPlugins},
{&plugins.PostBind, &f.postBindPlugins},
{&plugins.Permit, &f.permitPlugins},
{&plugins.PreEnqueue, &f.preEnqueuePlugins},
{&plugins.QueueSort, &f.queueSortPlugins},
}
}
// Extenders returns the registered extenders.
func (f *frameworkImpl) Extenders() []fwk.Extender {
return f.extenders
}
type frameworkOptions struct {
componentConfigVersion string
clientSet clientset.Interface
kubeConfig *restclient.Config
eventRecorder events.EventRecorder
informerFactory informers.SharedInformerFactory
sharedDRAManager fwk.SharedDRAManager
snapshotSharedLister fwk.SharedLister
metricsRecorder *metrics.MetricAsyncRecorder
podNominator fwk.PodNominator
podActivator fwk.PodActivator
extenders []fwk.Extender
captureProfile CaptureProfile
parallelizer parallelize.Parallelizer
waitingPods *waitingPodsMap
apiDispatcher *apidispatcher.APIDispatcher
logger *klog.Logger
}
// Option for the frameworkImpl.
type Option func(*frameworkOptions)
// WithComponentConfigVersion sets the component config version to the
// KubeSchedulerConfiguration version used. The string should be the full
// scheme group/version of the external type we converted from (for example
// "kubescheduler.config.k8s.io/v1")
func WithComponentConfigVersion(componentConfigVersion string) Option {
return func(o *frameworkOptions) {
o.componentConfigVersion = componentConfigVersion
}
}
// WithClientSet sets clientSet for the scheduling frameworkImpl.
func WithClientSet(clientSet clientset.Interface) Option {
return func(o *frameworkOptions) {
o.clientSet = clientSet
}
}
// WithKubeConfig sets kubeConfig for the scheduling frameworkImpl.
func WithKubeConfig(kubeConfig *restclient.Config) Option {
return func(o *frameworkOptions) {
o.kubeConfig = kubeConfig
}
}
// WithEventRecorder sets the event recorder for the scheduling frameworkImpl.
func WithEventRecorder(recorder events.EventRecorder) Option {
return func(o *frameworkOptions) {
o.eventRecorder = recorder
}
}
// WithInformerFactory sets informer factory for the scheduling frameworkImpl.
func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option {
return func(o *frameworkOptions) {
o.informerFactory = informerFactory
}
}
// WithSharedDRAManager sets SharedDRAManager for the framework.
func WithSharedDRAManager(sharedDRAManager fwk.SharedDRAManager) Option {
return func(o *frameworkOptions) {
o.sharedDRAManager = sharedDRAManager
}
}
// WithSnapshotSharedLister sets the SharedLister of the snapshot.
func WithSnapshotSharedLister(snapshotSharedLister fwk.SharedLister) Option {
return func(o *frameworkOptions) {
o.snapshotSharedLister = snapshotSharedLister
}
}
// WithPodNominator sets podNominator for the scheduling frameworkImpl.
func WithPodNominator(nominator fwk.PodNominator) Option {
return func(o *frameworkOptions) {
o.podNominator = nominator
}
}
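// WithPodActivator sets podActivator for the scheduling frameworkImpl.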
func WithPodActivator(activator fwk.PodActivator) Option {
return func(o *frameworkOptions) {
o.podActivator = activator
}
}
// WithExtenders sets extenders for the scheduling frameworkImpl.
func WithExtenders(extenders []fwk.Extender) Option {
return func(o *frameworkOptions) {
o.extenders = extenders
}
}
// WithParallelism sets parallelism for the scheduling frameworkImpl.
func WithParallelism(parallelism int) Option {
return func(o *frameworkOptions) {
o.parallelizer = parallelize.NewParallelizer(parallelism)
}
}
// WithAPIDispatcher sets API dispatcher for the scheduling frameworkImpl.
func WithAPIDispatcher(apiDispatcher *apidispatcher.APIDispatcher) Option {
return func(o *frameworkOptions) {
o.apiDispatcher = apiDispatcher
}
}
// CaptureProfile is a callback to capture a finalized profile.
type CaptureProfile func(config.KubeSchedulerProfile)
// WithCaptureProfile sets a callback to capture the finalized profile.
func WithCaptureProfile(c CaptureProfile) Option {
return func(o *frameworkOptions) {
o.captureProfile = c
}
}
// WithMetricsRecorder sets metrics recorder for the scheduling frameworkImpl.
func WithMetricsRecorder(r *metrics.MetricAsyncRecorder) Option {
return func(o *frameworkOptions) {
o.metricsRecorder = r
}
}
// WithWaitingPods sets waitingPods for the scheduling frameworkImpl.
func WithWaitingPods(wp *waitingPodsMap) Option {
return func(o *frameworkOptions) {
o.waitingPods = wp
}
}
// WithLogger overrides the default logger from k8s.io/klog.
func WithLogger(logger klog.Logger) Option {
return func(o *frameworkOptions) {
o.logger = &logger
}
}
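// Illustrative sketch (not part of this package): the options above follow
// the functional-options pattern, so a caller composes a framework like this,
// where registry, profile, client, informerFactory, and logger are assumed to
// be constructed elsewhere:
//
//	f, err := NewFramework(ctx, registry, profile,
//		WithClientSet(client),
//		WithInformerFactory(informerFactory),
//		WithParallelism(16),
//		WithLogger(logger),
//	)
//	if err != nil {
//		// handle the configuration error
//	}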
// defaultFrameworkOptions are applied when no option corresponding to those fields is provided.
func defaultFrameworkOptions(stopCh <-chan struct{}) frameworkOptions {
return frameworkOptions{
metricsRecorder: metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh),
parallelizer: parallelize.NewParallelizer(parallelize.DefaultParallelism),
}
}
var _ framework.Framework = &frameworkImpl{}
// NewFramework initializes plugins given the configuration and the registry.
func NewFramework(ctx context.Context, r Registry, profile *config.KubeSchedulerProfile, opts ...Option) (framework.Framework, error) {
options := defaultFrameworkOptions(ctx.Done())
for _, opt := range opts {
opt(&options)
}
logger := klog.FromContext(ctx)
if options.logger != nil {
logger = *options.logger
}
f := &frameworkImpl{
registry: r,
snapshotSharedLister: options.snapshotSharedLister,
scorePluginWeight: make(map[string]int),
waitingPods: options.waitingPods,
clientSet: options.clientSet,
kubeConfig: options.kubeConfig,
eventRecorder: options.eventRecorder,
informerFactory: options.informerFactory,
sharedDRAManager: options.sharedDRAManager,
metricsRecorder: options.metricsRecorder,
extenders: options.extenders,
PodNominator: options.podNominator,
PodActivator: options.podActivator,
apiDispatcher: options.apiDispatcher,
parallelizer: options.parallelizer,
logger: logger,
}
if len(f.extenders) > 0 {
// Extender doesn't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework.
// We register a defaultEnqueueExtension to framework.ExtenderName here.
// And, in the scheduling cycle, when Extenders reject some Nodes and the pod ends up being unschedulable,
// we put framework.ExtenderName into pInfo.UnschedulablePlugins.
f.enqueueExtensions = []fwk.EnqueueExtensions{&defaultEnqueueExtension{pluginName: framework.ExtenderName}}
}
if profile == nil {
return f, nil
}
f.profileName = profile.SchedulerName
f.percentageOfNodesToScore = profile.PercentageOfNodesToScore
if profile.Plugins == nil {
return f, nil
}
// get needed plugins from config
pg := f.pluginsNeeded(profile.Plugins)
pluginConfig := make(map[string]runtime.Object, len(profile.PluginConfig))
for i := range profile.PluginConfig {
name := profile.PluginConfig[i].Name
if _, ok := pluginConfig[name]; ok {
return nil, fmt.Errorf("repeated config for plugin %s", name)
}
pluginConfig[name] = profile.PluginConfig[i].Args
}
outputProfile := config.KubeSchedulerProfile{
SchedulerName: f.profileName,
PercentageOfNodesToScore: f.percentageOfNodesToScore,
Plugins: profile.Plugins,
PluginConfig: make([]config.PluginConfig, 0, len(pg)),
}
f.pluginsMap = make(map[string]fwk.Plugin)
for name, factory := range r {
// initialize only needed plugins.
if !pg.Has(name) {
continue
}
args := pluginConfig[name]
if args != nil {
outputProfile.PluginConfig = append(outputProfile.PluginConfig, config.PluginConfig{
Name: name,
Args: args,
})
}
p, err := factory(ctx, args, f)
if err != nil {
return nil, fmt.Errorf("initializing plugin %q: %w", name, err)
}
f.pluginsMap[name] = p
f.fillEnqueueExtensions(p)
}
// initialize plugins per individual extension points
for _, e := range f.getExtensionPoints(profile.Plugins) {
if err := updatePluginList(e.slicePtr, *e.plugins, f.pluginsMap); err != nil {
return nil, err
}
}
// initialize multiPoint plugins to their expanded extension points
if len(profile.Plugins.MultiPoint.Enabled) > 0 {
if err := f.expandMultiPointPlugins(logger, profile); err != nil {
return nil, err
}
}
if len(f.queueSortPlugins) != 1 {
return nil, fmt.Errorf("only one queue sort plugin required for profile with scheduler name %q, but got %d", profile.SchedulerName, len(f.queueSortPlugins))
}
if len(f.bindPlugins) == 0 {
return nil, fmt.Errorf("at least one bind plugin is needed for profile with scheduler name %q", profile.SchedulerName)
}
if err := getScoreWeights(f, append(profile.Plugins.Score.Enabled, profile.Plugins.MultiPoint.Enabled...)); err != nil {
return nil, err
}
// Verifying the score weights again since Plugin.Name() could return a different
// value from the one used in the configuration.
for _, scorePlugin := range f.scorePlugins {
if f.scorePluginWeight[scorePlugin.Name()] == 0 {
return nil, fmt.Errorf("score plugin %q is not configured with weight", scorePlugin.Name())
}
}
if options.captureProfile != nil {
if len(outputProfile.PluginConfig) != 0 {
sort.Slice(outputProfile.PluginConfig, func(i, j int) bool {
return outputProfile.PluginConfig[i].Name < outputProfile.PluginConfig[j].Name
})
} else {
outputProfile.PluginConfig = nil
}
options.captureProfile(outputProfile)
}
// Log the enabled plugins at each extension point, taking default plugins, the given config, and MultiPoint expansion into consideration.
logger.V(2).Info("the scheduler starts to work with those plugins", "Plugins", *f.ListPlugins())
f.setInstrumentedPlugins()
return f, nil
}
// setInstrumentedPlugins initializes instrumented plugins from current plugins that frameworkImpl has.
func (f *frameworkImpl) setInstrumentedPlugins() {
// Cache metric streams for prefilter and filter plugins.
for i, pl := range f.preFilterPlugins {
f.preFilterPlugins[i] = &instrumentedPreFilterPlugin{
PreFilterPlugin: f.preFilterPlugins[i],
metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName),
}
}
for i, pl := range f.filterPlugins {
f.filterPlugins[i] = &instrumentedFilterPlugin{
FilterPlugin: f.filterPlugins[i],
metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName),
}
}
// Cache metric streams for prescore and score plugins.
for i, pl := range f.preScorePlugins {
f.preScorePlugins[i] = &instrumentedPreScorePlugin{
PreScorePlugin: f.preScorePlugins[i],
metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreScore, f.profileName),
}
}
for i, pl := range f.scorePlugins {
f.scorePlugins[i] = &instrumentedScorePlugin{
ScorePlugin: f.scorePlugins[i],
metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Score, f.profileName),
}
}
}
func (f *frameworkImpl) SetPodNominator(n fwk.PodNominator) {
f.PodNominator = n
}
func (f *frameworkImpl) SetPodActivator(a fwk.PodActivator) {
f.PodActivator = a
}
func (f *frameworkImpl) SetAPICacher(c fwk.APICacher) {
f.apiCacher = c
}
// Close closes each plugin if it implements the io.Closer interface.
func (f *frameworkImpl) Close() error {
var errs []error
for name, plugin := range f.pluginsMap {
if closer, ok := plugin.(io.Closer); ok {
err := closer.Close()
if err != nil {
errs = append(errs, fmt.Errorf("%s failed to close: %w", name, err))
// We try to close all plugins even if we got errors from some.
}
}
}
return errors.Join(errs...)
}
// getScoreWeights makes sure that, between MultiPoint-Score plugin weights and individual Score
// plugin weights, there is no overflow of MaxTotalScore.
func getScoreWeights(f *frameworkImpl, plugins []config.Plugin) error {
var totalPriority int64
scorePlugins := reflect.ValueOf(&f.scorePlugins).Elem()
pluginType := scorePlugins.Type().Elem()
for _, e := range plugins {
pg := f.pluginsMap[e.Name]
if !reflect.TypeOf(pg).Implements(pluginType) {
continue
}
// We append MultiPoint plugins to the list of Score plugins. So if this plugin has already been
// encountered, let the individual Score weight take precedence.
if _, ok := f.scorePluginWeight[e.Name]; ok {
continue
}
// A weight of zero is not permitted; plugins can be disabled explicitly
// when configured.
f.scorePluginWeight[e.Name] = int(e.Weight)
if f.scorePluginWeight[e.Name] == 0 {
f.scorePluginWeight[e.Name] = 1
}
// Checks totalPriority against MaxTotalScore to avoid overflow
if int64(f.scorePluginWeight[e.Name])*fwk.MaxNodeScore > fwk.MaxTotalScore-totalPriority {
return fmt.Errorf("total score of Score plugins could overflow")
}
totalPriority += int64(f.scorePluginWeight[e.Name]) * fwk.MaxNodeScore
}
return nil
}
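// Worked example (assumed numbers, for illustration): with fwk.MaxNodeScore =
// 100, a Score plugin with weight w can contribute at most w*100 per node, so
// the check above fails the configuration as soon as the running sum of
// w*MaxNodeScore would exceed fwk.MaxTotalScore. For instance, if
// MaxTotalScore-totalPriority is 150 and the next plugin has weight 2,
// then 2*100 = 200 > 150 and NewFramework returns the overflow error.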
type orderedSet struct {
set map[string]int
list []string
deletionCnt int
}
func newOrderedSet() *orderedSet {
return &orderedSet{set: make(map[string]int)}
}
func (os *orderedSet) insert(s string) {
if os.has(s) {
return
}
os.set[s] = len(os.list)
os.list = append(os.list, s)
}
func (os *orderedSet) has(s string) bool {
_, found := os.set[s]
return found
}
func (os *orderedSet) delete(s string) {
if i, found := os.set[s]; found {
delete(os.set, s)
os.list = append(os.list[:i-os.deletionCnt], os.list[i+1-os.deletionCnt:]...)
os.deletionCnt++
}
}
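// Illustrative note (not part of this package): set maps each string to its
// insertion index into list, and deletionCnt compensates for earlier removals
// so stale indexes still address the right slot:
//
//	os := newOrderedSet()
//	os.insert("a") // set["a"]=0, list=[a]
//	os.insert("b") // set["b"]=1, list=[a b]
//	os.insert("c") // set["c"]=2, list=[a b c]
//	os.delete("a") // list=[b c], deletionCnt=1
//	os.delete("c") // stored index 2, effective index 2-1=1, list=[b]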
func (f *frameworkImpl) expandMultiPointPlugins(logger klog.Logger, profile *config.KubeSchedulerProfile) error {
// initialize MultiPoint plugins
for _, e := range f.getExtensionPoints(profile.Plugins) {
plugins := reflect.ValueOf(e.slicePtr).Elem()
pluginType := plugins.Type().Elem()
// build enabledSet of plugins already registered via normal extension points
// to check double registration
enabledSet := newOrderedSet()
for _, plugin := range e.plugins.Enabled {
enabledSet.insert(plugin.Name)
}
disabledSet := sets.New[string]()
for _, disabledPlugin := range e.plugins.Disabled {
disabledSet.Insert(disabledPlugin.Name)
}
if disabledSet.Has("*") {
logger.V(4).Info("Skipped MultiPoint expansion because all plugins are disabled for extension point", "extension", pluginType)
continue
}
// track plugins enabled via multipoint separately from those enabled by specific extensions,
// so that we can distinguish between double-registration and explicit overrides
multiPointEnabled := newOrderedSet()
overridePlugins := newOrderedSet()
for _, ep := range profile.Plugins.MultiPoint.Enabled {
pg, ok := f.pluginsMap[ep.Name]
if !ok {
return fmt.Errorf("%s %q does not exist", pluginType.Name(), ep.Name)
}
// if this plugin doesn't implement the type for the current extension we're trying to expand, skip
if !reflect.TypeOf(pg).Implements(pluginType) {
continue
}
// a plugin that's enabled via MultiPoint can still be disabled for specific extension points
if disabledSet.Has(ep.Name) {
logger.V(4).Info("Skipped disabled plugin for extension point", "plugin", ep.Name, "extension", pluginType)
continue
}
// if this plugin has already been enabled by the specific extension point,
// the user intent is to override the default plugin or make some other explicit setting.
// Either way, discard the MultiPoint value for this plugin.
// This maintains expected behavior for overriding default plugins (see https://github.com/kubernetes/kubernetes/pull/99582)
if enabledSet.has(ep.Name) {
overridePlugins.insert(ep.Name)
logger.Info("MultiPoint plugin is explicitly re-configured; overriding", "plugin", ep.Name)
continue
}
// if this plugin is already registered via MultiPoint, then this is
// a double registration and an error in the config.
if multiPointEnabled.has(ep.Name) {
return fmt.Errorf("plugin %q already registered as %q", ep.Name, pluginType.Name())
}
// we only need to update the multipoint set, since we already have the specific extension set from above
multiPointEnabled.insert(ep.Name)
}
// Reorder plugins. Here is the expected order:
// - part 1: overridePlugins. Their order stays intact, as specified in the regular extension point.
// - part 2: multiPointEnabled - i.e., plugins defined in MultiPoint but not in the regular extension point.
// - part 3: other plugins (excluded by parts 1 & 2) in the regular extension point.
newPlugins := reflect.New(reflect.TypeOf(e.slicePtr).Elem()).Elem()
// part 1
for _, name := range slice.CopyStrings(enabledSet.list) {
if overridePlugins.has(name) {
newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name]))
enabledSet.delete(name)
}
}
// part 2
for _, name := range multiPointEnabled.list {
newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name]))
}
// part 3
for _, name := range enabledSet.list {
newPlugins = reflect.Append(newPlugins, reflect.ValueOf(f.pluginsMap[name]))
}
plugins.Set(newPlugins)
}
return nil
}
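// Illustrative sketch (assumed plugin names, for illustration): given a
// profile fragment like
//
//	plugins:
//	  multiPoint:
//	    enabled: [{name: PluginA}, {name: PluginB}]
//	  score:
//	    enabled: [{name: PluginB, weight: 5}]
//
// PluginB counts as an explicit override at Score (part 1, and its
// Score-specific weight takes precedence in getScoreWeights), PluginA is
// appended from MultiPoint (part 2), and plugins enabled only at Score would
// follow (part 3).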
func shouldHaveEnqueueExtensions(p fwk.Plugin) bool {
switch p.(type) {
// Only PreEnqueue, PreFilter, Filter, Reserve, and Permit plugins can (should) have EnqueueExtensions.
// See the comment of EnqueueExtensions for more detailed reason here.
case fwk.PreEnqueuePlugin, fwk.PreFilterPlugin, fwk.FilterPlugin, fwk.ReservePlugin, fwk.PermitPlugin:
return true
}
return false
}
func (f *frameworkImpl) fillEnqueueExtensions(p fwk.Plugin) {
if !shouldHaveEnqueueExtensions(p) {
// Ignore EnqueueExtensions from any plugin that isn't PreEnqueue, PreFilter, Filter, Reserve, or Permit.
return
}
ext, ok := p.(fwk.EnqueueExtensions)
if !ok {
// If interface EnqueueExtensions is not implemented, register the default enqueue extensions
// to the plugin because we don't know which events the plugin is interested in.
// This is to ensure backward compatibility.
f.enqueueExtensions = append(f.enqueueExtensions, &defaultEnqueueExtension{pluginName: p.Name()})
return
}
f.enqueueExtensions = append(f.enqueueExtensions, ext)
}
// defaultEnqueueExtension is used when a plugin does not implement EnqueueExtensions interface.
type defaultEnqueueExtension struct {
pluginName string
}
func (p *defaultEnqueueExtension) Name() string { return p.pluginName }
func (p *defaultEnqueueExtension) EventsToRegister(_ context.Context) ([]fwk.ClusterEventWithHint, error) {
// Need to return all specific cluster events with the framework.All action instead of the wildcard event,
// because the returned values are used to register event handlers.
// If we returned the wildcard here, it wouldn't affect the event handlers registered by the plugin,
// and some events might never be registered in the event handlers.
return framework.UnrollWildCardResource(), nil
}
func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]fwk.Plugin) error {
plugins := reflect.ValueOf(pluginList).Elem()
pluginType := plugins.Type().Elem()
set := sets.New[string]()
for _, ep := range pluginSet.Enabled {
pg, ok := pluginsMap[ep.Name]
if !ok {
return fmt.Errorf("%s %q does not exist", pluginType.Name(), ep.Name)
}
if !reflect.TypeOf(pg).Implements(pluginType) {
return fmt.Errorf("plugin %q does not extend %s plugin", ep.Name, pluginType.Name())
}
if set.Has(ep.Name) {
return fmt.Errorf("plugin %q already registered as %q", ep.Name, pluginType.Name())
}
set.Insert(ep.Name)
newPlugins := reflect.Append(plugins, reflect.ValueOf(pg))
plugins.Set(newPlugins)
}
return nil
}
// PreEnqueuePlugins returns the registered preEnqueue plugins.
func (f *frameworkImpl) PreEnqueuePlugins() []fwk.PreEnqueuePlugin {
return f.preEnqueuePlugins
}
// EnqueueExtensions returns the registered enqueue extensions.
func (f *frameworkImpl) EnqueueExtensions() []fwk.EnqueueExtensions {
return f.enqueueExtensions
}
// QueueSortFunc returns the function to sort pods in the scheduling queue.
func (f *frameworkImpl) QueueSortFunc() fwk.LessFunc {
if f == nil {
// If frameworkImpl is nil, simply keep their order unchanged.
// NOTE: this is primarily for tests.
return func(_, _ fwk.QueuedPodInfo) bool { return false }
}
if len(f.queueSortPlugins) == 0 {
panic("No QueueSort plugin is registered in the frameworkImpl.")
}
// Only one QueueSort plugin can be enabled.
return f.queueSortPlugins[0].Less
}
// RunPreFilterPlugins runs the set of configured PreFilter plugins. It returns
// *Status and its code is set to non-success if any of the plugins returns
// anything but Success/Skip.
// When it returns a Skip status, the returned PreFilterResult and other fields in the status are just ignored,
// and the coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
// If a non-success status is returned, then the scheduling cycle is aborted.
func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (_ *fwk.PreFilterResult, status *fwk.Status, _ sets.Set[string]) {
startTime := time.Now()
skipPlugins := sets.New[string]()
defer func() {
state.SetSkipFilterPlugins(skipPlugins)
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
nodes, err := f.SnapshotSharedLister().NodeInfos().List()
if err != nil {
return nil, fwk.AsStatus(fmt.Errorf("getting all nodes: %w", err)), nil
}
var result *fwk.PreFilterResult
pluginsWithNodes := sets.New[string]()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreFilter")
}
var returnStatus *fwk.Status
for _, pl := range f.preFilterPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
r, s := f.runPreFilterPlugin(ctx, pl, state, pod, nodes)
if s.IsSkip() {
skipPlugins.Insert(pl.Name())
continue
}
if !s.IsSuccess() {
s.SetPlugin(pl.Name())
if s.Code() == fwk.UnschedulableAndUnresolvable {
// In this case, the preemption shouldn't happen in this scheduling cycle.
// So, no need to execute all PreFilter.
return nil, s, nil
}
if s.Code() == fwk.Unschedulable {
// In this case, the preemption should happen later in this scheduling cycle.
// So we need to execute all PreFilter.
// https://github.com/kubernetes/kubernetes/issues/119770
returnStatus = s
continue
}
return nil, fwk.AsStatus(fmt.Errorf("running PreFilter plugin %q: %w", pl.Name(), s.AsError())).WithPlugin(pl.Name()), nil
}
if !r.AllNodes() {
pluginsWithNodes.Insert(pl.Name())
}
result = result.Merge(r)
if !result.AllNodes() && len(result.NodeNames) == 0 {
msg := fmt.Sprintf("node(s) didn't satisfy plugin(s) %v simultaneously", sets.List(pluginsWithNodes))
if len(pluginsWithNodes) == 1 {
msg = fmt.Sprintf("node(s) didn't satisfy plugin %v", sets.List(pluginsWithNodes)[0])
}
// When PreFilterResult filters out Nodes, the framework considers Nodes that are filtered out as getting "UnschedulableAndUnresolvable".
return result, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, msg), pluginsWithNodes
}
}
return result, returnStatus, pluginsWithNodes
}
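// Illustrative note (not part of this package): result.Merge intersects the
// node sets returned by the PreFilter plugins, so the pod may only proceed to
// Filter on nodes every plugin agreed on. With hypothetical plugin outputs:
//
//	// plugin X: NodeNames = {node-1, node-2}
//	// plugin Y: NodeNames = {node-2, node-3}
//	// merged:   NodeNames = {node-2}
//
// If the intersection ever becomes empty, the loop above returns
// UnschedulableAndUnresolvable immediately.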
func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilter(ctx, state, pod, nodes)
}
startTime := time.Now()
result, status := pl.PreFilter(ctx, state, pod, nodes)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilter, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return result, status
}
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
func (f *frameworkImpl) RunPreFilterExtensionAddPod(
ctx context.Context,
state fwk.CycleState,
podToSchedule *v1.Pod,
podInfoToAdd fwk.PodInfo,
nodeInfo fwk.NodeInfo,
) (status *fwk.Status) {
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreFilterExtension")
}
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil || state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runPreFilterExtensionAddPod(ctx, pl, state, podToSchedule, podInfoToAdd, nodeInfo)
if !status.IsSuccess() {
err := status.AsError()
logger.Error(err, "Plugin failed", "pod", klog.KObj(podToSchedule), "node", klog.KObj(nodeInfo.Node()), "operation", "addPod", "plugin", pl.Name())
return fwk.AsStatus(fmt.Errorf("running AddPod on PreFilter plugin %q: %w", pl.Name(), err))
}
}
return nil
}
func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo)
}
startTime := time.Now()
status := pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilterExtensionAddPod, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
func (f *frameworkImpl) RunPreFilterExtensionRemovePod(
ctx context.Context,
state fwk.CycleState,
podToSchedule *v1.Pod,
podInfoToRemove fwk.PodInfo,
nodeInfo fwk.NodeInfo,
) (status *fwk.Status) {
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreFilterExtension")
}
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil || state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runPreFilterExtensionRemovePod(ctx, pl, state, podToSchedule, podInfoToRemove, nodeInfo)
if !status.IsSuccess() {
err := status.AsError()
var node *v1.Node
if nodeInfo != nil {
node = nodeInfo.Node()
}
logger.Error(err, "Plugin failed", "node", klog.KObj(node), "operation", "removePod", "plugin", pl.Name(), "pod", klog.KObj(podToSchedule))
return fwk.AsStatus(fmt.Errorf("running RemovePod on PreFilter plugin %q: %w", pl.Name(), err))
}
}
return nil
}
func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl fwk.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove fwk.PodInfo, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo)
}
startTime := time.Now()
status := pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreFilterExtensionRemovePod, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
// RunFilterPlugins runs the set of configured Filter plugins for the pod on
// the given node. If any of these plugins doesn't return "Success", the
// given node is not suitable for running the pod.
// Meanwhile, the failure message and status are set for the given node.
func (f *frameworkImpl) RunFilterPlugins(
ctx context.Context,
state fwk.CycleState,
pod *v1.Pod,
nodeInfo fwk.NodeInfo,
) *fwk.Status {
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Filter")
}
for _, pl := range f.filterPlugins {
if state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() {
if !status.IsRejected() {
// Filter plugins are not supposed to return any status other than
// Success or Unschedulable.
status = fwk.AsStatus(fmt.Errorf("running %q filter plugin: %w", pl.Name(), status.AsError()))
}
status.SetPlugin(pl.Name())
return status
}
}
return nil
}
func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl fwk.FilterPlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Filter(ctx, state, pod, nodeInfo)
}
startTime := time.Now()
status := pl.Filter(ctx, state, pod, nodeInfo)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Filter, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
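// For reference, the Filter contract enforced above: a Filter plugin returns nil
// (Success) when the node fits the pod, an Unschedulable status when it doesn't,
// and anything else is treated as an internal error. A minimal sketch with a
// hypothetical plugin:
//
//	type nodeNameFilter struct{}
//
//	func (p *nodeNameFilter) Name() string { return "HypotheticalNodeName" }
//
//	func (p *nodeNameFilter) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
//		if pod.Spec.NodeName == "" || pod.Spec.NodeName == nodeInfo.Node().Name {
//			return nil // Success: the node fits.
//		}
//		return fwk.NewStatus(fwk.Unschedulable, "node didn't match the requested node name")
//	}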
// RunPostFilterPlugins runs the set of configured PostFilter plugins until the first
// Success, Error, or UnschedulableAndUnresolvable status is returned; otherwise it continues executing all plugins.
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (_ *fwk.PostFilterResult, status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PostFilter")
}
// `result` records the last meaningful (non-noop) PostFilterResult.
var result *fwk.PostFilterResult
var reasons []string
var rejectorPlugin string
for _, pl := range f.postFilterPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
r, s := f.runPostFilterPlugin(ctx, pl, state, pod, filteredNodeStatusMap)
if s.IsSuccess() {
return r, s
} else if s.Code() == fwk.UnschedulableAndUnresolvable {
return r, s.WithPlugin(pl.Name())
} else if !s.IsRejected() {
// Any status other than Success, Unschedulable or UnschedulableAndUnresolvable is Error.
return nil, fwk.AsStatus(s.AsError()).WithPlugin(pl.Name())
} else if r != nil && r.Mode() != fwk.ModeNoop {
result = r
}
reasons = append(reasons, s.Reasons()...)
// Record the first failed plugin unless we proved that
// a later one is more relevant.
if len(rejectorPlugin) == 0 {
rejectorPlugin = pl.Name()
}
}
return result, fwk.NewStatus(fwk.Unschedulable, reasons...).WithPlugin(rejectorPlugin)
}
func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl fwk.PostFilterPlugin, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap fwk.NodeToStatusReader) (*fwk.PostFilterResult, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PostFilter(ctx, state, pod, filteredNodeStatusMap)
}
startTime := time.Now()
r, s := pl.PostFilter(ctx, state, pod, filteredNodeStatusMap)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PostFilter, pl.Name(), s.Code().String(), metrics.SinceInSeconds(startTime))
return r, s
}
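// Caller-side sketch of the PostFilter semantics implemented above (illustrative;
// statusMap stands for whatever fwk.NodeToStatusReader the caller holds):
//
//	result, status := f.RunPostFilterPlugins(ctx, state, pod, statusMap)
//	switch {
//	case status.IsSuccess():
//		// Typically preemption succeeded; result may carry a nominating decision.
//	case status.Code() == fwk.UnschedulableAndUnresolvable:
//		// No plugin can help; retrying on the same cluster state is pointless.
//	case status.IsRejected():
//		// Unschedulable for now; a later cluster event may make the pod schedulable.
//	default:
//		// Internal error returned by some plugin.
//	}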
// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins
// for nominated pod on the given node.
// This function is called from two different places: Schedule and Preempt.
// When it is called from Schedule, we want to test whether the pod is
// schedulable on the node with all the existing pods on the node plus higher
// and equal priority pods nominated to run on the node.
// When it is called from Preempt, we should remove the victims of preemption
// and add the nominated pods. Removal of the victims is done by
// SelectVictimsOnNode(). Preempt removes victims from PreFilter state and
// NodeInfo before calling this function.
func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, state fwk.CycleState, pod *v1.Pod, info fwk.NodeInfo) *fwk.Status {
var status *fwk.Status
podsAdded := false
// We run filters twice in some cases. If the node has nominated pods of greater
// or equal priority, we run the filters with those pods added to the PreFilter state and nodeInfo.
// If all filters succeed in this pass, we run them again when these
// nominated pods are not added. This second pass is necessary because some
// filters such as inter-pod affinity may not pass without the nominated pods.
// If there are no nominated pods for the node or if the first run of the
// filters fail, we don't run the second pass.
// We consider only equal or higher priority pods in the first pass, because
// the current "pod" must yield to them and must not take a space opened
// for running them. It is OK if the current "pod" takes resources freed for
// lower priority pods.
// Requiring that the new pod is schedulable in both circumstances ensures that
// we are making a conservative decision: filters like resources and inter-pod
// anti-affinity are more likely to fail when the nominated pods are treated
// as running, while filters like pod affinity are more likely to fail when
// the nominated pods are treated as not running. We can't just assume the
// nominated pods are running because they are not running right now and in fact,
// they may end up getting scheduled to a different node.
logger := klog.FromContext(ctx)
logger = klog.LoggerWithName(logger, "FilterWithNominatedPods")
ctx = klog.NewContext(ctx, logger)
for i := 0; i < 2; i++ {
stateToUse := state
nodeInfoToUse := info
if i == 0 {
var err error
podsAdded, stateToUse, nodeInfoToUse, err = addGENominatedPods(ctx, f, pod, state, info)
if err != nil {
return fwk.AsStatus(err)
}
} else if !podsAdded || !status.IsSuccess() {
break
}
status = f.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse)
if !status.IsSuccess() && !status.IsRejected() {
return status
}
}
return status
}
// addGENominatedPods adds pods with equal or greater priority which are nominated
// to run on the node. It returns 1) whether any pod was added, 2) the augmented cycleState,
// 3) the augmented nodeInfo, and 4) an error, if any.
func addGENominatedPods(ctx context.Context, fh fwk.Handle, pod *v1.Pod, state fwk.CycleState, nodeInfo fwk.NodeInfo) (bool, fwk.CycleState, fwk.NodeInfo, error) {
if fh == nil {
// This may happen only in tests.
return false, state, nodeInfo, nil
}
nominatedPodInfos := fh.NominatedPodsForNode(nodeInfo.Node().Name)
if len(nominatedPodInfos) == 0 {
return false, state, nodeInfo, nil
}
nodeInfoOut := nodeInfo.Snapshot()
stateOut := state.Clone()
podsAdded := false
for _, pi := range nominatedPodInfos {
if corev1.PodPriority(pi.GetPod()) >= corev1.PodPriority(pod) && pi.GetPod().UID != pod.UID {
nodeInfoOut.AddPodInfo(pi)
status := fh.RunPreFilterExtensionAddPod(ctx, stateOut, pod, pi, nodeInfoOut)
if !status.IsSuccess() {
return false, state, nodeInfo, status.AsError()
}
podsAdded = true
}
}
return podsAdded, stateOut, nodeInfoOut, nil
}
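// A worked example of the priority gate above, with hypothetical pods: if pod P
// (priority 100) is being scheduled and pods Q (priority 200) and R (priority 50)
// are nominated for the node, only Q is added to the snapshot, because
//
//	corev1.PodPriority(Q) = 200 >= corev1.PodPriority(P) = 100 // added
//	corev1.PodPriority(R) =  50 <  corev1.PodPriority(P) = 100 // ignored
//
// P then has to pass the filters both with and without Q (the two-pass loop in
// RunFilterPluginsWithNominatedPods) before the node is considered feasible.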
// RunPreScorePlugins runs the set of configured pre-score plugins. If any
// of these plugins returns any status other than Success/Skip, the given pod is rejected.
// When a plugin returns a Skip status, the other fields in the status are ignored,
// and the coupled Score plugin is skipped in this scheduling cycle.
func (f *frameworkImpl) RunPreScorePlugins(
ctx context.Context,
state fwk.CycleState,
pod *v1.Pod,
nodes []fwk.NodeInfo,
) (status *fwk.Status) {
startTime := time.Now()
skipPlugins := sets.New[string]()
defer func() {
state.SetSkipScorePlugins(skipPlugins)
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreScore, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreScore")
}
for _, pl := range f.preScorePlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runPreScorePlugin(ctx, pl, state, pod, nodes)
if status.IsSkip() {
skipPlugins.Insert(pl.Name())
continue
}
if !status.IsSuccess() {
return fwk.AsStatus(fmt.Errorf("running PreScore plugin %q: %w", pl.Name(), status.AsError()))
}
}
return nil
}
func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl fwk.PreScorePlugin, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreScore(ctx, state, pod, nodes)
}
startTime := time.Now()
status := pl.PreScore(ctx, state, pod, nodes)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreScore, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
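// Sketch of the PreScore Skip contract described above, with a hypothetical
// plugin that has nothing to precompute for pods without affinity:
//
//	func (p *affinityScorer) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
//		if pod.Spec.Affinity == nil {
//			// Nothing to score; the coupled Score plugin is skipped this cycle.
//			return fwk.NewStatus(fwk.Skip)
//		}
//		// ... precompute and store state for the Score phase ...
//		return nil
//	}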
// RunScorePlugins runs the set of configured scoring plugins.
// It returns a list that stores the scores from each plugin and the total score for each Node.
// It also returns *Status, which is set to non-success if any of the plugins returns
// a non-success status.
func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (ns []fwk.NodePluginScores, status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Score, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
allNodePluginScores := make([]fwk.NodePluginScores, len(nodes))
numPlugins := len(f.scorePlugins)
plugins := make([]fwk.ScorePlugin, 0, numPlugins)
pluginToNodeScores := make(map[string]fwk.NodeScoreList, numPlugins)
for _, pl := range f.scorePlugins {
if state.GetSkipScorePlugins().Has(pl.Name()) {
continue
}
plugins = append(plugins, pl)
pluginToNodeScores[pl.Name()] = make(fwk.NodeScoreList, len(nodes))
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
errCh := parallelize.NewErrorChannel()
if len(plugins) > 0 {
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Score")
}
// Run Score method for each node in parallel.
f.Parallelizer().Until(ctx, len(nodes), func(index int) {
nodeInfo := nodes[index]
nodeName := nodeInfo.Node().Name
logger := logger
if verboseLogs {
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
for _, pl := range plugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
s, status := f.runScorePlugin(ctx, pl, state, pod, nodeInfo)
if !status.IsSuccess() {
err := fmt.Errorf("plugin %q failed with: %w", pl.Name(), status.AsError())
errCh.SendErrorWithCancel(err, cancel)
return
}
pluginToNodeScores[pl.Name()][index] = fwk.NodeScore{
Name: nodeName,
Score: s,
}
}
}, metrics.Score)
if err := errCh.ReceiveError(); err != nil {
return nil, fwk.AsStatus(fmt.Errorf("running Score plugins: %w", err))
}
}
// Run NormalizeScore method for each ScorePlugin in parallel.
f.Parallelizer().Until(ctx, len(plugins), func(index int) {
pl := plugins[index]
if pl.ScoreExtensions() == nil {
return
}
nodeScoreList := pluginToNodeScores[pl.Name()]
status := f.runScoreExtension(ctx, pl, state, pod, nodeScoreList)
if !status.IsSuccess() {
err := fmt.Errorf("plugin %q failed with: %w", pl.Name(), status.AsError())
errCh.SendErrorWithCancel(err, cancel)
return
}
}, metrics.Score)
if err := errCh.ReceiveError(); err != nil {
return nil, fwk.AsStatus(fmt.Errorf("running Normalize on Score plugins: %w", err))
}
// Apply score weight for each ScorePlugin in parallel,
// and then, build allNodePluginScores.
f.Parallelizer().Until(ctx, len(nodes), func(index int) {
nodePluginScores := fwk.NodePluginScores{
Name: nodes[index].Node().Name,
Scores: make([]fwk.PluginScore, len(plugins)),
}
for i, pl := range plugins {
weight := f.scorePluginWeight[pl.Name()]
nodeScoreList := pluginToNodeScores[pl.Name()]
score := nodeScoreList[index].Score
if score > fwk.MaxNodeScore || score < fwk.MinNodeScore {
err := fmt.Errorf("plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), score, fwk.MinNodeScore, fwk.MaxNodeScore)
errCh.SendErrorWithCancel(err, cancel)
return
}
weightedScore := score * int64(weight)
nodePluginScores.Scores[i] = fwk.PluginScore{
Name: pl.Name(),
Score: weightedScore,
}
nodePluginScores.TotalScore += weightedScore
}
allNodePluginScores[index] = nodePluginScores
}, metrics.Score)
if err := errCh.ReceiveError(); err != nil {
return nil, fwk.AsStatus(fmt.Errorf("applying score defaultWeights on Score plugins: %w", err))
}
return allNodePluginScores, nil
}
func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl fwk.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.Score(ctx, state, pod, nodeInfo)
}
startTime := time.Now()
s, status := pl.Score(ctx, state, pod, nodeInfo)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Score, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return s, status
}
func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl fwk.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeScoreList fwk.NodeScoreList) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList)
}
startTime := time.Now()
status := pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList)
f.metricsRecorder.ObservePluginDurationAsync(metrics.ScoreExtensionNormalize, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
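// Worked example of the scoring arithmetic above: with plugin A at weight 2 and
// plugin B at weight 1, normalized scores A=50 and B=80 on a node yield a
// TotalScore of 50*2 + 80*1 = 180. NormalizeScore is what brings raw scores into
// [MinNodeScore, MaxNodeScore] beforehand; a minimal max-based normalizer
// (hypothetical plugin, assuming non-negative raw scores) could look like:
//
//	func (p *affinityScorer) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores fwk.NodeScoreList) *fwk.Status {
//		var highest int64
//		for _, s := range scores {
//			if s.Score > highest {
//				highest = s.Score
//			}
//		}
//		if highest == 0 {
//			return nil // all scores are zero and already in range
//		}
//		for i := range scores {
//			scores[i].Score = scores[i].Score * fwk.MaxNodeScore / highest
//		}
//		return nil
//	}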
// RunPreBindPlugins runs the set of configured prebind plugins. It returns a
// non-success status if any of the plugins returns a status other than Success.
// The returned status contains the rejection message or the error that occurred in the plugin.
func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreBind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreBind")
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
for _, pl := range f.preBindPlugins {
if state.GetSkipPreBindPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runPreBindPlugin(ctx, pl, state, pod, nodeName)
if !status.IsSuccess() {
if status.IsRejected() {
logger.V(4).Info("Pod rejected by PreBind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
status.SetPlugin(pl.Name())
return status
}
err := status.AsError()
logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
return fwk.AsStatus(fmt.Errorf("running PreBind plugin %q: %w", pl.Name(), err))
}
}
return nil
}
func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl fwk.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreBind(ctx, state, pod, nodeName)
}
startTime := time.Now()
status := pl.PreBind(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreBind, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
// RunPreBindPreFlights runs the set of configured PreBindPreFlight functions from PreBind plugins.
// The return value is:
// - Success: one or more plugins returned Success, meaning some PreBind plugins will run for this pod.
// - Skip: all plugins returned Skip.
// - Error: any plugin returned an error.
func (f *frameworkImpl) RunPreBindPreFlights(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreBindPreFlight, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PreBindPreFlight")
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
skipPlugins := sets.New[string]()
returningStatus := fwk.NewStatus(fwk.Skip)
for _, pl := range f.preBindPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runPreBindPreFlight(ctx, pl, state, pod, nodeName)
switch {
case status.Code() == fwk.Error:
err := status.AsError()
logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
return fwk.AsStatus(fmt.Errorf("running PreBindPreFlight %q: %w", pl.Name(), err))
case status.IsSuccess():
// We return success when one or more plugins return success.
returningStatus = nil
case status.IsSkip():
skipPlugins.Insert(pl.Name())
default:
// Other statuses are unexpected
return fwk.AsStatus(fmt.Errorf("PreBindPreFlight %s returned %q, which is unsupported. It is supposed to return Success, Skip, or Error status", pl.Name(), status.Code()))
}
}
state.SetSkipPreBindPlugins(skipPlugins)
return returningStatus
}
func (f *frameworkImpl) runPreBindPreFlight(ctx context.Context, pl fwk.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreBindPreFlight(ctx, state, pod, nodeName)
}
startTime := time.Now()
status := pl.PreBindPreFlight(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PreBindPreFlight, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
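// Sketch of how a PreBind plugin pairs PreBindPreFlight with PreBind under the
// contract above (hypothetical plugin): PreFlight cheaply predicts whether
// PreBind will do anything, so the framework can skip PreBind entirely.
//
//	func (p *volumeBinder) PreBindPreFlight(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
//		if len(pod.Spec.Volumes) == 0 {
//			return fwk.NewStatus(fwk.Skip) // PreBind has no work for this pod
//		}
//		return nil // Success: PreBind will run
//	}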
// RunBindPlugins runs the set of configured bind plugins until one returns a non `Skip` status.
func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Bind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
if len(f.bindPlugins) == 0 {
return fwk.NewStatus(fwk.Skip, "")
}
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Bind")
}
for _, pl := range f.bindPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runBindPlugin(ctx, pl, state, pod, nodeName)
if status.IsSkip() {
continue
}
if !status.IsSuccess() {
if status.IsRejected() {
logger.V(4).Info("Pod rejected by Bind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message())
status.SetPlugin(pl.Name())
return status
}
err := status.AsError()
logger.Error(err, "Plugin Failed", "plugin", pl.Name(), "pod", klog.KObj(pod), "node", nodeName)
return fwk.AsStatus(fmt.Errorf("running Bind plugin %q: %w", pl.Name(), err))
}
return status
}
return status
}
func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp fwk.BindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return bp.Bind(ctx, state, pod, nodeName)
}
startTime := time.Now()
status := bp.Bind(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Bind, bp.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
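// Sketch of the Skip chain above: each Bind plugin may decline a pod with Skip,
// handing it to the next plugin, and the first non-Skip status ends the chain.
// A hypothetical custom binder that only handles labeled pods:
//
//	func (b *customBinder) Bind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
//		if _, ok := pod.Labels["example.com/custom-bind"]; !ok {
//			return fwk.NewStatus(fwk.Skip) // let the default binder handle it
//		}
//		// ... issue the custom binding call for pod on nodeName ...
//		return nil
//	}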
// RunPostBindPlugins runs the set of configured postbind plugins.
func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostBind, fwk.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "PostBind")
}
for _, pl := range f.postBindPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
f.runPostBindPlugin(ctx, pl, state, pod, nodeName)
}
}
func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl fwk.PostBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.PostBind(ctx, state, pod, nodeName)
return
}
startTime := time.Now()
pl.PostBind(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.PostBind, pl.Name(), fwk.Success.String(), metrics.SinceInSeconds(startTime))
}
// RunReservePluginsReserve runs the Reserve method in the set of configured
// reserve plugins. If any of these plugins returns an error, it does not
// continue running the remaining ones and returns the error. In such a case,
// the pod will not be scheduled and the caller will be expected to call
// RunReservePluginsUnreserve.
func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Reserve, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Reserve")
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
for _, pl := range f.reservePlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status = f.runReservePluginReserve(ctx, pl, state, pod, nodeName)
if !status.IsSuccess() {
if status.IsRejected() {
logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message())
status.SetPlugin(pl.Name())
return status
}
err := status.AsError()
logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod))
return fwk.AsStatus(fmt.Errorf("running Reserve plugin %q: %w", pl.Name(), err))
}
}
return nil
}
func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl fwk.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *fwk.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Reserve(ctx, state, pod, nodeName)
}
startTime := time.Now()
status := pl.Reserve(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Reserve, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status
}
// RunReservePluginsUnreserve runs the Unreserve method in the set of
// configured reserve plugins.
func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Unreserve, fwk.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
// Execute the Unreserve operation of each reserve plugin in the
// *reverse* order in which the Reserve operation was executed.
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Unreserve")
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
for i := len(f.reservePlugins) - 1; i >= 0; i-- {
pl := f.reservePlugins[i]
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
f.runReservePluginUnreserve(ctx, pl, state, pod, nodeName)
}
}
func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl fwk.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.Unreserve(ctx, state, pod, nodeName)
return
}
startTime := time.Now()
pl.Unreserve(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Unreserve, pl.Name(), fwk.Success.String(), metrics.SinceInSeconds(startTime))
}
// RunPermitPlugins runs the set of configured permit plugins. If any of these
// plugins returns a status other than "Success" or "Wait", it does not continue
// running the remaining plugins and returns an error. Otherwise, if any of the
// plugins returns "Wait", then this function will create and add waiting pod
// to a map of currently waiting pods and return status with "Wait" code.
// Pod will remain waiting pod for the minimum duration returned by the permit plugins.
func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *fwk.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Permit, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
pluginsWaitTime := make(map[string]time.Duration)
statusCode := fwk.Success
logger := klog.FromContext(ctx)
verboseLogs := logger.V(4).Enabled()
if verboseLogs {
logger = klog.LoggerWithName(logger, "Permit")
logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
}
for _, pl := range f.permitPlugins {
ctx := ctx
if verboseLogs {
logger := klog.LoggerWithName(logger, pl.Name())
ctx = klog.NewContext(ctx, logger)
}
status, timeout := f.runPermitPlugin(ctx, pl, state, pod, nodeName)
if !status.IsSuccess() {
if status.IsRejected() {
logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message())
return status.WithPlugin(pl.Name())
}
if status.IsWait() {
// Not allowed to be greater than maxTimeout.
if timeout > maxTimeout {
timeout = maxTimeout
}
pluginsWaitTime[pl.Name()] = timeout
statusCode = fwk.Wait
} else {
err := status.AsError()
logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod))
return fwk.AsStatus(fmt.Errorf("running Permit plugin %q: %w", pl.Name(), err)).WithPlugin(pl.Name())
}
}
}
if statusCode == fwk.Wait {
waitingPod := newWaitingPod(pod, pluginsWaitTime)
f.waitingPods.add(waitingPod)
msg := fmt.Sprintf("one or more plugins asked to wait and no plugin rejected pod %q", pod.Name)
logger.V(4).Info("One or more plugins asked to wait and no plugin rejected pod", "pod", klog.KObj(pod))
return fwk.NewStatus(fwk.Wait, msg)
}
return nil
}
func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl fwk.PermitPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) (*fwk.Status, time.Duration) {
if !state.ShouldRecordPluginMetrics() {
return pl.Permit(ctx, state, pod, nodeName)
}
startTime := time.Now()
status, timeout := pl.Permit(ctx, state, pod, nodeName)
f.metricsRecorder.ObservePluginDurationAsync(metrics.Permit, pl.Name(), status.Code().String(), metrics.SinceInSeconds(startTime))
return status, timeout
}
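// Sketch of a Permit plugin that asks the framework to wait, matching the Wait
// handling above (hypothetical plugin and helper; the 10s timeout is arbitrary
// and is capped at maxTimeout by RunPermitPlugins):
//
//	func (p *gangPermit) Permit(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (*fwk.Status, time.Duration) {
//		if gangComplete(pod) { // hypothetical helper
//			return nil, 0 // Success: no waiting needed
//		}
//		return fwk.NewStatus(fwk.Wait), 10 * time.Second
//	}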
// WillWaitOnPermit returns whether the given pod is currently a waiting pod,
// i.e. whether WaitOnPermit would block for it.
func (f *frameworkImpl) WillWaitOnPermit(ctx context.Context, pod *v1.Pod) bool {
return f.waitingPods.get(pod.UID) != nil
}
// WaitOnPermit will block, if the pod is a waiting pod, until the waiting pod is rejected or allowed.
func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *fwk.Status {
waitingPod := f.waitingPods.get(pod.UID)
if waitingPod == nil {
return nil
}
defer f.waitingPods.remove(pod.UID)
logger := klog.FromContext(ctx)
logger.V(4).Info("Pod waiting on permit", "pod", klog.KObj(pod))
startTime := time.Now()
s := <-waitingPod.s
metrics.PermitWaitDuration.WithLabelValues(s.Code().String()).Observe(metrics.SinceInSeconds(startTime))
if !s.IsSuccess() {
if s.IsRejected() {
logger.V(4).Info("Pod rejected while waiting on permit", "pod", klog.KObj(pod), "status", s.Message())
return s
}
err := s.AsError()
logger.Error(err, "Failed waiting on permit for pod", "pod", klog.KObj(pod))
return fwk.AsStatus(fmt.Errorf("waiting on permit for pod: %w", err)).WithPlugin(s.Plugin())
}
return nil
}
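// While WaitOnPermit blocks on the channel above, some other goroutine (for
// example, a later scheduling cycle of the same Permit plugin) resolves the pod
// through the waiting-pod API. An illustrative sketch using the accessors
// defined below (the plugin name is hypothetical):
//
//	if wp := f.GetWaitingPod(uid); wp != nil {
//		wp.Allow("HypotheticalGangPermit") // unblocks WaitOnPermit with Success
//		// or: wp.Reject("HypotheticalGangPermit", "gang disbanded")
//	}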
// SnapshotSharedLister returns the scheduler's SharedLister of the latest NodeInfo
// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains
// unchanged until a pod finishes "Reserve". There is no guarantee that the information
// remains unchanged after "Reserve".
func (f *frameworkImpl) SnapshotSharedLister() fwk.SharedLister {
return f.snapshotSharedLister
}
// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
func (f *frameworkImpl) IterateOverWaitingPods(callback func(fwk.WaitingPod)) {
f.waitingPods.iterate(callback)
}
// GetWaitingPod returns a reference to a WaitingPod given its UID.
func (f *frameworkImpl) GetWaitingPod(uid types.UID) fwk.WaitingPod {
if wp := f.waitingPods.get(uid); wp != nil {
return wp
}
return nil // Returning nil instead of *waitingPod(nil).
}
// RejectWaitingPod rejects a WaitingPod given its UID.
// The returned value indicates if the given pod is waiting or not.
func (f *frameworkImpl) RejectWaitingPod(uid types.UID) bool {
if waitingPod := f.waitingPods.get(uid); waitingPod != nil {
waitingPod.Reject("", "removed")
return true
}
return false
}
// HasFilterPlugins returns true if at least one filter plugin is defined.
func (f *frameworkImpl) HasFilterPlugins() bool {
return len(f.filterPlugins) > 0
}
// HasPostFilterPlugins returns true if at least one postFilter plugin is defined.
func (f *frameworkImpl) HasPostFilterPlugins() bool {
return len(f.postFilterPlugins) > 0
}
// HasScorePlugins returns true if at least one score plugin is defined.
func (f *frameworkImpl) HasScorePlugins() bool {
return len(f.scorePlugins) > 0
}
// ListPlugins returns a map of extension point name to plugin names configured at each extension
// point. Returns nil if no plugins were configured.
func (f *frameworkImpl) ListPlugins() *config.Plugins {
m := config.Plugins{}
for _, e := range f.getExtensionPoints(&m) {
plugins := reflect.ValueOf(e.slicePtr).Elem()
extName := plugins.Type().Elem().Name()
var cfgs []config.Plugin
for i := 0; i < plugins.Len(); i++ {
name := plugins.Index(i).Interface().(fwk.Plugin).Name()
p := config.Plugin{Name: name}
if extName == "ScorePlugin" {
// Weights apply only to score plugins.
p.Weight = int32(f.scorePluginWeight[name])
}
cfgs = append(cfgs, p)
}
if len(cfgs) > 0 {
e.plugins.Enabled = cfgs
}
}
return &m
}
// ClientSet returns a kubernetes clientset.
func (f *frameworkImpl) ClientSet() clientset.Interface {
return f.clientSet
}
// KubeConfig returns a kubernetes config.
func (f *frameworkImpl) KubeConfig() *restclient.Config {
return f.kubeConfig
}
// EventRecorder returns an event recorder.
func (f *frameworkImpl) EventRecorder() events.EventRecorder {
return f.eventRecorder
}
// SharedInformerFactory returns a shared informer factory.
func (f *frameworkImpl) SharedInformerFactory() informers.SharedInformerFactory {
return f.informerFactory
}
// SharedDRAManager returns the SharedDRAManager of the framework.
func (f *frameworkImpl) SharedDRAManager() fwk.SharedDRAManager {
return f.sharedDRAManager
}
func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.Set[string] {
pgSet := sets.Set[string]{}
if plugins == nil {
return pgSet
}
find := func(pgs *config.PluginSet) {
for _, pg := range pgs.Enabled {
pgSet.Insert(pg.Name)
}
}
for _, e := range f.getExtensionPoints(plugins) {
find(e.plugins)
}
// Parse MultiPoint separately since it is not returned by f.getExtensionPoints()
find(&plugins.MultiPoint)
return pgSet
}
// ProfileName returns the profile name associated with this framework.
func (f *frameworkImpl) ProfileName() string {
return f.profileName
}
// PercentageOfNodesToScore returns the percentageOfNodesToScore associated with a profile.
func (f *frameworkImpl) PercentageOfNodesToScore() *int32 {
return f.percentageOfNodesToScore
}
// Parallelizer returns a parallelizer holding parallelism for scheduler.
func (f *frameworkImpl) Parallelizer() fwk.Parallelizer {
return f.parallelizer
}
// APIDispatcher returns an apiDispatcher that can be used to dispatch API calls.
// This requires SchedulerAsyncAPICalls feature gate to be enabled.
func (f *frameworkImpl) APIDispatcher() fwk.APIDispatcher {
if f.apiDispatcher == nil {
return nil
}
return f.apiDispatcher
}
// APICacher returns an apiCacher that can be used to dispatch API calls through scheduler's cache
// instead of directly using APIDispatcher().
// This requires SchedulerAsyncAPICalls feature gate to be enabled.
func (f *frameworkImpl) APICacher() fwk.APICacher {
if f.apiCacher == nil {
return nil
}
return f.apiCacher
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"context"
v1 "k8s.io/api/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
fwk "k8s.io/kube-scheduler/framework"
)
type instrumentedFilterPlugin struct {
fwk.FilterPlugin
metric compbasemetrics.CounterMetric
}
var _ fwk.FilterPlugin = &instrumentedFilterPlugin{}
func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
p.metric.Inc()
return p.FilterPlugin.Filter(ctx, state, pod, nodeInfo)
}
type instrumentedPreFilterPlugin struct {
fwk.PreFilterPlugin
metric compbasemetrics.CounterMetric
}
var _ fwk.PreFilterPlugin = &instrumentedPreFilterPlugin{}
func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) (*fwk.PreFilterResult, *fwk.Status) {
result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod, nodes)
if !status.IsSkip() {
p.metric.Inc()
}
return result, status
}
type instrumentedPreScorePlugin struct {
fwk.PreScorePlugin
metric compbasemetrics.CounterMetric
}
var _ fwk.PreScorePlugin = &instrumentedPreScorePlugin{}
func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status {
status := p.PreScorePlugin.PreScore(ctx, state, pod, nodes)
if !status.IsSkip() {
p.metric.Inc()
}
return status
}
type instrumentedScorePlugin struct {
fwk.ScorePlugin
metric compbasemetrics.CounterMetric
}
var _ fwk.ScorePlugin = &instrumentedScorePlugin{}
func (p *instrumentedScorePlugin) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) (int64, *fwk.Status) {
p.metric.Inc()
return p.ScorePlugin.Score(ctx, state, pod, nodeInfo)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
fwk "k8s.io/kube-scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"sigs.k8s.io/yaml"
)
// PluginFactory is a function that builds a plugin.
type PluginFactory = func(ctx context.Context, configuration runtime.Object, f fwk.Handle) (fwk.Plugin, error)
// PluginFactoryWithFts is a function that builds a plugin with certain feature gates.
type PluginFactoryWithFts[T fwk.Plugin] func(context.Context, runtime.Object, fwk.Handle, plfeature.Features) (T, error)
// FactoryAdapter can be used to inject feature gates for a plugin that needs
// them when the caller expects the older PluginFactory method.
func FactoryAdapter[T fwk.Plugin](fts plfeature.Features, withFts PluginFactoryWithFts[T]) PluginFactory {
return func(ctx context.Context, plArgs runtime.Object, fh fwk.Handle) (fwk.Plugin, error) {
return withFts(ctx, plArgs, fh, fts)
}
}
// DecodeInto decodes configuration whose type is *runtime.Unknown into the given interface.
func DecodeInto(obj runtime.Object, into interface{}) error {
if obj == nil {
return nil
}
configuration, ok := obj.(*runtime.Unknown)
if !ok {
return fmt.Errorf("want args of type runtime.Unknown, got %T", obj)
}
if configuration.Raw == nil {
return nil
}
switch configuration.ContentType {
// If ContentType is empty, it means ContentTypeJSON by default.
case runtime.ContentTypeJSON, "":
return json.Unmarshal(configuration.Raw, into)
case runtime.ContentTypeYAML:
return yaml.Unmarshal(configuration.Raw, into)
default:
return fmt.Errorf("not supported content type %s", configuration.ContentType)
}
}
// Registry is a collection of all available plugins. The framework uses a
// registry to enable and initialize configured plugins.
// All plugins must be in the registry before initializing the framework.
type Registry map[string]PluginFactory
// Register adds a new plugin to the registry. If a plugin with the same name
// exists, it returns an error.
func (r Registry) Register(name string, factory PluginFactory) error {
if _, ok := r[name]; ok {
return fmt.Errorf("a plugin named %v already exists", name)
}
r[name] = factory
return nil
}
// Unregister removes an existing plugin from the registry. If no plugin with
// the provided name exists, it returns an error.
func (r Registry) Unregister(name string) error {
if _, ok := r[name]; !ok {
return fmt.Errorf("no plugin named %v exists", name)
}
delete(r, name)
return nil
}
// Merge merges the provided registry to the current one.
func (r Registry) Merge(in Registry) error {
for name, factory := range in {
if err := r.Register(name, factory); err != nil {
return err
}
}
return nil
}
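// Typical registry usage (illustrative; the examplePlugin type and
// ExamplePluginArgs struct are hypothetical):
//
//	registry := Registry{}
//	if err := registry.Register("ExamplePlugin", func(ctx context.Context, obj runtime.Object, h fwk.Handle) (fwk.Plugin, error) {
//		args := &ExamplePluginArgs{}
//		if err := DecodeInto(obj, args); err != nil {
//			return nil, err
//		}
//		return &examplePlugin{args: args}, nil
//	}); err != nil {
//		// a plugin with that name was already registered
//	}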
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
fwk "k8s.io/kube-scheduler/framework"
)
// waitingPodsMap is a thread-safe map used to maintain pods waiting in the permit phase.
type waitingPodsMap struct {
pods map[types.UID]*waitingPod
mu sync.RWMutex
}
// NewWaitingPodsMap returns a new waitingPodsMap.
func NewWaitingPodsMap() *waitingPodsMap {
return &waitingPodsMap{
pods: make(map[types.UID]*waitingPod),
}
}
// add a new WaitingPod to the map.
func (m *waitingPodsMap) add(wp *waitingPod) {
m.mu.Lock()
defer m.mu.Unlock()
m.pods[wp.GetPod().UID] = wp
}
// remove a WaitingPod from the map.
func (m *waitingPodsMap) remove(uid types.UID) {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.pods, uid)
}
// get a WaitingPod from the map.
func (m *waitingPodsMap) get(uid types.UID) *waitingPod {
m.mu.RLock()
defer m.mu.RUnlock()
return m.pods[uid]
}
// iterate acquires a read lock and iterates over the WaitingPods map.
func (m *waitingPodsMap) iterate(callback func(fwk.WaitingPod)) {
m.mu.RLock()
defer m.mu.RUnlock()
for _, v := range m.pods {
callback(v)
}
}
// waitingPod represents a pod waiting in the permit phase.
type waitingPod struct {
pod *v1.Pod
pendingPlugins map[string]*time.Timer
s chan *fwk.Status
mu sync.RWMutex
}
var _ fwk.WaitingPod = &waitingPod{}
// newWaitingPod returns a new waitingPod instance.
func newWaitingPod(pod *v1.Pod, pluginsMaxWaitTime map[string]time.Duration) *waitingPod {
wp := &waitingPod{
pod: pod,
// Allow() and Reject() calls are non-blocking. This property is guaranteed
// by using a non-blocking send to this channel. The channel has a buffer of size 1
// to ensure that a non-blocking send is not lost, which could otherwise happen when
// the receive from this channel occurs after the non-blocking send.
s: make(chan *fwk.Status, 1),
}
wp.pendingPlugins = make(map[string]*time.Timer, len(pluginsMaxWaitTime))
// The time.AfterFunc calls wp.Reject which iterates through pendingPlugins map. Acquire the
// lock here so that time.AfterFunc can only execute after newWaitingPod finishes.
wp.mu.Lock()
defer wp.mu.Unlock()
for k, v := range pluginsMaxWaitTime {
plugin, waitTime := k, v
wp.pendingPlugins[plugin] = time.AfterFunc(waitTime, func() {
msg := fmt.Sprintf("rejected due to timeout after waiting %v at plugin %v",
waitTime, plugin)
wp.Reject(plugin, msg)
})
}
return wp
}
// GetPod returns a reference to the waiting pod.
func (w *waitingPod) GetPod() *v1.Pod {
return w.pod
}
// GetPendingPlugins returns the names of the pending permit plugins.
func (w *waitingPod) GetPendingPlugins() []string {
w.mu.RLock()
defer w.mu.RUnlock()
plugins := make([]string, 0, len(w.pendingPlugins))
for p := range w.pendingPlugins {
plugins = append(plugins, p)
}
return plugins
}
// Allow declares that the waiting pod is allowed to be scheduled by the plugin pluginName.
// If this is the last remaining plugin to allow, then a success signal is delivered
// to unblock the pod.
func (w *waitingPod) Allow(pluginName string) {
w.mu.Lock()
defer w.mu.Unlock()
if timer, exist := w.pendingPlugins[pluginName]; exist {
timer.Stop()
delete(w.pendingPlugins, pluginName)
}
// Only signal success status after all plugins have allowed
if len(w.pendingPlugins) != 0 {
return
}
// The select clause works as a non-blocking send.
// If there is no receiver, it's a no-op (default case).
select {
case w.s <- fwk.NewStatus(fwk.Success, ""):
default:
}
}
// Reject declares the waiting pod unschedulable.
func (w *waitingPod) Reject(pluginName, msg string) {
w.mu.RLock()
defer w.mu.RUnlock()
for _, timer := range w.pendingPlugins {
timer.Stop()
}
// The select clause works as a non-blocking send.
// If there is no receiver, it's a no-op (default case).
select {
case w.s <- fwk.NewStatus(fwk.Unschedulable, msg).WithPlugin(pluginName):
default:
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"fmt"
"slices"
"sort"
"strings"
"sync/atomic"
"time"
v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
resourcehelper "k8s.io/component-helpers/resource"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
var generation int64
var (
// basicActionTypes is a list of basic ActionTypes.
basicActionTypes = []fwk.ActionType{fwk.Add, fwk.Delete, fwk.Update}
// podActionTypes is a list of ActionTypes that are only applicable for Pod events.
podActionTypes = []fwk.ActionType{fwk.UpdatePodLabel, fwk.UpdatePodScaleDown, fwk.UpdatePodToleration, fwk.UpdatePodSchedulingGatesEliminated, fwk.UpdatePodGeneratedResourceClaim}
// nodeActionTypes is a list of ActionTypes that are only applicable for Node events.
nodeActionTypes = []fwk.ActionType{fwk.UpdateNodeAllocatable, fwk.UpdateNodeLabel, fwk.UpdateNodeTaint, fwk.UpdateNodeCondition, fwk.UpdateNodeAnnotation}
)
// Constants for GVKs.
const (
// assignedPod and unschedulablePod are internal resources used to represent the type of a Pod.
// We deliberately don't expose them to plugins because we don't publish Pod events with unschedulable Pods in the first place.
assignedPod fwk.EventResource = "AssignedPod"
unschedulablePod fwk.EventResource = "UnschedulablePod"
)
var (
// allResources is a list of all resources.
allResources = []fwk.EventResource{
fwk.Pod,
assignedPod,
unschedulablePod,
fwk.Node,
fwk.PersistentVolume,
fwk.PersistentVolumeClaim,
fwk.CSINode,
fwk.CSIDriver,
fwk.CSIStorageCapacity,
fwk.StorageClass,
fwk.VolumeAttachment,
fwk.ResourceClaim,
fwk.ResourceSlice,
fwk.DeviceClass,
}
)
// AllClusterEventLabels returns all possible cluster event labels given to the metrics.
func AllClusterEventLabels() []string {
labels := []string{UnschedulableTimeout, ForceActivate}
for _, r := range allResources {
for _, a := range basicActionTypes {
labels = append(labels, fwk.ClusterEvent{Resource: r, ActionType: a}.Label())
}
}
for _, a := range podActionTypes {
labels = append(labels, fwk.ClusterEvent{Resource: fwk.Pod, ActionType: a}.Label())
}
for _, a := range nodeActionTypes {
labels = append(labels, fwk.ClusterEvent{Resource: fwk.Node, ActionType: a}.Label())
}
return labels
}
// ClusterEventIsWildCard returns true if the given ClusterEvent follows WildCard semantics
func ClusterEventIsWildCard(ce fwk.ClusterEvent) bool {
return ce.Resource == fwk.WildCard && ce.ActionType == fwk.All
}
// MatchClusterEvents returns true if ce is matched with incomingEvent.
// "match" means that incomingEvent is the same or more specific than the ce.
// e.g. when ce.ActionType is Update and incomingEvent.ActionType is UpdateNodeLabel, it will return true
// because UpdateNodeLabel is more specific than Update.
// On the other hand, when ce.ActionType is UpdateNodeLabel and incomingEvent.ActionType is Update, it returns false.
// This is based on the fact that the scheduler interprets the incoming cluster event as specifically as possible;
// meaning, if incomingEvent is Node/Update, the Node's update is not something that can be interpreted
// as any of the Node's more specific Update events.
//
// If the ce.Resource is "*", there's no requirement for incomingEvent.Resource.
// Contrarily, if incomingEvent.Resource is "*", the only accepted ce.Resource is "*" (which should never
// happen in the current implementation of the scheduling queue).
//
// Note: there is a special case when incomingEvent is a wildcard event: it forces all Pods to move
// to activeQ/backoffQ, but we treat it as an unmatched event unless ce is also a wildcard event.
func MatchClusterEvents(ce, incomingEvent fwk.ClusterEvent) bool {
return ClusterEventIsWildCard(ce) ||
matchEventResources(ce.Resource, incomingEvent.Resource) && ce.ActionType&incomingEvent.ActionType != 0 && incomingEvent.ActionType <= ce.ActionType
}
// matchEventResources returns true if the resource r matches the incoming resource.
func matchEventResources(r, resource fwk.EventResource) bool {
// WildCard matches all resources
return r == fwk.WildCard ||
// Exact match
r == resource ||
// Pod matches assignedPod and unschedulablePod.
// (assignedPod and unschedulablePod aren't exposed and hence only used for incoming events and never used in EventsToRegister)
r == fwk.Pod && (resource == assignedPod || resource == unschedulablePod)
}
// MatchAnyClusterEvent returns true if ce matches any of the incoming events.
func MatchAnyClusterEvent(ce fwk.ClusterEvent, incomingEvents []fwk.ClusterEvent) bool {
for _, e := range incomingEvents {
if MatchClusterEvents(e, ce) {
return true
}
}
return false
}
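// A few concrete evaluations of the matching rules above (illustrative):
//
//	ce := fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.Update}
//	in := fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.UpdateNodeLabel}
//	MatchClusterEvents(ce, in) // true: the incoming event is more specific than ce
//	MatchClusterEvents(in, ce) // false: the incoming event is less specific than ce
//	MatchClusterEvents(fwk.ClusterEvent{Resource: fwk.WildCard, ActionType: fwk.All}, ce) // true: a wildcard ce matches anything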
// UnrollWildCardResource returns the list of per-resource events that a WildCard resource event expands into.
func UnrollWildCardResource() []fwk.ClusterEventWithHint {
return []fwk.ClusterEventWithHint{
{Event: fwk.ClusterEvent{Resource: fwk.Pod, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolume, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.PersistentVolumeClaim, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.CSINode, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.CSIDriver, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.CSIStorageCapacity, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.StorageClass, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.ResourceClaim, ActionType: fwk.All}},
{Event: fwk.ClusterEvent{Resource: fwk.DeviceClass, ActionType: fwk.All}},
}
}
// NodeInfo is node level aggregated information.
type NodeInfo struct {
// Overall node information.
node *v1.Node
// Pods running on the node.
Pods []fwk.PodInfo
// The subset of pods with affinity.
PodsWithAffinity []fwk.PodInfo
// The subset of pods with required anti-affinity.
PodsWithRequiredAntiAffinity []fwk.PodInfo
// Ports allocated on the node.
UsedPorts fwk.HostPortInfo
// Total requested resources of all pods on this node. This includes assumed
// pods, which scheduler has sent for binding, but may not be scheduled yet.
Requested *Resource
// Total requested resources of all pods on this node with a minimum value
// applied to each container's CPU and memory requests. This does not reflect
// the actual resource requests for this node, but is used to avoid scheduling
// many zero-request pods onto one node.
NonZeroRequested *Resource
// We store allocatedResources (which is Node.Status.Allocatable.*) explicitly
// as int64, to avoid conversions and map accesses.
Allocatable *Resource
// ImageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
// state information.
ImageStates map[string]*fwk.ImageStateSummary
// PVCRefCounts contains a mapping of PVC names to the number of pods on the node using it.
// Keys are in the format "namespace/name".
PVCRefCounts map[string]int
// Whenever NodeInfo changes, generation is bumped.
// This is used to avoid cloning it if the object didn't change.
Generation int64
}
func (n *NodeInfo) GetPods() []fwk.PodInfo {
return n.Pods
}
func (n *NodeInfo) GetPodsWithAffinity() []fwk.PodInfo {
return n.PodsWithAffinity
}
func (n *NodeInfo) GetPodsWithRequiredAntiAffinity() []fwk.PodInfo {
return n.PodsWithRequiredAntiAffinity
}
func (n *NodeInfo) GetUsedPorts() fwk.HostPortInfo {
return n.UsedPorts
}
func (n *NodeInfo) GetRequested() fwk.Resource {
return n.Requested
}
func (n *NodeInfo) GetNonZeroRequested() fwk.Resource {
return n.NonZeroRequested
}
func (n *NodeInfo) GetAllocatable() fwk.Resource {
return n.Allocatable
}
func (n *NodeInfo) GetImageStates() map[string]*fwk.ImageStateSummary {
return n.ImageStates
}
func (n *NodeInfo) GetPVCRefCounts() map[string]int {
return n.PVCRefCounts
}
func (n *NodeInfo) GetGeneration() int64 {
return n.Generation
}
// NodeInfo implements KMetadata, so for example klog.KObjSlice(nodes) works
// when nodes is a []*NodeInfo.
var _ klog.KMetadata = &NodeInfo{}
// GetName returns the name of the node wrapped by this NodeInfo object, or a meaningful placeholder if the NodeInfo or its node is nil.
// This method is a part of interface KMetadata.
func (n *NodeInfo) GetName() string {
if n == nil {
return "<nil>"
}
if n.node == nil {
return "<no node>"
}
return n.node.Name
}
// GetNamespace is a part of interface KMetadata. For NodeInfo it should always return an empty string, since Node is not a namespaced resource.
func (n *NodeInfo) GetNamespace() string {
return ""
}
// Node returns overall information about this node.
func (n *NodeInfo) Node() *v1.Node {
if n == nil {
return nil
}
return n.node
}
// Snapshot returns a copy of this node, same as SnapshotConcrete, but with returned type fwk.NodeInfo
// (the purpose is to have NodeInfo implement interface fwk.NodeInfo).
func (n *NodeInfo) Snapshot() fwk.NodeInfo {
return n.SnapshotConcrete()
}
// SnapshotConcrete returns a copy of this node, except that ImageStates is copied without the Nodes field.
func (n *NodeInfo) SnapshotConcrete() *NodeInfo {
clone := &NodeInfo{
node: n.node,
Requested: n.Requested.Clone(),
NonZeroRequested: n.NonZeroRequested.Clone(),
Allocatable: n.Allocatable.Clone(),
UsedPorts: make(fwk.HostPortInfo),
ImageStates: make(map[string]*fwk.ImageStateSummary),
PVCRefCounts: make(map[string]int),
Generation: n.Generation,
}
if len(n.Pods) > 0 {
clone.Pods = append([]fwk.PodInfo(nil), n.Pods...)
}
if len(n.UsedPorts) > 0 {
// HostPortInfo is a map-in-map struct
// make sure it's deep copied
for ip, portMap := range n.UsedPorts {
clone.UsedPorts[ip] = make(map[fwk.ProtocolPort]struct{})
for protocolPort, v := range portMap {
clone.UsedPorts[ip][protocolPort] = v
}
}
}
if len(n.PodsWithAffinity) > 0 {
clone.PodsWithAffinity = append([]fwk.PodInfo(nil), n.PodsWithAffinity...)
}
if len(n.PodsWithRequiredAntiAffinity) > 0 {
clone.PodsWithRequiredAntiAffinity = append([]fwk.PodInfo(nil), n.PodsWithRequiredAntiAffinity...)
}
if len(n.ImageStates) > 0 {
state := make(map[string]*fwk.ImageStateSummary, len(n.ImageStates))
for imageName, imageState := range n.ImageStates {
state[imageName] = imageState.Snapshot()
}
clone.ImageStates = state
}
for key, value := range n.PVCRefCounts {
clone.PVCRefCounts[key] = value
}
return clone
}
// String returns a human-readable representation of this NodeInfo.
func (n *NodeInfo) String() string {
podKeys := make([]string, len(n.Pods))
for i, p := range n.Pods {
podKeys[i] = p.GetPod().Name
}
return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}",
podKeys, n.Requested, n.NonZeroRequested, n.UsedPorts, n.Allocatable)
}
// AddPodInfo adds pod information to this NodeInfo.
// Consider using this instead of AddPod if a PodInfo is already computed.
func (n *NodeInfo) AddPodInfo(podInfo fwk.PodInfo) {
n.Pods = append(n.Pods, podInfo)
if podWithAffinity(podInfo.GetPod()) {
n.PodsWithAffinity = append(n.PodsWithAffinity, podInfo)
}
if podWithRequiredAntiAffinity(podInfo.GetPod()) {
n.PodsWithRequiredAntiAffinity = append(n.PodsWithRequiredAntiAffinity, podInfo)
}
n.update(podInfo, 1)
}
// AddPod is a wrapper around AddPodInfo.
func (n *NodeInfo) AddPod(pod *v1.Pod) {
// ignore this err since apiserver doesn't properly validate affinity terms
// and we can't fix the validation for backwards compatibility.
podInfo, _ := NewPodInfo(pod)
n.AddPodInfo(podInfo)
}
func podWithAffinity(p *v1.Pod) bool {
affinity := p.Spec.Affinity
return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
}
func podWithRequiredAntiAffinity(p *v1.Pod) bool {
affinity := p.Spec.Affinity
return affinity != nil && affinity.PodAntiAffinity != nil &&
len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0
}
func removeFromSlice(logger klog.Logger, s []fwk.PodInfo, k string) ([]fwk.PodInfo, fwk.PodInfo) {
var removedPod fwk.PodInfo
for i := range s {
tmpKey, err := GetPodKey(s[i].GetPod())
if err != nil {
utilruntime.HandleErrorWithLogger(logger, err, "Cannot get pod key", "pod", klog.KObj(s[i].GetPod()))
continue
}
if k == tmpKey {
removedPod = s[i]
// delete the element
s[i] = s[len(s)-1]
s = s[:len(s)-1]
break
}
}
// resets the slices to nil so that we can do DeepEqual in unit tests.
if len(s) == 0 {
return nil, removedPod
}
return s, removedPod
}
// RemovePod subtracts pod information from this NodeInfo.
func (n *NodeInfo) RemovePod(logger klog.Logger, pod *v1.Pod) error {
k, err := GetPodKey(pod)
if err != nil {
return err
}
if podWithAffinity(pod) {
n.PodsWithAffinity, _ = removeFromSlice(logger, n.PodsWithAffinity, k)
}
if podWithRequiredAntiAffinity(pod) {
n.PodsWithRequiredAntiAffinity, _ = removeFromSlice(logger, n.PodsWithRequiredAntiAffinity, k)
}
var removedPod fwk.PodInfo
if n.Pods, removedPod = removeFromSlice(logger, n.Pods, k); removedPod != nil {
n.update(removedPod, -1)
return nil
}
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
}
// update adjusts the NodeInfo counters based on the given pod and sign.
// The sign is set to `+1` by AddPod and to `-1` by RemovePod.
func (n *NodeInfo) update(podInfo fwk.PodInfo, sign int64) {
podResource := podInfo.CalculateResource()
n.Requested.MilliCPU += sign * podResource.Resource.GetMilliCPU()
n.Requested.Memory += sign * podResource.Resource.GetMemory()
n.Requested.EphemeralStorage += sign * podResource.Resource.GetEphemeralStorage()
if n.Requested.ScalarResources == nil && len(podResource.Resource.GetScalarResources()) > 0 {
n.Requested.ScalarResources = map[v1.ResourceName]int64{}
}
for rName, rQuant := range podResource.Resource.GetScalarResources() {
n.Requested.ScalarResources[rName] += sign * rQuant
}
n.NonZeroRequested.MilliCPU += sign * podResource.Non0CPU
n.NonZeroRequested.Memory += sign * podResource.Non0Mem
// Consume ports when pod added or release ports when pod removed.
n.updateUsedPorts(podInfo.GetPod(), sign > 0)
n.updatePVCRefCounts(podInfo.GetPod(), sign > 0)
n.Generation = nextGeneration()
}
// updateUsedPorts updates the UsedPorts of NodeInfo.
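//
// For example (illustrative), a container exposing hostPort 8080/TCP adds (or,
// on removal, deletes) an entry for that (IP, protocol, port) triple in
// n.UsedPorts via fwk.HostPortInfo's Add/Remove helpers.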
func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
for _, port := range schedutil.GetHostPorts(pod) {
if add {
n.UsedPorts.Add(port.HostIP, string(port.Protocol), port.HostPort)
} else {
n.UsedPorts.Remove(port.HostIP, string(port.Protocol), port.HostPort)
}
}
}
// updatePVCRefCounts updates the PVCRefCounts of NodeInfo.
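//
// For example (illustrative), a pod in namespace "default" mounting a PVC named
// "claim-a" is counted under the key "default/claim-a"; the key is removed once
// its count drops to zero.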
func (n *NodeInfo) updatePVCRefCounts(pod *v1.Pod, add bool) {
for _, v := range pod.Spec.Volumes {
if v.PersistentVolumeClaim == nil {
continue
}
key := GetNamespacedName(pod.Namespace, v.PersistentVolumeClaim.ClaimName)
if add {
n.PVCRefCounts[key] += 1
} else {
n.PVCRefCounts[key] -= 1
if n.PVCRefCounts[key] <= 0 {
delete(n.PVCRefCounts, key)
}
}
}
}
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) {
n.node = node
n.Allocatable = NewResource(node.Status.Allocatable)
n.Generation = nextGeneration()
}
// RemoveNode removes the node object, leaving all other tracking information.
func (n *NodeInfo) RemoveNode() {
n.node = nil
n.Generation = nextGeneration()
}
// nextGeneration: Let's make sure history never forgets the name...
// It increments the generation number monotonically, ensuring that generation numbers never collide.
// Collision of the generation numbers would be particularly problematic if a node was deleted and
// added back with the same name. See issue#63262.
func nextGeneration() int64 {
return atomic.AddInt64(&generation, 1)
}
// QueuedPodInfo is a Pod wrapper with additional information related to
// the pod's status in the scheduling queue, such as the timestamp when
// it's added to the queue.
type QueuedPodInfo struct {
*PodInfo
// The time the pod was added to the scheduling queue.
Timestamp time.Time
// Number of all scheduling attempts made before the pod was successfully scheduled.
// It's used to record the attempts metric.
Attempts int
// BackoffExpiration is the time when the Pod will complete its backoff.
// If the SchedulerPopFromBackoffQ feature is enabled, the value is aligned to the backoff ordering window.
// Then, two Pods with the same BackoffExpiration (time bucket) are ordered by priority and, finally, by timestamp,
// to make sure popping from the backoffQ considers the priority of pods that are close to the expiration time.
BackoffExpiration time.Time
// The total number of scheduling attempts in which this Pod was found unschedulable.
// It usually equals Attempts, but the count is not incremented when the Pod fails
// with an Error status (e.g., a network error).
// It's used to calculate the backoff time this Pod must wait before being retried.
UnschedulableCount int
// The number of consecutive Error statuses this Pod has received.
// This count is reset whenever the Pod gets a status other than Error.
//
// If an error status is returned (e.g., kube-apiserver is unstable), we don't want to retry the Pod immediately,
// and hence need a backoff retry mechanism, because immediate retries might put more load on the kube-apiserver.
// But we don't want to calculate the backoff time the same way as for a normal unschedulable status,
// since the purpose is different: the backoff for an unschedulable status penalizes wasted scheduling cycles,
// whereas the backoff for an error status protects the kube-apiserver.
// That's why we distinguish ConsecutiveErrorsCount for the error status from UnschedulableCount for the unschedulable status.
// See https://github.com/kubernetes/kubernetes/issues/128744 for the discussion.
ConsecutiveErrorsCount int
// The time when the pod is added to the queue for the first time. The pod may be added
// back to the queue multiple times before it's successfully scheduled.
// It shouldn't be updated once initialized. It's used to record the e2e scheduling
// latency for a pod.
InitialAttemptTimestamp *time.Time
// UnschedulablePlugins records the plugin names that the Pod failed with Unschedulable or UnschedulableAndUnresolvable status
// at specific extension points: PreFilter, Filter, Reserve, or Permit (WaitOnPermit).
// If Pods are rejected at other extension points,
// the failures are assumed to be unexpected errors (e.g., a temporary network issue, a plugin implementation issue, etc.)
// and the Pods are retried soon after a backoff period.
// That is because such failures could be resolved regardless of incoming cluster events (registered in EventsToRegister).
UnschedulablePlugins sets.Set[string]
// PendingPlugins records the plugin names that the Pod failed with Pending status.
PendingPlugins sets.Set[string]
// GatingPlugin records the plugin name that gated the Pod at PreEnqueue.
GatingPlugin string
// GatingPluginEvents records the events registered by the plugin that gated the Pod at PreEnqueue.
// They are cached to avoid re-computing which event(s) might ungate the Pod.
GatingPluginEvents []fwk.ClusterEvent
}
func (pqi *QueuedPodInfo) GetPodInfo() fwk.PodInfo {
return pqi.PodInfo
}
func (pqi *QueuedPodInfo) GetTimestamp() time.Time {
return pqi.Timestamp
}
func (pqi *QueuedPodInfo) GetAttempts() int {
return pqi.Attempts
}
func (pqi *QueuedPodInfo) GetBackoffExpiration() time.Time {
return pqi.BackoffExpiration
}
func (pqi *QueuedPodInfo) GetUnschedulableCount() int {
return pqi.UnschedulableCount
}
func (pqi *QueuedPodInfo) GetConsecutiveErrorsCount() int {
return pqi.ConsecutiveErrorsCount
}
func (pqi *QueuedPodInfo) GetInitialAttemptTimestamp() *time.Time {
return pqi.InitialAttemptTimestamp
}
func (pqi *QueuedPodInfo) GetUnschedulablePlugins() sets.Set[string] {
return pqi.UnschedulablePlugins
}
func (pqi *QueuedPodInfo) GetPendingPlugins() sets.Set[string] {
return pqi.PendingPlugins
}
func (pqi *QueuedPodInfo) GetGatingPlugin() string {
return pqi.GatingPlugin
}
func (pqi *QueuedPodInfo) GetGatingPluginEvents() []fwk.ClusterEvent {
return pqi.GatingPluginEvents
}
// Gated returns true if the pod is gated by any plugin.
func (pqi *QueuedPodInfo) Gated() bool {
return pqi.GatingPlugin != ""
}
// DeepCopy returns a deep copy of the QueuedPodInfo object.
func (pqi *QueuedPodInfo) DeepCopy() *QueuedPodInfo {
return &QueuedPodInfo{
PodInfo: pqi.PodInfo.DeepCopy(),
Timestamp: pqi.Timestamp,
Attempts: pqi.Attempts,
UnschedulableCount: pqi.UnschedulableCount,
InitialAttemptTimestamp: pqi.InitialAttemptTimestamp,
UnschedulablePlugins: pqi.UnschedulablePlugins.Clone(),
BackoffExpiration: pqi.BackoffExpiration,
GatingPlugin: pqi.GatingPlugin,
GatingPluginEvents: slices.Clone(pqi.GatingPluginEvents),
PendingPlugins: pqi.PendingPlugins.Clone(),
ConsecutiveErrorsCount: pqi.ConsecutiveErrorsCount,
}
}
// PodInfo is a wrapper to a Pod with additional pre-computed information to
// accelerate processing. This information is typically immutable (e.g., pre-processed
// inter-pod affinity selectors).
type PodInfo struct {
Pod *v1.Pod
RequiredAffinityTerms []fwk.AffinityTerm
RequiredAntiAffinityTerms []fwk.AffinityTerm
PreferredAffinityTerms []fwk.WeightedAffinityTerm
PreferredAntiAffinityTerms []fwk.WeightedAffinityTerm
// cachedResource contains the precomputed resources for the Pod (podResource).
// The value can change only if InPlacePodVerticalScaling is enabled;
// in that case, the whole PodInfo object is recreated (for assigned pods in the cache).
// cachedResource is computed when adding a scheduled pod to a NodeInfo and reused
// when removing a pod from a NodeInfo, i.e., when finding victims for preemption
// or removing a pod from the cluster, which provides a noticeable performance boost.
// Note: the cachedResource field shouldn't be accessed directly;
// use the CalculateResource method to obtain it instead.
cachedResource *fwk.PodResource
}
func (pi *PodInfo) GetPod() *v1.Pod {
return pi.Pod
}
func (pi *PodInfo) GetRequiredAffinityTerms() []fwk.AffinityTerm {
return pi.RequiredAffinityTerms
}
func (pi *PodInfo) GetRequiredAntiAffinityTerms() []fwk.AffinityTerm {
return pi.RequiredAntiAffinityTerms
}
func (pi *PodInfo) GetPreferredAffinityTerms() []fwk.WeightedAffinityTerm {
return pi.PreferredAffinityTerms
}
func (pi *PodInfo) GetPreferredAntiAffinityTerms() []fwk.WeightedAffinityTerm {
return pi.PreferredAntiAffinityTerms
}
// DeepCopy returns a deep copy of the PodInfo object.
func (pi *PodInfo) DeepCopy() *PodInfo {
return &PodInfo{
Pod: pi.Pod.DeepCopy(),
RequiredAffinityTerms: pi.RequiredAffinityTerms,
RequiredAntiAffinityTerms: pi.RequiredAntiAffinityTerms,
PreferredAffinityTerms: pi.PreferredAffinityTerms,
PreferredAntiAffinityTerms: pi.PreferredAntiAffinityTerms,
cachedResource: pi.cachedResource,
}
}
// Update recomputes this PodInfo from the given pod. As an optimization, when the PodInfo
// has already been instantiated and the passed pod is the exact same pod (same UID) as the
// original, only the pod pointer is updated in place.
func (pi *PodInfo) Update(pod *v1.Pod) error {
if pod != nil && pi.Pod != nil && pi.Pod.UID == pod.UID {
// PodInfo includes immutable information, and so it is safe to update the pod in place if it is
// the exact same pod
pi.Pod = pod
return nil
}
var preferredAffinityTerms []v1.WeightedPodAffinityTerm
var preferredAntiAffinityTerms []v1.WeightedPodAffinityTerm
if affinity := pod.Spec.Affinity; affinity != nil {
if a := affinity.PodAffinity; a != nil {
preferredAffinityTerms = a.PreferredDuringSchedulingIgnoredDuringExecution
}
if a := affinity.PodAntiAffinity; a != nil {
preferredAntiAffinityTerms = a.PreferredDuringSchedulingIgnoredDuringExecution
}
}
// Attempt to parse the affinity terms
var parseErrs []error
requiredAffinityTerms, err := fwk.GetAffinityTerms(pod, fwk.GetPodAffinityTerms(pod.Spec.Affinity))
if err != nil {
parseErrs = append(parseErrs, fmt.Errorf("requiredAffinityTerms: %w", err))
}
requiredAntiAffinityTerms, err := fwk.GetAffinityTerms(pod,
fwk.GetPodAntiAffinityTerms(pod.Spec.Affinity))
if err != nil {
parseErrs = append(parseErrs, fmt.Errorf("requiredAntiAffinityTerms: %w", err))
}
weightedAffinityTerms, err := fwk.GetWeightedAffinityTerms(pod, preferredAffinityTerms)
if err != nil {
parseErrs = append(parseErrs, fmt.Errorf("preferredAffinityTerms: %w", err))
}
weightedAntiAffinityTerms, err := fwk.GetWeightedAffinityTerms(pod, preferredAntiAffinityTerms)
if err != nil {
parseErrs = append(parseErrs, fmt.Errorf("preferredAntiAffinityTerms: %w", err))
}
pi.Pod = pod
pi.RequiredAffinityTerms = requiredAffinityTerms
pi.RequiredAntiAffinityTerms = requiredAntiAffinityTerms
pi.PreferredAffinityTerms = weightedAffinityTerms
pi.PreferredAntiAffinityTerms = weightedAntiAffinityTerms
pi.cachedResource = nil
return utilerrors.NewAggregate(parseErrs)
}
func (pi *PodInfo) CalculateResource() fwk.PodResource {
if pi.cachedResource != nil {
return *pi.cachedResource
}
inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling)
podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
requests := resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
UseStatusResources: inPlacePodVerticalScalingEnabled,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
})
isPodLevelResourcesSet := podLevelResourcesEnabled && resourcehelper.IsPodLevelRequestsSet(pi.Pod)
nonMissingContainerRequests := getNonMissingContainerRequests(requests, isPodLevelResourcesSet)
non0Requests := requests
if len(nonMissingContainerRequests) > 0 {
non0Requests = resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
UseStatusResources: inPlacePodVerticalScalingEnabled,
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
SkipPodLevelResources: !podLevelResourcesEnabled,
NonMissingContainerRequests: nonMissingContainerRequests,
})
}
non0CPU := non0Requests[v1.ResourceCPU]
non0Mem := non0Requests[v1.ResourceMemory]
var res Resource
res.Add(requests)
podResource := fwk.PodResource{
Resource: &res,
Non0CPU: non0CPU.MilliValue(),
Non0Mem: non0Mem.Value(),
}
pi.cachedResource = &podResource
return podResource
}
// ExtenderName is a fake plugin name put in UnschedulablePlugins when an Extender rejects some Nodes.
const ExtenderName = "Extender"
// Diagnosis records the details to diagnose a scheduling failure.
type Diagnosis struct {
// NodeToStatus records the status of nodes rejected by the PreFilter (via PreFilterResult)
// or Filter plugins, plus a generic status for nodes that are absent from the map.
// Nodes that pass the PreFilter/Filter plugins are not included in this map.
NodeToStatus *NodeToStatus
// UnschedulablePlugins are plugins that return Unschedulable or UnschedulableAndUnresolvable.
UnschedulablePlugins sets.Set[string]
// PendingPlugins are plugins that return Pending.
PendingPlugins sets.Set[string]
// PreFilterMsg records the messages returned from PreFilter plugins.
PreFilterMsg string
// PostFilterMsg records the messages returned from PostFilter plugins.
PostFilterMsg string
}
// FitError describes a fit error of a pod.
type FitError struct {
Pod *v1.Pod
NumAllNodes int
Diagnosis Diagnosis
}
const (
// NoNodeAvailableMsg is used to format the message when no nodes are available.
NoNodeAvailableMsg = "0/%v nodes are available"
)
func (d *Diagnosis) AddPluginStatus(sts *fwk.Status) {
if sts.Plugin() == "" {
return
}
if sts.IsRejected() {
if d.UnschedulablePlugins == nil {
d.UnschedulablePlugins = sets.New[string]()
}
d.UnschedulablePlugins.Insert(sts.Plugin())
}
if sts.Code() == fwk.Pending {
if d.PendingPlugins == nil {
d.PendingPlugins = sets.New[string]()
}
d.PendingPlugins.Insert(sts.Plugin())
}
}
// Error returns detailed information about why the pod failed to fit on each node.
// A message format is "0/X nodes are available: <PreFilterMsg>. <FilterMsg>. <PostFilterMsg>."
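//
// An illustrative (made-up) example of the resulting message:
//
//	0/3 nodes are available: 1 Insufficient cpu, 2 node(s) had untolerated taint {dedicated: db}.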
func (f *FitError) Error() string {
reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+":", f.NumAllNodes)
preFilterMsg := f.Diagnosis.PreFilterMsg
if preFilterMsg != "" {
// PreFilter plugin returns unschedulable.
// Add the messages from PreFilter plugins to reasonMsg.
reasonMsg += fmt.Sprintf(" %v.", preFilterMsg)
}
if preFilterMsg == "" {
// The scheduling cycle went through the PreFilter extension point successfully.
//
// When a PreFilter plugin returns unschedulable,
// the scheduling framework inserts the same unschedulable status into all nodes in NodeToStatusReader.
// So, we shouldn't add the messages from NodeToStatusReader when PreFilter failed;
// otherwise, we would have duplicated reasons in the error message.
reasons := make(map[string]int)
f.Diagnosis.NodeToStatus.ForEachExplicitNode(func(_ string, status *fwk.Status) {
for _, reason := range status.Reasons() {
reasons[reason]++
}
})
if f.Diagnosis.NodeToStatus.Len() < f.NumAllNodes {
// Adding predefined reasons for nodes that are absent in NodeToStatusReader
for _, reason := range f.Diagnosis.NodeToStatus.AbsentNodesStatus().Reasons() {
reasons[reason] += f.NumAllNodes - f.Diagnosis.NodeToStatus.Len()
}
}
sortReasonsHistogram := func() []string {
var reasonStrings []string
for k, v := range reasons {
reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
}
sort.Strings(reasonStrings)
return reasonStrings
}
sortedFilterMsg := sortReasonsHistogram()
if len(sortedFilterMsg) != 0 {
reasonMsg += fmt.Sprintf(" %v.", strings.Join(sortedFilterMsg, ", "))
}
}
// Add the messages from PostFilter plugins to reasonMsg.
// We can add this message regardless of whether the scheduling cycle fails at PreFilter or Filter
// since we may run PostFilter (if enabled) in both cases.
postFilterMsg := f.Diagnosis.PostFilterMsg
if postFilterMsg != "" {
reasonMsg += fmt.Sprintf(" %v", postFilterMsg)
}
return reasonMsg
}
// NewPodInfo returns a new PodInfo.
func NewPodInfo(pod *v1.Pod) (*PodInfo, error) {
pInfo := &PodInfo{}
err := pInfo.Update(pod)
return pInfo, err
}
// Resource is a collection of compute resources.
// The implementation is separate from the fwk.Resource interface because the
// implementations of Add and SetMaxResource depend on internal scheduler util functions.
type Resource struct {
MilliCPU int64
Memory int64
EphemeralStorage int64
// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
// explicitly as int, to avoid conversions and improve performance.
AllowedPodNumber int
// ScalarResources tracks the quantity of each scalar resource (e.g., extended resources and hugepages).
ScalarResources map[v1.ResourceName]int64
}
func (r *Resource) GetMilliCPU() int64 {
return r.MilliCPU
}
func (r *Resource) GetMemory() int64 {
return r.Memory
}
func (r *Resource) GetEphemeralStorage() int64 {
return r.EphemeralStorage
}
func (r *Resource) GetAllowedPodNumber() int {
return r.AllowedPodNumber
}
func (r *Resource) GetScalarResources() map[v1.ResourceName]int64 {
return r.ScalarResources
}
// NewResource creates a Resource from ResourceList
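//
// A small usage sketch (illustrative):
//
//	r := NewResource(v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("500m"),
//		v1.ResourceMemory: resource.MustParse("1Gi"),
//	})
//	// r.MilliCPU == 500, r.Memory == 1073741824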
func NewResource(rl v1.ResourceList) *Resource {
r := &Resource{}
r.Add(rl)
return r
}
// Add adds ResourceList into Resource.
func (r *Resource) Add(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += rQuant.MilliValue()
case v1.ResourceMemory:
r.Memory += rQuant.Value()
case v1.ResourcePods:
r.AllowedPodNumber += int(rQuant.Value())
case v1.ResourceEphemeralStorage:
r.EphemeralStorage += rQuant.Value()
default:
if schedutil.IsScalarResourceName(rName) {
r.AddScalar(rName, rQuant.Value())
}
}
}
}
// Clone returns a copy of this resource.
func (r *Resource) Clone() *Resource {
res := &Resource{
MilliCPU: r.MilliCPU,
Memory: r.Memory,
AllowedPodNumber: r.AllowedPodNumber,
EphemeralStorage: r.EphemeralStorage,
}
if r.ScalarResources != nil {
res.ScalarResources = make(map[v1.ResourceName]int64, len(r.ScalarResources))
for k, v := range r.ScalarResources {
res.ScalarResources[k] = v
}
}
return res
}
// AddScalar adds the given quantity to the named scalar resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
r.SetScalar(name, r.ScalarResources[name]+quantity)
}
// SetScalar sets the named scalar resource to the given quantity.
func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
// Lazily allocate scalar resource map.
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]int64{}
}
r.ScalarResources[name] = quantity
}
// SetMaxResource compares this Resource with the given ResourceList and keeps the maximum value for each resource.
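//
// For example (illustrative): starting from MilliCPU=500, applying a list with
// cpu "1" raises MilliCPU to 1000, while applying cpu "200m" leaves it at 500.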
func (r *Resource) SetMaxResource(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuantity := range rl {
switch rName {
case v1.ResourceMemory:
r.Memory = max(r.Memory, rQuantity.Value())
case v1.ResourceCPU:
r.MilliCPU = max(r.MilliCPU, rQuantity.MilliValue())
case v1.ResourceEphemeralStorage:
r.EphemeralStorage = max(r.EphemeralStorage, rQuantity.Value())
default:
if schedutil.IsScalarResourceName(rName) {
r.SetScalar(rName, max(r.ScalarResources[rName], rQuantity.Value()))
}
}
}
}
// NewNodeInfo returns a ready to use empty NodeInfo object.
// If any pods are given in arguments, their information will be aggregated in
// the returned object.
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
ni := &NodeInfo{
Requested: &Resource{},
NonZeroRequested: &Resource{},
Allocatable: &Resource{},
Generation: nextGeneration(),
UsedPorts: make(fwk.HostPortInfo),
ImageStates: make(map[string]*fwk.ImageStateSummary),
PVCRefCounts: make(map[string]int),
}
for _, pod := range pods {
ni.AddPod(pod)
}
return ni
}
// getNonMissingContainerRequests returns the default non-zero CPU and memory
// requests for a container that the scheduler uses when container-level and
// pod-level requests are not set for a resource. It returns a ResourceList that
// includes these default non-zero requests, which are essential for the
// scheduler to function correctly.
// The method's behavior depends on whether pod-level resources are set or not:
// 1. When pod-level resources are not set, the method returns a ResourceList
// with the following defaults:
// - CPU: schedutil.DefaultMilliCPURequest
// - Memory: schedutil.DefaultMemoryRequest
//
// These defaults ensure that each container has a minimum resource request,
// allowing the scheduler to aggregate these requests and find a suitable node
// for the pod.
//
// 2. When pod-level resources are set, if a CPU or memory request is
// missing at the container-level *and* at the pod-level, the corresponding
// default value (schedutil.DefaultMilliCPURequest or schedutil.DefaultMemoryRequest)
// is included in the returned ResourceList.
// Note that these default values are not set in the Pod object itself, they are only used
// by the scheduler during node selection.
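//
// For example (illustrative): when pod-level resources are not set, both defaults
// are always returned; when pod-level resources are set, a default is returned
// only for a resource that is missing from the aggregated requests.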
func getNonMissingContainerRequests(requests v1.ResourceList, podLevelResourcesSet bool) v1.ResourceList {
if !podLevelResourcesSet {
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
}
}
nonMissingContainerRequests := make(v1.ResourceList, 2)
// DefaultMilliCPURequest serves as the fallback value when both
// pod-level and container-level CPU requests are not set.
// Note that the apiserver defaulting logic will propagate a non-zero
// container-level CPU request to the pod level if a pod-level request
// is not explicitly set.
if _, exists := requests[v1.ResourceCPU]; !exists {
nonMissingContainerRequests[v1.ResourceCPU] = *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
}
// DefaultMemoryRequest serves as the fallback value when both
// pod-level and container-level memory requests are unspecified.
// Note that the apiserver defaulting logic will propagate a non-zero
// container-level memory request to the pod level if a pod-level request
// is not explicitly set.
if _, exists := requests[v1.ResourceMemory]; !exists {
nonMissingContainerRequests[v1.ResourceMemory] = *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI)
}
return nonMissingContainerRequests
}
// GetPodKey returns the string key of a pod.
func GetPodKey(pod *v1.Pod) (string, error) {
uid := string(pod.UID)
if len(uid) == 0 {
return "", errors.New("cannot get cache key for pod with empty UID")
}
return uid, nil
}
// GetNamespacedName returns the string format of a namespaced resource name.
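//
// For example, GetNamespacedName("default", "claim-a") returns "default/claim-a".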
func GetNamespacedName(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"time"
"k8s.io/component-base/metrics"
)
// MetricRecorder represents a metric recorder that takes action when
// Inc(), Dec(), or Clear() is called.
type MetricRecorder interface {
Inc()
Dec()
Clear()
}
var _ MetricRecorder = &PendingPodsRecorder{}
// PendingPodsRecorder is an implementation of MetricRecorder
type PendingPodsRecorder struct {
recorder metrics.GaugeMetric
}
// NewActivePodsRecorder returns ActivePods in a Prometheus metric fashion
func NewActivePodsRecorder() *PendingPodsRecorder {
return &PendingPodsRecorder{
recorder: ActivePods(),
}
}
// NewUnschedulablePodsRecorder returns UnschedulablePods in a Prometheus metric fashion
func NewUnschedulablePodsRecorder() *PendingPodsRecorder {
return &PendingPodsRecorder{
recorder: UnschedulablePods(),
}
}
// NewBackoffPodsRecorder returns BackoffPods in a Prometheus metric fashion
func NewBackoffPodsRecorder() *PendingPodsRecorder {
return &PendingPodsRecorder{
recorder: BackoffPods(),
}
}
// NewGatedPodsRecorder returns GatedPods in a Prometheus metric fashion
func NewGatedPodsRecorder() *PendingPodsRecorder {
return &PendingPodsRecorder{
recorder: GatedPods(),
}
}
// Inc increases a metric counter by 1, in an atomic way
func (r *PendingPodsRecorder) Inc() {
r.recorder.Inc()
}
// Dec decreases a metric counter by 1, in an atomic way
func (r *PendingPodsRecorder) Dec() {
r.recorder.Dec()
}
// Clear sets a metric counter to 0, in an atomic way
func (r *PendingPodsRecorder) Clear() {
r.recorder.Set(float64(0))
}
// histogramVecMetric is the data structure passed in the buffer channel between the main framework thread
// and the metricsRecorder goroutine.
type histogramVecMetric struct {
metric *metrics.HistogramVec
labelValues []string
value float64
}
type gaugeVecMetric struct {
metric *metrics.GaugeVec
labelValues []string
valueToAdd float64
}
type gaugeVecMetricKey struct {
metricName string
labelValue string
}
// MetricAsyncRecorder records metric in a separate goroutine to avoid overhead in the critical path.
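//
// A minimal construction sketch (illustrative; label values are hypothetical):
//
//	stopCh := make(chan struct{})
//	r := NewMetricsAsyncRecorder(1000, time.Second, stopCh)
//	r.ObservePluginDurationAsync("Filter", "MyPlugin", "Success", 0.002)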
type MetricAsyncRecorder struct {
// bufferCh is a channel that serves as a metrics buffer before the metricsRecorder goroutine reports it.
bufferCh chan *histogramVecMetric
// if bufferSize is reached, incoming metrics will be discarded.
bufferSize int
// how often the recorder runs to flush the metrics.
interval time.Duration
// aggregatedInflightEventMetric is only to record InFlightEvents metric asynchronously.
// It's a map from gaugeVecMetricKey to the aggregated value
// and the aggregated value is flushed to Prometheus every time the interval is reached.
// Note that we deliberately don't lock the map because we assume the queue takes a lock before updating the in-flight events.
aggregatedInflightEventMetric map[gaugeVecMetricKey]int
aggregatedInflightEventMetricLastFlushTime time.Time
aggregatedInflightEventMetricBufferCh chan *gaugeVecMetric
// stopCh is used to stop the goroutine which periodically flushes metrics.
stopCh <-chan struct{}
// IsStoppedCh indicates whether the goroutine is stopped. It's used in tests only to make sure
// the metric flushing goroutine is stopped so that tests can collect metrics for verification.
IsStoppedCh chan struct{}
}
func NewMetricsAsyncRecorder(bufferSize int, interval time.Duration, stopCh <-chan struct{}) *MetricAsyncRecorder {
recorder := &MetricAsyncRecorder{
bufferCh: make(chan *histogramVecMetric, bufferSize),
bufferSize: bufferSize,
interval: interval,
stopCh: stopCh,
aggregatedInflightEventMetric: make(map[gaugeVecMetricKey]int),
aggregatedInflightEventMetricLastFlushTime: time.Now(),
aggregatedInflightEventMetricBufferCh: make(chan *gaugeVecMetric, bufferSize),
IsStoppedCh: make(chan struct{}),
}
go recorder.run()
return recorder
}
// ObservePluginDurationAsync observes the plugin_execution_duration_seconds metric.
// The metric will be flushed to Prometheus asynchronously.
func (r *MetricAsyncRecorder) ObservePluginDurationAsync(extensionPoint, pluginName, status string, value float64) {
r.observeMetricAsync(PluginExecutionDuration, value, pluginName, extensionPoint, status)
}
// ObserveQueueingHintDurationAsync observes the queueing_hint_execution_duration_seconds metric.
// The metric will be flushed to Prometheus asynchronously.
func (r *MetricAsyncRecorder) ObserveQueueingHintDurationAsync(pluginName, event, hint string, value float64) {
r.observeMetricAsync(queueingHintExecutionDuration, value, pluginName, event, hint)
}
// ObserveInFlightEventsAsync observes the in_flight_events metric.
//
// Note that this function is not goroutine-safe;
// we deliberately don't lock the map for performance reasons, and we assume the queue (i.e., the caller) takes a lock before updating the in-flight events.
func (r *MetricAsyncRecorder) ObserveInFlightEventsAsync(eventLabel string, valueToAdd float64, forceFlush bool) {
r.aggregatedInflightEventMetric[gaugeVecMetricKey{metricName: InFlightEvents.Name, labelValue: eventLabel}] += int(valueToAdd)
// Only flush the metric to the channel if the interval is reached.
// The values are flushed to Prometheus in the run() function, which runs once per interval.
// Note: we implement this flushing here, not in FlushMetrics, because, if we did so, we would need to implement a lock for the map, which we want to avoid.
if forceFlush || time.Since(r.aggregatedInflightEventMetricLastFlushTime) > r.interval {
for key, value := range r.aggregatedInflightEventMetric {
newMetric := &gaugeVecMetric{
metric: InFlightEvents,
labelValues: []string{key.labelValue},
valueToAdd: float64(value),
}
select {
case r.aggregatedInflightEventMetricBufferCh <- newMetric:
default:
}
}
r.aggregatedInflightEventMetricLastFlushTime = time.Now()
// reset
r.aggregatedInflightEventMetric = make(map[gaugeVecMetricKey]int)
}
}
func (r *MetricAsyncRecorder) observeMetricAsync(m *metrics.HistogramVec, value float64, labelsValues ...string) {
newMetric := &histogramVecMetric{
metric: m,
labelValues: labelsValues,
value: value,
}
select {
case r.bufferCh <- newMetric:
default:
}
}
// run flushes buffered metrics into Prometheus once per interval.
func (r *MetricAsyncRecorder) run() {
for {
select {
case <-r.stopCh:
close(r.IsStoppedCh)
return
default:
}
r.FlushMetrics()
time.Sleep(r.interval)
}
}
// FlushMetrics tries to drain the buffered channels by reading at most bufferSize metrics from each.
func (r *MetricAsyncRecorder) FlushMetrics() {
for i := 0; i < r.bufferSize; i++ {
select {
case m := <-r.bufferCh:
m.metric.WithLabelValues(m.labelValues...).Observe(m.value)
default:
// no more value
}
select {
case m := <-r.aggregatedInflightEventMetricBufferCh:
m.metric.WithLabelValues(m.labelValues...).Add(m.valueToAdd)
default:
// no more value
}
}
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/kubernetes/pkg/features"
volumebindingmetrics "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
)
const (
// SchedulerSubsystem - subsystem name used by scheduler.
SchedulerSubsystem = "scheduler"
)
// Below are possible values for the work and operation label.
const (
// PrioritizingExtender - prioritizing extender work/operation label value.
PrioritizingExtender = "prioritizing_extender"
// Binding - binding work/operation label value.
Binding = "binding"
)
const (
GoroutineResultSuccess = "success"
GoroutineResultError = "error"
)
// ExtensionPoints is a list of possible values for the extension_point label.
var ExtensionPoints = []string{
PreFilter,
Filter,
PreFilterExtensionAddPod,
PreFilterExtensionRemovePod,
PostFilter,
PreScore,
Score,
ScoreExtensionNormalize,
PreBind,
PreBindPreFlight,
Bind,
PostBind,
Reserve,
Unreserve,
Permit,
}
const (
PreFilter = "PreFilter"
Filter = "Filter"
PreFilterExtensionAddPod = "PreFilterExtensionAddPod"
PreFilterExtensionRemovePod = "PreFilterExtensionRemovePod"
PostFilter = "PostFilter"
PreScore = "PreScore"
Score = "Score"
ScoreExtensionNormalize = "ScoreExtensionNormalize"
PreBind = "PreBind"
PreBindPreFlight = "PreBindPreFlight"
Bind = "Bind"
PostBind = "PostBind"
Reserve = "Reserve"
Unreserve = "Unreserve"
Permit = "Permit"
)
const (
QueueingHintResultQueue = "Queue"
QueueingHintResultQueueSkip = "QueueSkip"
QueueingHintResultError = "Error"
)
const (
PodPoppedInFlightEvent = "PodPopped"
)
// All the histogram based metrics have 1ms as size for the smallest bucket.
var (
scheduleAttempts *metrics.CounterVec
EventHandlingLatency *metrics.HistogramVec
schedulingLatency *metrics.HistogramVec
SchedulingAlgorithmLatency *metrics.Histogram
PreemptionVictims *metrics.Histogram
PreemptionAttempts *metrics.Counter
pendingPods *metrics.GaugeVec
InFlightEvents *metrics.GaugeVec
Goroutines *metrics.GaugeVec
PodSchedulingSLIDuration *metrics.HistogramVec
PodSchedulingAttempts *metrics.Histogram
FrameworkExtensionPointDuration *metrics.HistogramVec
PluginExecutionDuration *metrics.HistogramVec
PermitWaitDuration *metrics.HistogramVec
CacheSize *metrics.GaugeVec
unschedulableReasons *metrics.GaugeVec
PluginEvaluationTotal *metrics.CounterVec
// queueingHintExecutionDuration, together with InFlightEvents above, is only available when the QHint feature gate is enabled.
queueingHintExecutionDuration *metrics.HistogramVec
SchedulerQueueIncomingPods *metrics.CounterVec
// The below two are only available when the async-preemption feature gate is enabled.
PreemptionGoroutinesDuration *metrics.HistogramVec
PreemptionGoroutinesExecutionTotal *metrics.CounterVec
// The below are only available when the SchedulerAsyncAPICalls feature gate is enabled.
AsyncAPICallsTotal *metrics.CounterVec
AsyncAPICallDuration *metrics.HistogramVec
AsyncAPIPendingCalls *metrics.GaugeVec
// metricsList is a list of all metrics that should be registered always, regardless of any feature gate's value.
metricsList []metrics.Registerable
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
// Register the metrics.
registerMetrics.Do(func() {
InitMetrics()
RegisterMetrics(metricsList...)
volumebindingmetrics.RegisterVolumeSchedulingMetrics()
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
RegisterMetrics(queueingHintExecutionDuration, InFlightEvents)
}
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption) {
RegisterMetrics(PreemptionGoroutinesDuration, PreemptionGoroutinesExecutionTotal)
}
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerAsyncAPICalls) {
RegisterMetrics(
AsyncAPICallsTotal,
AsyncAPICallDuration,
AsyncAPIPendingCalls,
)
}
})
}
func InitMetrics() {
scheduleAttempts = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "schedule_attempts_total",
Help: "Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.",
StabilityLevel: metrics.STABLE,
}, []string{"result", "profile"})
EventHandlingLatency = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "event_handling_duration_seconds",
Help: "Event handling latency in seconds.",
// Start with 0.1ms with the last bucket being [~200ms, Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 12),
StabilityLevel: metrics.ALPHA,
}, []string{"event"})
schedulingLatency = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "scheduling_attempt_duration_seconds",
Help: "Scheduling attempt latency in seconds (scheduling algorithm + binding)",
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
StabilityLevel: metrics.STABLE,
}, []string{"result", "profile"})
SchedulingAlgorithmLatency = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "scheduling_algorithm_duration_seconds",
Help: "Scheduling algorithm latency in seconds",
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
StabilityLevel: metrics.ALPHA,
},
)
PreemptionVictims = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "preemption_victims",
Help: "Number of selected preemption victims",
// we think #victims>64 is pretty rare, therefore [64, +Inf) is considered a single bucket.
Buckets: metrics.ExponentialBuckets(1, 2, 7),
StabilityLevel: metrics.STABLE,
})
PreemptionAttempts = metrics.NewCounter(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "preemption_attempts_total",
Help: "Total preemption attempts in the cluster till now",
StabilityLevel: metrics.STABLE,
})
pendingPods = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "pending_pods",
Help: "Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulablePods that the scheduler attempted to schedule and failed; 'gated' is the number of unschedulable pods that the scheduler never attempted to schedule because they are gated.",
StabilityLevel: metrics.STABLE,
}, []string{"queue"})
InFlightEvents = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "inflight_events",
Help: "Number of events currently tracked in the scheduling queue.",
StabilityLevel: metrics.ALPHA,
}, []string{"event"})
Goroutines = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "goroutines",
Help: "Number of running goroutines split by the work they do such as binding.",
StabilityLevel: metrics.ALPHA,
}, []string{"operation"})
PodSchedulingSLIDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "pod_scheduling_sli_duration_seconds",
Help: "E2e latency for a pod being scheduled, from the time the pod enters the scheduling queue and might involve multiple scheduling attempts.",
// Start with 10ms with the last bucket being [~88m, Inf).
Buckets: metrics.ExponentialBuckets(0.01, 2, 20),
StabilityLevel: metrics.BETA,
},
[]string{"attempts"})
PodSchedulingAttempts = metrics.NewHistogram(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "pod_scheduling_attempts",
Help: "Number of attempts to successfully schedule a pod.",
Buckets: metrics.ExponentialBuckets(1, 2, 5),
StabilityLevel: metrics.STABLE,
})
FrameworkExtensionPointDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "framework_extension_point_duration_seconds",
Help: "Latency for running all plugins of a specific extension point.",
// Start with 0.1ms with the last bucket being [~200ms, Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 12),
StabilityLevel: metrics.STABLE,
},
[]string{"extension_point", "status", "profile"})
PluginExecutionDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "plugin_execution_duration_seconds",
Help: "Duration for running a plugin at a specific extension point.",
// Start with 0.01ms with the last bucket being [~22ms, Inf). We use a small factor (1.5)
// so that we have better granularity since plugin latency is very sensitive.
Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20),
StabilityLevel: metrics.ALPHA,
},
[]string{"plugin", "extension_point", "status"})
// This is only available when the QHint feature gate is enabled.
queueingHintExecutionDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "queueing_hint_execution_duration_seconds",
Help: "Duration for running a queueing hint function of a plugin.",
// Start with 0.01ms with the last bucket being [~22ms, Inf). We use a small factor (1.5)
// so that we have better granularity since plugin latency is very sensitive.
Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20),
StabilityLevel: metrics.ALPHA,
},
[]string{"plugin", "event", "hint"})
SchedulerQueueIncomingPods = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "queue_incoming_pods_total",
Help: "Number of pods added to scheduling queues by event and queue type.",
StabilityLevel: metrics.STABLE,
}, []string{"queue", "event"})
PermitWaitDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "permit_wait_duration_seconds",
Help: "Duration of waiting on permit.",
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
StabilityLevel: metrics.ALPHA,
},
[]string{"result"})
CacheSize = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "cache_size",
Help: "Number of nodes, pods, and assumed (bound) pods in the scheduler cache.",
StabilityLevel: metrics.ALPHA,
}, []string{"type"})
unschedulableReasons = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "unschedulable_pods",
Help: "The number of unschedulable pods broken down by plugin name. A pod will increment the gauge for all plugins that caused it to not schedule and so this metric have meaning only when broken down by plugin.",
StabilityLevel: metrics.ALPHA,
}, []string{"plugin", "profile"})
PluginEvaluationTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "plugin_evaluation_total",
Help: "Number of attempts to schedule pods by each plugin and the extension point (available only in PreFilter, Filter, PreScore, and Score).",
StabilityLevel: metrics.ALPHA,
}, []string{"plugin", "extension_point", "profile"})
PreemptionGoroutinesDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "preemption_goroutines_duration_seconds",
Help: "Duration in seconds for running goroutines for the preemption.",
Buckets: metrics.ExponentialBuckets(0.01, 2, 20),
StabilityLevel: metrics.ALPHA,
},
[]string{"result"})
PreemptionGoroutinesExecutionTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "preemption_goroutines_execution_total",
Help: "Number of preemption goroutines executed.",
StabilityLevel: metrics.ALPHA,
},
[]string{"result"})
// The below (AsyncAPICallsTotal, AsyncAPICallDuration and AsyncAPIPendingCalls) are only available when the SchedulerAsyncAPICalls feature gate is enabled.
AsyncAPICallsTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: SchedulerSubsystem,
Name: "async_api_call_execution_total",
Help: "Total number of API calls executed by the async dispatcher.",
StabilityLevel: metrics.ALPHA,
},
[]string{"call_type", "result"})
AsyncAPICallDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: SchedulerSubsystem,
Name: "async_api_call_execution_duration_seconds",
Help: "Duration in seconds for executing API call in the async dispatcher.",
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
StabilityLevel: metrics.ALPHA,
},
[]string{"call_type", "result"})
AsyncAPIPendingCalls = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Subsystem: SchedulerSubsystem,
Name: "pending_async_api_calls",
Help: "Number of API calls currently pending in the async queue.",
StabilityLevel: metrics.ALPHA,
},
[]string{"call_type"})
metricsList = []metrics.Registerable{
scheduleAttempts,
schedulingLatency,
SchedulingAlgorithmLatency,
EventHandlingLatency,
PreemptionVictims,
PreemptionAttempts,
pendingPods,
PodSchedulingSLIDuration,
PodSchedulingAttempts,
FrameworkExtensionPointDuration,
PluginExecutionDuration,
SchedulerQueueIncomingPods,
Goroutines,
PermitWaitDuration,
CacheSize,
unschedulableReasons,
PluginEvaluationTotal,
}
}
// RegisterMetrics registers a list of metrics.
// This function is exported because it is intended to be used by out-of-tree plugins to register their custom metrics.
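//
// A sketch for an out-of-tree plugin (metric name and help text are hypothetical):
//
//	myCounter := metrics.NewCounter(&metrics.CounterOpts{
//		Subsystem: SchedulerSubsystem,
//		Name:      "my_plugin_events_total",
//		Help:      "Events observed by a custom plugin.",
//	})
//	RegisterMetrics(myCounter)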
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
for _, metric := range extraMetrics {
legacyregistry.MustRegister(metric)
}
}
// GetGather returns the gatherer. It is used by test cases outside the current package.
func GetGather() metrics.Gatherer {
return legacyregistry.DefaultGatherer
}
// ActivePods returns the pending pods metrics with the label active
func ActivePods() metrics.GaugeMetric {
return pendingPods.With(metrics.Labels{"queue": "active"})
}
// BackoffPods returns the pending pods metrics with the label backoff
func BackoffPods() metrics.GaugeMetric {
return pendingPods.With(metrics.Labels{"queue": "backoff"})
}
// UnschedulablePods returns the pending pods metrics with the label unschedulable
func UnschedulablePods() metrics.GaugeMetric {
return pendingPods.With(metrics.Labels{"queue": "unschedulable"})
}
// GatedPods returns the pending pods metrics with the label gated
func GatedPods() metrics.GaugeMetric {
return pendingPods.With(metrics.Labels{"queue": "gated"})
}
// SinceInSeconds gets the time since the specified start in seconds.
func SinceInSeconds(start time.Time) float64 {
return time.Since(start).Seconds()
}
func UnschedulableReason(plugin string, profile string) metrics.GaugeMetric {
return unschedulableReasons.With(metrics.Labels{"plugin": plugin, "profile": profile})
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
// This file contains helpers for metrics that are associated with a profile.
var (
ScheduledResult = "scheduled"
UnschedulableResult = "unschedulable"
ErrorResult = "error"
)
// PodScheduled records a successful scheduling attempt and the duration
// since `start`.
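//
// A usage sketch (illustrative):
//
//	start := time.Now()
//	// ... run the scheduling attempt ...
//	PodScheduled("default-scheduler", SinceInSeconds(start))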
func PodScheduled(profile string, duration float64) {
observeScheduleAttemptAndLatency(ScheduledResult, profile, duration)
}
// PodUnschedulable records a scheduling attempt for an unschedulable pod
// and the duration since `start`.
func PodUnschedulable(profile string, duration float64) {
observeScheduleAttemptAndLatency(UnschedulableResult, profile, duration)
}
// PodScheduleError records a scheduling attempt that had an error and the
// duration since `start`.
func PodScheduleError(profile string, duration float64) {
observeScheduleAttemptAndLatency(ErrorResult, profile, duration)
}
func observeScheduleAttemptAndLatency(result, profile string, duration float64) {
schedulingLatency.WithLabelValues(result, profile).Observe(duration)
scheduleAttempts.WithLabelValues(result, profile).Inc()
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package profile holds the definition of a scheduling Profile.
package profile
import (
"context"
"errors"
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/events"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
// RecorderFactory builds an EventRecorder for a given scheduler name.
type RecorderFactory func(string) events.EventRecorder
// newProfile builds a Profile for the given configuration.
func newProfile(ctx context.Context, cfg config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory,
opts ...frameworkruntime.Option) (framework.Framework, error) {
recorder := recorderFact(cfg.SchedulerName)
opts = append(opts, frameworkruntime.WithEventRecorder(recorder))
return frameworkruntime.NewFramework(ctx, r, &cfg, opts...)
}
// Map holds frameworks indexed by scheduler name.
type Map map[string]framework.Framework
// NewMap builds the frameworks given by the configuration, indexed by name.
func NewMap(ctx context.Context, cfgs []config.KubeSchedulerProfile, r frameworkruntime.Registry, recorderFact RecorderFactory,
opts ...frameworkruntime.Option) (Map, error) {
m := make(Map)
v := cfgValidator{m: m}
for _, cfg := range cfgs {
p, err := newProfile(ctx, cfg, r, recorderFact, opts...)
if err != nil {
return nil, fmt.Errorf("creating profile for scheduler name %s: %v", cfg.SchedulerName, err)
}
if err := v.validate(cfg, p); err != nil {
return nil, err
}
m[cfg.SchedulerName] = p
}
return m, nil
}
// HandlesSchedulerName returns whether a profile handles the given scheduler name.
func (m Map) HandlesSchedulerName(name string) bool {
_, ok := m[name]
return ok
}
// Close closes all frameworks registered in this map.
func (m Map) Close() error {
var errs []error
for name, f := range m {
err := f.Close()
if err != nil {
errs = append(errs, fmt.Errorf("framework %s failed to close: %w", name, err))
}
}
return errors.Join(errs...)
}
// NewRecorderFactory returns a RecorderFactory for the broadcaster.
func NewRecorderFactory(b events.EventBroadcaster) RecorderFactory {
return func(name string) events.EventRecorder {
return b.NewRecorder(scheme.Scheme, name)
}
}
type cfgValidator struct {
m Map
queueSort string
queueSortArgs runtime.Object
}
func (v *cfgValidator) validate(cfg config.KubeSchedulerProfile, f framework.Framework) error {
if len(f.ProfileName()) == 0 {
return errors.New("scheduler name is needed")
}
if cfg.Plugins == nil {
return fmt.Errorf("plugins required for profile with scheduler name %q", f.ProfileName())
}
if v.m[f.ProfileName()] != nil {
return fmt.Errorf("duplicate profile with scheduler name %q", f.ProfileName())
}
queueSort := f.ListPlugins().QueueSort.Enabled[0].Name
var queueSortArgs runtime.Object
for _, plCfg := range cfg.PluginConfig {
if plCfg.Name == queueSort {
queueSortArgs = plCfg.Args
break
}
}
if len(v.queueSort) == 0 {
v.queueSort = queueSort
v.queueSortArgs = queueSortArgs
return nil
}
if v.queueSort != queueSort {
return fmt.Errorf("different queue sort plugins for profile %q: %q, first: %q", cfg.SchedulerName, queueSort, v.queueSort)
}
if diff.Diff(v.queueSortArgs, queueSortArgs) != "" {
return fmt.Errorf("different queue sort plugin args for profile %q", cfg.SchedulerName)
}
return nil
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"container/heap"
"context"
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
utiltrace "k8s.io/utils/trace"
)
const (
// Percentage of plugin metrics to be sampled.
pluginMetricsSamplePercent = 10
// minFeasibleNodesToFind is the minimum number of nodes that would be scored
// in each scheduling cycle. This is a semi-arbitrary value to ensure that a
// certain minimum of nodes are checked for feasibility. This in turn helps
// ensure a minimum level of spreading.
minFeasibleNodesToFind = 100
// minFeasibleNodesPercentageToFind is the minimum percentage of nodes that
// would be scored in each scheduling cycle. This is a semi-arbitrary value
// to ensure that a certain minimum of nodes are checked for feasibility.
// This in turn helps ensure a minimum level of spreading.
minFeasibleNodesPercentageToFind = 5
// numberOfHighestScoredNodesToReport is the number of node scores
// to be included in ScheduleResult.
numberOfHighestScoredNodesToReport = 3
)
// ScheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting.
func (sched *Scheduler) ScheduleOne(ctx context.Context) {
logger := klog.FromContext(ctx)
podInfo, err := sched.NextPod(logger)
if err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Error while retrieving next pod from scheduling queue")
return
}
// pod could be nil when schedulerQueue is closed
if podInfo == nil || podInfo.Pod == nil {
return
}
pod := podInfo.Pod
// TODO(knelasevero): Remove duplicated keys from log entry calls
// When contextualized logging hits GA
// https://github.com/kubernetes/kubernetes/issues/111672
logger = klog.LoggerWithValues(logger, "pod", klog.KObj(pod))
ctx = klog.NewContext(ctx, logger)
logger.V(4).Info("About to try and schedule pod", "pod", klog.KObj(pod))
fwk, err := sched.frameworkForPod(pod)
if err != nil {
// This shouldn't happen, because we only accept for scheduling the pods
// which specify a scheduler name that matches one of the profiles.
logger.Error(err, "Error occurred")
sched.SchedulingQueue.Done(pod.UID)
return
}
if sched.skipPodSchedule(ctx, fwk, pod) {
// We don't put this Pod back to the queue, but we have to clean up the in-flight pods/events.
sched.SchedulingQueue.Done(pod.UID)
return
}
logger.V(3).Info("Attempting to schedule pod", "pod", klog.KObj(pod))
// Synchronously attempt to find a fit for the pod.
start := time.Now()
state := framework.NewCycleState()
// For the sake of performance, scheduler does not measure and export the scheduler_plugin_execution_duration metric
// for every plugin execution in each scheduling cycle. Instead it samples a portion of scheduling cycles - percentage
// determined by pluginMetricsSamplePercent. The line below helps to randomly pick appropriate scheduling cycles.
state.SetRecordPluginMetrics(rand.Intn(100) < pluginMetricsSamplePercent)
// Initialize an empty podsToActivate struct, which will be filled up by plugins or stay empty.
podsToActivate := framework.NewPodsToActivate()
state.Write(framework.PodsToActivateKey, podsToActivate)
schedulingCycleCtx, cancel := context.WithCancel(ctx)
defer cancel()
scheduleResult, assumedPodInfo, status := sched.schedulingCycle(schedulingCycleCtx, state, fwk, podInfo, start, podsToActivate)
if !status.IsSuccess() {
sched.FailureHandler(schedulingCycleCtx, fwk, assumedPodInfo, status, scheduleResult.nominatingInfo, start)
return
}
// bind the pod to its host asynchronously (we can do this b/c of the assumption step above).
go func() {
bindingCycleCtx, cancel := context.WithCancel(ctx)
defer cancel()
metrics.Goroutines.WithLabelValues(metrics.Binding).Inc()
defer metrics.Goroutines.WithLabelValues(metrics.Binding).Dec()
status := sched.bindingCycle(bindingCycleCtx, state, fwk, scheduleResult, assumedPodInfo, start, podsToActivate)
if !status.IsSuccess() {
sched.handleBindingCycleError(bindingCycleCtx, state, fwk, assumedPodInfo, start, scheduleResult, status)
return
}
}()
}
// newFailureNominatingInfo returns the appropriate NominatingInfo for scheduling failures.
// When the NominatedNodeNameForExpectation feature is enabled, it returns nil (no clearing).
// Otherwise, it returns NominatingInfo to clear the pod's nominated node.
func (sched *Scheduler) newFailureNominatingInfo() *fwk.NominatingInfo {
if sched.nominatedNodeNameForExpectationEnabled {
return nil
}
return &fwk.NominatingInfo{NominatingMode: fwk.ModeOverride, NominatedNodeName: ""}
}
// schedulingCycle tries to schedule a single Pod.
func (sched *Scheduler) schedulingCycle(
ctx context.Context,
state fwk.CycleState,
schedFramework framework.Framework,
podInfo *framework.QueuedPodInfo,
start time.Time,
podsToActivate *framework.PodsToActivate,
) (ScheduleResult, *framework.QueuedPodInfo, *fwk.Status) {
logger := klog.FromContext(ctx)
pod := podInfo.Pod
scheduleResult, err := sched.SchedulePod(ctx, schedFramework, state, pod)
if err != nil {
defer func() {
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start))
}()
if err == ErrNoNodesAvailable {
status := fwk.NewStatus(fwk.UnschedulableAndUnresolvable).WithError(err)
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, podInfo, status
}
fitError, ok := err.(*framework.FitError)
if !ok {
logger.Error(err, "Error selecting node for pod", "pod", klog.KObj(pod))
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, podInfo, fwk.AsStatus(err)
}
// SchedulePod() may have failed because the pod would not fit on any host, so we try to
// preempt, with the expectation that the next time the pod is tried for scheduling it
// will fit due to the preemption. It is also possible that a different pod will schedule
// into the resources that were preempted, but this is harmless.
if !schedFramework.HasPostFilterPlugins() {
logger.V(3).Info("No PostFilter plugins are registered, so no preemption will be performed")
return ScheduleResult{}, podInfo, fwk.NewStatus(fwk.Unschedulable).WithError(err)
}
// Run PostFilter plugins to attempt to make the pod schedulable in a future scheduling cycle.
result, status := schedFramework.RunPostFilterPlugins(ctx, state, pod, fitError.Diagnosis.NodeToStatus)
msg := status.Message()
fitError.Diagnosis.PostFilterMsg = msg
if status.Code() == fwk.Error {
utilruntime.HandleErrorWithContext(ctx, nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg)
} else {
logger.V(5).Info("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", msg)
}
var nominatingInfo *fwk.NominatingInfo
if result != nil {
nominatingInfo = result.NominatingInfo
}
return ScheduleResult{nominatingInfo: nominatingInfo}, podInfo, fwk.NewStatus(fwk.Unschedulable).WithError(err)
}
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start))
// Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet.
// This allows us to keep scheduling without waiting on binding to occur.
assumedPodInfo := podInfo.DeepCopy()
assumedPod := assumedPodInfo.Pod
// assume modifies `assumedPod` by setting NodeName=scheduleResult.SuggestedHost
err = sched.assume(logger, assumedPod, scheduleResult.SuggestedHost)
if err != nil {
// This is most probably the result of a bug in the retry logic.
// We report an error here so that pod scheduling can be retried.
// This relies on the fact that Error will check if the pod has been bound
// to a node and if so will not add it back to the unscheduled pods queue
// (otherwise this would cause an infinite loop).
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, assumedPodInfo, fwk.AsStatus(err)
}
// Run the Reserve method of reserve plugins.
if sts := schedFramework.RunReservePluginsReserve(ctx, state, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() {
// trigger un-reserve to clean up state associated with the reserved Pod
schedFramework.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil {
utilruntime.HandleErrorWithContext(ctx, forgetErr, "Scheduler cache ForgetPod failed")
}
if sts.IsRejected() {
fitErr := &framework.FitError{
NumAllNodes: 1,
Pod: pod,
Diagnosis: framework.Diagnosis{
NodeToStatus: framework.NewDefaultNodeToStatus(),
},
}
fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, sts)
fitErr.Diagnosis.AddPluginStatus(sts)
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, assumedPodInfo, fwk.NewStatus(sts.Code()).WithError(fitErr)
}
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, assumedPodInfo, sts
}
// Run "permit" plugins.
runPermitStatus := schedFramework.RunPermitPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost)
if !runPermitStatus.IsWait() && !runPermitStatus.IsSuccess() {
// trigger un-reserve to clean up state associated with the reserved Pod
schedFramework.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil {
utilruntime.HandleErrorWithContext(ctx, forgetErr, "Scheduler cache ForgetPod failed")
}
if runPermitStatus.IsRejected() {
fitErr := &framework.FitError{
NumAllNodes: 1,
Pod: pod,
Diagnosis: framework.Diagnosis{
NodeToStatus: framework.NewDefaultNodeToStatus(),
},
}
fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, runPermitStatus)
fitErr.Diagnosis.AddPluginStatus(runPermitStatus)
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, assumedPodInfo, fwk.NewStatus(runPermitStatus.Code()).WithError(fitErr)
}
return ScheduleResult{nominatingInfo: sched.newFailureNominatingInfo()}, assumedPodInfo, runPermitStatus
}
// At the end of a successful scheduling cycle, pop and move up Pods if needed.
if len(podsToActivate.Map) != 0 {
sched.SchedulingQueue.Activate(logger, podsToActivate.Map)
// Clear the entries after activation.
podsToActivate.Map = make(map[string]*v1.Pod)
}
return scheduleResult, assumedPodInfo, nil
}
// bindingCycle tries to bind an assumed Pod.
func (sched *Scheduler) bindingCycle(
ctx context.Context,
state fwk.CycleState,
schedFramework framework.Framework,
scheduleResult ScheduleResult,
assumedPodInfo *framework.QueuedPodInfo,
start time.Time,
podsToActivate *framework.PodsToActivate) *fwk.Status {
logger := klog.FromContext(ctx)
assumedPod := assumedPodInfo.Pod
if sched.nominatedNodeNameForExpectationEnabled {
preFlightStatus := schedFramework.RunPreBindPreFlights(ctx, state, assumedPod, scheduleResult.SuggestedHost)
if preFlightStatus.Code() == fwk.Error ||
// Unschedulable status is not supported in PreBindPreFlight and hence we regard it as an error.
preFlightStatus.IsRejected() {
return preFlightStatus
}
if preFlightStatus.IsSuccess() || schedFramework.WillWaitOnPermit(ctx, assumedPod) {
// Add NominatedNodeName to tell the external components (e.g., the cluster autoscaler) that the pod is about to be bound to the node.
// We only do this when either WaitOnPermit or PreBind will do some work, because otherwise the pod will be bound soon anyway.
if err := updatePod(ctx, sched.client, schedFramework.APICacher(), assumedPod, nil, &fwk.NominatingInfo{
NominatedNodeName: scheduleResult.SuggestedHost,
NominatingMode: fwk.ModeOverride,
}); err != nil {
logger.Error(err, "Failed to update the nominated node name in the binding cycle", "pod", klog.KObj(assumedPod), "nominatedNodeName", scheduleResult.SuggestedHost)
// We continue the processing because it's not critical enough to stop binding cycles here.
}
}
}
// Run "permit" plugins.
if status := schedFramework.WaitOnPermit(ctx, assumedPod); !status.IsSuccess() {
if status.IsRejected() {
fitErr := &framework.FitError{
NumAllNodes: 1,
Pod: assumedPodInfo.Pod,
Diagnosis: framework.Diagnosis{
NodeToStatus: framework.NewDefaultNodeToStatus(),
UnschedulablePlugins: sets.New(status.Plugin()),
},
}
fitErr.Diagnosis.NodeToStatus.Set(scheduleResult.SuggestedHost, status)
return fwk.NewStatus(status.Code()).WithError(fitErr)
}
return status
}
// Any failures after this point cannot lead to the Pod being considered unschedulable.
// We define the Pod as "unschedulable" only when Pods are rejected at specific extension points, and Permit is the last one in the scheduling/binding cycle.
// If a Pod fails on PreBind or Bind, it should be moved to BackoffQ for retry.
//
// We call Done() here so that the cluster events stored in the scheduling queue can be
// freed sooner, which is worthwhile memory-wise for busy clusters.
sched.SchedulingQueue.Done(assumedPod.UID)
// Run "prebind" plugins.
if status := schedFramework.RunPreBindPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost); !status.IsSuccess() {
return status
}
// Run "bind" plugins.
if status := sched.bind(ctx, schedFramework, assumedPod, scheduleResult.SuggestedHost, state); !status.IsSuccess() {
return status
}
// Calculating node resource details can be heavy; avoid it if klog verbosity is below 2.
logger.V(2).Info("Successfully bound pod to node", "pod", klog.KObj(assumedPod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes)
metrics.PodScheduled(schedFramework.ProfileName(), metrics.SinceInSeconds(start))
metrics.PodSchedulingAttempts.Observe(float64(assumedPodInfo.Attempts))
if assumedPodInfo.InitialAttemptTimestamp != nil {
metrics.PodSchedulingSLIDuration.WithLabelValues(getAttemptsLabel(assumedPodInfo)).Observe(metrics.SinceInSeconds(*assumedPodInfo.InitialAttemptTimestamp))
}
// Run "postbind" plugins.
schedFramework.RunPostBindPlugins(ctx, state, assumedPod, scheduleResult.SuggestedHost)
// At the end of a successful binding cycle, move up Pods if needed.
if len(podsToActivate.Map) != 0 {
sched.SchedulingQueue.Activate(logger, podsToActivate.Map)
// Unlike the logic in schedulingCycle(), we don't bother deleting the entries
// as `podsToActivate.Map` is no longer consumed.
}
return nil
}
func (sched *Scheduler) handleBindingCycleError(
ctx context.Context,
state fwk.CycleState,
fwk framework.Framework,
podInfo *framework.QueuedPodInfo,
start time.Time,
scheduleResult ScheduleResult,
status *fwk.Status) {
logger := klog.FromContext(ctx)
assumedPod := podInfo.Pod
// trigger un-reserve plugins to clean up state associated with the reserved Pod
fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
if forgetErr := sched.Cache.ForgetPod(logger, assumedPod); forgetErr != nil {
utilruntime.HandleErrorWithContext(ctx, forgetErr, "scheduler cache ForgetPod failed")
} else {
// "Forget"ing an assumed Pod in binding cycle should be treated as a PodDelete event,
// as the assumed Pod had occupied a certain amount of resources in scheduler cache.
//
// Avoid moving the assumed Pod itself as it's always Unschedulable.
// It's intentional to "defer" this operation; otherwise, MoveAllToActiveOrBackoffQueue() would
// add this event to the in-flight events and thus move the assumed pod to backoffQ anyway if the plugins don't have an appropriate QueueingHint.
if status.IsRejected() {
defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, func(pod *v1.Pod) bool {
return assumedPod.UID != pod.UID
})
} else {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, nil)
}
}
sched.FailureHandler(ctx, fwk, podInfo, status, sched.newFailureNominatingInfo(), start)
}
func (sched *Scheduler) frameworkForPod(pod *v1.Pod) (framework.Framework, error) {
fwk, ok := sched.Profiles[pod.Spec.SchedulerName]
if !ok {
return nil, fmt.Errorf("profile not found for scheduler name %q", pod.Spec.SchedulerName)
}
return fwk, nil
}
// skipPodSchedule returns true if scheduling the pod can be skipped, which covers the cases below.
func (sched *Scheduler) skipPodSchedule(ctx context.Context, fwk framework.Framework, pod *v1.Pod) bool {
// Case 1: pod is being deleted.
if pod.DeletionTimestamp != nil {
fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
klog.FromContext(ctx).V(3).Info("Skip schedule deleting pod", "pod", klog.KObj(pod))
return true
}
// Case 2: pod that has been assumed could be skipped.
// An assumed pod can be added again to the scheduling queue if it got an update event
// during its previous scheduling cycle but before getting assumed.
isAssumed, err := sched.Cache.IsAssumedPod(pod)
if err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Failed to check whether pod is assumed", "pod", klog.KObj(pod))
return false
}
return isAssumed
}
// schedulePod tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with reasons.
func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name})
defer trace.LogIfLong(100 * time.Millisecond)
if err := sched.Cache.UpdateSnapshot(klog.FromContext(ctx), sched.nodeInfoSnapshot); err != nil {
return result, err
}
trace.Step("Snapshotting scheduler cache and node infos done")
if sched.nodeInfoSnapshot.NumNodes() == 0 {
return result, ErrNoNodesAvailable
}
feasibleNodes, diagnosis, err := sched.findNodesThatFitPod(ctx, fwk, state, pod)
if err != nil {
return result, err
}
trace.Step("Computing predicates done")
if len(feasibleNodes) == 0 {
return result, &framework.FitError{
Pod: pod,
NumAllNodes: sched.nodeInfoSnapshot.NumNodes(),
Diagnosis: diagnosis,
}
}
// When only one node remains after filtering, just use it.
if len(feasibleNodes) == 1 {
return ScheduleResult{
SuggestedHost: feasibleNodes[0].Node().Name,
EvaluatedNodes: 1 + diagnosis.NodeToStatus.Len(),
FeasibleNodes: 1,
}, nil
}
priorityList, err := prioritizeNodes(ctx, sched.Extenders, fwk, state, pod, feasibleNodes)
if err != nil {
return result, err
}
host, _, err := selectHost(priorityList, numberOfHighestScoredNodesToReport)
trace.Step("Prioritizing done")
return ScheduleResult{
SuggestedHost: host,
EvaluatedNodes: len(feasibleNodes) + diagnosis.NodeToStatus.Len(),
FeasibleNodes: len(feasibleNodes),
}, err
}
// findNodesThatFitPod filters the nodes to find the ones that fit the pod based on
// the framework filter plugins and filter extenders.
func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, schedFramework framework.Framework, state fwk.CycleState, pod *v1.Pod) ([]fwk.NodeInfo, framework.Diagnosis, error) {
logger := klog.FromContext(ctx)
diagnosis := framework.Diagnosis{
NodeToStatus: framework.NewDefaultNodeToStatus(),
}
allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List()
if err != nil {
return nil, diagnosis, err
}
// Run "prefilter" plugins.
preRes, s, unscheduledPlugins := schedFramework.RunPreFilterPlugins(ctx, state, pod)
diagnosis.UnschedulablePlugins = unscheduledPlugins
if !s.IsSuccess() {
if !s.IsRejected() {
return nil, diagnosis, s.AsError()
}
// All nodes in NodeToStatus will have the same status so that they can be handled during preemption.
diagnosis.NodeToStatus.SetAbsentNodesStatus(s)
// Record the messages from PreFilter in Diagnosis.PreFilterMsg.
msg := s.Message()
diagnosis.PreFilterMsg = msg
logger.V(5).Info("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg)
diagnosis.AddPluginStatus(s)
return nil, diagnosis, nil
}
// "NominatedNodeName" can potentially be set in a previous scheduling cycle as a result of preemption.
// This node is likely the only candidate that will fit the pod, and hence we try it first before iterating over all nodes.
if len(pod.Status.NominatedNodeName) > 0 {
feasibleNodes, err := sched.evaluateNominatedNode(ctx, pod, schedFramework, state, diagnosis)
if err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Evaluation failed on nominated node", "pod", klog.KObj(pod), "node", pod.Status.NominatedNodeName)
}
// Nominated node passes all the filters, scheduler is good to assign this node to the pod.
if len(feasibleNodes) != 0 {
return feasibleNodes, diagnosis, nil
}
}
nodes := allNodes
if !preRes.AllNodes() {
nodes = make([]fwk.NodeInfo, 0, len(preRes.NodeNames))
for nodeName := range preRes.NodeNames {
// preRes may return nodeName(s) which do not exist; we verify that the
// node exists in the Snapshot before using it.
if nodeInfo, err := sched.nodeInfoSnapshot.Get(nodeName); err == nil {
nodes = append(nodes, nodeInfo)
}
}
diagnosis.NodeToStatus.SetAbsentNodesStatus(fwk.NewStatus(fwk.UnschedulableAndUnresolvable, fmt.Sprintf("node(s) didn't satisfy plugin(s) %v", sets.List(unscheduledPlugins))))
}
feasibleNodes, err := sched.findNodesThatPassFilters(ctx, schedFramework, state, pod, &diagnosis, nodes)
// Always try to update sched.nextStartNodeIndex regardless of whether an error has occurred;
// this helps to make sure that all the nodes have a chance to be searched.
processedNodes := len(feasibleNodes) + diagnosis.NodeToStatus.Len()
sched.nextStartNodeIndex = (sched.nextStartNodeIndex + processedNodes) % len(allNodes)
if err != nil {
return nil, diagnosis, err
}
feasibleNodesAfterExtender, err := findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatus)
if err != nil {
return nil, diagnosis, err
}
if len(feasibleNodesAfterExtender) != len(feasibleNodes) {
// Extenders filtered out some nodes.
//
// Extenders don't support any kind of requeueing feature like EnqueueExtensions in the scheduling framework.
// When extenders reject some Nodes and the pod ends up being unschedulable,
// we put framework.ExtenderName into pInfo.UnschedulablePlugins.
// This Pod will be requeued from the unschedulable pod pool to activeQ/backoffQ
// by any kind of cluster event.
// https://github.com/kubernetes/kubernetes/issues/122019
if diagnosis.UnschedulablePlugins == nil {
diagnosis.UnschedulablePlugins = sets.New[string]()
}
diagnosis.UnschedulablePlugins.Insert(framework.ExtenderName)
}
return feasibleNodesAfterExtender, diagnosis, nil
}
func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, schedFramework framework.Framework, state fwk.CycleState, diagnosis framework.Diagnosis) ([]fwk.NodeInfo, error) {
nnn := pod.Status.NominatedNodeName
nodeInfo, err := sched.nodeInfoSnapshot.Get(nnn)
if err != nil {
return nil, err
}
node := []fwk.NodeInfo{nodeInfo}
feasibleNodes, err := sched.findNodesThatPassFilters(ctx, schedFramework, state, pod, &diagnosis, node)
if err != nil {
return nil, err
}
feasibleNodes, err = findNodesThatPassExtenders(ctx, sched.Extenders, pod, feasibleNodes, diagnosis.NodeToStatus)
if err != nil {
return nil, err
}
return feasibleNodes, nil
}
// hasScoring checks if scoring nodes is configured.
func (sched *Scheduler) hasScoring(fwk framework.Framework) bool {
if fwk.HasScorePlugins() {
return true
}
for _, extender := range sched.Extenders {
if extender.IsPrioritizer() {
return true
}
}
return false
}
// hasExtenderFilters checks if any extenders filter nodes.
func (sched *Scheduler) hasExtenderFilters() bool {
for _, extender := range sched.Extenders {
if extender.IsFilter() {
return true
}
}
return false
}
// findNodesThatPassFilters finds the nodes that fit the filter plugins.
func (sched *Scheduler) findNodesThatPassFilters(
ctx context.Context,
schedFramework framework.Framework,
state fwk.CycleState,
pod *v1.Pod,
diagnosis *framework.Diagnosis,
nodes []fwk.NodeInfo) ([]fwk.NodeInfo, error) {
numAllNodes := len(nodes)
numNodesToFind := sched.numFeasibleNodesToFind(schedFramework.PercentageOfNodesToScore(), int32(numAllNodes))
if !sched.hasExtenderFilters() && !sched.hasScoring(schedFramework) {
numNodesToFind = 1
}
// Create the feasible list with enough space up front to avoid growing it,
// and to allow assigning by index.
feasibleNodes := make([]fwk.NodeInfo, numNodesToFind)
if !schedFramework.HasFilterPlugins() {
for i := range feasibleNodes {
feasibleNodes[i] = nodes[(sched.nextStartNodeIndex+i)%numAllNodes]
}
return feasibleNodes, nil
}
errCh := parallelize.NewErrorChannel()
var feasibleNodesLen int32
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(errors.New("findNodesThatPassFilters has completed"))
type nodeStatus struct {
node string
status *fwk.Status
}
result := make([]*nodeStatus, numAllNodes)
checkNode := func(i int) {
// We check the nodes starting from where we left off in the previous scheduling cycle;
// this is to make sure all nodes have the same chance of being examined across pods.
nodeInfo := nodes[(sched.nextStartNodeIndex+i)%numAllNodes]
status := schedFramework.RunFilterPluginsWithNominatedPods(ctx, state, pod, nodeInfo)
if status.Code() == fwk.Error {
errCh.SendErrorWithCancel(status.AsError(), func() {
cancel(errors.New("some other Filter operation failed"))
})
return
}
if status.IsSuccess() {
length := atomic.AddInt32(&feasibleNodesLen, 1)
if length > numNodesToFind {
cancel(errors.New("findNodesThatPassFilters has found enough nodes"))
atomic.AddInt32(&feasibleNodesLen, -1)
} else {
feasibleNodes[length-1] = nodeInfo
}
} else {
result[i] = &nodeStatus{node: nodeInfo.Node().Name, status: status}
}
}
beginCheckNode := time.Now()
statusCode := fwk.Success
defer func() {
// We record Filter extension point latency here instead of in framework.go because framework.RunFilterPlugins
// function is called for each node, whereas we want to have an overall latency for all nodes per scheduling cycle.
// Note that this latency also includes latency for `addNominatedPods`, which calls framework.RunPreFilterAddPod.
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Filter, statusCode.String(), schedFramework.ProfileName()).Observe(metrics.SinceInSeconds(beginCheckNode))
}()
// Stop searching for more nodes once the configured number of feasible nodes
// is found.
schedFramework.Parallelizer().Until(ctx, numAllNodes, checkNode, metrics.Filter)
feasibleNodes = feasibleNodes[:feasibleNodesLen]
for _, item := range result {
if item == nil {
continue
}
diagnosis.NodeToStatus.Set(item.node, item.status)
diagnosis.AddPluginStatus(item.status)
}
if err := errCh.ReceiveError(); err != nil {
statusCode = fwk.Error
return feasibleNodes, err
}
return feasibleNodes, nil
}
// numFeasibleNodesToFind returns the number of feasible nodes that, once found, causes
// the scheduler to stop searching for more feasible nodes.
func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32, numAllNodes int32) (numNodes int32) {
if numAllNodes < minFeasibleNodesToFind {
return numAllNodes
}
// Use profile percentageOfNodesToScore if it's set. Otherwise, use global percentageOfNodesToScore.
var percentage int32
if percentageOfNodesToScore != nil {
percentage = *percentageOfNodesToScore
} else {
percentage = sched.percentageOfNodesToScore
}
if percentage == 0 {
percentage = int32(50) - numAllNodes/125
if percentage < minFeasibleNodesPercentageToFind {
percentage = minFeasibleNodesPercentageToFind
}
}
numNodes = numAllNodes * percentage / 100
if numNodes < minFeasibleNodesToFind {
return minFeasibleNodesToFind
}
return numNodes
}
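// A worked example of the adaptive formula above, assuming the package's usual
// constants minFeasibleNodesToFind = 100 and minFeasibleNodesPercentageToFind = 5
// (both defined elsewhere):
//
//	numAllNodes = 5000, percentage unset (0): percentage = 50 - 5000/125 = 10,
//	    so numNodes = 5000 * 10 / 100 = 500.
//	numAllNodes = 6000: percentage = 50 - 6000/125 = 2, clamped up to 5,
//	    so numNodes = 6000 * 5 / 100 = 300.
//	numAllNodes = 80 (below minFeasibleNodesToFind): all 80 nodes are searched.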
func findNodesThatPassExtenders(ctx context.Context, extenders []fwk.Extender, pod *v1.Pod, feasibleNodes []fwk.NodeInfo, statuses *framework.NodeToStatus) ([]fwk.NodeInfo, error) {
logger := klog.FromContext(ctx)
// Extenders are called sequentially.
// Each extender can exclude nodes from feasibleNodes, and the remaining nodes are passed
// on to the next extender, so the set can only shrink; e.g., 10 feasible nodes may become
// 6 after the first extender and 2 after the second.
for _, extender := range extenders {
if len(feasibleNodes) == 0 {
break
}
if !extender.IsInterested(pod) {
continue
}
// Status of failed nodes in failedAndUnresolvableMap will be added to <statuses>,
// so that the scheduler framework can respect the UnschedulableAndUnresolvable status for
// particular nodes, and this may eventually improve preemption efficiency.
// Note: it is recommended to configure extenders that may return UnschedulableAndUnresolvable
// status ahead of the others.
feasibleList, failedMap, failedAndUnresolvableMap, err := extender.Filter(pod, feasibleNodes)
if err != nil {
if extender.IsIgnorable() {
logger.Info("Skipping extender as it returned error and has ignorable flag set", "extender", extender, "err", err)
continue
}
return nil, err
}
for failedNodeName, failedMsg := range failedAndUnresolvableMap {
statuses.Set(failedNodeName, fwk.NewStatus(fwk.UnschedulableAndUnresolvable, failedMsg))
}
for failedNodeName, failedMsg := range failedMap {
if _, found := failedAndUnresolvableMap[failedNodeName]; found {
// failedAndUnresolvableMap takes precedence over failedMap
// note that this only happens if the extender returns the node in both maps
continue
}
statuses.Set(failedNodeName, fwk.NewStatus(fwk.Unschedulable, failedMsg))
}
feasibleNodes = feasibleList
}
return feasibleNodes, nil
}
// prioritizeNodes prioritizes the nodes by running the score plugins,
// which return a score for each node from the call to RunScorePlugins().
// The scores from each plugin are added together to make the score for that node, and then
// any extenders are run as well.
// All scores are finally combined (added) to get the total weighted score of each node.
func prioritizeNodes(
ctx context.Context,
extenders []fwk.Extender,
schedFramework framework.Framework,
state fwk.CycleState,
pod *v1.Pod,
nodes []fwk.NodeInfo,
) ([]fwk.NodePluginScores, error) {
logger := klog.FromContext(ctx)
// If no priority configs are provided, then all nodes will have a score of one.
// This is required to generate the priority list in the required format.
if len(extenders) == 0 && !schedFramework.HasScorePlugins() {
result := make([]fwk.NodePluginScores, 0, len(nodes))
for i := range nodes {
result = append(result, fwk.NodePluginScores{
Name: nodes[i].Node().Name,
TotalScore: 1,
})
}
return result, nil
}
// Run PreScore plugins.
preScoreStatus := schedFramework.RunPreScorePlugins(ctx, state, pod, nodes)
if !preScoreStatus.IsSuccess() {
return nil, preScoreStatus.AsError()
}
// Run the Score plugins.
nodesScores, scoreStatus := schedFramework.RunScorePlugins(ctx, state, pod, nodes)
if !scoreStatus.IsSuccess() {
return nil, scoreStatus.AsError()
}
// Additional details logged at level 10 if enabled.
loggerVTen := logger.V(10)
if loggerVTen.Enabled() {
for _, nodeScore := range nodesScores {
for _, pluginScore := range nodeScore.Scores {
loggerVTen.Info("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", pluginScore.Name, "node", nodeScore.Name, "score", pluginScore.Score)
}
}
}
if len(extenders) != 0 && nodes != nil {
// allNodeExtendersScores has all extenders scores for all nodes.
// It is keyed with node name.
allNodeExtendersScores := make(map[string]*fwk.NodePluginScores, len(nodes))
var mu sync.Mutex
var wg sync.WaitGroup
for i := range extenders {
if !extenders[i].IsInterested(pod) {
continue
}
wg.Add(1)
go func(extIndex int) {
metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Inc()
defer func() {
metrics.Goroutines.WithLabelValues(metrics.PrioritizingExtender).Dec()
wg.Done()
}()
prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes)
if err != nil {
// Prioritization errors from an extender can be ignored; let k8s/other extenders determine the priorities.
logger.V(5).Info("Failed to run extender's priority function. No score given by this extender.", "error", err, "pod", klog.KObj(pod), "extender", extenders[extIndex].Name())
return
}
mu.Lock()
defer mu.Unlock()
for i := range *prioritizedList {
nodename := (*prioritizedList)[i].Host
score := (*prioritizedList)[i].Score
if loggerVTen.Enabled() {
loggerVTen.Info("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", nodename, "score", score)
}
// MaxExtenderPriority may diverge from the max priority used in the scheduler and defined by MaxNodeScore,
// therefore we need to scale the score returned by extenders to the score range used by the scheduler.
finalscore := score * weight * (fwk.MaxNodeScore / extenderv1.MaxExtenderPriority)
if allNodeExtendersScores[nodename] == nil {
allNodeExtendersScores[nodename] = &fwk.NodePluginScores{
Name: nodename,
Scores: make([]fwk.PluginScore, 0, len(extenders)),
}
}
allNodeExtendersScores[nodename].Scores = append(allNodeExtendersScores[nodename].Scores, fwk.PluginScore{
Name: extenders[extIndex].Name(),
Score: finalscore,
})
allNodeExtendersScores[nodename].TotalScore += finalscore
}
}(i)
}
// Wait for all goroutines to finish.
wg.Wait()
for i := range nodesScores {
if score, ok := allNodeExtendersScores[nodes[i].Node().Name]; ok {
nodesScores[i].Scores = append(nodesScores[i].Scores, score.Scores...)
nodesScores[i].TotalScore += score.TotalScore
}
}
}
if loggerVTen.Enabled() {
for i := range nodesScores {
loggerVTen.Info("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", nodesScores[i].Name, "score", nodesScores[i].TotalScore)
}
}
return nodesScores, nil
}
var errEmptyPriorityList = errors.New("empty priorityList")
// selectHost takes a prioritized list of nodes and then picks one
// in a reservoir sampling manner from the nodes that had the highest score.
// It also returns the top {count} Nodes,
// and the top of the list will always be the selected host.
func selectHost(nodeScoreList []fwk.NodePluginScores, count int) (string, []fwk.NodePluginScores, error) {
if len(nodeScoreList) == 0 {
return "", nil, errEmptyPriorityList
}
var h nodeScoreHeap = nodeScoreList
heap.Init(&h)
cntOfMaxScore := 1
selectedIndex := 0
// The top of the heap is the NodeScoreResult with the highest score.
sortedNodeScoreList := make([]fwk.NodePluginScores, 0, count)
sortedNodeScoreList = append(sortedNodeScoreList, heap.Pop(&h).(fwk.NodePluginScores))
// This for-loop will continue until all Nodes with the highest scores get checked for a reservoir sampling,
// and sortedNodeScoreList gets (count - 1) elements.
for ns := heap.Pop(&h).(fwk.NodePluginScores); ; ns = heap.Pop(&h).(fwk.NodePluginScores) {
if ns.TotalScore != sortedNodeScoreList[0].TotalScore && len(sortedNodeScoreList) == count {
break
}
if ns.TotalScore == sortedNodeScoreList[0].TotalScore {
cntOfMaxScore++
if rand.Intn(cntOfMaxScore) == 0 {
// Replace the candidate with probability of 1/cntOfMaxScore
selectedIndex = cntOfMaxScore - 1
}
}
sortedNodeScoreList = append(sortedNodeScoreList, ns)
if h.Len() == 0 {
break
}
}
if selectedIndex != 0 {
// replace the first one with selected one
previous := sortedNodeScoreList[0]
sortedNodeScoreList[0] = sortedNodeScoreList[selectedIndex]
sortedNodeScoreList[selectedIndex] = previous
}
if len(sortedNodeScoreList) > count {
sortedNodeScoreList = sortedNodeScoreList[:count]
}
return sortedNodeScoreList[0].Name, sortedNodeScoreList, nil
}
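// A sketch of the sampling above: given total scores [50, 50, 50, 40] and count = 3,
// the three 50-score nodes are all inspected; the second and third each replace the
// current candidate with probability 1/2 and 1/3 respectively, so every top-score
// node ends up selected with probability 1/3, and the returned list carries the
// winner first, followed by the remaining top entries.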
// nodeScoreHeap is a heap of fwk.NodePluginScores.
type nodeScoreHeap []fwk.NodePluginScores
// nodeScoreHeap implements heap.Interface.
var _ heap.Interface = &nodeScoreHeap{}
func (h nodeScoreHeap) Len() int { return len(h) }
func (h nodeScoreHeap) Less(i, j int) bool { return h[i].TotalScore > h[j].TotalScore }
func (h nodeScoreHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *nodeScoreHeap) Push(x interface{}) {
*h = append(*h, x.(fwk.NodePluginScores))
}
func (h *nodeScoreHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
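// Note that Less reports the higher TotalScore first, so nodeScoreHeap is a
// max-heap: heap.Init followed by repeated heap.Pop yields nodes in descending
// score order, which is what selectHost above relies on.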
// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous.
// assume modifies `assumed`.
func (sched *Scheduler) assume(logger klog.Logger, assumed *v1.Pod, host string) error {
// Optimistically assume that the binding will succeed and send it to apiserver
// in the background.
// If the binding fails, scheduler will release resources allocated to assumed pod
// immediately.
assumed.Spec.NodeName = host
if err := sched.Cache.AssumePod(logger, assumed); err != nil {
logger.Error(err, "Scheduler cache AssumePod failed")
return err
}
// if "assumed" is a nominated pod, we should remove it from internal cache
if sched.SchedulingQueue != nil {
sched.SchedulingQueue.DeleteNominatedPodIfExists(assumed)
}
return nil
}
// bind binds a pod to a given node defined in a binding object.
// The precedence for binding is: (1) extenders and (2) framework plugins.
// We expect this to run asynchronously, so we handle binding metrics internally.
func (sched *Scheduler) bind(ctx context.Context, schedFramework framework.Framework, assumed *v1.Pod, targetNode string, state fwk.CycleState) (status *fwk.Status) {
logger := klog.FromContext(ctx)
defer func() {
sched.finishBinding(logger, schedFramework, assumed, targetNode, status)
}()
bound, err := sched.extendersBinding(logger, assumed, targetNode)
if bound {
return fwk.AsStatus(err)
}
return schedFramework.RunBindPlugins(ctx, state, assumed, targetNode)
}
// TODO(#87159): Move this to a Plugin.
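// extendersBinding attempts to bind the pod through the binder extenders. It returns
// true, together with the extender's error, once the first interested binder extender
// has handled the binding; failing extenders marked ignorable are skipped. A return of
// (false, nil) means no extender applied, and the framework bind plugins should run instead.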
func (sched *Scheduler) extendersBinding(logger klog.Logger, pod *v1.Pod, node string) (bool, error) {
for _, extender := range sched.Extenders {
if !extender.IsBinder() || !extender.IsInterested(pod) {
continue
}
err := extender.Bind(&v1.Binding{
ObjectMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name, UID: pod.UID},
Target: v1.ObjectReference{Kind: "Node", Name: node},
})
if err != nil && extender.IsIgnorable() {
logger.Info("Skipping extender in bind as it returned error and has ignorable flag set", "extender", extender, "err", err)
continue
}
return true, err
}
return false, nil
}
func (sched *Scheduler) finishBinding(logger klog.Logger, fwk framework.Framework, assumed *v1.Pod, targetNode string, status *fwk.Status) {
if finErr := sched.Cache.FinishBinding(logger, assumed); finErr != nil {
utilruntime.HandleErrorWithLogger(logger, finErr, "Scheduler cache FinishBinding failed")
}
if !status.IsSuccess() {
logger.V(1).Info("Failed to bind pod", "pod", klog.KObj(assumed))
return
}
fwk.EventRecorder().Eventf(assumed, nil, v1.EventTypeNormal, "Scheduled", "Binding", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, targetNode)
}
func getAttemptsLabel(p *framework.QueuedPodInfo) string {
// We break down the pod scheduling duration by attempts, capped to a limit
// to avoid ending up with a high-cardinality metric.
if p.Attempts >= 15 {
return "15+"
}
return strconv.Itoa(p.Attempts)
}
// handleSchedulingFailure records an event for the pod that indicates the
// pod has failed to schedule. It also updates the pod condition and the nominated node name if set.
func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *fwk.NominatingInfo, start time.Time) {
calledDone := false
defer func() {
if !calledDone {
// Basically, AddUnschedulableIfNotPresent calls Done() internally.
// But AddUnschedulableIfNotPresent isn't called in some corner cases.
// Here, we call Done() explicitly to avoid leaking the pod.
sched.SchedulingQueue.Done(podInfo.Pod.UID)
}
}()
logger := klog.FromContext(ctx)
reason := v1.PodReasonSchedulerError
if status.IsRejected() {
reason = v1.PodReasonUnschedulable
}
switch reason {
case v1.PodReasonUnschedulable:
metrics.PodUnschedulable(fwk.ProfileName(), metrics.SinceInSeconds(start))
case v1.PodReasonSchedulerError:
metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
}
pod := podInfo.Pod
err := status.AsError()
errMsg := status.Message()
if err == ErrNoNodesAvailable {
logger.V(2).Info("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod))
} else if fitError, ok := err.(*framework.FitError); ok { // Inject UnschedulablePlugins to PodInfo, which will be used later for moving Pods between queues efficiently.
podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins
podInfo.PendingPlugins = fitError.Diagnosis.PendingPlugins
logger.V(2).Info("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", errMsg)
} else {
utilruntime.HandleErrorWithContext(ctx, err, "Error scheduling pod; retrying", "pod", klog.KObj(pod))
}
// Check if the Pod exists in informer cache.
podLister := fwk.SharedInformerFactory().Core().V1().Pods().Lister()
cachedPod, e := podLister.Pods(pod.Namespace).Get(pod.Name)
if e != nil {
logger.Info("Pod doesn't exist in informer cache", "pod", klog.KObj(pod), "err", e)
// We need to call DonePod here because we don't call AddUnschedulableIfNotPresent in this case.
} else {
// In the case of an extender, the pod may have been bound successfully, but the response to the scheduler timed out.
// This could result in the live version carrying .spec.nodeName, which is inconsistent with the internally queued version.
if len(cachedPod.Spec.NodeName) != 0 {
logger.Info("Pod has been assigned to node. Abort adding it back to queue.", "pod", klog.KObj(pod), "node", cachedPod.Spec.NodeName)
// We need to call DonePod here because we don't call AddUnschedulableIfNotPresent in this case.
} else {
// As <cachedPod> is from the SharedInformer, we need to do a DeepCopy() here.
// Ignore this error since the apiserver doesn't properly validate affinity terms
// and we can't fix the validation for backwards compatibility.
podInfo.PodInfo, _ = framework.NewPodInfo(cachedPod.DeepCopy())
if err := sched.SchedulingQueue.AddUnschedulableIfNotPresent(logger, podInfo, sched.SchedulingQueue.SchedulingCycle()); err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Error occurred")
}
calledDone = true
}
}
// Update the scheduling queue with the nominated pod information. Without
// this, there would be a race condition between the next scheduling cycle
// and the time the scheduler receives a Pod Update for the nominated pod.
// Here we check for nil only for tests.
if sched.SchedulingQueue != nil {
sched.SchedulingQueue.AddNominatedPod(logger, podInfo.PodInfo, nominatingInfo)
}
if err == nil {
// Only tests can reach here.
return
}
msg := truncateMessage(errMsg)
fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
if err := updatePod(ctx, sched.client, fwk.APICacher(), pod, &v1.PodCondition{
Type: v1.PodScheduled,
ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(&pod.Status, pod.Generation, v1.PodScheduled),
Status: v1.ConditionFalse,
Reason: reason,
Message: errMsg,
}, nominatingInfo); err != nil {
utilruntime.HandleErrorWithContext(ctx, err, "Error updating pod", "pod", klog.KObj(pod))
}
}
// truncateMessage truncates a message if it hits the NoteLengthLimit.
func truncateMessage(message string) string {
max := validation.NoteLengthLimit
if len(message) <= max {
return message
}
suffix := " ..."
return message[:max-len(suffix)] + suffix
}
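// For example, assuming validation.NoteLengthLimit is 1024, a 2000-character message
// is truncated to its first 1020 characters followed by " ...".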
func updatePod(ctx context.Context, client clientset.Interface, apiCacher fwk.APICacher, pod *v1.Pod, condition *v1.PodCondition, nominatingInfo *fwk.NominatingInfo) error {
if apiCacher != nil {
// When API cacher is available, use it to patch the status.
_, err := apiCacher.PatchPodStatus(pod, condition, nominatingInfo)
return err
}
logger := klog.FromContext(ctx)
logValues := []any{"pod", klog.KObj(pod)}
if condition != nil {
logValues = append(logValues, "conditionType", condition.Type, "conditionStatus", condition.Status, "conditionReason", condition.Reason)
}
if nominatingInfo != nil {
logValues = append(logValues, "nominatedNodeName", nominatingInfo.NominatedNodeName, "nominatingMode", nominatingInfo.Mode())
}
logger.V(3).Info("Updating pod condition and nominated node name", logValues...)
podStatusCopy := pod.Status.DeepCopy()
// NominatedNodeName is updated only if we are trying to set it, and the value is
// different from the existing one.
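// (A nil nominatingInfo is expected to report a no-op mode from Mode(), so it never
// triggers a NominatedNodeName update.)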
nnnNeedsUpdate := nominatingInfo.Mode() == fwk.ModeOverride && pod.Status.NominatedNodeName != nominatingInfo.NominatedNodeName
podConditionNeedsUpdate := condition != nil && podutil.UpdatePodCondition(podStatusCopy, condition)
if !podConditionNeedsUpdate && !nnnNeedsUpdate {
return nil
}
if nnnNeedsUpdate {
podStatusCopy.NominatedNodeName = nominatingInfo.NominatedNodeName
}
return util.PatchPodStatus(ctx, client, pod.Name, pod.Namespace, &pod.Status, podStatusCopy)
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"context"
"errors"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
apicache "k8s.io/kubernetes/pkg/scheduler/backend/api_cache"
apidispatcher "k8s.io/kubernetes/pkg/scheduler/backend/api_dispatcher"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
cachedebugger "k8s.io/kubernetes/pkg/scheduler/backend/cache/debugger"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
apicalls "k8s.io/kubernetes/pkg/scheduler/framework/api_calls"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/profile"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
"k8s.io/utils/clock"
)
const (
// Duration the scheduler will wait before expiring an assumed pod.
// See issue #106361 for more details about this parameter and its value.
durationToExpireAssumedPod time.Duration = 0
)
// ErrNoNodesAvailable is used to describe the error that no nodes are available to schedule pods.
var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
// Scheduler watches for new unscheduled pods. It attempts to find
// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
// It is expected that changes made via Cache will be observed
// by NodeLister and Algorithm.
Cache internalcache.Cache
Extenders []fwk.Extender
// NextPod should be a function that blocks until the next pod
// is available. We don't use a channel for this, because scheduling
// a pod may take some amount of time and we don't want pods to get
// stale while they sit in a channel.
NextPod func(logger klog.Logger) (*framework.QueuedPodInfo, error)
// FailureHandler is called upon a scheduling failure.
FailureHandler FailureHandlerFn
// SchedulePod tries to schedule the given pod to one of the nodes in the node list.
// Return a struct of ScheduleResult with the name of suggested host on success,
// otherwise will return a FitError with reasons.
SchedulePod func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error)
// Close this to shut down the scheduler.
StopEverything <-chan struct{}
// SchedulingQueue holds pods to be scheduled
SchedulingQueue internalqueue.SchedulingQueue
// If possible, indirect operation on the APIDispatcher, e.g. through the SchedulingQueue, is preferred.
// It is nil iff the SchedulerAsyncAPICalls feature gate is disabled.
// In-tree usages should not add calls to the APIDispatcher directly;
// framework.APICache should be used instead.
APIDispatcher *apidispatcher.APIDispatcher
// Profiles are the scheduling profiles.
Profiles profile.Map
client clientset.Interface
nodeInfoSnapshot *internalcache.Snapshot
percentageOfNodesToScore int32
nextStartNodeIndex int
// logger *must* be initialized when creating a Scheduler,
// otherwise logging functions will access a nil sink and
// panic.
logger klog.Logger
// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
registeredHandlers []cache.ResourceEventHandlerRegistration
nominatedNodeNameForExpectationEnabled bool
}
func (sched *Scheduler) applyDefaultHandlers() {
sched.SchedulePod = sched.schedulePod
sched.FailureHandler = sched.handleSchedulingFailure
}
type schedulerOptions struct {
clock clock.WithTicker
componentConfigVersion string
kubeConfig *restclient.Config
// Overridden by profile level percentageOfNodesToScore if set in v1.
percentageOfNodesToScore int32
podInitialBackoffSeconds int64
podMaxBackoffSeconds int64
podMaxInUnschedulablePodsDuration time.Duration
// Contains out-of-tree plugins to be merged with the in-tree registry.
frameworkOutOfTreeRegistry frameworkruntime.Registry
profiles []schedulerapi.KubeSchedulerProfile
extenders []schedulerapi.Extender
frameworkCapturer FrameworkCapturer
parallelism int32
applyDefaultProfile bool
}
// Option configures a Scheduler
type Option func(*schedulerOptions)
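// A minimal usage sketch (hypothetical caller) showing how functional options compose
// when constructing a Scheduler; the option values here are illustrative only:
//
//	sched, err := scheduler.New(ctx, client, informerFactory, dynInformerFactory, recorderFactory,
//	    scheduler.WithParallelism(32),
//	    scheduler.WithPodMaxBackoffSeconds(20),
//	)
//	if err != nil {
//	    // handle the construction error
//	}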
// ScheduleResult represents the result of scheduling a pod.
type ScheduleResult struct {
// Name of the selected node.
SuggestedHost string
// The number of nodes the scheduler evaluated the pod against in the filtering
// phase and beyond.
EvaluatedNodes int
// The number of nodes out of the evaluated ones that fit the pod.
FeasibleNodes int
// The nominating info for scheduling cycle.
nominatingInfo *fwk.NominatingInfo
}
// WithComponentConfigVersion sets the component config version to the
// KubeSchedulerConfiguration version used. The string should be the full
// scheme group/version of the external type we converted from (for example
// "kubescheduler.config.k8s.io/v1")
func WithComponentConfigVersion(apiVersion string) Option {
return func(o *schedulerOptions) {
o.componentConfigVersion = apiVersion
}
}
// WithKubeConfig sets the kube config for Scheduler.
func WithKubeConfig(cfg *restclient.Config) Option {
return func(o *schedulerOptions) {
o.kubeConfig = cfg
}
}
// WithProfiles sets profiles for Scheduler. By default, there is one profile
// with the name "default-scheduler".
func WithProfiles(p ...schedulerapi.KubeSchedulerProfile) Option {
return func(o *schedulerOptions) {
o.profiles = p
o.applyDefaultProfile = false
}
}
// WithParallelism sets the parallelism for all scheduler algorithms. Default is 16.
func WithParallelism(threads int32) Option {
return func(o *schedulerOptions) {
o.parallelism = threads
}
}
// WithPercentageOfNodesToScore sets percentageOfNodesToScore for Scheduler.
// The default value of 0 will use an adaptive percentage: 50 - (num of nodes)/125.
func WithPercentageOfNodesToScore(percentageOfNodesToScore *int32) Option {
return func(o *schedulerOptions) {
if percentageOfNodesToScore != nil {
o.percentageOfNodesToScore = *percentageOfNodesToScore
}
}
}
// WithFrameworkOutOfTreeRegistry sets the registry for out-of-tree plugins. Those plugins
// will be appended to the default registry.
func WithFrameworkOutOfTreeRegistry(registry frameworkruntime.Registry) Option {
return func(o *schedulerOptions) {
o.frameworkOutOfTreeRegistry = registry
}
}
// WithPodInitialBackoffSeconds sets podInitialBackoffSeconds for the Scheduler; the default value is 1.
func WithPodInitialBackoffSeconds(podInitialBackoffSeconds int64) Option {
return func(o *schedulerOptions) {
o.podInitialBackoffSeconds = podInitialBackoffSeconds
}
}
// WithPodMaxBackoffSeconds sets podMaxBackoffSeconds for the Scheduler; the default value is 10.
func WithPodMaxBackoffSeconds(podMaxBackoffSeconds int64) Option {
return func(o *schedulerOptions) {
o.podMaxBackoffSeconds = podMaxBackoffSeconds
}
}
// WithPodMaxInUnschedulablePodsDuration sets podMaxInUnschedulablePodsDuration for PriorityQueue.
func WithPodMaxInUnschedulablePodsDuration(duration time.Duration) Option {
return func(o *schedulerOptions) {
o.podMaxInUnschedulablePodsDuration = duration
}
}
// WithExtenders sets extenders for the Scheduler
func WithExtenders(e ...schedulerapi.Extender) Option {
return func(o *schedulerOptions) {
o.extenders = e
}
}
// WithClock sets the clock for the PriorityQueue; the default clock is clock.RealClock.
func WithClock(clock clock.WithTicker) Option {
return func(o *schedulerOptions) {
o.clock = clock
}
}
// FrameworkCapturer is used for registering a notify function when building a framework.
type FrameworkCapturer func(schedulerapi.KubeSchedulerProfile)
// WithBuildFrameworkCapturer sets a notify function for getting buildFramework details.
func WithBuildFrameworkCapturer(fc FrameworkCapturer) Option {
return func(o *schedulerOptions) {
o.frameworkCapturer = fc
}
}
var defaultSchedulerOptions = schedulerOptions{
clock: clock.RealClock{},
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
podInitialBackoffSeconds: int64(internalqueue.DefaultPodInitialBackoffDuration.Seconds()),
podMaxBackoffSeconds: int64(internalqueue.DefaultPodMaxBackoffDuration.Seconds()),
podMaxInUnschedulablePodsDuration: internalqueue.DefaultPodMaxInUnschedulablePodsDuration,
parallelism: int32(parallelize.DefaultParallelism),
// Ideally we would statically set the default profile here, but we can't because
// creating the default profile may require testing feature gates, which may get
// set dynamically in tests. Therefore, we delay creating it until New is actually
// invoked.
applyDefaultProfile: true,
}
// New returns a Scheduler
func New(ctx context.Context,
client clientset.Interface,
informerFactory informers.SharedInformerFactory,
dynInformerFactory dynamicinformer.DynamicSharedInformerFactory,
recorderFactory profile.RecorderFactory,
opts ...Option) (*Scheduler, error) {
logger := klog.FromContext(ctx)
stopEverything := ctx.Done()
options := defaultSchedulerOptions
for _, opt := range opts {
opt(&options)
}
if options.applyDefaultProfile {
var versionedCfg configv1.KubeSchedulerConfiguration
scheme.Scheme.Default(&versionedCfg)
cfg := schedulerapi.KubeSchedulerConfiguration{}
if err := scheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
options.profiles = cfg.Profiles
}
registry := frameworkplugins.NewInTreeRegistry()
if err := registry.Merge(options.frameworkOutOfTreeRegistry); err != nil {
return nil, err
}
metrics.Register()
extenders, err := buildExtenders(logger, options.extenders, options.profiles)
if err != nil {
return nil, fmt.Errorf("couldn't build extenders: %w", err)
}
podLister := informerFactory.Core().V1().Pods().Lister()
nodeLister := informerFactory.Core().V1().Nodes().Lister()
snapshot := internalcache.NewEmptySnapshot()
metricsRecorder := metrics.NewMetricsAsyncRecorder(1000, time.Second, stopEverything)
// waitingPods holds all the pods that are in the scheduler and waiting in the permit stage
waitingPods := frameworkruntime.NewWaitingPodsMap()
var resourceClaimCache *assumecache.AssumeCache
var resourceSliceTracker *resourceslicetracker.Tracker
var draManager fwk.SharedDRAManager
if feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
resourceClaimInformer := informerFactory.Resource().V1().ResourceClaims().Informer()
resourceClaimCache = assumecache.NewAssumeCache(logger, resourceClaimInformer, "ResourceClaim", "", nil)
resourceSliceTrackerOpts := resourceslicetracker.Options{
EnableDeviceTaints: feature.DefaultFeatureGate.Enabled(features.DRADeviceTaints),
EnableConsumableCapacity: feature.DefaultFeatureGate.Enabled(features.DRAConsumableCapacity),
SliceInformer: informerFactory.Resource().V1().ResourceSlices(),
KubeClient: client,
}
// If device taints are disabled, the additional informers are not needed and
// the tracker turns into a simple wrapper around the slice informer.
if resourceSliceTrackerOpts.EnableDeviceTaints {
resourceSliceTrackerOpts.TaintInformer = informerFactory.Resource().V1alpha3().DeviceTaintRules()
resourceSliceTrackerOpts.ClassInformer = informerFactory.Resource().V1().DeviceClasses()
}
resourceSliceTracker, err = resourceslicetracker.StartTracker(ctx, resourceSliceTrackerOpts)
if err != nil {
return nil, fmt.Errorf("couldn't start resource slice tracker: %w", err)
}
draManager = dynamicresources.NewDRAManager(ctx, resourceClaimCache, resourceSliceTracker, informerFactory)
}
var apiDispatcher *apidispatcher.APIDispatcher
if feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncAPICalls) {
apiDispatcher = apidispatcher.New(client, int(options.parallelism), apicalls.Relevances)
}
profiles, err := profile.NewMap(ctx, options.profiles, registry, recorderFactory,
frameworkruntime.WithComponentConfigVersion(options.componentConfigVersion),
frameworkruntime.WithClientSet(client),
frameworkruntime.WithKubeConfig(options.kubeConfig),
frameworkruntime.WithInformerFactory(informerFactory),
frameworkruntime.WithSharedDRAManager(draManager),
frameworkruntime.WithSnapshotSharedLister(snapshot),
frameworkruntime.WithCaptureProfile(frameworkruntime.CaptureProfile(options.frameworkCapturer)),
frameworkruntime.WithParallelism(int(options.parallelism)),
frameworkruntime.WithExtenders(extenders),
frameworkruntime.WithMetricsRecorder(metricsRecorder),
frameworkruntime.WithWaitingPods(waitingPods),
frameworkruntime.WithAPIDispatcher(apiDispatcher),
)
if err != nil {
return nil, fmt.Errorf("initializing profiles: %v", err)
}
if len(profiles) == 0 {
return nil, errors.New("at least one profile is required")
}
preEnqueuePluginMap := make(map[string]map[string]fwk.PreEnqueuePlugin)
queueingHintsPerProfile := make(internalqueue.QueueingHintMapPerProfile)
var returnErr error
for profileName, profile := range profiles {
plugins := profile.PreEnqueuePlugins()
preEnqueuePluginMap[profileName] = make(map[string]fwk.PreEnqueuePlugin, len(plugins))
for _, plugin := range plugins {
preEnqueuePluginMap[profileName][plugin.Name()] = plugin
}
queueingHintsPerProfile[profileName], err = buildQueueingHintMap(ctx, profile.EnqueueExtensions())
if err != nil {
returnErr = errors.Join(returnErr, err)
}
}
if returnErr != nil {
return nil, returnErr
}
podQueue := internalqueue.NewSchedulingQueue(
profiles[options.profiles[0].SchedulerName].QueueSortFunc(),
informerFactory,
internalqueue.WithClock(options.clock),
internalqueue.WithPodInitialBackoffDuration(time.Duration(options.podInitialBackoffSeconds)*time.Second),
internalqueue.WithPodMaxBackoffDuration(time.Duration(options.podMaxBackoffSeconds)*time.Second),
internalqueue.WithPodLister(podLister),
internalqueue.WithPodMaxInUnschedulablePodsDuration(options.podMaxInUnschedulablePodsDuration),
internalqueue.WithPreEnqueuePluginMap(preEnqueuePluginMap),
internalqueue.WithQueueingHintMapPerProfile(queueingHintsPerProfile),
internalqueue.WithPluginMetricsSamplePercent(pluginMetricsSamplePercent),
internalqueue.WithMetricsRecorder(metricsRecorder),
internalqueue.WithAPIDispatcher(apiDispatcher),
)
schedulerCache := internalcache.New(ctx, durationToExpireAssumedPod, apiDispatcher)
var apiCache fwk.APICacher
if apiDispatcher != nil {
apiCache = apicache.New(podQueue, schedulerCache)
}
for _, fwk := range profiles {
fwk.SetPodNominator(podQueue)
fwk.SetPodActivator(podQueue)
fwk.SetAPICacher(apiCache)
}
// Setup cache debugger.
debugger := cachedebugger.New(nodeLister, podLister, schedulerCache, podQueue)
debugger.ListenForSignal(ctx)
sched := &Scheduler{
Cache: schedulerCache,
client: client,
nodeInfoSnapshot: snapshot,
percentageOfNodesToScore: options.percentageOfNodesToScore,
Extenders: extenders,
StopEverything: stopEverything,
SchedulingQueue: podQueue,
Profiles: profiles,
logger: logger,
APIDispatcher: apiDispatcher,
nominatedNodeNameForExpectationEnabled: feature.DefaultFeatureGate.Enabled(features.NominatedNodeNameForExpectation),
}
sched.NextPod = podQueue.Pop
sched.applyDefaultHandlers()
if err = addAllEventHandlers(sched, informerFactory, dynInformerFactory, resourceClaimCache, resourceSliceTracker, unionedGVKs(queueingHintsPerProfile)); err != nil {
return nil, fmt.Errorf("adding event handlers: %w", err)
}
return sched, nil
}
// defaultQueueingHintFn is the default queueing hint function.
// It always returns Queue as the queueing hint.
var defaultQueueingHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (fwk.QueueingHint, error) {
return fwk.Queue, nil
}
func buildQueueingHintMap(ctx context.Context, es []fwk.EnqueueExtensions) (internalqueue.QueueingHintMap, error) {
queueingHintMap := make(internalqueue.QueueingHintMap)
var returnErr error
for _, e := range es {
events, err := e.EventsToRegister(ctx)
if err != nil {
returnErr = errors.Join(returnErr, err)
}
// This will happen when a plugin registers with empty events; it's usually the case that a pod
// becomes reschedulable only on self-update, e.g. with the schedulingGates plugin, where the pod
// enters the activeQ via priorityQueue.Update().
if len(events) == 0 {
continue
}
// Note: Rarely, a plugin implements EnqueueExtensions but returns nil.
// We treat it as: the plugin is not interested in any event, and hence a pod rejected by that plugin
// cannot be moved by any regular cluster event.
// So, we can just ignore such EventsToRegister here.
registerNodeAdded := false
registerNodeTaintUpdated := false
for _, event := range events {
fn := event.QueueingHintFn
if fn == nil || !feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
fn = defaultQueueingHintFn
}
if event.Event.Resource == fwk.Node {
if event.Event.ActionType&fwk.Add != 0 {
registerNodeAdded = true
}
if event.Event.ActionType&fwk.UpdateNodeTaint != 0 {
registerNodeTaintUpdated = true
}
}
queueingHintMap[event.Event] = append(queueingHintMap[event.Event], &internalqueue.QueueingHintFunction{
PluginName: e.Name(),
QueueingHintFn: fn,
})
}
if registerNodeAdded && !registerNodeTaintUpdated {
// Temporary fix for the issue https://github.com/kubernetes/kubernetes/issues/109437
// NodeAdded QueueingHint isn't always called because of preCheck.
// It's definitely not something expected for plugin developers,
// and registering the UpdateNodeTaint event is the only mitigation for now.
//
// So, here we register the UpdateNodeTaint event for plugins that have a NodeAdded event but no UpdateNodeTaint event.
// It hurts requeueing efficiency, but that is a lot better than some Pods being stuck in the
// unschedulable pod pool.
// This behavior will be removed when we remove the preCheck feature.
// See: https://github.com/kubernetes/kubernetes/issues/110175
queueingHintMap[fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.UpdateNodeTaint}] =
append(queueingHintMap[fwk.ClusterEvent{Resource: fwk.Node, ActionType: fwk.UpdateNodeTaint}],
&internalqueue.QueueingHintFunction{
PluginName: e.Name(),
QueueingHintFn: defaultQueueingHintFn,
},
)
}
}
if returnErr != nil {
return nil, returnErr
}
return queueingHintMap, nil
}
// Run begins watching and scheduling. It starts scheduling and blocks until the context is done.
func (sched *Scheduler) Run(ctx context.Context) {
logger := klog.FromContext(ctx)
sched.SchedulingQueue.Run(logger)
if sched.APIDispatcher != nil {
sched.APIDispatcher.Run(logger)
}
// We need to start the scheduleOne loop in a dedicated goroutine,
// because scheduleOne hangs on getting the next item
// from the SchedulingQueue.
// If there are no new pods to schedule, it will hang there,
// and if run in this goroutine it would block closing the
// SchedulingQueue, in effect causing a deadlock on shutdown.
go wait.UntilWithContext(ctx, sched.ScheduleOne, 0)
<-ctx.Done()
if sched.APIDispatcher != nil {
sched.APIDispatcher.Close()
}
sched.SchedulingQueue.Close()
// If the plugins satisfy the io.Closer interface, they are closed.
err := sched.Profiles.Close()
if err != nil {
logger.Error(err, "Failed to close plugins")
}
}
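// Hypothetical caller sketch: Run blocks until ctx is cancelled, so it is
// usually launched with a cancellable context; cancelling ctx triggers the
// shutdown sequence above (dispatcher, queue, then plugin Closers).
//
//	ctx, cancel := context.WithCancel(context.Background())
//	go sched.Run(ctx)
//	// ... later ...
//	cancel()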
// NewInformerFactory creates a SharedInformerFactory and initializes a scheduler-specific
// in-place podInformer.
func NewInformerFactory(cs clientset.Interface, resyncPeriod time.Duration) informers.SharedInformerFactory {
informerFactory := informers.NewSharedInformerFactory(cs, resyncPeriod)
informerFactory.InformerFor(&v1.Pod{}, newPodInformer)
return informerFactory
}
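// Usage sketch, assuming "cs" is an already-configured clientset.Interface:
// the factory must be started and synced before the scheduler relies on it.
//
//	factory := NewInformerFactory(cs, 0)
//	factory.Start(ctx.Done())
//	factory.WaitForCacheSync(ctx.Done())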
func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profiles []schedulerapi.KubeSchedulerProfile) ([]fwk.Extender, error) {
var fExtenders []fwk.Extender
if len(extenders) == 0 {
return nil, nil
}
var ignoredExtendedResources []string
var ignorableExtenders []fwk.Extender
for i := range extenders {
logger.V(2).Info("Creating extender", "extender", extenders[i])
extender, err := NewHTTPExtender(&extenders[i])
if err != nil {
return nil, err
}
if !extender.IsIgnorable() {
fExtenders = append(fExtenders, extender)
} else {
ignorableExtenders = append(ignorableExtenders, extender)
}
for _, r := range extenders[i].ManagedResources {
if r.IgnoredByScheduler {
ignoredExtendedResources = append(ignoredExtendedResources, r.Name)
}
}
}
// place ignorable extenders at the tail of the extenders list
fExtenders = append(fExtenders, ignorableExtenders...)
// If there are any extended resources found from the Extenders, append them to the pluginConfig for each profile.
// This should only have an effect on ComponentConfig, where it is possible to configure Extenders and
// plugin args (and in which case the extender ignored resources take precedence).
if len(ignoredExtendedResources) == 0 {
return fExtenders, nil
}
for i := range profiles {
prof := &profiles[i]
var found = false
for k := range prof.PluginConfig {
if prof.PluginConfig[k].Name == noderesources.Name {
// Update the existing args
pc := &prof.PluginConfig[k]
args, ok := pc.Args.(*schedulerapi.NodeResourcesFitArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", pc.Args)
}
args.IgnoredResources = ignoredExtendedResources
found = true
break
}
}
if !found {
return nil, fmt.Errorf("can't find NodeResourcesFitArgs in plugin config")
}
}
return fExtenders, nil
}
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *fwk.Status, nominatingInfo *fwk.NominatingInfo, start time.Time)
func unionedGVKs(queueingHintsPerProfile internalqueue.QueueingHintMapPerProfile) map[fwk.EventResource]fwk.ActionType {
gvkMap := make(map[fwk.EventResource]fwk.ActionType)
for _, queueingHints := range queueingHintsPerProfile {
for evt := range queueingHints {
if _, ok := gvkMap[evt.Resource]; ok {
gvkMap[evt.Resource] |= evt.ActionType
} else {
gvkMap[evt.Resource] = evt.ActionType
}
}
}
return gvkMap
}
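// Minimal sketch of the bitwise union performed by unionedGVKs: ActionType
// values are flag bits, so merging two events on the same resource is a
// single OR. exampleUnion is hypothetical and only demonstrates the arithmetic.
func exampleUnion() fwk.ActionType {
	var at fwk.ActionType
	at |= fwk.Add             // first event on the resource
	at |= fwk.UpdateNodeTaint // second event on the same resource
	// Both bits are now set: at&fwk.Add != 0 and at&fwk.UpdateNodeTaint != 0.
	return at
}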
// newPodInformer creates a shared index informer that returns only non-terminal pods.
// The PodInformer allows indexers to be added, but only non-conflicting indexers are allowed.
func newPodInformer(cs clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
selector := fmt.Sprintf("status.phase!=%v,status.phase!=%v", v1.PodSucceeded, v1.PodFailed)
tweakListOptions := func(options *metav1.ListOptions) {
options.FieldSelector = selector
}
informer := coreinformers.NewFilteredPodInformer(cs, metav1.NamespaceAll, resyncPeriod, cache.Indexers{}, tweakListOptions)
// Dropping `.metadata.managedFields` to improve memory usage.
// The Extract workflow (i.e. `ExtractPod`) should be unused.
trim := func(obj interface{}) (interface{}, error) {
if accessor, err := meta.Accessor(obj); err == nil {
if accessor.GetManagedFields() != nil {
accessor.SetManagedFields(nil)
}
}
return obj, nil
}
informer.SetTransform(trim)
return informer
}
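// Illustrative sketch of what the trim transform above does before an object
// is stored: managedFields are dropped in place, everything else is kept.
// exampleTrim is hypothetical and mirrors the transform's body.
func exampleTrim() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:          "p",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}
	if accessor, err := meta.Accessor(pod); err == nil {
		accessor.SetManagedFields(nil)
	}
	// pod.ManagedFields is now nil; the informer caches the smaller object.
}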
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package assumecache
import (
"errors"
"fmt"
"strconv"
"sync"
"k8s.io/klog/v2"
"k8s.io/utils/buffer"
"k8s.io/apimachinery/pkg/api/meta"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// Informer is the subset of [cache.SharedInformer] that NewAssumeCache depends upon.
type Informer interface {
AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
}
// AddTestObject adds an object to the assume cache.
// Only use this for unit testing!
func AddTestObject(cache *AssumeCache, obj interface{}) {
cache.add(obj)
}
// UpdateTestObject updates an object in the assume cache.
// Only use this for unit testing!
func UpdateTestObject(cache *AssumeCache, obj interface{}) {
cache.update(nil, obj)
}
// DeleteTestObject deletes an object from the assume cache.
// Only use this for unit testing!
func DeleteTestObject(cache *AssumeCache, obj interface{}) {
cache.delete(obj)
}
// Sentinel errors that can be checked for with errors.Is.
var (
ErrWrongType = errors.New("object has wrong type")
ErrNotFound = errors.New("object not found")
ErrObjectName = errors.New("cannot determine object name")
)
type WrongTypeError struct {
TypeName string
Object interface{}
}
func (e WrongTypeError) Error() string {
return fmt.Sprintf("could not convert object to type %v: %+v", e.TypeName, e.Object)
}
func (e WrongTypeError) Is(err error) bool {
return err == ErrWrongType
}
type NotFoundError struct {
TypeName string
ObjectKey string
}
func (e NotFoundError) Error() string {
return fmt.Sprintf("could not find %v %q", e.TypeName, e.ObjectKey)
}
func (e NotFoundError) Is(err error) bool {
return err == ErrNotFound
}
type ObjectNameError struct {
DetailedErr error
}
func (e ObjectNameError) Error() string {
return fmt.Sprintf("failed to get object name: %v", e.DetailedErr)
}
func (e ObjectNameError) Is(err error) bool {
return err == ErrObjectName
}
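// Sketch of how callers match these typed errors against the sentinels:
// errors.Is consults the Is methods above, so a NotFoundError compares equal
// to ErrNotFound without being the same value. exampleErrorMatch is hypothetical.
func exampleErrorMatch() bool {
	var err error = &NotFoundError{TypeName: "pod", ObjectKey: "ns/name"}
	return errors.Is(err, ErrNotFound) // true
}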
// AssumeCache is a cache on top of the informer that allows for updating
// objects outside of informer events and also restoring the informer
// cache's version of the object. Objects are assumed to be
// Kubernetes API objects that are supported by [meta.Accessor].
//
// Objects can be referenced via their key, with [cache.MetaNamespaceKeyFunc]
// as key function.
//
// AssumeCache stores two pointers to represent a single object:
// - The pointer to the informer object.
// - The pointer to the latest object, which could be the same as
// the informer object, or an in-memory object.
//
// An informer update always overrides the latest object pointer.
//
// Assume() only updates the latest object pointer.
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
type AssumeCache struct {
// The logger that was chosen when setting up the cache.
// Will be used for all operations.
logger klog.Logger
// Synchronizes updates to all fields below.
rwMutex sync.RWMutex
// All registered event handlers.
eventHandlers []cache.ResourceEventHandler
handlerRegistration cache.ResourceEventHandlerRegistration
// The eventQueue contains functions which deliver an event to one
// event handler.
//
// These functions must be invoked while *not locking* rwMutex because
// the event handlers are allowed to access the assume cache. Holding
// rwMutex then would cause a deadlock.
//
// New functions get added as part of processing a cache update while
// the rwMutex is locked. Each function which adds something to the queue
// also drains the queue before returning, therefore it is guaranteed
// that all event handlers get notified immediately (useful for unit
// testing).
//
// A channel cannot be used here because it cannot have an unbounded
// capacity. This could lead to a deadlock (writer holds rwMutex,
// gets blocked because capacity is exhausted, reader is in a handler
// which tries to lock the rwMutex). Writing into such a channel
// while not holding the rwMutex doesn't work because in-order delivery
// of events would no longer be guaranteed.
eventQueue buffer.Ring[func()]
// describes the object stored
description string
// Stores objInfo pointers
store cache.Indexer
// Index function for object
indexFunc cache.IndexFunc
indexName string
}
type objInfo struct {
// name of the object
name string
// Latest version of object could be cached-only or from informer
latestObj interface{}
// Latest object from informer
apiObj interface{}
}
func objInfoKeyFunc(obj interface{}) (string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return "", &WrongTypeError{TypeName: "objInfo", Object: obj}
}
return objInfo.name, nil
}
func (c *AssumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return []string{""}, &WrongTypeError{TypeName: "objInfo", Object: obj}
}
return c.indexFunc(objInfo.latestObj)
}
// NewAssumeCache creates an assume cache for general objects.
func NewAssumeCache(logger klog.Logger, informer Informer, description, indexName string, indexFunc cache.IndexFunc) *AssumeCache {
c := &AssumeCache{
logger: logger,
description: description,
indexFunc: indexFunc,
indexName: indexName,
eventQueue: *buffer.NewRing[func()](buffer.RingOptions{InitialSize: 0, NormalSize: 4}),
}
indexers := cache.Indexers{}
if indexName != "" && indexFunc != nil {
indexers[indexName] = c.objInfoIndexFunc
}
c.store = cache.NewIndexer(objInfoKeyFunc, indexers)
// Unit tests don't use informers
if informer != nil {
// Cannot fail in practice?! No-one bothers checking the error.
c.handlerRegistration, _ = informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.add,
UpdateFunc: c.update,
DeleteFunc: c.delete,
},
)
}
return c
}
func (c *AssumeCache) add(obj interface{}) {
if obj == nil {
return
}
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, &ObjectNameError{err}, "Add failed")
return
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
var oldObj interface{}
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, err, "Add failed: couldn't get object version")
return
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, err, "Add failed: couldn't get stored object version")
return
}
// Only update object if version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
c.logger.V(10).Info("Skip adding object to assume cache because version is not newer than storedVersion", "description", c.description, "cacheKey", name, "newVersion", newVersion, "storedVersion", storedVersion)
return
}
oldObj = objInfo.latestObj
}
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
if err = c.store.Update(objInfo); err != nil {
c.logger.Info("Error occurred while updating stored object", "err", err)
} else {
c.logger.V(10).Info("Adding object to assume cache", "description", c.description, "cacheKey", name, "assumeCache", obj)
c.pushEvent(oldObj, obj)
}
}
func (c *AssumeCache) update(oldObj interface{}, newObj interface{}) {
c.add(newObj)
}
func (c *AssumeCache) delete(obj interface{}) {
if obj == nil {
return
}
name, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, &ObjectNameError{err}, "Failed to delete")
return
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
var oldObj interface{}
if len(c.eventHandlers) > 0 {
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
oldObj = objInfo.latestObj
}
}
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, err, "Failed to delete", "description", c.description, "cacheKey", name)
}
c.pushEvent(oldObj, nil)
}
// pushEvent gets called while the mutex is locked for writing.
// It ensures that all currently registered event handlers get
// notified about a change when the caller starts delivering
// those with emitEvents.
//
// For a delete event, newObj is nil. For an add, oldObj is nil.
// An update has both as non-nil.
func (c *AssumeCache) pushEvent(oldObj, newObj interface{}) {
for _, handler := range c.eventHandlers {
handler := handler
if oldObj == nil {
c.eventQueue.WriteOne(func() {
handler.OnAdd(newObj, false)
})
} else if newObj == nil {
c.eventQueue.WriteOne(func() {
handler.OnDelete(oldObj)
})
} else {
c.eventQueue.WriteOne(func() {
handler.OnUpdate(oldObj, newObj)
})
}
}
}
func (c *AssumeCache) getObjVersion(name string, obj interface{}) (int64, error) {
objAccessor, err := meta.Accessor(obj)
if err != nil {
return -1, err
}
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
if err != nil {
//nolint:errorlint // Intentionally not wrapping the error, the underlying error is an implementation detail.
return -1, fmt.Errorf("error parsing ResourceVersion %q for %v %q: %v", objAccessor.GetResourceVersion(), c.description, name, err)
}
return objResourceVersion, nil
}
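// Sketch of the numeric comparison this enables: ResourceVersions are parsed
// as integers, so an informer resync that redelivers version 43 while version
// 43 is already stored is skipped by add() (newVersion <= storedVersion), and
// an assumed object is never clobbered by an equal or older version.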
func (c *AssumeCache) getObjInfo(key string) (*objInfo, error) {
obj, ok, err := c.store.GetByKey(key)
if err != nil {
return nil, err
}
if !ok {
return nil, &NotFoundError{TypeName: c.description, ObjectKey: key}
}
objInfo, ok := obj.(*objInfo)
if !ok {
return nil, &WrongTypeError{"objInfo", obj}
}
return objInfo, nil
}
// Get the object by its key.
func (c *AssumeCache) Get(key string) (interface{}, error) {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(key)
if err != nil {
return nil, err
}
return objInfo.latestObj, nil
}
// GetAPIObj gets the informer cache's version of the object by its key.
func (c *AssumeCache) GetAPIObj(key string) (interface{}, error) {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(key)
if err != nil {
return nil, err
}
return objInfo.apiObj, nil
}
// List all the objects in the cache.
func (c *AssumeCache) List(indexObj interface{}) []interface{} {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
return c.listLocked(indexObj)
}
func (c *AssumeCache) listLocked(indexObj interface{}) []interface{} {
allObjs := []interface{}{}
var objs []interface{}
if c.indexName != "" {
o, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
utilruntime.HandleErrorWithLogger(c.logger, err, "List index error")
return nil
}
objs = o
} else {
objs = c.store.List()
}
for _, obj := range objs {
objInfo, ok := obj.(*objInfo)
if !ok {
utilruntime.HandleErrorWithLogger(c.logger, &WrongTypeError{TypeName: "objInfo", Object: obj}, "List error")
continue
}
allObjs = append(allObjs, objInfo.latestObj)
}
return allObjs
}
// Assume updates the object in-memory only.
//
// The version of the object must be greater or equal to
// the current object, otherwise an error is returned.
//
// Storing an object with the same version is supported
// by the assume cache, but suffers from a race: if an
// update is received via the informer while such an
// object is assumed, it gets dropped in favor of the
// newer object from the apiserver.
//
// Only assuming objects that were returned by an apiserver
// operation (Update, Patch) is safe.
func (c *AssumeCache) Assume(obj interface{}) error {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return &ObjectNameError{err}
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(name)
if err != nil {
return err
}
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
return err
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
return err
}
if newVersion < storedVersion {
return fmt.Errorf("%v %q is out of sync (stored: %d, assume: %d)", c.description, name, storedVersion, newVersion)
}
c.pushEvent(objInfo.latestObj, obj)
// Only update the cached object
objInfo.latestObj = obj
c.logger.V(4).Info("Assumed object", "description", c.description, "cacheKey", name, "version", newVersion)
return nil
}
// Restore the informer cache's version of the object.
func (c *AssumeCache) Restore(objName string) {
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
// This could be expected if object got deleted
c.logger.V(5).Info("Restore object", "description", c.description, "cacheKey", objName, "err", err)
} else {
if objInfo.latestObj != objInfo.apiObj {
c.pushEvent(objInfo.latestObj, objInfo.apiObj)
objInfo.latestObj = objInfo.apiObj
}
c.logger.V(4).Info("Restored object", "description", c.description, "cacheKey", objName)
}
}
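// Hypothetical end-to-end sketch of the assume/restore flow, using the
// unit-test helpers above with a nil informer. "informerObj" and "assumedObj"
// stand in for two versions of the same object; their ResourceVersions must
// parse as integers.
//
//	c := NewAssumeCache(logger, nil, "pod", "", nil)
//	AddTestObject(c, informerObj)   // latestObj == apiObj == informerObj
//	_ = c.Assume(assumedObj)        // latestObj == assumedObj, apiObj unchanged
//	c.Restore("ns/name")            // latestObj reset to informerObj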
// AddEventHandler adds an event handler to the cache. Events to a
// single handler are delivered sequentially, but there is no
// coordination between different handlers. A handler may use the
// cache.
//
// The return value can be used to wait for cache synchronization.
func (c *AssumeCache) AddEventHandler(handler cache.ResourceEventHandler) cache.ResourceEventHandlerRegistration {
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
c.eventHandlers = append(c.eventHandlers, handler)
allObjs := c.listLocked(nil)
for _, obj := range allObjs {
c.eventQueue.WriteOne(func() {
handler.OnAdd(obj, true)
})
}
if c.handlerRegistration == nil {
// No informer, so immediately synced.
return syncedHandlerRegistration{}
}
return c.handlerRegistration
}
// emitEvents delivers all pending events that are in the queue, in the order
// in which they were stored there (FIFO).
func (c *AssumeCache) emitEvents() {
for {
c.rwMutex.Lock()
deliver, ok := c.eventQueue.ReadOne()
c.rwMutex.Unlock()
if !ok {
return
}
func() {
defer utilruntime.HandleCrash()
deliver()
}()
}
}
// syncedHandlerRegistration is an implementation of ResourceEventHandlerRegistration
// which always returns true.
type syncedHandlerRegistration struct{}
func (syncedHandlerRegistration) HasSynced() bool { return true }
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"encoding/json"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
// GetPodFullName returns a name that uniquely identifies a pod.
func GetPodFullName(pod *v1.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format).
return pod.Name + "_" + pod.Namespace
}
// GetPodStartTime returns start time of the given pod or current timestamp
// if it hasn't started yet.
func GetPodStartTime(pod *v1.Pod) *metav1.Time {
if pod.Status.StartTime != nil {
return pod.Status.StartTime
}
// Assumed pods and bound pods that haven't started don't have a StartTime yet.
return &metav1.Time{Time: time.Now()}
}
// GetEarliestPodStartTime returns the earliest start time of all pods that
// have the highest priority among all victims.
func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
if len(victims.Pods) == 0 {
// should not reach here.
klog.Background().Error(nil, "victims.Pods is empty. Should not reach here")
return nil
}
earliestPodStartTime := GetPodStartTime(victims.Pods[0])
maxPriority := corev1helpers.PodPriority(victims.Pods[0])
for _, pod := range victims.Pods {
if podPriority := corev1helpers.PodPriority(pod); podPriority == maxPriority {
if podStartTime := GetPodStartTime(pod); podStartTime.Before(earliestPodStartTime) {
earliestPodStartTime = podStartTime
}
} else if podPriority > maxPriority {
maxPriority = podPriority
earliestPodStartTime = GetPodStartTime(pod)
}
}
return earliestPodStartTime
}
// MoreImportantPod returns true when the priority of the first pod is higher than
// the second one. If the two pods' priorities are equal, compare their StartTime,
// treating the older pod as more important.
func MoreImportantPod(pod1, pod2 *v1.Pod) bool {
p1 := corev1helpers.PodPriority(pod1)
p2 := corev1helpers.PodPriority(pod2)
if p1 != p2 {
return p1 > p2
}
return GetPodStartTime(pod1).Before(GetPodStartTime(pod2))
}
// Retriable defines the retriable errors during a scheduling cycle.
func Retriable(err error) bool {
return apierrors.IsInternalError(err) || apierrors.IsServiceUnavailable(err) ||
net.IsConnectionRefused(err)
}
// PatchPodStatus calculates the delta bytes change from <old.Status> to <newStatus>,
// and then submits a request to the API server to patch the pod's status.
func PatchPodStatus(ctx context.Context, cs kubernetes.Interface, name string, namespace string, oldStatus *v1.PodStatus, newStatus *v1.PodStatus) error {
if newStatus == nil {
return nil
}
oldData, err := json.Marshal(v1.Pod{Status: *oldStatus})
if err != nil {
return err
}
newData, err := json.Marshal(v1.Pod{Status: *newStatus})
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
if err != nil {
return fmt.Errorf("failed to create merge patch for pod %q/%q: %w", namespace, name, err)
}
if "{}" == string(patchBytes) {
return nil
}
patchFn := func() error {
_, err := cs.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err
}
return retry.OnError(retry.DefaultBackoff, Retriable, patchFn)
}
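// Minimal sketch of the strategic-merge-patch step used above: diffing two
// Pod statuses yields only the changed bytes, and a "{}" result means there
// is nothing to send. examplePatchBytes is hypothetical.
func examplePatchBytes() ([]byte, error) {
	oldData, err := json.Marshal(v1.Pod{Status: v1.PodStatus{Phase: v1.PodPending}})
	if err != nil {
		return nil, err
	}
	newData, err := json.Marshal(v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}})
	if err != nil {
		return nil, err
	}
	// Yields a patch touching only status.phase.
	return strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
}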
// DeletePod deletes the given <pod> from API server
func DeletePod(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod) error {
return cs.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
}
// IsScalarResourceName returns true if the given resource name is an Extended, Hugepages, prefixed-Native, or AttachableVolume resource
func IsScalarResourceName(name v1.ResourceName) bool {
return v1helper.IsExtendedResourceName(name) || v1helper.IsHugePageResourceName(name) ||
v1helper.IsPrefixedNativeResource(name) || v1helper.IsAttachableVolumeResourceName(name)
}
// As converts two objects to the given type.
// Both objects must be of the same type. If not, an error is returned.
// nil objects are allowed and will be converted to nil.
// For oldObj, cache.DeletedFinalStateUnknown is handled and the
// object stored in it will be converted instead.
func As[T any](oldObj, newobj interface{}) (T, T, error) {
var oldTyped T
var newTyped T
var ok bool
if newobj != nil {
newTyped, ok = newobj.(T)
if !ok {
return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", newTyped, newobj)
}
}
if oldObj != nil {
if realOldObj, ok := oldObj.(cache.DeletedFinalStateUnknown); ok {
oldObj = realOldObj.Obj
}
oldTyped, ok = oldObj.(T)
if !ok {
return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", oldTyped, oldObj)
}
}
return oldTyped, newTyped, nil
}
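// Usage sketch for As in an event handler, converting untyped cache objects
// to *v1.Pod; DeletedFinalStateUnknown tombstones are unwrapped for the old
// object as documented above. examplePodCast is hypothetical.
func examplePodCast(oldObj, newObj interface{}) (*v1.Pod, *v1.Pod, error) {
	return As[*v1.Pod](oldObj, newObj)
}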
// GetHostPorts returns the used host ports of pod containers and
// initContainers with restartPolicy: Always.
func GetHostPorts(pod *v1.Pod) []v1.ContainerPort {
var ports []v1.ContainerPort
if pod == nil {
return ports
}
hostPort := func(p v1.ContainerPort) bool {
return p.HostPort > 0
}
for _, c := range pod.Spec.InitContainers {
// Only consider initContainers that will be running the entire
// duration of the Pod.
if c.RestartPolicy == nil || *c.RestartPolicy != v1.ContainerRestartPolicyAlways {
continue
}
for _, p := range c.Ports {
if !hostPort(p) {
continue
}
ports = append(ports, p)
}
}
for _, c := range pod.Spec.Containers {
for _, p := range c.Ports {
if !hostPort(p) {
continue
}
ports = append(ports, p)
}
}
return ports
}
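// Illustrative sketch: a sidecar-style init container (restartPolicy Always)
// contributes its host port, while a plain init container would be skipped.
// exampleHostPorts is hypothetical.
func exampleHostPorts() []v1.ContainerPort {
	always := v1.ContainerRestartPolicyAlways
	pod := &v1.Pod{Spec: v1.PodSpec{
		InitContainers: []v1.Container{{
			RestartPolicy: &always,
			Ports:         []v1.ContainerPort{{HostPort: 8080}},
		}},
		Containers: []v1.Container{{
			Ports: []v1.ContainerPort{{HostPort: 9090}},
		}},
	}}
	return GetHostPorts(pod) // ports 8080 and 9090
}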
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apparmor
import (
"strings"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// isRequired checks whether AppArmor is required for the pod to run. AppArmor is considered
// required if any non-unconfined profiles are specified.
func isRequired(pod *v1.Pod) bool {
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.AppArmorProfile != nil &&
pod.Spec.SecurityContext.AppArmorProfile.Type != v1.AppArmorProfileTypeUnconfined {
return true
}
inUse := !podutil.VisitContainers(&pod.Spec, podutil.AllContainers, func(c *v1.Container, _ podutil.ContainerType) bool {
if c.SecurityContext != nil && c.SecurityContext.AppArmorProfile != nil &&
c.SecurityContext.AppArmorProfile.Type != v1.AppArmorProfileTypeUnconfined {
return false // is in use; short-circuit
}
return true
})
if inUse {
return true
}
for key, value := range pod.Annotations {
if strings.HasPrefix(key, v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix) {
return value != v1.DeprecatedAppArmorBetaProfileNameUnconfined
}
}
return false
}
// GetProfile returns the AppArmor profile to use with the container.
func GetProfile(pod *v1.Pod, container *v1.Container) *v1.AppArmorProfile {
if container.SecurityContext != nil && container.SecurityContext.AppArmorProfile != nil {
return container.SecurityContext.AppArmorProfile
}
// Static pods may not have had annotations synced to fields, so fall back to annotations before
// the pod profile.
if profile := getProfileFromPodAnnotations(pod.Annotations, container.Name); profile != nil {
return profile
}
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.AppArmorProfile != nil {
return pod.Spec.SecurityContext.AppArmorProfile
}
return nil
}
// getProfileFromPodAnnotations gets the AppArmor profile to use with container from
// (deprecated) pod annotations.
func getProfileFromPodAnnotations(annotations map[string]string, containerName string) *v1.AppArmorProfile {
val, ok := annotations[v1.DeprecatedAppArmorBetaContainerAnnotationKeyPrefix+containerName]
if !ok {
return nil
}
switch {
case val == v1.DeprecatedAppArmorBetaProfileRuntimeDefault:
return &v1.AppArmorProfile{Type: v1.AppArmorProfileTypeRuntimeDefault}
case val == v1.DeprecatedAppArmorBetaProfileNameUnconfined:
return &v1.AppArmorProfile{Type: v1.AppArmorProfileTypeUnconfined}
case strings.HasPrefix(val, v1.DeprecatedAppArmorBetaProfileNamePrefix):
// Note: an invalid empty localhost profile will be rejected by kubelet admission.
profileName := strings.TrimPrefix(val, v1.DeprecatedAppArmorBetaProfileNamePrefix)
return &v1.AppArmorProfile{
Type: v1.AppArmorProfileTypeLocalhost,
LocalhostProfile: &profileName,
}
default:
// Invalid annotation.
return nil
}
}
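// Sketch of the three deprecated annotation values handled above (assuming
// the usual beta constants "runtime/default", "unconfined", and the
// "localhost/" prefix):
//
//	"runtime/default"      -> Type: RuntimeDefault
//	"unconfined"           -> Type: Unconfined
//	"localhost/my-profile" -> Type: Localhost, LocalhostProfile: "my-profile"
//	anything else          -> nil (invalid annotation)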
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apparmor
import (
"errors"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/third_party/forked/libcontainer/apparmor"
)
// Whether AppArmor should be disabled by default.
// Set to true if the wrong build tags are set (see validate_disabled.go).
var isDisabledBuild bool
// Validator is an interface for validating that a pod with an AppArmor profile can be run by a Node.
type Validator interface {
Validate(pod *v1.Pod) error
ValidateHost() error
}
// NewValidator creates a Validator, probing the host's AppArmor support once at construction.
func NewValidator() Validator {
if err := validateHost(); err != nil {
return &validator{validateHostErr: err}
}
return &validator{}
}
type validator struct {
validateHostErr error
}
func (v *validator) Validate(pod *v1.Pod) error {
if !isRequired(pod) {
return nil
}
if v.ValidateHost() != nil {
return v.validateHostErr
}
var retErr error
podutil.VisitContainers(&pod.Spec, podutil.AllContainers, func(container *v1.Container, containerType podutil.ContainerType) bool {
profile := GetProfile(pod, container)
if profile == nil {
return true
}
// TODO(#64841): This would ideally be part of validation.ValidateAppArmorProfileFormat, but
// that is called for API validation, and this is tightening validation.
if profile.Type == v1.AppArmorProfileTypeLocalhost {
if profile.LocalhostProfile == nil || strings.TrimSpace(*profile.LocalhostProfile) == "" {
retErr = fmt.Errorf("invalid empty AppArmor profile name: %q", profile)
return false
}
}
return true
})
return retErr
}
// ValidateHost verifies that the host and runtime are capable of enforcing AppArmor profiles.
// Note, this intentionally only checks the host at kubelet startup and never re-evaluates it,
// as the expectation is that a kubelet restart is needed to enable or disable AppArmor support.
func (v *validator) ValidateHost() error {
return v.validateHostErr
}
// validateHost verifies that the host and runtime are capable of enforcing AppArmor profiles.
func validateHost() error {
// Check build support.
if isDisabledBuild {
return errors.New("binary not compiled for linux")
}
// Check kernel support.
if !apparmor.IsEnabled() {
return errors.New("AppArmor is not enabled on the host")
}
return nil
}
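// Hypothetical usage in a kubelet-style admission path: the host is probed
// once in NewValidator, and every per-pod Validate call reuses the cached
// result.
//
//	v := NewValidator()
//	if err := v.Validate(pod); err != nil {
//		// reject the pod: the host cannot enforce the requested profile
//	}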
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package securitycontext
import (
"reflect"
api "k8s.io/kubernetes/pkg/apis/core"
)
// PodSecurityContextAccessor allows reading the values of a PodSecurityContext object
type PodSecurityContextAccessor interface {
HostNetwork() bool
HostPID() bool
HostIPC() bool
HostUsers() *bool
SELinuxOptions() *api.SELinuxOptions
RunAsUser() *int64
RunAsGroup() *int64
RunAsNonRoot() *bool
SeccompProfile() *api.SeccompProfile
SupplementalGroups() []int64
FSGroup() *int64
FSGroupChangePolicy() *api.PodFSGroupChangePolicy
}
// PodSecurityContextMutator allows reading and writing the values of a PodSecurityContext object
type PodSecurityContextMutator interface {
PodSecurityContextAccessor
SetHostNetwork(bool)
SetHostPID(bool)
SetHostIPC(bool)
SetHostUsers(*bool)
SetSELinuxOptions(*api.SELinuxOptions)
SetRunAsUser(*int64)
SetRunAsGroup(*int64)
SetRunAsNonRoot(*bool)
SetSeccompProfile(*api.SeccompProfile)
SetSupplementalGroups([]int64)
SetFSGroup(*int64)
SetFSGroupChangePolicy(*api.PodFSGroupChangePolicy)
// PodSecurityContext returns the current PodSecurityContext object
PodSecurityContext() *api.PodSecurityContext
}
// NewPodSecurityContextAccessor returns an accessor for the given pod security context.
// May be initialized with a nil PodSecurityContext.
func NewPodSecurityContextAccessor(podSC *api.PodSecurityContext) PodSecurityContextAccessor {
return &podSecurityContextWrapper{podSC: podSC}
}
// NewPodSecurityContextMutator returns a mutator for the given pod security context.
// May be initialized with a nil PodSecurityContext.
func NewPodSecurityContextMutator(podSC *api.PodSecurityContext) PodSecurityContextMutator {
return &podSecurityContextWrapper{podSC: podSC}
}
type podSecurityContextWrapper struct {
podSC *api.PodSecurityContext
}
func (w *podSecurityContextWrapper) PodSecurityContext() *api.PodSecurityContext {
return w.podSC
}
func (w *podSecurityContextWrapper) ensurePodSC() {
if w.podSC == nil {
w.podSC = &api.PodSecurityContext{}
}
}
func (w *podSecurityContextWrapper) HostNetwork() bool {
if w.podSC == nil {
return false
}
return w.podSC.HostNetwork
}
func (w *podSecurityContextWrapper) SetHostNetwork(v bool) {
if w.podSC == nil && v == false {
return
}
w.ensurePodSC()
w.podSC.HostNetwork = v
}
func (w *podSecurityContextWrapper) HostPID() bool {
if w.podSC == nil {
return false
}
return w.podSC.HostPID
}
func (w *podSecurityContextWrapper) SetHostPID(v bool) {
if w.podSC == nil && v == false {
return
}
w.ensurePodSC()
w.podSC.HostPID = v
}
func (w *podSecurityContextWrapper) HostIPC() bool {
if w.podSC == nil {
return false
}
return w.podSC.HostIPC
}
func (w *podSecurityContextWrapper) SetHostIPC(v bool) {
if w.podSC == nil && v == false {
return
}
w.ensurePodSC()
w.podSC.HostIPC = v
}
func (w *podSecurityContextWrapper) HostUsers() *bool {
if w.podSC == nil {
return nil
}
return w.podSC.HostUsers
}
func (w *podSecurityContextWrapper) SetHostUsers(v *bool) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.HostUsers = v
}
func (w *podSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions {
if w.podSC == nil {
return nil
}
return w.podSC.SELinuxOptions
}
func (w *podSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.SELinuxOptions = v
}
func (w *podSecurityContextWrapper) RunAsUser() *int64 {
if w.podSC == nil {
return nil
}
return w.podSC.RunAsUser
}
func (w *podSecurityContextWrapper) SetRunAsUser(v *int64) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.RunAsUser = v
}
func (w *podSecurityContextWrapper) RunAsGroup() *int64 {
if w.podSC == nil {
return nil
}
return w.podSC.RunAsGroup
}
func (w *podSecurityContextWrapper) SetRunAsGroup(v *int64) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.RunAsGroup = v
}
func (w *podSecurityContextWrapper) RunAsNonRoot() *bool {
if w.podSC == nil {
return nil
}
return w.podSC.RunAsNonRoot
}
func (w *podSecurityContextWrapper) SetRunAsNonRoot(v *bool) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.RunAsNonRoot = v
}
func (w *podSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
if w.podSC == nil {
return nil
}
return w.podSC.SeccompProfile
}
func (w *podSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if w.podSC == nil && p == nil {
return
}
w.ensurePodSC()
w.podSC.SeccompProfile = p
}
func (w *podSecurityContextWrapper) SupplementalGroups() []int64 {
if w.podSC == nil {
return nil
}
return w.podSC.SupplementalGroups
}
func (w *podSecurityContextWrapper) SetSupplementalGroups(v []int64) {
if w.podSC == nil && len(v) == 0 {
return
}
w.ensurePodSC()
if len(v) == 0 && len(w.podSC.SupplementalGroups) == 0 {
return
}
w.podSC.SupplementalGroups = v
}
func (w *podSecurityContextWrapper) FSGroup() *int64 {
if w.podSC == nil {
return nil
}
return w.podSC.FSGroup
}
func (w *podSecurityContextWrapper) SetFSGroup(v *int64) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.FSGroup = v
}
func (w *podSecurityContextWrapper) FSGroupChangePolicy() *api.PodFSGroupChangePolicy {
if w.podSC == nil {
return nil
}
return w.podSC.FSGroupChangePolicy
}
func (w *podSecurityContextWrapper) SetFSGroupChangePolicy(v *api.PodFSGroupChangePolicy) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.FSGroupChangePolicy = v
}
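// Sketch of the nil-tolerant wrapper behavior: setting a zero value on a nil
// context is a no-op, while a non-zero value allocates the struct on demand.
// exampleWrapper is hypothetical.
func exampleWrapper() *api.PodSecurityContext {
	m := NewPodSecurityContextMutator(nil)
	m.SetHostNetwork(false)       // no-op: the context stays nil
	m.SetHostNetwork(true)        // allocates the PodSecurityContext and sets the field
	return m.PodSecurityContext() // non-nil, HostNetwork == true
}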
// ContainerSecurityContextAccessor allows reading the values of a SecurityContext object
type ContainerSecurityContextAccessor interface {
Capabilities() *api.Capabilities
Privileged() *bool
ProcMount() api.ProcMountType
SELinuxOptions() *api.SELinuxOptions
RunAsUser() *int64
RunAsGroup() *int64
RunAsNonRoot() *bool
ReadOnlyRootFilesystem() *bool
SeccompProfile() *api.SeccompProfile
AllowPrivilegeEscalation() *bool
}
// ContainerSecurityContextMutator allows reading and writing the values of a SecurityContext object
type ContainerSecurityContextMutator interface {
ContainerSecurityContextAccessor
ContainerSecurityContext() *api.SecurityContext
SetCapabilities(*api.Capabilities)
SetPrivileged(*bool)
SetSELinuxOptions(*api.SELinuxOptions)
SetRunAsUser(*int64)
SetRunAsGroup(*int64)
SetRunAsNonRoot(*bool)
SetReadOnlyRootFilesystem(*bool)
SetSeccompProfile(*api.SeccompProfile)
SetAllowPrivilegeEscalation(*bool)
}
// NewContainerSecurityContextAccessor returns an accessor for the provided container security context
// May be initialized with a nil SecurityContext
func NewContainerSecurityContextAccessor(containerSC *api.SecurityContext) ContainerSecurityContextAccessor {
return &containerSecurityContextWrapper{containerSC: containerSC}
}
// NewContainerSecurityContextMutator returns a mutator for the provided container security context
// May be initialized with a nil SecurityContext
func NewContainerSecurityContextMutator(containerSC *api.SecurityContext) ContainerSecurityContextMutator {
return &containerSecurityContextWrapper{containerSC: containerSC}
}
type containerSecurityContextWrapper struct {
containerSC *api.SecurityContext
}
func (w *containerSecurityContextWrapper) ContainerSecurityContext() *api.SecurityContext {
return w.containerSC
}
func (w *containerSecurityContextWrapper) ensureContainerSC() {
if w.containerSC == nil {
w.containerSC = &api.SecurityContext{}
}
}
func (w *containerSecurityContextWrapper) Capabilities() *api.Capabilities {
if w.containerSC == nil {
return nil
}
return w.containerSC.Capabilities
}
func (w *containerSecurityContextWrapper) SetCapabilities(v *api.Capabilities) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.Capabilities = v
}
func (w *containerSecurityContextWrapper) Privileged() *bool {
if w.containerSC == nil {
return nil
}
return w.containerSC.Privileged
}
func (w *containerSecurityContextWrapper) SetPrivileged(v *bool) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.Privileged = v
}
func (w *containerSecurityContextWrapper) ProcMount() api.ProcMountType {
if w.containerSC == nil {
return api.DefaultProcMount
}
if w.containerSC.ProcMount == nil {
return api.DefaultProcMount
}
return *w.containerSC.ProcMount
}
func (w *containerSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions {
if w.containerSC == nil {
return nil
}
return w.containerSC.SELinuxOptions
}
func (w *containerSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.SELinuxOptions = v
}
func (w *containerSecurityContextWrapper) RunAsUser() *int64 {
if w.containerSC == nil {
return nil
}
return w.containerSC.RunAsUser
}
func (w *containerSecurityContextWrapper) SetRunAsUser(v *int64) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.RunAsUser = v
}
func (w *containerSecurityContextWrapper) RunAsGroup() *int64 {
if w.containerSC == nil {
return nil
}
return w.containerSC.RunAsGroup
}
func (w *containerSecurityContextWrapper) SetRunAsGroup(v *int64) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.RunAsGroup = v
}
func (w *containerSecurityContextWrapper) RunAsNonRoot() *bool {
if w.containerSC == nil {
return nil
}
return w.containerSC.RunAsNonRoot
}
func (w *containerSecurityContextWrapper) SetRunAsNonRoot(v *bool) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.RunAsNonRoot = v
}
func (w *containerSecurityContextWrapper) ReadOnlyRootFilesystem() *bool {
if w.containerSC == nil {
return nil
}
return w.containerSC.ReadOnlyRootFilesystem
}
func (w *containerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *bool) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.ReadOnlyRootFilesystem = v
}
func (w *containerSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
if w.containerSC == nil {
return nil
}
return w.containerSC.SeccompProfile
}
func (w *containerSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if w.containerSC == nil && p == nil {
return
}
w.ensureContainerSC()
w.containerSC.SeccompProfile = p
}
func (w *containerSecurityContextWrapper) AllowPrivilegeEscalation() *bool {
if w.containerSC == nil {
return nil
}
return w.containerSC.AllowPrivilegeEscalation
}
func (w *containerSecurityContextWrapper) SetAllowPrivilegeEscalation(v *bool) {
if w.containerSC == nil && v == nil {
return
}
w.ensureContainerSC()
w.containerSC.AllowPrivilegeEscalation = v
}
// NewEffectiveContainerSecurityContextAccessor returns an accessor for reading effective values
// for the provided pod security context and container security context
func NewEffectiveContainerSecurityContextAccessor(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextAccessor {
return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC}
}
// NewEffectiveContainerSecurityContextMutator returns a mutator for reading and writing effective values
// for the provided pod security context and container security context
func NewEffectiveContainerSecurityContextMutator(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextMutator {
return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC}
}
type effectiveContainerSecurityContextWrapper struct {
podSC PodSecurityContextAccessor
containerSC ContainerSecurityContextMutator
}
func (w *effectiveContainerSecurityContextWrapper) ContainerSecurityContext() *api.SecurityContext {
return w.containerSC.ContainerSecurityContext()
}
func (w *effectiveContainerSecurityContextWrapper) Capabilities() *api.Capabilities {
return w.containerSC.Capabilities()
}
func (w *effectiveContainerSecurityContextWrapper) SetCapabilities(v *api.Capabilities) {
if !reflect.DeepEqual(w.Capabilities(), v) {
w.containerSC.SetCapabilities(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) Privileged() *bool {
return w.containerSC.Privileged()
}
func (w *effectiveContainerSecurityContextWrapper) SetPrivileged(v *bool) {
if !reflect.DeepEqual(w.Privileged(), v) {
w.containerSC.SetPrivileged(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) ProcMount() api.ProcMountType {
return w.containerSC.ProcMount()
}
func (w *effectiveContainerSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions {
if v := w.containerSC.SELinuxOptions(); v != nil {
return v
}
return w.podSC.SELinuxOptions()
}
func (w *effectiveContainerSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) {
if !reflect.DeepEqual(w.SELinuxOptions(), v) {
w.containerSC.SetSELinuxOptions(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) RunAsUser() *int64 {
if v := w.containerSC.RunAsUser(); v != nil {
return v
}
return w.podSC.RunAsUser()
}
func (w *effectiveContainerSecurityContextWrapper) SetRunAsUser(v *int64) {
if !reflect.DeepEqual(w.RunAsUser(), v) {
w.containerSC.SetRunAsUser(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) RunAsGroup() *int64 {
if v := w.containerSC.RunAsGroup(); v != nil {
return v
}
return w.podSC.RunAsGroup()
}
func (w *effectiveContainerSecurityContextWrapper) SetRunAsGroup(v *int64) {
if !reflect.DeepEqual(w.RunAsGroup(), v) {
w.containerSC.SetRunAsGroup(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) RunAsNonRoot() *bool {
if v := w.containerSC.RunAsNonRoot(); v != nil {
return v
}
return w.podSC.RunAsNonRoot()
}
func (w *effectiveContainerSecurityContextWrapper) SetRunAsNonRoot(v *bool) {
if !reflect.DeepEqual(w.RunAsNonRoot(), v) {
w.containerSC.SetRunAsNonRoot(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) ReadOnlyRootFilesystem() *bool {
return w.containerSC.ReadOnlyRootFilesystem()
}
func (w *effectiveContainerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *bool) {
if !reflect.DeepEqual(w.ReadOnlyRootFilesystem(), v) {
w.containerSC.SetReadOnlyRootFilesystem(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
return w.containerSC.SeccompProfile()
}
func (w *effectiveContainerSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if !reflect.DeepEqual(w.SeccompProfile(), p) {
w.containerSC.SetSeccompProfile(p)
}
}
func (w *effectiveContainerSecurityContextWrapper) AllowPrivilegeEscalation() *bool {
return w.containerSC.AllowPrivilegeEscalation()
}
func (w *effectiveContainerSecurityContextWrapper) SetAllowPrivilegeEscalation(v *bool) {
if !reflect.DeepEqual(w.AllowPrivilegeEscalation(), v) {
w.containerSC.SetAllowPrivilegeEscalation(v)
}
}
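// Sketch of the container-over-pod fallback the effective wrapper implements,
// e.g. for RunAsUser: the container-level value wins when set, otherwise the
// pod-level value is used. exampleEffectiveRunAsUser is hypothetical.
func exampleEffectiveRunAsUser(podSC *api.PodSecurityContext, containerSC *api.SecurityContext) *int64 {
	w := NewEffectiveContainerSecurityContextAccessor(
		NewPodSecurityContextAccessor(podSC),
		NewContainerSecurityContextMutator(containerSC),
	)
	return w.RunAsUser()
}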
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package securitycontext
import (
"k8s.io/api/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ValidSecurityContextWithContainerDefaults creates a valid security context provider based on
// empty container defaults. Used for testing.
func ValidSecurityContextWithContainerDefaults() *v1.SecurityContext {
priv := false
defProcMount := v1.DefaultProcMount
return &v1.SecurityContext{
Capabilities: &v1.Capabilities{},
Privileged: &priv,
ProcMount: &defProcMount,
}
}
// ValidInternalSecurityContextWithContainerDefaults creates a valid security context provider based on
// empty container defaults. Used for testing.
func ValidInternalSecurityContextWithContainerDefaults() *api.SecurityContext {
priv := false
dpm := api.DefaultProcMount
return &api.SecurityContext{
Capabilities: &api.Capabilities{},
Privileged: &priv,
ProcMount: &dpm,
}
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package securitycontext
import (
"fmt"
"os"
"sync"
v1 "k8s.io/api/core/v1"
)
// HasWindowsHostProcessRequest returns true if the container should run as a HostProcess
// container, tolerating nil security contexts.
func HasWindowsHostProcessRequest(pod *v1.Pod, container *v1.Container) bool {
effectiveSc := DetermineEffectiveSecurityContext(pod, container)
if effectiveSc.WindowsOptions == nil {
return false
}
if effectiveSc.WindowsOptions.HostProcess == nil {
return false
}
return *effectiveSc.WindowsOptions.HostProcess
}
// DetermineEffectiveSecurityContext returns a synthesized SecurityContext for reading effective configurations
// from the provided pod's and container's security contexts. The container's fields take precedence where both
// are set.
func DetermineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container) *v1.SecurityContext {
effectiveSc := securityContextFromPodSecurityContext(pod)
containerSc := container.SecurityContext
if effectiveSc == nil && containerSc == nil {
return &v1.SecurityContext{}
}
if effectiveSc != nil && containerSc == nil {
return effectiveSc
}
if effectiveSc == nil && containerSc != nil {
return containerSc
}
if containerSc.SELinuxOptions != nil {
effectiveSc.SELinuxOptions = new(v1.SELinuxOptions)
*effectiveSc.SELinuxOptions = *containerSc.SELinuxOptions
}
if containerSc.WindowsOptions != nil {
// only override fields that are set at the container level, not the whole thing
if effectiveSc.WindowsOptions == nil {
effectiveSc.WindowsOptions = &v1.WindowsSecurityContextOptions{}
}
if containerSc.WindowsOptions.GMSACredentialSpecName != nil || containerSc.WindowsOptions.GMSACredentialSpec != nil {
// both GMSA fields go hand in hand
effectiveSc.WindowsOptions.GMSACredentialSpecName = containerSc.WindowsOptions.GMSACredentialSpecName
effectiveSc.WindowsOptions.GMSACredentialSpec = containerSc.WindowsOptions.GMSACredentialSpec
}
if containerSc.WindowsOptions.RunAsUserName != nil {
effectiveSc.WindowsOptions.RunAsUserName = containerSc.WindowsOptions.RunAsUserName
}
if containerSc.WindowsOptions.HostProcess != nil {
effectiveSc.WindowsOptions.HostProcess = containerSc.WindowsOptions.HostProcess
}
}
if containerSc.Capabilities != nil {
effectiveSc.Capabilities = new(v1.Capabilities)
*effectiveSc.Capabilities = *containerSc.Capabilities
}
if containerSc.Privileged != nil {
effectiveSc.Privileged = new(bool)
*effectiveSc.Privileged = *containerSc.Privileged
}
if containerSc.RunAsUser != nil {
effectiveSc.RunAsUser = new(int64)
*effectiveSc.RunAsUser = *containerSc.RunAsUser
}
if containerSc.RunAsGroup != nil {
effectiveSc.RunAsGroup = new(int64)
*effectiveSc.RunAsGroup = *containerSc.RunAsGroup
}
if containerSc.RunAsNonRoot != nil {
effectiveSc.RunAsNonRoot = new(bool)
*effectiveSc.RunAsNonRoot = *containerSc.RunAsNonRoot
}
if containerSc.ReadOnlyRootFilesystem != nil {
effectiveSc.ReadOnlyRootFilesystem = new(bool)
*effectiveSc.ReadOnlyRootFilesystem = *containerSc.ReadOnlyRootFilesystem
}
if containerSc.AllowPrivilegeEscalation != nil {
effectiveSc.AllowPrivilegeEscalation = new(bool)
*effectiveSc.AllowPrivilegeEscalation = *containerSc.AllowPrivilegeEscalation
}
if containerSc.ProcMount != nil {
effectiveSc.ProcMount = new(v1.ProcMountType)
*effectiveSc.ProcMount = *containerSc.ProcMount
}
return effectiveSc
}
// DetermineEffectiveRunAsUser returns a pointer to the effective UID from the provided pod's
// and container's security contexts, and a bool indicating whether it is set.
// The container's runAsUser takes precedence where both are set.
func DetermineEffectiveRunAsUser(pod *v1.Pod, container *v1.Container) (*int64, bool) {
var runAsUser *int64
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.RunAsUser != nil {
runAsUser = new(int64)
*runAsUser = *pod.Spec.SecurityContext.RunAsUser
}
if container.SecurityContext != nil && container.SecurityContext.RunAsUser != nil {
runAsUser = new(int64)
*runAsUser = *container.SecurityContext.RunAsUser
}
if runAsUser == nil {
return nil, false
}
return runAsUser, true
}
func securityContextFromPodSecurityContext(pod *v1.Pod) *v1.SecurityContext {
if pod.Spec.SecurityContext == nil {
return nil
}
synthesized := &v1.SecurityContext{}
if pod.Spec.SecurityContext.SELinuxOptions != nil {
synthesized.SELinuxOptions = &v1.SELinuxOptions{}
*synthesized.SELinuxOptions = *pod.Spec.SecurityContext.SELinuxOptions
}
if pod.Spec.SecurityContext.WindowsOptions != nil {
synthesized.WindowsOptions = &v1.WindowsSecurityContextOptions{}
*synthesized.WindowsOptions = *pod.Spec.SecurityContext.WindowsOptions
}
if pod.Spec.SecurityContext.RunAsUser != nil {
synthesized.RunAsUser = new(int64)
*synthesized.RunAsUser = *pod.Spec.SecurityContext.RunAsUser
}
if pod.Spec.SecurityContext.RunAsGroup != nil {
synthesized.RunAsGroup = new(int64)
*synthesized.RunAsGroup = *pod.Spec.SecurityContext.RunAsGroup
}
if pod.Spec.SecurityContext.RunAsNonRoot != nil {
synthesized.RunAsNonRoot = new(bool)
*synthesized.RunAsNonRoot = *pod.Spec.SecurityContext.RunAsNonRoot
}
return synthesized
}
// AddNoNewPrivileges returns whether we should add the no_new_privs option.
func AddNoNewPrivileges(sc *v1.SecurityContext) bool {
if sc == nil {
return false
}
// handle the case where the user did not set the default and did not explicitly set allowPrivilegeEscalation
if sc.AllowPrivilegeEscalation == nil {
return false
}
// handle the case where defaultAllowPrivilegeEscalation is false or the user explicitly set allowPrivilegeEscalation to true/false
return !*sc.AllowPrivilegeEscalation
}
var (
// These *must* be kept in sync with moby/moby.
// https://github.com/moby/moby/blob/ecb03c4cdae6f323150fc11b303dcc5dc4d82416/oci/defaults.go#L190-L218
defaultMaskedPaths = sync.OnceValue(func() []string {
maskedPaths := []string{
"/proc/asound",
"/proc/acpi",
"/proc/interrupts",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware",
"/sys/devices/virtual/powercap",
}
for _, cpu := range possibleCPUs() {
path := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/thermal_throttle", cpu)
if _, err := os.Stat(path); err == nil {
maskedPaths = append(maskedPaths, path)
}
}
return maskedPaths
})
defaultReadonlyPaths = []string{
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger",
}
)
// ConvertToRuntimeMaskedPaths converts the ProcMountType to the specified or default
// masked paths.
func ConvertToRuntimeMaskedPaths(opt *v1.ProcMountType) []string {
if opt != nil && *opt == v1.UnmaskedProcMount {
// Unmasked proc mount should have no paths set as masked.
return []string{}
}
// Otherwise, add the default masked paths to the runtime security context.
return defaultMaskedPaths()
}
// ConvertToRuntimeReadonlyPaths converts the ProcMountType to the specified or default
// readonly paths.
func ConvertToRuntimeReadonlyPaths(opt *v1.ProcMountType) []string {
if opt != nil && *opt == v1.UnmaskedProcMount {
// Unmasked proc mount should have no paths set as readonly.
return []string{}
}
// Otherwise, add the default readonly paths to the runtime security context.
return defaultReadonlyPaths
}
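// Usage sketch:
//
//	unmasked := v1.UnmaskedProcMount
//	ConvertToRuntimeMaskedPaths(&unmasked) // []string{}: nothing is masked
//	ConvertToRuntimeMaskedPaths(nil)       // the default masked paths
//	ConvertToRuntimeReadonlyPaths(nil)     // the default readonly paths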
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package securitycontext
import (
"os"
"runtime"
"strconv"
"strings"
"sync"
)
// possibleCPUs returns the IDs of the possible CPUs on this host.
func possibleCPUs() (cpus []int) {
if ncpu := possibleCPUsParsed(); ncpu != nil {
return ncpu
}
for i := range runtime.NumCPU() {
cpus = append(cpus, i)
}
return cpus
}
// possibleCPUsParsed parses the IDs of the possible CPUs on this host from
// /sys/devices/system/cpu/possible.
var possibleCPUsParsed = sync.OnceValue(func() (cpus []int) {
data, err := os.ReadFile("/sys/devices/system/cpu/possible")
if err != nil {
return nil
}
ranges := strings.SplitSeq(strings.TrimSpace(string(data)), ",")
for r := range ranges {
if rStart, rEnd, ok := strings.Cut(r, "-"); !ok {
cpu, err := strconv.Atoi(rStart)
if err != nil {
return nil
}
cpus = append(cpus, cpu)
} else {
var start, end int
start, err := strconv.Atoi(rStart)
if err != nil {
return nil
}
end, err = strconv.Atoi(rEnd)
if err != nil {
return nil
}
for i := start; i <= end; i++ {
cpus = append(cpus, i)
}
}
}
return cpus
})
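// For example, a possible file containing "0-3,8" parses to []int{0, 1, 2, 3, 8}:
// single entries are taken as-is and "start-end" ranges are expanded inclusively.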
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
// DefaultFs implements Filesystem using same-named functions from "os" and "path/filepath"
type DefaultFs struct {
root string
}
var _ Filesystem = &DefaultFs{}
// NewTempFs returns a fake Filesystem rooted in a temporary directory, useful for unit tests
func NewTempFs() Filesystem {
path, _ := os.MkdirTemp("", "tmpfs")
return &DefaultFs{
root: path,
}
}
func (fs *DefaultFs) prefix(path string) string {
if len(fs.root) == 0 {
return path
}
return filepath.Join(fs.root, path)
}
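// Usage sketch: every path handed to a rooted DefaultFs is joined under root,
// so a fake filesystem for tests stays inside its temp directory (the filename
// below is illustrative).
//
//	fs := NewTempFs()
//	f, err := fs.Create("config.yaml") // created under the temp root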
// Stat via os.Stat
func (fs *DefaultFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(fs.prefix(name))
}
// Create via os.Create
func (fs *DefaultFs) Create(name string) (File, error) {
file, err := os.Create(fs.prefix(name))
if err != nil {
return nil, err
}
return &defaultFile{file}, nil
}
// Rename via os.Rename
func (fs *DefaultFs) Rename(oldpath, newpath string) error {
if !strings.HasPrefix(oldpath, fs.root) {
oldpath = fs.prefix(oldpath)
}
if !strings.HasPrefix(newpath, fs.root) {
newpath = fs.prefix(newpath)
}
return os.Rename(oldpath, newpath)
}
func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error {
return MkdirAll(fs.prefix(path), perm)
}
// MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory
// named path, along with any necessary parents, and returns nil, or else returns an error.
// Permission bits perm (before umask) are used for all directories that
// MkdirAllWithPathCheck creates.
// If path is already a directory, MkdirAllWithPathCheck does nothing and returns nil.
// NOTE: On Windows NTFS, mount points are implemented as reparse points
// (similar to symlinks) and do not represent an actual directory. Hence the
// directory-existence check on Windows NTFS checks for symlink presence rather than for a directory.
func MkdirAllWithPathCheck(path string, perm os.FileMode) error {
if dir, err := os.Lstat(path); err == nil {
// If the path exists already,
// 1. for Unix/Linux OS, check if the path is directory.
// 2. for windows NTFS, check if the path is symlink instead of directory.
if dir.IsDir() ||
(runtime.GOOS == "windows" && (dir.Mode()&os.ModeSymlink != 0 || dir.Mode()&os.ModeIrregular != 0)) {
return nil
}
return fmt.Errorf("path %v exists but is not a directory", path)
}
// If existence of path not known, attempt to create it.
if err := MkdirAll(path, perm); err != nil {
return err
}
return nil
}
// Chtimes via os.Chtimes
func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(fs.prefix(name), atime, mtime)
}
// RemoveAll via os.RemoveAll
func (fs *DefaultFs) RemoveAll(path string) error {
return os.RemoveAll(fs.prefix(path))
}
// Remove via os.Remove
func (fs *DefaultFs) Remove(name string) error {
return os.Remove(fs.prefix(name))
}
// ReadFile via os.ReadFile
func (fs *DefaultFs) ReadFile(filename string) ([]byte, error) {
return os.ReadFile(fs.prefix(filename))
}
// TempDir via os.MkdirTemp
func (fs *DefaultFs) TempDir(dir, prefix string) (string, error) {
return os.MkdirTemp(fs.prefix(dir), prefix)
}
// TempFile via os.CreateTemp
func (fs *DefaultFs) TempFile(dir, prefix string) (File, error) {
file, err := os.CreateTemp(fs.prefix(dir), prefix)
if err != nil {
return nil, err
}
return &defaultFile{file}, nil
}
// ReadDir via os.ReadDir
func (fs *DefaultFs) ReadDir(dirname string) ([]os.DirEntry, error) {
return os.ReadDir(fs.prefix(dirname))
}
// Walk via filepath.Walk
func (fs *DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error {
return filepath.Walk(fs.prefix(root), walkFn)
}
// defaultFile implements File using same-named functions from "os"
type defaultFile struct {
file *os.File
}
// Name via os.File.Name
func (file *defaultFile) Name() string {
return file.file.Name()
}
// Write via os.File.Write
func (file *defaultFile) Write(b []byte) (n int, err error) {
return file.file.Write(b)
}
// Sync via os.File.Sync
func (file *defaultFile) Sync() error {
return file.file.Sync()
}
// Close via os.File.Close
func (file *defaultFile) Close() error {
return file.file.Close()
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"path/filepath"
)
// IsPathClean reports whether path is already in its cleaned form. Both sides
// of the comparison are normalized with filepath.ToSlash so that all slashes
// are the same before comparing.
func IsPathClean(path string) bool {
return filepath.ToSlash(filepath.Clean(path)) == filepath.ToSlash(path)
}
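// For example:
//
//	IsPathClean("a/b/c")    // true
//	IsPathClean("a/b/../c") // false: filepath.Clean collapses it to "a/c"
//	IsPathClean("a//b")     // false: filepath.Clean removes the doubled separator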
//go:build freebsd || linux || darwin
// +build freebsd linux darwin
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"fmt"
"os"
"path/filepath"
)
// IsUnixDomainSocket returns whether a given file is an AF_UNIX socket file
func IsUnixDomainSocket(filePath string) (bool, error) {
fi, err := os.Stat(filePath)
if err != nil {
return false, fmt.Errorf("stat file %s failed: %v", filePath, err)
}
if fi.Mode()&os.ModeSocket == 0 {
return false, nil
}
return true, nil
}
// Chmod is the same as os.Chmod on Unix.
func Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
// MkdirAll is the same as os.MkdirAll on Unix.
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// IsAbs is the same as filepath.IsAbs on Unix.
func IsAbs(path string) bool {
return filepath.IsAbs(path)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"context"
"fmt"
"time"
"github.com/fsnotify/fsnotify"
)
// FSWatcher is a callback-based filesystem watcher abstraction for fsnotify.
type FSWatcher interface {
// Initializes the watcher with the given watch handlers.
// Called before all other methods.
Init(FSEventHandler, FSErrorHandler) error
// Starts listening for events and errors.
// When an event or error occurs, the corresponding handler is called.
Run()
// Add a filesystem path to watch
AddWatch(path string) error
}
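// Usage sketch (the path and handler bodies are illustrative):
//
//	w := NewFsnotifyWatcher()
//	if err := w.Init(
//		func(event fsnotify.Event) { /* react to the event */ },
//		func(err error) { /* log the error */ },
//	); err != nil {
//		// handle init failure
//	}
//	if err := w.AddWatch("/etc/kubernetes/manifests"); err != nil {
//		// handle watch failure
//	}
//	w.Run()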
// FSEventHandler is called when a fsnotify event occurs.
type FSEventHandler func(event fsnotify.Event)
// FSErrorHandler is called when a fsnotify error occurs.
type FSErrorHandler func(err error)
type fsnotifyWatcher struct {
watcher *fsnotify.Watcher
eventHandler FSEventHandler
errorHandler FSErrorHandler
}
var _ FSWatcher = &fsnotifyWatcher{}
// NewFsnotifyWatcher returns an implementation of FSWatcher that continuously listens for
// fsnotify events and calls the event handler as soon as an event is received.
func NewFsnotifyWatcher() FSWatcher {
return &fsnotifyWatcher{}
}
func (w *fsnotifyWatcher) AddWatch(path string) error {
return w.watcher.Add(path)
}
func (w *fsnotifyWatcher) Init(eventHandler FSEventHandler, errorHandler FSErrorHandler) error {
var err error
w.watcher, err = fsnotify.NewWatcher()
if err != nil {
return err
}
w.eventHandler = eventHandler
w.errorHandler = errorHandler
return nil
}
func (w *fsnotifyWatcher) Run() {
go func() {
defer w.watcher.Close()
for {
select {
case event := <-w.watcher.Events:
if w.eventHandler != nil {
w.eventHandler(event)
}
case err := <-w.watcher.Errors:
if w.errorHandler != nil {
w.errorHandler(err)
}
}
}
}()
}
type watchAddRemover interface {
Add(path string) error
Remove(path string) error
}
type noopWatcher struct{}
func (noopWatcher) Add(path string) error { return nil }
func (noopWatcher) Remove(path string) error { return nil }
// WatchUntil watches the specified path for changes and blocks until ctx is canceled.
// eventHandler() must be non-nil, and pollInterval must be greater than 0.
// eventHandler() is invoked whenever a change event is observed or pollInterval elapses.
// errorHandler() is invoked (if non-nil) whenever an error occurs initializing or watching the specified path.
//
// If path is a directory, only the directory and immediate children are watched.
//
// If path does not exist or cannot be watched, an error is passed to errorHandler() and eventHandler() is called at pollInterval.
//
// Multiple observed events may collapse to a single invocation of eventHandler().
//
// eventHandler() is invoked immediately after successful initialization of the filesystem watch,
// in case the path changed concurrent with calling WatchUntil().
func WatchUntil(ctx context.Context, pollInterval time.Duration, path string, eventHandler func(), errorHandler func(err error)) {
if pollInterval <= 0 {
panic(fmt.Errorf("pollInterval must be > 0"))
}
if eventHandler == nil {
panic(fmt.Errorf("eventHandler must be non-nil"))
}
if errorHandler == nil {
errorHandler = func(err error) {}
}
// Initialize watcher, fall back to no-op
var (
eventsCh chan fsnotify.Event
errorCh chan error
watcher watchAddRemover
)
if w, err := fsnotify.NewWatcher(); err != nil {
errorHandler(fmt.Errorf("error creating file watcher, falling back to poll at interval %s: %w", pollInterval, err))
watcher = noopWatcher{}
} else {
watcher = w
eventsCh = w.Events
errorCh = w.Errors
defer func() {
_ = w.Close()
}()
}
// Initialize background poll
t := time.NewTicker(pollInterval)
defer t.Stop()
attemptPeriodicRewatch := false
// Start watching the path
if err := watcher.Add(path); err != nil {
errorHandler(err)
attemptPeriodicRewatch = true
} else {
// Invoke handle() at least once after successfully registering the listener,
// in case the file changed concurrent with calling WatchUntil.
eventHandler()
}
for {
select {
case <-ctx.Done():
return
case <-t.C:
// Prioritize exiting if context is canceled
if ctx.Err() != nil {
return
}
// Try to re-establish the watcher if we previously got a watch error
if attemptPeriodicRewatch {
_ = watcher.Remove(path)
if err := watcher.Add(path); err != nil {
errorHandler(err)
} else {
attemptPeriodicRewatch = false
}
}
// Handle
eventHandler()
case e := <-eventsCh:
// Prioritize exiting if context is canceled
if ctx.Err() != nil {
return
}
// Try to re-establish the watcher for events which dropped the existing watch
if e.Name == path && (e.Has(fsnotify.Remove) || e.Has(fsnotify.Rename)) {
_ = watcher.Remove(path)
if err := watcher.Add(path); err != nil {
errorHandler(err)
attemptPeriodicRewatch = true
}
}
// Handle
eventHandler()
case err := <-errorCh:
// Prioritize exiting if context is canceled
if ctx.Err() != nil {
return
}
// If the error occurs in response to calling watcher.Add, re-adding here could hot-loop.
// The periodic poll will attempt to re-establish the watch.
errorHandler(err)
attemptPeriodicRewatch = true
}
}
}
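// Usage sketch: watch a config file until ctx is canceled, re-reading it on
// every event and at least once a minute (the path and handlers are illustrative).
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	WatchUntil(ctx, time.Minute, "/etc/app/config.yaml",
//		func() { /* re-read the file */ },
//		func(err error) { /* log the watch error */ })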
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package exponentialbackoff contains logic for implementing exponential
// backoff for GoRoutineMap and NestedPendingOperations.
package exponentialbackoff
import (
"fmt"
"time"
)
const (
// initialDurationBeforeRetry is the amount of time after an error occurs
// that GoroutineMap will refuse to allow another operation to start with
// the same target (if exponentialBackOffOnError is enabled). Each
// successive error results in a wait 2x times the previous.
initialDurationBeforeRetry = 500 * time.Millisecond
// maxDurationBeforeRetry is the maximum amount of time that
// durationBeforeRetry will grow to due to exponential backoff.
// Value is slightly offset from 2 minutes to make timeouts due to this
// constant recognizable.
maxDurationBeforeRetry = 2*time.Minute + 2*time.Second
)
// ExponentialBackoff contains the last occurrence of an error and the duration
// that retries are not permitted.
type ExponentialBackoff struct {
lastError error
lastErrorTime time.Time
durationBeforeRetry time.Duration
}
// SafeToRetry returns an error if the durationBeforeRetry period for the given
// lastErrorTime has not yet expired. Otherwise it returns nil.
func (expBackoff *ExponentialBackoff) SafeToRetry(operationName string) error {
if time.Since(expBackoff.lastErrorTime) <= expBackoff.durationBeforeRetry {
return NewExponentialBackoffError(operationName, *expBackoff)
}
return nil
}
func (expBackoff *ExponentialBackoff) Update(err *error) {
if expBackoff.durationBeforeRetry == 0 {
expBackoff.durationBeforeRetry = initialDurationBeforeRetry
} else {
expBackoff.durationBeforeRetry = 2 * expBackoff.durationBeforeRetry
if expBackoff.durationBeforeRetry > maxDurationBeforeRetry {
expBackoff.durationBeforeRetry = maxDurationBeforeRetry
}
}
expBackoff.lastError = *err
expBackoff.lastErrorTime = time.Now()
}
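// Each Update doubles the retry window: 500ms, 1s, 2s, ..., capped at 2m2s.
// Sketch (the error value is illustrative):
//
//	var backoff ExponentialBackoff
//	opErr := errors.New("operation failed")
//	backoff.Update(&opErr) // durationBeforeRetry = 500ms
//	backoff.Update(&opErr) // durationBeforeRetry = 1s
//	// SafeToRetry returns an error until the current window has elapsed.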
func (expBackoff *ExponentialBackoff) GenerateNoRetriesPermittedMsg(operationName string) string {
return fmt.Sprintf("Operation for %q failed. No retries permitted until %v (durationBeforeRetry %v). Error: %v",
operationName,
expBackoff.lastErrorTime.Add(expBackoff.durationBeforeRetry),
expBackoff.durationBeforeRetry,
expBackoff.lastError)
}
// NewExponentialBackoffError returns a new instance of ExponentialBackoff error.
func NewExponentialBackoffError(
operationName string, expBackoff ExponentialBackoff) error {
return exponentialBackoffError{
operationName: operationName,
expBackoff: expBackoff,
}
}
// IsExponentialBackoff returns true if an error returned from GoroutineMap
// indicates that a new operation can not be started because
// exponentialBackOffOnError is enabled and a previous operation with the same
// operation name failed within the durationBeforeRetry period.
func IsExponentialBackoff(err error) bool {
switch err.(type) {
case exponentialBackoffError:
return true
default:
return false
}
}
// exponentialBackoffError is the error returned from GoroutineMap when a new
// operation can not be started because exponentialBackOffOnError is enabled
// and a previous operation with the same operation name failed within the
// durationBeforeRetry period.
type exponentialBackoffError struct {
operationName string
expBackoff ExponentialBackoff
}
var _ error = exponentialBackoffError{}
func (err exponentialBackoffError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %q. An operation with that name failed at %v. No retries permitted until %v (%v). Last error: %q.",
err.operationName,
err.expBackoff.lastErrorTime,
err.expBackoff.lastErrorTime.Add(err.expBackoff.durationBeforeRetry),
err.expBackoff.durationBeforeRetry,
err.expBackoff.lastError)
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package goroutinemap implements a data structure for managing goroutines
by name. It prevents the creation of new goroutines if an existing goroutine
with the same name exists.
*/
package goroutinemap
import (
"fmt"
"sync"
k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
)
// GoRoutineMap defines a type that can run named goroutines and track their
// state. It prevents the creation of multiple goroutines with the same name
// and may prevent recreation of a goroutine until a backoff time has elapsed
// after the last goroutine with that name finished.
type GoRoutineMap interface {
// Run adds the operation name to the list of running operations and spawns a
// new goroutine to execute the operation.
// If an operation with the same operation name already exists, an
// AlreadyExists or ExponentialBackoff error is returned.
// Once the operation is complete, the goroutine is terminated and the
// operation name is removed from the list of executing operations, allowing
// a new operation to be started with the same operation name without error.
Run(operationName string, operationFunc func() error) error
// Wait blocks until operations map is empty. This is typically
// necessary during tests - the test should wait until all operations finish
// and evaluate results after that.
Wait()
// WaitForCompletion blocks until all operations have either completed
// successfully or have failed but are no longer pending (e.g. are waiting
// in backoff). This is typically used by tests to wait for a stable state.
WaitForCompletion()
// IsOperationPending returns true if the operation is pending (currently
// running), otherwise returns false.
IsOperationPending(operationName string) bool
}
// NewGoRoutineMap returns a new instance of GoRoutineMap.
func NewGoRoutineMap(exponentialBackOffOnError bool) GoRoutineMap {
g := &goRoutineMap{
operations: make(map[string]operation),
exponentialBackOffOnError: exponentialBackOffOnError,
}
g.cond = sync.NewCond(&g.lock)
return g
}
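// Usage sketch (the operation name and body are illustrative):
//
//	grm := NewGoRoutineMap(true /* exponentialBackOffOnError */)
//	err := grm.Run("attach-volume-vol1", func() error {
//		// do the work; a returned error arms the backoff window
//		return nil
//	})
//	// Calling Run again with the same name while the first goroutine is
//	// pending returns an error satisfying IsAlreadyExists.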
type goRoutineMap struct {
operations map[string]operation
exponentialBackOffOnError bool
cond *sync.Cond
lock sync.RWMutex
}
// operation holds the state of a single goroutine.
type operation struct {
operationPending bool
expBackoff exponentialbackoff.ExponentialBackoff
}
func (grm *goRoutineMap) Run(
operationName string,
operationFunc func() error) error {
grm.lock.Lock()
defer grm.lock.Unlock()
existingOp, exists := grm.operations[operationName]
if exists {
// Operation with name exists
if existingOp.operationPending {
return NewAlreadyExistsError(operationName)
}
if err := existingOp.expBackoff.SafeToRetry(operationName); err != nil {
return err
}
}
grm.operations[operationName] = operation{
operationPending: true,
expBackoff: existingOp.expBackoff,
}
go func() (err error) {
// Handle unhandled panics (very unlikely)
defer k8sRuntime.HandleCrash()
// Handle completion of, and any error from, operationFunc()
defer grm.operationComplete(operationName, &err)
// Handle panic, if any, from operationFunc()
defer k8sRuntime.RecoverFromPanic(&err)
return operationFunc()
}()
return nil
}
// operationComplete handles the completion of a goroutine run in the
// goRoutineMap.
func (grm *goRoutineMap) operationComplete(
operationName string, err *error) {
// Defers are executed in last-in-first-out order: the lock is acquired when
// operationComplete begins and released when the method finishes; only after
// the lock is released is cond signaled, waking any waiting goroutine.
defer grm.cond.Signal()
grm.lock.Lock()
defer grm.lock.Unlock()
if *err == nil || !grm.exponentialBackOffOnError {
// Operation completed without error, or exponentialBackOffOnError disabled
delete(grm.operations, operationName)
if *err != nil {
// Log error
klog.Errorf("operation for %q failed with: %v",
operationName,
*err)
}
} else {
// Operation completed with error and exponentialBackOffOnError Enabled
existingOp := grm.operations[operationName]
existingOp.expBackoff.Update(err)
existingOp.operationPending = false
grm.operations[operationName] = existingOp
// Log error
klog.Errorf("%v",
existingOp.expBackoff.GenerateNoRetriesPermittedMsg(operationName))
}
}
func (grm *goRoutineMap) IsOperationPending(operationName string) bool {
grm.lock.RLock()
defer grm.lock.RUnlock()
existingOp, exists := grm.operations[operationName]
if exists && existingOp.operationPending {
return true
}
return false
}
func (grm *goRoutineMap) Wait() {
grm.lock.Lock()
defer grm.lock.Unlock()
for len(grm.operations) > 0 {
grm.cond.Wait()
}
}
func (grm *goRoutineMap) WaitForCompletion() {
grm.lock.Lock()
defer grm.lock.Unlock()
for {
if len(grm.operations) == 0 || grm.nothingPending() {
break
} else {
grm.cond.Wait()
}
}
}
// nothingPending returns true if no operation is pending. The caller must
// hold the necessary locks.
func (grm *goRoutineMap) nothingPending() bool {
nothingIsPending := true
for _, operation := range grm.operations {
if operation.operationPending {
nothingIsPending = false
break
}
}
return nothingIsPending
}
// NewAlreadyExistsError returns a new instance of AlreadyExists error.
func NewAlreadyExistsError(operationName string) error {
return alreadyExistsError{operationName}
}
// IsAlreadyExists returns true if an error returned from GoRoutineMap indicates
// a new operation can not be started because an operation with the same
// operation name is already executing.
func IsAlreadyExists(err error) bool {
switch err.(type) {
case alreadyExistsError:
return true
default:
return false
}
}
// alreadyExistsError is the error returned by GoRoutineMap when a new operation
// can not be started because an operation with the same operation name is
// already executing.
type alreadyExistsError struct {
operationName string
}
var _ error = alreadyExistsError{}
func (err alreadyExistsError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %q. An operation with that name is already executing.",
err.operationName)
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hash
import (
"fmt"
"hash"
"k8s.io/apimachinery/pkg/util/dump"
)
// DeepHashObject writes the specified object to the hash using dump.ForHash,
// which follows pointers and prints the actual values of nested objects,
// ensuring the hash does not change when only a pointer changes.
func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
hasher.Reset()
fmt.Fprintf(hasher, "%v", dump.ForHash(objectToWrite))
}
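// Typical callers hash into an FNV hasher and encode the sum, along the lines
// of this sketch (the hasher choice is illustrative):
//
//	hasher := fnv.New32a()
//	DeepHashObject(hasher, obj)
//	key := fmt.Sprintf("%x", hasher.Sum32())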
//go:build linux
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
import (
"bufio"
"bytes"
"context"
"fmt"
"regexp"
"strconv"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
utilexec "k8s.io/utils/exec"
utiltrace "k8s.io/utils/trace"
)
// RulePosition holds the -I/-A flags for iptables
type RulePosition string
const (
// Prepend is the insert flag for iptables
Prepend RulePosition = "-I"
// Append is the append flag for iptables
Append RulePosition = "-A"
)
// Interface is an injectable interface for running iptables commands. Implementations must be goroutine-safe.
type Interface interface {
// EnsureChain checks if the specified chain exists and, if not, creates it. If the chain existed, return true.
EnsureChain(table Table, chain Chain) (bool, error)
// FlushChain clears the specified chain. If the chain does not exist, an error is returned.
FlushChain(table Table, chain Chain) error
// DeleteChain deletes the specified chain. If the chain does not exist, an error is returned.
DeleteChain(table Table, chain Chain) error
// ChainExists tests whether the specified chain exists, returning an error if it
// does not, or if it is unable to check.
ChainExists(table Table, chain Chain) (bool, error)
// EnsureRule checks if the specified rule is present and, if not, creates it. If the rule existed, return true.
EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error)
// DeleteRule checks if the specified rule is present and, if so, deletes it.
DeleteRule(table Table, chain Chain, args ...string) error
// IsIPv6 returns true if this is managing ipv6 tables.
IsIPv6() bool
// Protocol returns the IP family this instance is managing.
Protocol() Protocol
// SaveInto calls `iptables-save` for table and stores result in a given buffer.
SaveInto(table Table, buffer *bytes.Buffer) error
// Restore runs `iptables-restore` passing data through []byte.
// table is the Table to restore
// data should be formatted like the output of SaveInto()
// flush sets the presence of the "--noflush" flag. see: FlushFlag
// counters sets the "--counters" flag. see: RestoreCountersFlag
Restore(table Table, data []byte, flush FlushFlag, counters RestoreCountersFlag) error
// RestoreAll is the same as Restore except that no table is specified.
RestoreAll(data []byte, flush FlushFlag, counters RestoreCountersFlag) error
// Monitor detects when the given iptables tables have been flushed by an external
// tool (e.g. a firewall reload) by creating canary chains and polling to see if
// they have been deleted. (Specifically, it polls tables[0] every interval until
// the canary has been deleted from there, then waits a short additional time for
// the canaries to be deleted from the remaining tables as well. You can optimize
// the polling by listing a relatively empty table in tables[0]). When a flush is
// detected, this calls the reloadFunc so the caller can reload their own iptables
// rules. If it is unable to create the canary chains (either initially or after
// a reload) it will log an error and stop monitoring.
// (This function should be called from a goroutine.)
Monitor(canary Chain, tables []Table, reloadFunc func(), interval time.Duration, stopCh <-chan struct{})
// HasRandomFully reveals whether `-j MASQUERADE` takes the
// `--random-fully` option. This is helpful to work around a
// Linux kernel bug that sometimes causes multiple flows to get
// mapped to the same IP:PORT and consequently some suffer packet
// drops.
HasRandomFully() bool
// Present checks if the kernel supports the iptables interface
Present() error
}
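// Usage sketch (the chain name and rule are illustrative):
//
//	ipt := New(ProtocolIPv4)
//	if _, err := ipt.EnsureChain(TableNAT, Chain("EXAMPLE-CHAIN")); err == nil {
//		_, _ = ipt.EnsureRule(Append, TableNAT, Chain("EXAMPLE-CHAIN"), "-j", "RETURN")
//	}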
// Protocol defines the IP protocol, either IPv4 or IPv6
type Protocol string
const (
// ProtocolIPv4 represents ipv4 protocol in iptables
ProtocolIPv4 Protocol = "IPv4"
// ProtocolIPv6 represents ipv6 protocol in iptables
ProtocolIPv6 Protocol = "IPv6"
)
// Table represents an iptables table, e.g. filter, nat, mangle, or raw
type Table string
const (
// TableNAT represents the built-in nat table
TableNAT Table = "nat"
// TableFilter represents the built-in filter table
TableFilter Table = "filter"
// TableMangle represents the built-in mangle table
TableMangle Table = "mangle"
)
// Chain represents an iptables chain, e.g. POSTROUTING or INPUT
type Chain string
const (
// ChainPostrouting used for source NAT in nat table
ChainPostrouting Chain = "POSTROUTING"
// ChainPrerouting used for DNAT (destination NAT) in nat table
ChainPrerouting Chain = "PREROUTING"
// ChainOutput used for the packets going out from local
ChainOutput Chain = "OUTPUT"
// ChainInput used for incoming packets
ChainInput Chain = "INPUT"
// ChainForward used for the packets for another NIC
ChainForward Chain = "FORWARD"
)
const (
cmdIPTablesSave string = "iptables-save"
cmdIPTablesRestore string = "iptables-restore"
cmdIPTables string = "iptables"
cmdIP6TablesRestore string = "ip6tables-restore"
cmdIP6TablesSave string = "ip6tables-save"
cmdIP6Tables string = "ip6tables"
)
// RestoreCountersFlag is an option flag for Restore
type RestoreCountersFlag bool
// RestoreCounters is a boolean true constant for the option flag RestoreCountersFlag
const RestoreCounters RestoreCountersFlag = true
// NoRestoreCounters is a boolean false constant for the option flag RestoreCountersFlag
const NoRestoreCounters RestoreCountersFlag = false
// FlushFlag is an option flag for Flush
type FlushFlag bool
// FlushTables is a boolean true constant for the option flag FlushFlag
const FlushTables FlushFlag = true
// NoFlushTables is a boolean false constant for the option flag FlushFlag
const NoFlushTables FlushFlag = false
// MinCheckVersion is the minimum iptables version supporting the -C / --check
// flag (test whether a rule exists); versions older than this do not support it.
var MinCheckVersion = utilversion.MustParseGeneric("1.4.11")
// RandomFullyMinVersion is the minimum version from which the --random-fully flag is supported,
// used for port mapping to be fully randomized
var RandomFullyMinVersion = utilversion.MustParseGeneric("1.6.2")
// WaitMinVersion is the minimum iptables version supporting the -w (wait) flag
var WaitMinVersion = utilversion.MustParseGeneric("1.4.20")
// WaitSecondsMinVersion is the minimum iptables version supporting the -w <seconds> flag
var WaitSecondsMinVersion = utilversion.MustParseGeneric("1.4.22")
// WaitRestoreMinVersion is the minimum iptables-restore version supporting the -w <seconds> flag
var WaitRestoreMinVersion = utilversion.MustParseGeneric("1.6.2")
// WaitString is the constant for specifying the wait flag
const WaitString = "-w"
// WaitSecondsValue is the constant for specifying the default wait seconds
const WaitSecondsValue = "5"
// LockfilePath16x is the iptables 1.6.x lock file, acquired by any process making changes to the iptables rules
const LockfilePath16x = "/run/xtables.lock"
// LockfilePath14x is the iptables 1.4.x lock file, acquired by any process making changes to the iptables rules
const LockfilePath14x = "@xtables"
// runner implements Interface in terms of exec("iptables").
type runner struct {
mu sync.Mutex
exec utilexec.Interface
protocol Protocol
hasCheck bool
hasRandomFully bool
waitFlag []string
restoreWaitFlag []string
lockfilePath14x string
lockfilePath16x string
}
// newInternal returns a new Interface which will exec iptables, and allows the
// caller to change the iptables-restore lockfile path
func newInternal(exec utilexec.Interface, protocol Protocol, lockfilePath14x, lockfilePath16x string) Interface {
if lockfilePath16x == "" {
lockfilePath16x = LockfilePath16x
}
if lockfilePath14x == "" {
lockfilePath14x = LockfilePath14x
}
runner := &runner{
exec: exec,
protocol: protocol,
lockfilePath14x: lockfilePath14x,
lockfilePath16x: lockfilePath16x,
}
version, err := getIPTablesVersion(exec, protocol)
if err != nil {
// The only likely error is "no such file or directory", in which case any
// further commands will fail the same way, so we don't need to do
// anything special here.
return runner
}
runner.hasCheck = version.AtLeast(MinCheckVersion)
runner.hasRandomFully = version.AtLeast(RandomFullyMinVersion)
runner.waitFlag = getIPTablesWaitFlag(version)
runner.restoreWaitFlag = getIPTablesRestoreWaitFlag(version, exec, protocol)
return runner
}
// New returns a new Interface which will exec iptables for the given protocol.
func New(protocol Protocol) Interface {
return newInternal(utilexec.New(), protocol, "", "")
}
func newDualStackInternal(exec utilexec.Interface) (map[v1.IPFamily]Interface, error) {
var err error
interfaces := map[v1.IPFamily]Interface{}
iptv4 := newInternal(exec, ProtocolIPv4, "", "")
if presentErr := iptv4.Present(); presentErr != nil {
err = presentErr
} else {
interfaces[v1.IPv4Protocol] = iptv4
}
iptv6 := newInternal(exec, ProtocolIPv6, "", "")
if presentErr := iptv6.Present(); presentErr != nil {
// If we get an error for both IPv4 and IPv6 Present() calls, it's virtually guaranteed that
// they're going to be the same error. We ignore the error for IPv6 if IPv4 has already failed.
if err == nil {
err = presentErr
}
} else {
interfaces[v1.IPv6Protocol] = iptv6
}
return interfaces, err
}
// NewDualStack returns a map containing an IPv4 Interface (if IPv4 iptables is supported)
// and an IPv6 Interface (if IPv6 iptables is supported). If only one family is supported,
// it will return a map with one Interface *and* an error (indicating the problem with the
// other family). If neither family is supported, it will return an empty map and an
// error.
func NewDualStack() (map[v1.IPFamily]Interface, error) {
return newDualStackInternal(utilexec.New())
}
// NewBestEffort returns a map containing an IPv4 Interface (if IPv4 iptables is
// supported) and an IPv6 Interface (if IPv6 iptables is supported). If iptables is not
// supported, then it just returns an empty map. This function is intended to make things
// simple for callers that just want "best-effort" iptables support, where neither partial
// nor complete lack of iptables support is considered an error.
func NewBestEffort() map[v1.IPFamily]Interface {
ipts, _ := newDualStackInternal(utilexec.New())
return ipts
}
// EnsureChain is part of Interface.
func (runner *runner) EnsureChain(table Table, chain Chain) (bool, error) {
fullArgs := makeFullArgs(table, chain)
runner.mu.Lock()
defer runner.mu.Unlock()
out, err := runner.run(opCreateChain, fullArgs)
if err != nil {
if ee, ok := err.(utilexec.ExitError); ok {
if ee.Exited() && ee.ExitStatus() == 1 {
return true, nil
}
}
return false, fmt.Errorf("error creating chain %q: %v: %s", chain, err, out)
}
return false, nil
}
// FlushChain is part of Interface.
func (runner *runner) FlushChain(table Table, chain Chain) error {
fullArgs := makeFullArgs(table, chain)
runner.mu.Lock()
defer runner.mu.Unlock()
out, err := runner.run(opFlushChain, fullArgs)
if err != nil {
return fmt.Errorf("error flushing chain %q: %v: %s", chain, err, out)
}
return nil
}
// DeleteChain is part of Interface.
func (runner *runner) DeleteChain(table Table, chain Chain) error {
fullArgs := makeFullArgs(table, chain)
runner.mu.Lock()
defer runner.mu.Unlock()
out, err := runner.run(opDeleteChain, fullArgs)
if err != nil {
return fmt.Errorf("error deleting chain %q: %v: %s", chain, err, out)
}
return nil
}
// EnsureRule is part of Interface.
func (runner *runner) EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error) {
fullArgs := makeFullArgs(table, chain, args...)
runner.mu.Lock()
defer runner.mu.Unlock()
exists, err := runner.checkRule(table, chain, args...)
if err != nil {
return false, err
}
if exists {
return true, nil
}
out, err := runner.run(operation(position), fullArgs)
if err != nil {
return false, fmt.Errorf("error appending rule: %v: %s", err, out)
}
return false, nil
}
// DeleteRule is part of Interface.
func (runner *runner) DeleteRule(table Table, chain Chain, args ...string) error {
fullArgs := makeFullArgs(table, chain, args...)
runner.mu.Lock()
defer runner.mu.Unlock()
exists, err := runner.checkRule(table, chain, args...)
if err != nil {
return err
}
if !exists {
return nil
}
out, err := runner.run(opDeleteRule, fullArgs)
if err != nil {
return fmt.Errorf("error deleting rule: %v: %s", err, out)
}
return nil
}
func (runner *runner) IsIPv6() bool {
return runner.protocol == ProtocolIPv6
}
func (runner *runner) Protocol() Protocol {
return runner.protocol
}
// SaveInto is part of Interface.
func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error {
runner.mu.Lock()
defer runner.mu.Unlock()
trace := utiltrace.New("iptables save")
defer trace.LogIfLong(2 * time.Second)
// run and return
iptablesSaveCmd := iptablesSaveCommand(runner.protocol)
args := []string{"-t", string(table)}
klog.V(4).InfoS("Running", "command", iptablesSaveCmd, "arguments", args)
cmd := runner.exec.Command(iptablesSaveCmd, args...)
cmd.SetStdout(buffer)
stderrBuffer := bytes.NewBuffer(nil)
cmd.SetStderr(stderrBuffer)
err := cmd.Run()
if err != nil {
stderrBuffer.WriteTo(buffer) // ignore error, since we need to return the original error
}
return err
}
// Restore is part of Interface.
func (runner *runner) Restore(table Table, data []byte, flush FlushFlag, counters RestoreCountersFlag) error {
// setup args
args := []string{"-T", string(table)}
return runner.restoreInternal(args, data, flush, counters)
}
// RestoreAll is part of Interface.
func (runner *runner) RestoreAll(data []byte, flush FlushFlag, counters RestoreCountersFlag) error {
// setup args
args := make([]string, 0)
return runner.restoreInternal(args, data, flush, counters)
}
type iptablesLocker interface {
Close() error
}
// restoreInternal is the shared part of Restore/RestoreAll
func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFlag, counters RestoreCountersFlag) error {
runner.mu.Lock()
defer runner.mu.Unlock()
trace := utiltrace.New("iptables restore")
defer trace.LogIfLong(2 * time.Second)
if !flush {
args = append(args, "--noflush")
}
if counters {
args = append(args, "--counters")
}
// Grab the iptables lock to prevent iptables-restore and iptables
// from stepping on each other. iptables-restore 1.6.2 will have
// a --wait option like iptables itself, but that's not widely deployed.
if len(runner.restoreWaitFlag) == 0 {
locker, err := grabIptablesLocks(runner.lockfilePath14x, runner.lockfilePath16x)
if err != nil {
return err
}
trace.Step("Locks grabbed")
defer func(locker iptablesLocker) {
if err := locker.Close(); err != nil {
klog.ErrorS(err, "Failed to close iptables locks")
}
}(locker)
}
// run the command and return the output or an error including the output and error
fullArgs := append(runner.restoreWaitFlag, args...)
iptablesRestoreCmd := iptablesRestoreCommand(runner.protocol)
klog.V(4).InfoS("Running", "command", iptablesRestoreCmd, "arguments", fullArgs)
cmd := runner.exec.Command(iptablesRestoreCmd, fullArgs...)
cmd.SetStdin(bytes.NewBuffer(data))
b, err := cmd.CombinedOutput()
if err != nil {
pErr, ok := parseRestoreError(string(b))
if ok {
return pErr
}
return fmt.Errorf("%w: %s", err, b)
}
return nil
}
func iptablesSaveCommand(protocol Protocol) string {
if protocol == ProtocolIPv6 {
return cmdIP6TablesSave
}
return cmdIPTablesSave
}
func iptablesRestoreCommand(protocol Protocol) string {
if protocol == ProtocolIPv6 {
return cmdIP6TablesRestore
}
return cmdIPTablesRestore
}
func iptablesCommand(protocol Protocol) string {
if protocol == ProtocolIPv6 {
return cmdIP6Tables
}
return cmdIPTables
}
func (runner *runner) run(op operation, args []string) ([]byte, error) {
return runner.runContext(context.TODO(), op, args)
}
func (runner *runner) runContext(ctx context.Context, op operation, args []string) ([]byte, error) {
iptablesCmd := iptablesCommand(runner.protocol)
fullArgs := append(runner.waitFlag, string(op))
fullArgs = append(fullArgs, args...)
klog.V(5).InfoS("Running", "command", iptablesCmd, "arguments", fullArgs)
if ctx == nil {
return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput()
}
return runner.exec.CommandContext(ctx, iptablesCmd, fullArgs...).CombinedOutput()
// Don't log err here - callers might not think it is an error.
}
// Returns (bool, nil) if it was able to check the existence of the rule, or
// (<undefined>, error) if the process of checking failed.
func (runner *runner) checkRule(table Table, chain Chain, args ...string) (bool, error) {
if runner.hasCheck {
return runner.checkRuleUsingCheck(makeFullArgs(table, chain, args...))
}
return runner.checkRuleWithoutCheck(table, chain, args...)
}
var hexnumRE = regexp.MustCompile("0x0+([0-9])")
func trimhex(s string) string {
return hexnumRE.ReplaceAllString(s, "0x$1")
}
// Executes the rule check without using the "-C" flag, instead parsing iptables-save.
// Present for compatibility with <1.4.11 versions of iptables. This is full
// of hacks and half-measures; we should nix it ASAP.
func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) {
iptablesSaveCmd := iptablesSaveCommand(runner.protocol)
klog.V(1).InfoS("Running", "command", iptablesSaveCmd, "table", string(table))
out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput()
if err != nil {
return false, fmt.Errorf("error checking rule: %v", err)
}
// Sadly, iptables has inconsistent quoting rules for comments. Just remove all quotes.
// Also, quoted multi-word comments (which are counted as a single arg)
// will be unpacked into multiple args,
// in order to compare against iptables-save output (which will be split at whitespace boundary)
// e.g. a single arg('"this must be before the NodePort rules"') will be unquoted and unpacked into 7 args.
var argsCopy []string
for i := range args {
tmpField := strings.Trim(args[i], "\"")
tmpField = trimhex(tmpField)
argsCopy = append(argsCopy, strings.Fields(tmpField)...)
}
argset := sets.New(argsCopy...)
for _, line := range strings.Split(string(out), "\n") {
fields := strings.Fields(line)
// Check that this is a rule for the correct chain, and that it has
// the correct number of arguments (+2 for "-A <chain name>")
if !strings.HasPrefix(line, fmt.Sprintf("-A %s", string(chain))) || len(fields) != len(argsCopy)+2 {
continue
}
// Sadly, iptables has inconsistent quoting rules for comments.
// Just remove all quotes.
for i := range fields {
fields[i] = strings.Trim(fields[i], "\"")
fields[i] = trimhex(fields[i])
}
// TODO: This misses reorderings e.g. "-x foo ! -y bar" will match "! -x foo -y bar"
if sets.New(fields...).IsSuperset(argset) {
return true, nil
}
klog.V(5).InfoS("DBG: fields is not a superset of args", "fields", fields, "arguments", args)
}
return false, nil
}
// Executes the rule check using the "-C" flag
func (runner *runner) checkRuleUsingCheck(args []string) (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
out, err := runner.runContext(ctx, opCheckRule, args)
if ctx.Err() == context.DeadlineExceeded {
return false, fmt.Errorf("timed out while checking rules")
}
if err == nil {
return true, nil
}
if ee, ok := err.(utilexec.ExitError); ok {
// iptables uses exit(1) to indicate a failure of the operation,
// as compared to a malformed commandline, for example.
if ee.Exited() && ee.ExitStatus() == 1 {
return false, nil
}
}
return false, fmt.Errorf("error checking rule: %v: %s", err, out)
}
const (
// Max time we wait for an iptables flush to complete after we notice it has started
iptablesFlushTimeout = 5 * time.Second
// How often we poll while waiting for an iptables flush to complete
iptablesFlushPollTime = 100 * time.Millisecond
)
// Monitor is part of Interface
func (runner *runner) Monitor(canary Chain, tables []Table, reloadFunc func(), interval time.Duration, stopCh <-chan struct{}) {
for {
_ = utilwait.PollImmediateUntil(interval, func() (bool, error) {
for _, table := range tables {
if _, err := runner.EnsureChain(table, canary); err != nil {
klog.ErrorS(err, "Could not set up iptables canary", "table", table, "chain", canary)
return false, nil
}
}
return true, nil
}, stopCh)
// Poll until stopCh is closed or iptables is flushed
err := utilwait.PollUntil(interval, func() (bool, error) {
if exists, err := runner.ChainExists(tables[0], canary); exists {
return false, nil
} else if isResourceError(err) {
klog.ErrorS(err, "Could not check for iptables canary", "table", tables[0], "chain", canary)
return false, nil
}
klog.V(2).InfoS("IPTables canary deleted", "table", tables[0], "chain", canary)
// Wait for the other canaries to be deleted too before returning
// so we don't start reloading too soon.
err := utilwait.PollImmediate(iptablesFlushPollTime, iptablesFlushTimeout, func() (bool, error) {
for i := 1; i < len(tables); i++ {
if exists, err := runner.ChainExists(tables[i], canary); exists || isResourceError(err) {
return false, nil
}
}
return true, nil
})
if err != nil {
klog.InfoS("Inconsistent iptables state detected")
}
return true, nil
}, stopCh)
if err != nil {
// stopCh was closed
for _, table := range tables {
_ = runner.DeleteChain(table, canary)
}
return
}
klog.V(2).InfoS("Reloading after iptables flush")
reloadFunc()
}
}
// ChainExists is part of Interface
func (runner *runner) ChainExists(table Table, chain Chain) (bool, error) {
fullArgs := makeFullArgs(table, chain)
runner.mu.Lock()
defer runner.mu.Unlock()
trace := utiltrace.New("iptables ChainExists")
defer trace.LogIfLong(2 * time.Second)
out, err := runner.run(opListChain, fullArgs)
if err != nil {
return false, fmt.Errorf("error listing chain %q in table %q: %w: %s", chain, table, err, out)
}
return true, nil
}
type operation string
const (
opCreateChain operation = "-N"
opFlushChain operation = "-F"
opDeleteChain operation = "-X"
opListChain operation = "-S"
opCheckRule operation = "-C"
opDeleteRule operation = "-D"
)
func makeFullArgs(table Table, chain Chain, args ...string) []string {
return append([]string{string(chain), "-t", string(table)}, args...)
}
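// For example, makeFullArgs(TableNAT, ChainPostrouting, "-j", "MASQUERADE")
// returns []string{"POSTROUTING", "-t", "nat", "-j", "MASQUERADE"}.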
const iptablesVersionPattern = `v([0-9]+(\.[0-9]+)+)`
// getIPTablesVersion runs "iptables --version" and parses the returned version
func getIPTablesVersion(exec utilexec.Interface, protocol Protocol) (*utilversion.Version, error) {
// this doesn't access mutable state so we don't need to use the interface / runner
iptablesCmd := iptablesCommand(protocol)
bytes, err := exec.Command(iptablesCmd, "--version").CombinedOutput()
if err != nil {
return nil, err
}
versionMatcher := regexp.MustCompile(iptablesVersionPattern)
match := versionMatcher.FindStringSubmatch(string(bytes))
if match == nil {
return nil, fmt.Errorf("no iptables version found in string: %s", bytes)
}
version, err := utilversion.ParseGeneric(match[1])
if err != nil {
return nil, fmt.Errorf("iptables version %q is not a valid version string: %v", match[1], err)
}
return version, nil
}
// Checks if iptables version has a "wait" flag
func getIPTablesWaitFlag(version *utilversion.Version) []string {
switch {
case version.AtLeast(WaitSecondsMinVersion):
return []string{WaitString, WaitSecondsValue}
case version.AtLeast(WaitMinVersion):
return []string{WaitString}
default:
return nil
}
}
// Checks if iptables-restore has a "wait" flag
func getIPTablesRestoreWaitFlag(version *utilversion.Version, exec utilexec.Interface, protocol Protocol) []string {
if version.AtLeast(WaitRestoreMinVersion) {
return []string{WaitString, WaitSecondsValue}
}
// Older versions may have backported features; if iptables-restore supports
// --version, assume it also supports --wait
vstring, err := getIPTablesRestoreVersionString(exec, protocol)
if err != nil || vstring == "" {
klog.V(3).InfoS("Couldn't get iptables-restore version; assuming it doesn't support --wait")
return nil
}
if _, err := utilversion.ParseGeneric(vstring); err != nil {
klog.V(3).InfoS("Couldn't parse iptables-restore version; assuming it doesn't support --wait")
return nil
}
return []string{WaitString}
}
// getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string
// in the form "X.X.X"
func getIPTablesRestoreVersionString(exec utilexec.Interface, protocol Protocol) (string, error) {
// this doesn't access mutable state so we don't need to use the interface / runner
// iptables-restore hasn't always had --version, and worse complains
// about unrecognized commands but doesn't exit when it gets them.
// Work around that by setting stdin to nothing so it exits immediately.
iptablesRestoreCmd := iptablesRestoreCommand(protocol)
cmd := exec.Command(iptablesRestoreCmd, "--version")
cmd.SetStdin(bytes.NewReader([]byte{}))
bytes, err := cmd.CombinedOutput()
if err != nil {
return "", err
}
versionMatcher := regexp.MustCompile(iptablesVersionPattern)
match := versionMatcher.FindStringSubmatch(string(bytes))
if match == nil {
return "", fmt.Errorf("no iptables version found in string: %s", bytes)
}
return match[1], nil
}
func (runner *runner) HasRandomFully() bool {
return runner.hasRandomFully
}
// Present tests whether iptables is supported on the current kernel by checking
// for the existence of a default table and chain.
func (runner *runner) Present() error {
if _, err := runner.ChainExists(TableNAT, ChainPostrouting); err != nil {
return err
}
return nil
}
var iptablesNotFoundStrings = []string{
// iptables-legacy [-A|-I] BAD-CHAIN [...]
// iptables-legacy [-C|-D] GOOD-CHAIN [...non-matching rule...]
// iptables-legacy [-X|-F|-Z] BAD-CHAIN
// iptables-nft -X BAD-CHAIN
// NB: iptables-nft [-F|-Z] BAD-CHAIN exits with no error
"No chain/target/match by that name",
// iptables-legacy [...] -j BAD-CHAIN
// iptables-nft-1.8.0 [-A|-I] BAD-CHAIN [...]
// iptables-nft-1.8.0 [-A|-I] GOOD-CHAIN -j BAD-CHAIN
// NB: also matches some other things like "-m BAD-MODULE"
"No such file or directory",
// iptables-legacy [-C|-D] BAD-CHAIN [...]
// iptables-nft [-C|-D] GOOD-CHAIN [...non-matching rule...]
"does a matching rule exist",
// iptables-nft-1.8.2 [-A|-C|-D|-I] BAD-CHAIN [...]
// iptables-nft-1.8.2 [...] -j BAD-CHAIN
"does not exist",
}
// IsNotFoundError returns true if the error indicates "not found". It parses
// the error string looking for known values, which is imperfect; beware using
// this function for anything beyond deciding between logging or ignoring an
// error.
func IsNotFoundError(err error) bool {
es := err.Error()
for _, str := range iptablesNotFoundStrings {
if strings.Contains(es, str) {
return true
}
}
return false
}
const iptablesStatusResourceProblem = 4
// isResourceError returns true if the error indicates that iptables ran into a "resource
// problem" and was unable to attempt the request. In particular, this will be true if it
// times out trying to get the iptables lock.
func isResourceError(err error) bool {
if ee, isExitError := err.(utilexec.ExitError); isExitError {
return ee.ExitStatus() == iptablesStatusResourceProblem
}
return false
}
// ParseError records the payload when iptables reports an error parsing its input.
type ParseError interface {
// Line returns the line number on which the parse error was reported.
// NOTE: First line is 1.
Line() int
// Error returns the error message of the parse error, including line number.
Error() string
}
type parseError struct {
cmd string
line int
}
func (e parseError) Line() int {
return e.line
}
func (e parseError) Error() string {
return fmt.Sprintf("%s: input error on line %d: ", e.cmd, e.line)
}
// LineData represents a single numbered line of data.
type LineData struct {
// Line holds the line number (the first line is 1).
Line int
// The data of the line.
Data string
}
var regexpParseError = regexp.MustCompile("line ([1-9][0-9]*) failed$")
// parseRestoreError extracts the line number from the error string and, if the
// string matches the expected format, returns a ParseError. For example:
// input: iptables-restore: line 51 failed
// output: parseError{cmd: "iptables-restore", line: 51}
// NOTE: parseRestoreError depends on the error format of iptables; if it ever
// changes, we need to update this function.
func parseRestoreError(str string) (ParseError, bool) {
errors := strings.Split(str, ":")
if len(errors) != 2 {
return nil, false
}
cmd := errors[0]
matches := regexpParseError.FindStringSubmatch(errors[1])
if len(matches) != 2 {
return nil, false
}
line, err := strconv.Atoi(matches[1])
if err != nil {
return nil, false
}
return parseError{cmd: cmd, line: line}, true
}
// ExtractLines returns the lines of input within count lines before and after
// the given line number.
// NOTE: lines start from line 1
func ExtractLines(lines []byte, line, count int) []LineData {
// first line is line 1, so line can't be smaller than 1
if line < 1 {
return nil
}
start := line - count
if start <= 0 {
start = 1
}
end := line + count + 1
offset := 1
scanner := bufio.NewScanner(bytes.NewBuffer(lines))
extractLines := make([]LineData, 0, count*2)
for scanner.Scan() {
if offset >= start && offset < end {
extractLines = append(extractLines, LineData{
Line: offset,
Data: scanner.Text(),
})
}
if offset == end {
break
}
offset++
}
return extractLines
}
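// For example, if the restore input failed on line 5, ExtractLines(lines, 5, 2)
// returns lines 3 through 7: the failing line plus two lines of context on each side.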
//go:build linux
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
import (
"fmt"
"net"
"os"
"time"
"golang.org/x/sys/unix"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
type locker struct {
lock16 *os.File
lock14 *net.UnixListener
}
func (l *locker) Close() error {
errList := []error{}
if l.lock16 != nil {
if err := l.lock16.Close(); err != nil {
errList = append(errList, err)
}
}
if l.lock14 != nil {
if err := l.lock14.Close(); err != nil {
errList = append(errList, err)
}
}
return utilerrors.NewAggregate(errList)
}
func grabIptablesLocks(lockfilePath14x, lockfilePath16x string) (iptablesLocker, error) {
var err error
var success bool
l := &locker{}
defer func(l *locker) {
// Clean up immediately on failure
if !success {
l.Close()
}
}(l)
// Grab both 1.6.x and 1.4.x-style locks; we don't know what the
// iptables-restore version is if it doesn't support --wait, so we
// can't assume which lock method it'll use.
// Roughly duplicate iptables 1.6.x xtables_lock() function.
l.lock16, err = os.OpenFile(lockfilePath16x, os.O_CREATE, 0600)
if err != nil {
return nil, fmt.Errorf("failed to open iptables lock %s: %v", lockfilePath16x, err)
}
if err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) {
if err := grabIptablesFileLock(l.lock16); err != nil {
return false, nil
}
return true, nil
}); err != nil {
return nil, fmt.Errorf("failed to acquire new iptables lock: %v", err)
}
// Roughly duplicate iptables 1.4.x xtables_lock() function.
if err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) {
l.lock14, err = net.ListenUnix("unix", &net.UnixAddr{Name: lockfilePath14x, Net: "unix"})
if err != nil {
return false, nil
}
return true, nil
}); err != nil {
return nil, fmt.Errorf("failed to acquire old iptables lock: %v", err)
}
success = true
return l, nil
}
func grabIptablesFileLock(f *os.File) error {
return unix.Flock(int(f.Fd()), unix.LOCK_EX|unix.LOCK_NB)
}
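// Usage sketch (illustrative, not part of the original source). The lock paths
// below are assumptions: "/run/xtables.lock" is the conventional iptables 1.6.x
// lock file, and "@xtables" the abstract unix socket used by iptables 1.4.x.
//
//	lock, err := grabIptablesLocks("@xtables", "/run/xtables.lock")
//	if err != nil {
//		return err
//	}
//	defer lock.Close()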
//go:build linux
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iptables
import (
"bytes"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
)
// MakeChainLine returns an iptables-save/restore formatted chain line given a Chain
func MakeChainLine(chain Chain) string {
return fmt.Sprintf(":%s - [0:0]", chain)
}
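// For example (illustrative, not part of the original source):
//
//	MakeChainLine(Chain("KUBE-SERVICES")) // returns ":KUBE-SERVICES - [0:0]"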
// GetChainsFromTable parses iptables-save data to find the chains that are defined. It
// assumes that save contains a single table's data, and returns a set with keys for every
// chain defined in that table.
func GetChainsFromTable(save []byte) sets.Set[Chain] {
chainsSet := sets.New[Chain]()
for {
i := bytes.Index(save, []byte("\n:"))
if i == -1 {
break
}
start := i + 2
save = save[start:]
end := bytes.Index(save, []byte(" "))
if end == -1 {
// shouldn't happen, but...
break
}
chain := Chain(save[:end])
chainsSet.Insert(chain)
save = save[end:]
}
return chainsSet
}
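// Usage sketch (illustrative, not part of the original source): extracting the
// chains from a single table's iptables-save output.
//
//	save := []byte("*filter\n:INPUT ACCEPT [0:0]\n:KUBE-FORWARD - [0:0]\n-A KUBE-FORWARD -j ACCEPT\nCOMMIT\n")
//	chains := GetChainsFromTable(save) // set containing "INPUT" and "KUBE-FORWARD"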
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kernel
import (
"fmt"
"os"
"strings"
"k8s.io/apimachinery/pkg/util/version"
)
type readFileFunc func(string) ([]byte, error)
// GetVersion returns currently running kernel version.
func GetVersion() (*version.Version, error) {
return getVersion(os.ReadFile)
}
// getVersion reads the os-release file using the given readFile function.
func getVersion(readFile readFileFunc) (*version.Version, error) {
kernelVersionFile := "/proc/sys/kernel/osrelease"
fileContent, err := readFile(kernelVersionFile)
if err != nil {
return nil, fmt.Errorf("failed to read os-release file: %s", err.Error())
}
kernelVersion, err := version.ParseGeneric(strings.TrimSpace(string(fileContent)))
if err != nil {
return nil, fmt.Errorf("failed to parse kernel version: %s", err.Error())
}
return kernelVersion, nil
}
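// Usage sketch (illustrative, not part of the original source): gating a
// feature on a minimum kernel version.
//
//	v, err := GetVersion()
//	if err == nil && v.AtLeast(version.MustParseGeneric("5.4")) {
//		// kernel is new enough
//	}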
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package labels
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CloneAndAddLabel clones the given map and returns a new map with the given key and value added.
// It returns the given map unmodified if labelKey is empty.
func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string {
if labelKey == "" {
// Don't need to add a label.
return labels
}
// Clone.
newLabels := map[string]string{}
for key, value := range labels {
newLabels[key] = value
}
newLabels[labelKey] = labelValue
return newLabels
}
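// Usage sketch (illustrative, not part of the original source):
//
//	orig := map[string]string{"app": "web"}
//	labeled := CloneAndAddLabel(orig, "tier", "frontend")
//	// orig is unchanged; labeled == map[app:web tier:frontend]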
// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed.
// Returns the given map, if labelKey is empty.
func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string {
if labelKey == "" {
// Don't need to remove a label.
return labels
}
// Clone.
newLabels := map[string]string{}
for key, value := range labels {
newLabels[key] = value
}
delete(newLabels, labelKey)
return newLabels
}
// AddLabel returns a map with the given key and value added to the given map.
func AddLabel(labels map[string]string, labelKey, labelValue string) map[string]string {
if labelKey == "" {
// Don't need to add a label.
return labels
}
if labels == nil {
labels = make(map[string]string)
}
labels[labelKey] = labelValue
return labels
}
// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added.
// It returns the given selector unmodified if labelKey is empty.
func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
if labelKey == "" {
// Don't need to add a label.
return selector
}
// Clone.
newSelector := new(metav1.LabelSelector)
// TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here.
newSelector.MatchLabels = make(map[string]string)
if selector.MatchLabels != nil {
for key, val := range selector.MatchLabels {
newSelector.MatchLabels[key] = val
}
}
newSelector.MatchLabels[labelKey] = labelValue
if selector.MatchExpressions != nil {
newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions))
for i, me := range selector.MatchExpressions {
newMExps[i].Key = me.Key
newMExps[i].Operator = me.Operator
if me.Values != nil {
newMExps[i].Values = make([]string, len(me.Values))
copy(newMExps[i].Values, me.Values)
} else {
newMExps[i].Values = nil
}
}
newSelector.MatchExpressions = newMExps
} else {
newSelector.MatchExpressions = nil
}
return newSelector
}
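// Usage sketch (illustrative, not part of the original source):
//
//	sel := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}
//	newSel := CloneSelectorAndAddLabel(sel, "pod-template-hash", "abc123")
//	// sel is unchanged; newSel matches both labels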
// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels.
func AddLabelToSelector(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
if labelKey == "" {
// Don't need to add a label.
return selector
}
if selector.MatchLabels == nil {
selector.MatchLabels = make(map[string]string)
}
selector.MatchLabels[labelKey] = labelValue
return selector
}
// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels
func SelectorHasLabel(selector *metav1.LabelSelector, labelKey string) bool {
return len(selector.MatchLabels[labelKey]) > 0
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"net"
v1 "k8s.io/api/core/v1"
netutils "k8s.io/utils/net"
)
const (
// NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
// on the node it is (was) running.
NodeUnreachablePodReason = "NodeLost"
// NodeUnreachablePodMessage is the message on a pod when its state cannot be confirmed as kubelet is unresponsive
// on the node it is (was) running.
NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive"
)
// NoMatchError is a typed implementation of the error interface. It indicates a failure to get a matching Node.
type NoMatchError struct {
addresses []v1.NodeAddress
}
// Error is the implementation of the conventional interface for
// representing an error condition, with the nil value representing no error.
func (e *NoMatchError) Error() string {
return fmt.Sprintf("no preferred addresses found; known addresses: %v", e.addresses)
}
// GetPreferredNodeAddress returns the address of the provided node, using the provided preference order.
// If none of the preferred address types are found, an error is returned.
func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddressType) (string, error) {
for _, addressType := range preferredAddressTypes {
for _, address := range node.Status.Addresses {
if address.Type == addressType {
return address.Address, nil
}
}
}
return "", &NoMatchError{addresses: node.Status.Addresses}
}
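// Usage sketch (illustrative, not part of the original source): preferring an
// internal IP, then an external IP, then the hostname.
//
//	addr, err := GetPreferredNodeAddress(node, []v1.NodeAddressType{
//		v1.NodeInternalIP, v1.NodeExternalIP, v1.NodeHostName,
//	})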
// GetNodeHostIPs returns the provided node's IP(s); either a single "primary IP" for the
// node in a single-stack cluster, or a dual-stack pair of IPs in a dual-stack cluster
// (for nodes that actually have dual-stack IPs). Among other things, the IPs returned
// from this function are used as the `.status.PodIPs` values for host-network pods on the
// node, and the first IP is used as the `.status.HostIP` for all pods on the node.
func GetNodeHostIPs(node *v1.Node) ([]net.IP, error) {
// Re-sort the addresses with InternalIPs first and then ExternalIPs
allIPs := make([]net.IP, 0, len(node.Status.Addresses))
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP {
ip := netutils.ParseIPSloppy(addr.Address)
if ip != nil {
allIPs = append(allIPs, ip)
}
}
}
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeExternalIP {
ip := netutils.ParseIPSloppy(addr.Address)
if ip != nil {
allIPs = append(allIPs, ip)
}
}
}
if len(allIPs) == 0 {
return nil, fmt.Errorf("host IP unknown; known addresses: %v", node.Status.Addresses)
}
nodeIPs := []net.IP{allIPs[0]}
for _, ip := range allIPs {
if netutils.IsIPv6(ip) != netutils.IsIPv6(nodeIPs[0]) {
nodeIPs = append(nodeIPs, ip)
break
}
}
return nodeIPs, nil
}
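// Usage sketch (illustrative, not part of the original source); the addresses
// shown are hypothetical:
//
//	ips, err := GetNodeHostIPs(node)
//	// single-stack node: [10.0.0.1]
//	// dual-stack node:   [10.0.0.1 fd00::1] (primary IP first)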
// IsNodeReady returns true if a node is ready; false otherwise.
func IsNodeReady(node *v1.Node) bool {
for _, c := range node.Status.Conditions {
if c.Type == v1.NodeReady {
return c.Status == v1.ConditionTrue
}
}
return false
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oom
type FakeOOMAdjuster struct{}
func NewFakeOOMAdjuster() *OOMAdjuster {
return &OOMAdjuster{
pidLister: func(cgroupName string) ([]int, error) { return make([]int, 0), nil },
ApplyOOMScoreAdj: fakeApplyOOMScoreAdj,
ApplyOOMScoreAdjContainer: fakeApplyOOMScoreAdjContainer,
}
}
func fakeApplyOOMScoreAdj(pid int, oomScoreAdj int) error {
return nil
}
func fakeApplyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
return nil
}
//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oom
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"time"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/klog/v2"
)
func NewOOMAdjuster() *OOMAdjuster {
oomAdjuster := &OOMAdjuster{
pidLister: getPids,
ApplyOOMScoreAdj: applyOOMScoreAdj,
}
oomAdjuster.ApplyOOMScoreAdjContainer = oomAdjuster.applyOOMScoreAdjContainer
return oomAdjuster
}
func getPids(cgroupName string) ([]int, error) {
return cmutil.GetPids(filepath.Join("/", cgroupName))
}
// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
// Returns os.ErrNotExist if the `pid` does not exist.
func applyOOMScoreAdj(pid int, oomScoreAdj int) error {
if pid < 0 {
return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
}
var pidStr string
if pid == 0 {
pidStr = "self"
} else {
pidStr = strconv.Itoa(pid)
}
maxTries := 2
oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
value := strconv.Itoa(oomScoreAdj)
klog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value)
var err error
for i := 0; i < maxTries; i++ {
err = os.WriteFile(oomScoreAdjPath, []byte(value), 0700)
if err != nil {
if os.IsNotExist(err) {
klog.V(2).Infof("%q does not exist", oomScoreAdjPath)
return os.ErrNotExist
}
klog.V(3).Info(err)
time.Sleep(100 * time.Millisecond)
continue
}
return nil
}
if err != nil {
klog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err)
}
return err
}
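// Usage sketch (illustrative, not part of the original source): protect the
// current process from the OOM killer (requires sufficient privileges).
//
//	if err := applyOOMScoreAdj(0, -998); err != nil {
//		// /proc/self/oom_score_adj could not be written
//	}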
// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
// Keeps trying to write until the process list of the cgroup stabilizes, or until maxTries tries.
func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
adjustedProcessSet := make(map[int]bool)
for i := 0; i < maxTries; i++ {
continueAdjusting := false
pidList, err := oomAdjuster.pidLister(cgroupName)
if err != nil {
if os.IsNotExist(err) {
// Nothing to do since the container doesn't exist anymore.
return os.ErrNotExist
}
continueAdjusting = true
klog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err)
} else if len(pidList) == 0 {
klog.V(10).Infof("Pid list is empty")
continueAdjusting = true
} else {
for _, pid := range pidList {
if !adjustedProcessSet[pid] {
klog.V(10).Infof("pid %d needs to be set", pid)
if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil {
adjustedProcessSet[pid] = true
} else if err == os.ErrNotExist {
continue
} else {
klog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err)
continueAdjusting = true
}
// Processes can come and go while we try to apply oom score adjust value. So ignore errors here.
}
}
}
if !continueAdjusting {
return nil
}
// There's a slight race. A process might have forked just before we write its OOM score adjust.
// The fork might copy the parent process's old OOM score, then this function might execute and
// update the parent's OOM score, but the forked process id might not be reflected in cgroup.procs
// for a short amount of time. So this function might return without changing the forked process's
// OOM score. Very unlikely race, so ignoring this for now.
}
return fmt.Errorf("exceeded maxTries, some processes might not have desired OOM score")
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parsers
import (
"fmt"
// Import the crypto sha256 algorithm for the docker image parser to work
_ "crypto/sha256"
// Import the crypto/sha512 algorithm for the docker image parser to work with 384 and 512 sha hashes
_ "crypto/sha512"
dockerref "github.com/distribution/reference"
)
// ParseImageName parses a docker image string into three parts: repo, tag and digest.
// If both tag and digest are empty, a default image tag will be returned.
func ParseImageName(image string) (string, string, string, error) {
named, err := dockerref.ParseNormalizedNamed(image)
if err != nil {
return "", "", "", fmt.Errorf("couldn't parse image name %q: %v", image, err)
}
repoToPull := named.Name()
var tag, digest string
tagged, ok := named.(dockerref.Tagged)
if ok {
tag = tagged.Tag()
}
digested, ok := named.(dockerref.Digested)
if ok {
digest = digested.Digest().String()
}
// If neither a tag nor a digest was specified, default the tag to "latest".
if len(tag) == 0 && len(digest) == 0 {
tag = "latest"
}
return repoToPull, tag, digest, nil
}
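// Usage sketch (illustrative, not part of the original source):
//
//	repo, tag, digest, err := ParseImageName("busybox")
//	// repo == "docker.io/library/busybox", tag == "latest", digest == ""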
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"bytes"
"context"
"encoding/json"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// PatchPodStatus patches pod status. It returns true and avoids an update if the patch contains no changes.
func PatchPodStatus(ctx context.Context, c clientset.Interface, namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, bool, error) {
patchBytes, unchanged, err := preparePatchBytesForPodStatus(namespace, name, uid, oldPodStatus, newPodStatus)
if err != nil {
return nil, nil, false, err
}
if unchanged {
return nil, patchBytes, true, nil
}
updatedPod, err := c.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if err != nil {
return nil, nil, false, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err)
}
return updatedPod, patchBytes, false, nil
}
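// Usage sketch (illustrative, not part of the original source); client, uid
// and the two statuses are assumed to exist in the caller's scope.
//
//	pod, patch, unchanged, err := PatchPodStatus(ctx, client, "default", "mypod", uid, oldStatus, newStatus)
//	if err == nil && unchanged {
//		// nothing was sent to the API server
//	}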
func preparePatchBytesForPodStatus(namespace, name string, uid types.UID, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, bool, error) {
oldData, err := json.Marshal(v1.Pod{
Status: oldPodStatus,
})
if err != nil {
return nil, false, fmt.Errorf("failed to Marshal oldData for pod %q/%q: %v", namespace, name, err)
}
newData, err := json.Marshal(v1.Pod{
ObjectMeta: metav1.ObjectMeta{UID: uid}, // only put the uid in the new object to ensure it appears in the patch as a precondition
Status: newPodStatus,
})
if err != nil {
return nil, false, fmt.Errorf("failed to Marshal newData for pod %q/%q: %v", namespace, name, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})
if err != nil {
return nil, false, fmt.Errorf("failed to CreateTwoWayMergePatch for pod %q/%q: %v", namespace, name, err)
}
return patchBytes, bytes.Equal(patchBytes, []byte(fmt.Sprintf(`{"metadata":{"uid":%q}}`, uid))), nil
}
// ReplaceOrAppendPodCondition replaces the first pod condition with equal type or appends if there is none
func ReplaceOrAppendPodCondition(conditions []v1.PodCondition, condition *v1.PodCondition) []v1.PodCondition {
if i, _ := podutil.GetPodConditionFromList(conditions, condition.Type); i >= 0 {
conditions[i] = *condition
} else {
conditions = append(conditions, *condition)
}
return conditions
}
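// Usage sketch (illustrative, not part of the original source):
//
//	conds := ReplaceOrAppendPodCondition(pod.Status.Conditions, &v1.PodCondition{
//		Type:   v1.PodReady,
//		Status: v1.ConditionTrue,
//	})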
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package removeall
import (
"fmt"
"io"
"os"
"syscall"
"k8s.io/mount-utils"
)
// RemoveAllOneFilesystemCommon removes the path and any children it contains,
// using the provided remove function. It removes everything it can but returns
// the first error it encounters. If the path does not exist, it returns nil
// (no error), matching the behavior of os.RemoveAll.
// It makes sure it does not cross a mount boundary, i.e. it does *not* remove
// files from other filesystems. Like 'rm -rf --one-file-system'.
// It is copied from the os.RemoveAll() sources, with an IsLikelyNotMountPoint
// check added.
func RemoveAllOneFilesystemCommon(mounter mount.Interface, path string, remove func(string) error) error {
// Simple case: if Remove works, we're done.
err := remove(path)
if err == nil || os.IsNotExist(err) {
return nil
}
// Otherwise, is this a directory we need to recurse into?
dir, serr := os.Lstat(path)
if serr != nil {
if serr, ok := serr.(*os.PathError); ok && (os.IsNotExist(serr.Err) || serr.Err == syscall.ENOTDIR) {
return nil
}
return serr
}
if !dir.IsDir() {
// Not a directory; return the error from remove.
return err
}
// Directory.
isNotMount, err := mounter.IsLikelyNotMountPoint(path)
if err != nil {
return err
}
if !isNotMount {
return fmt.Errorf("cannot delete directory %s: it is a mount point", path)
}
fd, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
// Race. It was deleted between the Lstat and Open.
// Return nil per RemoveAll's docs.
return nil
}
return err
}
// Remove contents & return first error.
err = nil
for {
names, err1 := fd.Readdirnames(100)
for _, name := range names {
err1 := RemoveAllOneFilesystemCommon(mounter, path+string(os.PathSeparator)+name, remove)
if err == nil {
err = err1
}
}
if err1 == io.EOF {
break
}
// If Readdirnames returned an error, use it.
if err == nil {
err = err1
}
if len(names) == 0 {
break
}
}
// Close directory, because windows won't remove opened directory.
fd.Close()
// Remove directory.
err1 := remove(path)
if err1 == nil || os.IsNotExist(err1) {
return nil
}
if err == nil {
err = err1
}
return err
}
// RemoveAllOneFilesystem removes the path and any children it contains, using
// the os.Remove function. It makes sure it does not cross mount boundaries,
// i.e. it returns an error rather than remove files from another filesystem.
// It removes everything it can but returns the first error it encounters.
// If the path does not exist, it returns nil (no error).
func RemoveAllOneFilesystem(mounter mount.Interface, path string) error {
return RemoveAllOneFilesystemCommon(mounter, path, os.Remove)
}
// RemoveDirsOneFilesystem removes the path and any empty subdirectories it
// contains, using the syscall.Rmdir function. Unlike RemoveAllOneFilesystem,
// RemoveDirsOneFilesystem will remove only directories and returns an error if
// it encounters any files in the directory tree. It makes sure it does not
// cross mount boundaries, i.e. it returns an error rather than remove dirs
// from another filesystem. It removes everything it can but returns the first
// error it encounters. If the path does not exist, it returns nil (no error).
func RemoveDirsOneFilesystem(mounter mount.Interface, path string) error {
return RemoveAllOneFilesystemCommon(mounter, path, syscall.Rmdir)
}
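// Usage sketch (illustrative, not part of the original source); the path is a
// hypothetical example.
//
//	mounter := mount.New("")
//	if err := RemoveAllOneFilesystem(mounter, "/var/lib/kubelet/pods/example/volumes"); err != nil {
//		// a mount point was encountered, or a file could not be removed
//	}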
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package slice provides utility methods for common operations on slices.
package slice
import (
"sort"
)
// CopyStrings copies the contents of the specified string slice
// into a new slice.
func CopyStrings(s []string) []string {
if s == nil {
return nil
}
c := make([]string, len(s))
copy(c, s)
return c
}
// SortStrings sorts the specified string slice in place. It returns the same
// slice that was provided in order to facilitate method chaining.
func SortStrings(s []string) []string {
sort.Strings(s)
return s
}
// ContainsString checks if a given slice of strings contains the provided string.
// If a modifier func is provided, it is called with each slice item before the comparison.
func ContainsString(slice []string, s string, modifier func(s string) string) bool {
for _, item := range slice {
if item == s {
return true
}
if modifier != nil && modifier(item) == s {
return true
}
}
return false
}
// RemoveString returns a newly created []string that contains all items from slice
// that are not equal to s. If a modifier func is provided, items for which
// modifier(item) == s are removed as well.
func RemoveString(slice []string, s string, modifier func(s string) string) []string {
newSlice := make([]string, 0)
for _, item := range slice {
if item == s {
continue
}
if modifier != nil && modifier(item) == s {
continue
}
newSlice = append(newSlice, item)
}
if len(newSlice) == 0 {
// Sanitize for unit tests so we don't need to distinguish empty array
// and nil.
newSlice = nil
}
return newSlice
}
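// Usage sketches (illustrative, not part of the original source):
//
//	ContainsString([]string{"a", "b"}, "b", nil)    // true
//	RemoveString([]string{"a", "b", "a"}, "a", nil) // ["b"]
//	RemoveString([]string{"a"}, "a", nil)           // nil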
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tail
import (
"io"
"os"
)
const (
// blockSize is the block size used in tail.
blockSize = 1024
)
// ReadAtMost reads at most max bytes from the end of the file identified by path or
// returns an error. It returns true if the file was longer than max. It will
// allocate up to max bytes.
func ReadAtMost(path string, max int64) ([]byte, bool, error) {
f, err := os.Open(path)
if err != nil {
return nil, false, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, false, err
}
size := fi.Size()
if size == 0 {
return nil, false, nil
}
if size < max {
max = size
}
offset, err := f.Seek(-max, io.SeekEnd)
if err != nil {
return nil, false, err
}
data, err := io.ReadAll(f)
return data, offset > 0, err
}
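// Usage sketch (illustrative, not part of the original source); the log path
// is a hypothetical example.
//
//	data, truncated, err := ReadAtMost("/var/log/containers/app.log", 4*blockSize)
//	// data holds at most 4096 bytes from the end of the file;
//	// truncated reports whether the file was longer than that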
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package taints implements utilities for working with taints.
package taints
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
const (
MODIFIED = "modified"
TAINTED = "tainted"
UNTAINTED = "untainted"
)
// parseTaint parses a taint from a string, whose form must be either
// '<key>=<value>:<effect>', '<key>:<effect>', or '<key>'.
func parseTaint(st string) (v1.Taint, error) {
var taint v1.Taint
var key string
var value string
var effect v1.TaintEffect
parts := strings.Split(st, ":")
switch len(parts) {
case 1:
key = parts[0]
case 2:
effect = v1.TaintEffect(parts[1])
if err := validateTaintEffect(effect); err != nil {
return taint, err
}
partsKV := strings.Split(parts[0], "=")
if len(partsKV) > 2 {
return taint, fmt.Errorf("invalid taint spec: %v", st)
}
key = partsKV[0]
if len(partsKV) == 2 {
value = partsKV[1]
if errs := validation.IsValidLabelValue(value); len(errs) > 0 {
return taint, fmt.Errorf("invalid taint spec: %v, %s", st, strings.Join(errs, "; "))
}
}
default:
return taint, fmt.Errorf("invalid taint spec: %v", st)
}
if errs := validation.IsQualifiedName(key); len(errs) > 0 {
return taint, fmt.Errorf("invalid taint spec: %v, %s", st, strings.Join(errs, "; "))
}
taint.Key = key
taint.Value = value
taint.Effect = effect
return taint, nil
}
func validateTaintEffect(effect v1.TaintEffect) error {
if effect != v1.TaintEffectNoSchedule && effect != v1.TaintEffectPreferNoSchedule && effect != v1.TaintEffectNoExecute {
return fmt.Errorf("invalid taint effect: %v, unsupported taint effect", effect)
}
return nil
}
// ParseTaints takes a spec (a slice of strings) and produces two slices: taints
// to be added and taints to be removed. It also validates the spec. For
// example, the form `<key>` may be used to remove a taint, but not to add one.
func ParseTaints(spec []string) ([]v1.Taint, []v1.Taint, error) {
var taints, taintsToRemove []v1.Taint
uniqueTaints := map[v1.TaintEffect]sets.String{}
for _, taintSpec := range spec {
if strings.HasSuffix(taintSpec, "-") {
taintToRemove, err := parseTaint(strings.TrimSuffix(taintSpec, "-"))
if err != nil {
return nil, nil, err
}
taintsToRemove = append(taintsToRemove, v1.Taint{Key: taintToRemove.Key, Effect: taintToRemove.Effect})
} else {
newTaint, err := parseTaint(taintSpec)
if err != nil {
return nil, nil, err
}
// validate that the taint has an effect, which is required to add the taint
if len(newTaint.Effect) == 0 {
return nil, nil, fmt.Errorf("invalid taint spec: %v", taintSpec)
}
// validate if taint is unique by <key, effect>
if len(uniqueTaints[newTaint.Effect]) > 0 && uniqueTaints[newTaint.Effect].Has(newTaint.Key) {
return nil, nil, fmt.Errorf("duplicated taints with the same key and effect: %v", newTaint)
}
// add taint to existingTaints for uniqueness check
if len(uniqueTaints[newTaint.Effect]) == 0 {
uniqueTaints[newTaint.Effect] = sets.String{}
}
uniqueTaints[newTaint.Effect].Insert(newTaint.Key)
taints = append(taints, newTaint)
}
}
return taints, taintsToRemove, nil
}
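// Usage sketch (illustrative, not part of the original source): a trailing
// "-" marks a taint for removal.
//
//	toAdd, toRemove, err := ParseTaints([]string{"dedicated=gpu:NoSchedule", "stale:NoExecute-"})
//	// toAdd:    [{Key: "dedicated", Value: "gpu", Effect: NoSchedule}]
//	// toRemove: [{Key: "stale", Effect: NoExecute}]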
// CheckIfTaintsAlreadyExists checks if the node already has taints that we want to add and returns a string with taint keys.
func CheckIfTaintsAlreadyExists(oldTaints []v1.Taint, taints []v1.Taint) string {
var existingTaintList = make([]string, 0)
for _, taint := range taints {
for _, oldTaint := range oldTaints {
if taint.Key == oldTaint.Key && taint.Effect == oldTaint.Effect {
existingTaintList = append(existingTaintList, taint.Key)
}
}
}
return strings.Join(existingTaintList, ",")
}
// DeleteTaintsByKey removes all the taints that have the same key as the given taintKey.
func DeleteTaintsByKey(taints []v1.Taint, taintKey string) ([]v1.Taint, bool) {
newTaints := []v1.Taint{}
deleted := false
for i := range taints {
if taintKey == taints[i].Key {
deleted = true
continue
}
newTaints = append(newTaints, taints[i])
}
return newTaints, deleted
}
// DeleteTaint removes all the taints that have the same key and effect as the given taintToDelete.
func DeleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) {
newTaints := []v1.Taint{}
deleted := false
for i := range taints {
if taintToDelete.MatchTaint(&taints[i]) {
deleted = true
continue
}
newTaints = append(newTaints, taints[i])
}
return newTaints, deleted
}
// RemoveTaint tries to remove a taint from the node's list of taints. It returns a new copy of the
// updated Node and true if something was updated, false otherwise.
func RemoveTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
if len(nodeTaints) == 0 {
return newNode, false, nil
}
if !TaintExists(nodeTaints, taint) {
return newNode, false, nil
}
newTaints, _ := DeleteTaint(nodeTaints, taint)
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// AddOrUpdateTaint tries to add a taint to the node's list of taints. It returns a new copy of the
// updated Node and true if something was updated, false otherwise.
func AddOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
var newTaints []v1.Taint
updated := false
for i := range nodeTaints {
if taint.MatchTaint(&nodeTaints[i]) {
if helper.Semantic.DeepEqual(*taint, nodeTaints[i]) {
return newNode, false, nil
}
newTaints = append(newTaints, *taint)
updated = true
continue
}
newTaints = append(newTaints, nodeTaints[i])
}
if !updated {
newTaints = append(newTaints, *taint)
}
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// TaintExists checks if the given taint exists in the list of taints. It returns true if it exists, false otherwise.
func TaintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
// TaintKeyExists checks if the given taint key exists in the list of taints. It returns true if it exists, false otherwise.
func TaintKeyExists(taints []v1.Taint, taintKeyToMatch string) bool {
for _, taint := range taints {
if taint.Key == taintKeyToMatch {
return true
}
}
return false
}
// TaintSetDiff finds the difference between two taint slices and
// returns all new and removed elements of the new slice relative to the old slice.
// for example:
// input: taintsNew=[a b] taintsOld=[a c]
// output: taintsToAdd=[b] taintsToRemove=[c]
func TaintSetDiff(taintsNew, taintsOld []v1.Taint) (taintsToAdd []*v1.Taint, taintsToRemove []*v1.Taint) {
for _, taint := range taintsNew {
if !TaintExists(taintsOld, &taint) {
t := taint
taintsToAdd = append(taintsToAdd, &t)
}
}
for _, taint := range taintsOld {
if !TaintExists(taintsNew, &taint) {
t := taint
taintsToRemove = append(taintsToRemove, &t)
}
}
return
}
// TaintSetFilter filters from the taint slice according to the passed fn function to get the filtered taint slice.
func TaintSetFilter(taints []v1.Taint, fn func(*v1.Taint) bool) []v1.Taint {
res := []v1.Taint{}
for _, taint := range taints {
if fn(&taint) {
res = append(res, taint)
}
}
return res
}
// CheckTaintValidation checks if the given taint is valid.
// Returns error if the given taint is invalid.
func CheckTaintValidation(taint v1.Taint) error {
if errs := validation.IsQualifiedName(taint.Key); len(errs) > 0 {
return fmt.Errorf("invalid taint key: %s", strings.Join(errs, "; "))
}
if taint.Value != "" {
if errs := validation.IsValidLabelValue(taint.Value); len(errs) > 0 {
return fmt.Errorf("invalid taint value: %s", strings.Join(errs, "; "))
}
}
if taint.Effect != "" {
if err := validateTaintEffect(taint.Effect); err != nil {
return err
}
}
return nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/utils/clock"
)
const globalMountInGlobalPath = "globalmount"
type csiAttacher struct {
plugin *csiPlugin
k8s kubernetes.Interface
watchTimeout time.Duration
csiClient csiClient
}
// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
var _ volume.Detacher = &csiAttacher{}
var _ volume.DeviceMounter = &csiAttacher{}
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
_, ok := c.plugin.host.(volume.KubeletVolumeHost)
if ok {
return "", errors.New("attaching volumes from the kubelet is not supported")
}
if spec == nil {
klog.Error(log("attacher.Attach missing volume.Spec"))
return "", errors.New("missing spec")
}
pvSrc, err := getPVSourceFromSpec(spec)
if err != nil {
return "", errors.New(log("attacher.Attach failed to get CSIPersistentVolumeSource: %v", err))
}
node := string(nodeName)
attachID := getAttachmentName(pvSrc.VolumeHandle, pvSrc.Driver, node)
attachment, err := c.plugin.volumeAttachmentLister.Get(attachID)
if err != nil && !apierrors.IsNotFound(err) {
return "", errors.New(log("failed to get volume attachment from lister: %v", err))
}
if attachment == nil {
var vaSrc storage.VolumeAttachmentSource
if spec.InlineVolumeSpecForCSIMigration {
// inline PV scenario - use PV spec to populate VA source.
// The volume spec will be populated by CSI translation API
// for inline volumes. This allows fields required by the CSI
// attacher such as AccessMode and MountOptions (in addition to
// fields in the CSI persistent volume source) to be populated
// as part of CSI translation for inline volumes.
vaSrc = storage.VolumeAttachmentSource{
InlineVolumeSpec: &spec.PersistentVolume.Spec,
}
} else {
// regular PV scenario - use PV name to populate VA source
pvName := spec.PersistentVolume.GetName()
vaSrc = storage.VolumeAttachmentSource{
PersistentVolumeName: &pvName,
}
}
attachment := &storage.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
Name: attachID,
},
Spec: storage.VolumeAttachmentSpec{
NodeName: node,
Attacher: pvSrc.Driver,
Source: vaSrc,
},
}
_, err = c.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{})
if err != nil {
if !apierrors.IsAlreadyExists(err) {
return "", errors.New(log("attacher.Attach failed: %v", err))
}
klog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, pvSrc.VolumeHandle))
} else {
klog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, pvSrc.VolumeHandle))
}
}
// Attach and detach functionality is exclusive to the CSI plugin that runs in the AttachDetachController,
// and has access to a VolumeAttachment lister that can be polled for the current status.
if err := c.waitForVolumeAttachmentWithLister(spec, pvSrc.VolumeHandle, attachID, c.watchTimeout); err != nil {
return "", err
}
klog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID))
// Don't return attachID as a devicePath. We can reconstruct the attachID using getAttachmentName()
return "", nil
}
// WaitForAttach waits for the attach operation to complete and returns the device path when it is done.
// In this case, however, there is no waiting: the device is found by the CSI driver later, in the
// NodeStage / NodePublish calls, so this function just returns device metadata (here, the
// VolumeAttachment name). If the target VolumeAttachment does not exist or is not attached, the
// function returns an error and the caller (kubelet) retries. This lets us avoid watching the
// object, which serves no purpose here. More details in https://issues.k8s.io/124398
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, _ string, pod *v1.Pod, _ time.Duration) (string, error) {
source, err := getPVSourceFromSpec(spec)
if err != nil {
return "", errors.New(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
}
volumeHandle := source.VolumeHandle
attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(c.plugin.host.GetNodeName()))
attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
if err != nil {
klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err)
}
successful, err := verifyAttachmentStatus(attach, volumeHandle)
if err != nil {
return "", err
}
if !successful {
klog.Error(log("attacher.WaitForAttach failed for volume [%s] attached (will continue to try)", volumeHandle))
return "", fmt.Errorf("volume %v is not attached for volume attachment %v", volumeHandle, attachID)
}
return attach.Name, nil
}
func (c *csiAttacher) waitForVolumeAttachmentWithLister(spec *volume.Spec, volumeHandle, attachID string, timeout time.Duration) error {
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
verifyStatus := func() (bool, error) {
volumeAttachment, err := c.plugin.volumeAttachmentLister.Get(attachID)
if err != nil {
// Ignore "not found" errors in case the VolumeAttachment was just created and hasn't yet made it into the lister.
if !apierrors.IsNotFound(err) {
klog.Error(log("unexpected error waiting for volume attachment, %v", err))
return false, err
}
// The VolumeAttachment is not available yet and we will have to try again.
return false, nil
}
successful, err := verifyAttachmentStatus(volumeAttachment, volumeHandle)
if err != nil {
return false, err
}
return successful, nil
}
return c.waitForVolumeAttachDetachStatusWithLister(spec, volumeHandle, attachID, timeout, verifyStatus, "Attach")
}
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
klog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))
attached := make(map[*volume.Spec]bool)
for _, spec := range specs {
if spec == nil {
klog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
return nil, errors.New("missing spec")
}
pvSrc, err := getPVSourceFromSpec(spec)
if err != nil {
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed to get CSIPersistentVolumeSource: %v", err))
continue
}
driverName := pvSrc.Driver
volumeHandle := pvSrc.VolumeHandle
skip, err := c.plugin.skipAttach(driverName)
if err != nil {
klog.Error(log("Failed to check CSIDriver for %s: %s", driverName, err))
} else {
if skip {
// This volume is not attachable, pretend it's attached
attached[spec] = true
continue
}
}
attachID := getAttachmentName(volumeHandle, driverName, string(nodeName))
var attach *storage.VolumeAttachment
if c.plugin.volumeAttachmentLister != nil {
attach, err = c.plugin.volumeAttachmentLister.Get(attachID)
if err == nil {
attached[spec] = attach.Status.Attached
continue
}
klog.V(4).Info(log("attacher.VolumesAreAttached failed in AttachmentLister for attach.ID=%v: %v. Probing the API server.", attachID, err))
}
// The cache lookup is not setup or the object is not found in the cache.
// Get the object from the API server.
klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err = c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
if err != nil {
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
continue
}
klog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
attached[spec] = attach.Status.Attached
}
return attached, nil
}
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
klog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec))
deviceMountPath, err := makeDeviceMountPath(c.plugin, spec)
if err != nil {
return "", errors.New(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err))
}
klog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath)
return deviceMountPath, nil
}
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
klog.V(4).Info(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
if deviceMountPath == "" {
return errors.New(log("attacher.MountDevice failed, deviceMountPath is empty"))
}
// Setup
if spec == nil {
return errors.New(log("attacher.MountDevice failed, spec is nil"))
}
csiSource, err := getPVSourceFromSpec(spec)
if err != nil {
return errors.New(log("attacher.MountDevice failed to get CSIPersistentVolumeSource: %v", err))
}
// Create a CSI client so we can check whether NodeStage/NodeUnstage is supported.
if c.csiClient == nil {
c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver))
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err))
}
}
csi := c.csiClient
ctx, cancel := createCSIOperationContext(spec, c.watchTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return err
}
// Get secrets and publish context required for mountDevice
nodeName := string(c.plugin.host.GetNodeName())
publishContext, err := c.plugin.getPublishContext(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
if err != nil {
return volumetypes.NewTransientOperationFailure(err.Error())
}
nodeStageSecrets := map[string]string{}
// we only require secrets if csiSource has them and volume has NodeStage capability
if csiSource.NodeStageSecretRef != nil && stageUnstageSet {
nodeStageSecrets, err = getCredentialsFromSecret(c.k8s, csiSource.NodeStageSecretRef)
if err != nil {
err = fmt.Errorf("fetching NodeStageSecretRef %s/%s failed: %v",
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
// if we failed to fetch secret then that could be a transient error
return volumetypes.NewTransientOperationFailure(err.Error())
}
}
var mountOptions []string
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.MountOptions != nil {
mountOptions = spec.PersistentVolume.Spec.MountOptions
}
var seLinuxSupported bool
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
support, err := c.plugin.SupportsSELinuxContextMount(spec)
if err != nil {
return errors.New(log("failed to query for SELinuxMount support: %s", err))
}
if support && deviceMounterArgs.SELinuxLabel != "" {
mountOptions = util.AddSELinuxMountOption(mountOptions, deviceMounterArgs.SELinuxLabel)
seLinuxSupported = true
}
}
// Store volume metadata for UnmountDevice. Keep it around even if the
// driver does not support NodeStage; UnmountDevice still needs it.
if err = filesystem.MkdirAllWithPathCheck(deviceMountPath, 0750); err != nil {
return errors.New(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err))
}
klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath))
dataDir := filepath.Dir(deviceMountPath)
data := map[string]string{
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
}
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) && seLinuxSupported {
data[volDataKey.seLinuxMountContext] = deviceMounterArgs.SELinuxLabel
}
if !stageUnstageSet {
klog.Info(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
// defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
return nil
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = spec.PersistentVolume.Spec.AccessModes[0]
}
var nodeStageFSGroupArg *int64
driverSupportsCSIVolumeMountGroup, err := csi.NodeSupportsVolumeMountGroup(ctx)
if err != nil {
return volumetypes.NewTransientOperationFailure(log("attacher.MountDevice failed to determine if the node service has VOLUME_MOUNT_GROUP capability: %v", err))
}
err = saveVolumeData(dataDir, volDataFileName, data)
if err != nil {
errMsg := log("failed to save volume info data: %v", err)
klog.Error(errMsg)
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
}
return errors.New(errMsg)
}
if driverSupportsCSIVolumeMountGroup {
klog.V(3).Infof("Driver %s supports applying FSGroup (has VOLUME_MOUNT_GROUP node capability). Delegating FSGroup application to the driver through NodeStageVolume.", csiSource.Driver)
nodeStageFSGroupArg = deviceMounterArgs.FsGroup
}
fsType := csiSource.FSType
csiRPCError := csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishContext,
deviceMountPath,
fsType,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes,
mountOptions,
nodeStageFSGroupArg)
if csiRPCError != nil {
if volumetypes.IsOperationFinishedError(csiRPCError) {
// clean up metadata
klog.Error(log("attacher.MountDevice failed: %v", csiRPCError))
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
}
}
return csiRPCError
}
klog.V(4).Info(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
return nil
}
var _ volume.Detacher = &csiAttacher{}
var _ volume.DeviceUnmounter = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
_, ok := c.plugin.host.(volume.KubeletVolumeHost)
if ok {
return errors.New("detaching volumes from the kubelet is not supported")
}
var attachID string
var volID string
if volumeName == "" {
klog.Error(log("detacher.Detach missing value for parameter volumeName"))
return errors.New("missing expected parameter volumeName")
}
// volumeName in format driverName<SEP>volumeHandle generated by plugin.GetVolumeName()
parts := strings.Split(volumeName, volNameSep)
if len(parts) != 2 {
klog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
return errors.New("volumeName missing expected data")
}
driverName := parts[0]
volID = parts[1]
attachID = getAttachmentName(volID, driverName, string(nodeName))
if err := c.k8s.StorageV1().VolumeAttachments().Delete(context.TODO(), attachID, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
// object deleted or never existed, done
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
return nil
}
return errors.New(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
}
klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
// Attach and detach functionality is exclusive to the CSI plugin that runs in the AttachDetachController,
// and has access to a VolumeAttachment lister that can be polled for the current status.
return c.waitForVolumeDetachmentWithLister(volID, attachID, c.watchTimeout)
}
func (c *csiAttacher) waitForVolumeDetachmentWithLister(volumeHandle, attachID string, timeout time.Duration) error {
klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
verifyStatus := func() (bool, error) {
volumeAttachment, err := c.plugin.volumeAttachmentLister.Get(attachID)
if err != nil {
if !apierrors.IsNotFound(err) {
return false, errors.New(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
}
// Detachment successful.
klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
return true, nil
}
// Detachment is only "successful" once the VolumeAttachment is deleted, however we perform
// this check to make sure the object does not contain any detach errors.
successful, err := verifyDetachmentStatus(volumeAttachment, volumeHandle)
if err != nil {
return false, err
}
return successful, nil
}
return c.waitForVolumeAttachDetachStatusWithLister(nil, volumeHandle, attachID, timeout, verifyStatus, "Detach")
}
func (c *csiAttacher) waitForVolumeAttachDetachStatusWithLister(spec *volume.Spec, volumeHandle, attachID string, timeout time.Duration, verifyStatus func() (bool, error), operation string) error {
var (
initBackoff = 500 * time.Millisecond
// This is approximately the duration between consecutive ticks after two minutes (CSI timeout).
maxBackoff = 7 * time.Second
resetDuration = time.Minute
backoffFactor = 1.05
jitter = 0.1
clock = &clock.RealClock{}
)
backoffMgr := wait.NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration, backoffFactor, jitter, clock)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// Get driver name from spec for better log messages. During detach spec can be nil, and it's ok for driver to be unknown.
csiDriverName, err := GetCSIDriverName(spec)
if err != nil {
csiDriverName = "unknown"
klog.V(4).Info(log("Could not find CSI driver name in spec for volume [%v]", volumeHandle))
}
for {
t := backoffMgr.Backoff()
select {
case <-t.C():
successful, err := verifyStatus()
if err != nil {
return err
}
if successful {
return nil
}
case <-ctx.Done():
t.Stop()
klog.Error(log("%s timeout after %v [volume=%v; attachment.ID=%v]", operation, timeout, volumeHandle, attachID))
return fmt.Errorf("timed out waiting for external-attacher of %v CSI driver to %v volume %v", csiDriverName, strings.ToLower(operation), volumeHandle)
}
}
}
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
klog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
// Setup
var driverName, volID string
dataDir := filepath.Dir(deviceMountPath)
data, err := loadVolumeData(dataDir, volDataFileName)
if err == nil {
driverName = data[volDataKey.driverName]
volID = data[volDataKey.volHandle]
} else {
if errors.Is(err, os.ErrNotExist) {
klog.V(4).Info(log("attacher.UnmountDevice skipped because volume data file [%s] does not exist", dataDir))
return nil
}
klog.Error(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
return err
}
if c.csiClient == nil {
c.csiClient, err = newCsiDriverClient(csiDriverName(driverName))
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err))
}
}
csi := c.csiClient
// We cannot tell whether this volume is migrated because there is no spec.
ctx, cancel := createCSIOperationContext(nil, csiTimeout)
defer cancel()
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return errors.New(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
}
if !stageUnstageSet {
klog.Info(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
// Just delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
}
return nil
}
// Start UnmountDevice
err = csi.NodeUnstageVolume(ctx,
volID,
deviceMountPath)
if err != nil {
return errors.New(log("attacher.UnmountDevice failed: %v", err))
}
// Delete the global directory + json file
if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
}
klog.V(4).Info(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
return nil
}
// getAttachmentName returns csi-<sha256(volName,csiDriverName,nodeName)>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
return fmt.Sprintf("csi-%x", result)
}
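// Example (illustrative): the attachment name is a pure function of its inputs,
// so the same (volume, driver, node) triple always yields the same
// VolumeAttachment name. The driver name below is a hypothetical placeholder:
//
//	attachID := getAttachmentName("vol-0123", "ebs.csi.example.com", "node-1")
//	// attachID is "csi-" followed by 64 hex characters, i.e.
//	// "csi-" + hex(sha256("vol-0123" + "ebs.csi.example.com" + "node-1")).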
func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) {
if spec == nil {
return "", errors.New("makeDeviceMountPath failed, spec is nil")
}
driver, err := GetCSIDriverName(spec)
if err != nil {
return "", err
}
if driver == "" {
return "", errors.New("makeDeviceMountPath failed, csi source driver name is empty")
}
csiSource, err := getPVSourceFromSpec(spec)
if err != nil {
return "", errors.New(log("makeDeviceMountPath failed to get CSIPersistentVolumeSource: %v", err))
}
if csiSource.VolumeHandle == "" {
return "", errors.New("makeDeviceMountPath failed, CSIPersistentVolumeSource volume handle is empty")
}
result := sha256.Sum256([]byte(csiSource.VolumeHandle))
volSha := fmt.Sprintf("%x", result)
return filepath.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), driver, volSha, globalMountInGlobalPath), nil
}
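// Example (illustrative, assuming the default kubelet plugin dir and a
// hypothetical driver name): for driver "ebs.csi.example.com" and volume handle
// "vol-0123", the device mount path has the shape
//
//	/var/lib/kubelet/plugins/kubernetes.io/csi/ebs.csi.example.com/<hex sha256 of "vol-0123">/globalmount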
func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) {
// When we receive a deleted event during attachment, fail fast
if attachment == nil {
klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", volumeHandle))
return false, errors.New("volume attachment has been deleted")
}
// if being deleted, fail fast
if attachment.GetDeletionTimestamp() != nil {
klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name))
return false, errors.New("volume attachment is being deleted")
}
// attachment OK
if attachment.Status.Attached {
return true, nil
}
// driver reports attach error
attachErr := attachment.Status.AttachError
if attachErr != nil {
klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
return false, errors.New(attachErr.Message)
}
return false, nil
}
func verifyDetachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) {
// When we receive a deleted event during detachment,
// it means the volume has been successfully detached.
if attachment == nil {
return true, nil
}
// driver reports detach error
detachErr := attachment.Status.DetachError
if detachErr != nil {
klog.Error(log("detachment for VolumeAttachment for volume [%s] failed: %v", volumeHandle, detachErr.Message))
return false, errors.New(detachErr.Message)
}
return false, nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file defines block volume related methods for CSI driver.
CSI driver is responsible for staging/publishing volumes to their staging/publish paths.
Mapping and unmapping of a device in a publish path to its global map path and its
pod device map path are done by operation_executor through MapBlockVolume/UnmapBlockVolume
(MapBlockVolume and UnmapBlockVolume take care of locking, symlinking, and bind mounting).
A summary of the block volume related CSI driver methods follows:
- GetGlobalMapPath returns a global map path,
- GetPodDeviceMapPath returns a pod device map path and filename,
- SetUpDevice calls CSI's NodeStageVolume and stages a volume to its staging path,
- MapPodDevice calls CSI's NodePublishVolume and publishes a volume to its publish path,
- UnmapPodDevice calls CSI's NodeUnpublishVolume and unpublishes a volume from its publish path,
- TearDownDevice calls CSI's NodeUnstageVolume and unstages a volume from its staging path.
These methods are called in the following sequences:
- operation_executor.MountVolume
- csi.GetGlobalMapPath
- csi.SetUpDevice
- NodeStageVolume
- ASW.MarkDeviceAsMounted
- csi.GetPodDeviceMapPath
- csi.MapPodDevice
- NodePublishVolume
- util.MapBlockVolume
- ASW.MarkVolumeAsMounted
- operation_executor.UnmountVolume
- csi.GetPodDeviceMapPath
- util.UnmapBlockVolume
- csi.UnmapPodDevice
- NodeUnpublishVolume
- ASW.MarkVolumeAsUnmounted
- operation_executor.UnmountDevice
- csi.TearDownDevice
- NodeUnstageVolume
- ASW.MarkDeviceAsUnmounted
After a successful MountVolume for a block volume, the directory structure will be as follows:
/dev/loopX ... Descriptor lock(Loopback device to mapFile under global map path)
/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/{specName}/dev/ ... Global map path
/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/{specName}/dev/{podUID} ... MapFile(Bind mount to publish Path)
/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/staging/{specName} ... Staging path
/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish/{specName}/{podUID} ... Publish path
/var/lib/kubelet/pods/{podUID}/volumeDevices/kubernetes.io~csi/ ... Pod device map path
/var/lib/kubelet/pods/{podUID}/volumeDevices/kubernetes.io~csi/{specName} ... MapFile(Symlink to publish path)
*/
package csi
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/util/removeall"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
utilstrings "k8s.io/utils/strings"
)
type csiBlockMapper struct {
csiClientGetter
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
specName string
volumeID string
readOnly bool
spec *volume.Spec
pod *v1.Pod
podUID types.UID
volume.MetricsProvider
}
var _ volume.BlockVolumeMapper = &csiBlockMapper{}
var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{}
// GetGlobalMapPath returns the global map path (on the node) to the device file that the volume will be symlinked to
// Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
dir := getVolumeDevicePluginDir(m.specName, m.plugin.host)
klog.V(4).Info(log("blockMapper.GetGlobalMapPath = %s", dir))
return dir, nil
}
// GetStagingPath returns a staging path for a directory (on the node) that should be used on NodeStageVolume/NodeUnstageVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{specName}
func (m *csiBlockMapper) GetStagingPath() string {
return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "staging", m.specName)
}
// SupportsMetrics returns true for csiBlockMapper as it initializes the
// MetricsProvider.
func (m *csiBlockMapper) SupportsMetrics() bool {
return true
}
// getPublishDir returns the path to the directory where the volume is published for each pod.
// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{specName}
func (m *csiBlockMapper) getPublishDir() string {
return filepath.Join(m.plugin.host.GetVolumeDevicePluginDir(CSIPluginName), "publish", m.specName)
}
// getPublishPath returns a publish path for a file (on the node) that should be used on NodePublishVolume/NodeUnpublishVolume
// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{specName}/{podUID}
func (m *csiBlockMapper) getPublishPath() string {
return filepath.Join(m.getPublishDir(), string(m.podUID))
}
// GetPodDeviceMapPath returns the pod's device map path and file name; the device file will be mapped to the volume
// returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName))
klog.V(4).Info(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
return path, m.specName
}
// stageVolumeForBlock stages a block volume to stagingPath
func (m *csiBlockMapper) stageVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
) (string, error) {
klog.V(4).Info(log("blockMapper.stageVolumeForBlock called"))
stagingPath := m.GetStagingPath()
klog.V(4).Info(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return "", errors.New(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
}
if !stageUnstageSet {
klog.Info(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
return "", nil
}
publishVolumeInfo := map[string]string{}
if attachment != nil {
publishVolumeInfo = attachment.Status.AttachmentMetadata
}
nodeStageSecrets := map[string]string{}
if csiSource.NodeStageSecretRef != nil {
nodeStageSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodeStageSecretRef)
if err != nil {
return "", fmt.Errorf("failed to get NodeStageSecretRef %s/%s: %v",
csiSource.NodeStageSecretRef.Namespace, csiSource.NodeStageSecretRef.Name, err)
}
}
// Create the stagingPath directory before calling NodeStageVolume
if err := os.MkdirAll(stagingPath, 0750); err != nil {
return "", errors.New(log("blockMapper.stageVolumeForBlock failed to create dir %s: %v", stagingPath, err))
}
klog.V(4).Info(log("blockMapper.stageVolumeForBlock created stagingPath directory successfully [%s]", stagingPath))
// Request to stage a block volume to stagingPath.
// The expected driver implementation is to create a driver-specific resource on stagingPath and
// attach the block volume to the node.
err = csi.NodeStageVolume(ctx,
csiSource.VolumeHandle,
publishVolumeInfo,
stagingPath,
fsTypeBlockName,
accessMode,
nodeStageSecrets,
csiSource.VolumeAttributes,
nil, /* MountOptions */
nil /* fsGroup */)
if err != nil {
return "", err
}
klog.V(4).Info(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
return stagingPath, nil
}
// publishVolumeForBlock publishes a block volume to publishPath
func (m *csiBlockMapper) publishVolumeForBlock(
ctx context.Context,
csi csiClient,
accessMode v1.PersistentVolumeAccessMode,
csiSource *v1.CSIPersistentVolumeSource,
attachment *storage.VolumeAttachment,
) (string, error) {
klog.V(4).Info(log("blockMapper.publishVolumeForBlock called"))
publishVolumeInfo := map[string]string{}
if attachment != nil {
publishVolumeInfo = attachment.Status.AttachmentMetadata
}
// Inject pod information into volume_attributes
volAttribs := csiSource.VolumeAttributes
podInfoEnabled, err := m.plugin.podInfoEnabled(string(m.driverName))
if err != nil {
return "", errors.New(log("blockMapper.publishVolumeForBlock failed to assemble volume attributes: %v", err))
}
volumeLifecycleMode, err := m.plugin.getVolumeLifecycleMode(m.spec)
if err != nil {
return "", errors.New(log("blockMapper.publishVolumeForBlock failed to get VolumeLifecycleMode: %v", err))
}
if podInfoEnabled {
volAttribs = mergeMap(volAttribs, getPodInfoAttrs(m.pod, volumeLifecycleMode))
}
nodePublishSecrets := map[string]string{}
if csiSource.NodePublishSecretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
if err != nil {
return "", errors.New(log("blockMapper.publishVolumeForBlock failed to get NodePublishSecretRef %s/%s: %v",
csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err))
}
}
publishPath := m.getPublishPath()
// Set up the parent directory for publishPath before calling NodePublishVolume
publishDir := filepath.Dir(publishPath)
if err := os.MkdirAll(publishDir, 0750); err != nil {
return "", errors.New(log("blockMapper.publishVolumeForBlock failed to create dir %s: %v", publishDir, err))
}
klog.V(4).Info(log("blockMapper.publishVolumeForBlock created directory for publishPath successfully [%s]", publishDir))
// Request to publish a block volume to publishPath.
// The driver is expected to place the block volume on publishPath, either by bind-mounting the
// device file onto publishPath or by creating a device file on publishPath.
// The parent directory of publishPath is created by k8s, but the driver is responsible for creating publishPath itself.
// If the driver doesn't implement NodeStageVolume, attaching the block volume to the node may be done here.
err = csi.NodePublishVolume(
ctx,
m.volumeID,
m.readOnly,
m.GetStagingPath(),
publishPath,
accessMode,
publishVolumeInfo,
volAttribs,
nodePublishSecrets,
fsTypeBlockName,
[]string{}, /* mountOptions */
nil, /* fsGroup */
)
if err != nil {
return "", err
}
return publishPath, nil
}
// SetUpDevice ensures the device is attached and returns the path where the device is located.
func (m *csiBlockMapper) SetUpDevice() (string, error) {
klog.V(4).Info(log("blockMapper.SetUpDevice called"))
// Get csiSource from spec
if m.spec == nil {
return "", errors.New(log("blockMapper.SetUpDevice spec is nil"))
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
return "", errors.New(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err))
}
driverName := csiSource.Driver
skip, err := m.plugin.skipAttach(driverName)
if err != nil {
return "", errors.New(log("blockMapper.SetupDevice failed to check CSIDriver for %s: %v", driverName, err))
}
var attachment *storage.VolumeAttachment
if !skip {
// Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
if err != nil {
return "", errors.New(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
}
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
csiClient, err := m.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return "", volumetypes.NewTransientOperationFailure(log("blockMapper.SetUpDevice failed to get CSI client: %v", err))
}
// Call NodeStageVolume
stagingPath, err := m.stageVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment)
if err != nil {
if volumetypes.IsOperationFinishedError(err) {
cleanupErr := m.cleanupOrphanDeviceFiles()
if cleanupErr != nil {
// V(4) because this is a not-so-serious error
klog.V(4).Infof("Failed to clean up block volume directory: %v", cleanupErr)
}
}
return "", err
}
return stagingPath, nil
}
func (m *csiBlockMapper) MapPodDevice() (string, error) {
klog.V(4).Info(log("blockMapper.MapPodDevice called"))
// Get csiSource from spec
if m.spec == nil {
return "", errors.New(log("blockMapper.MapPodDevice spec is nil"))
}
csiSource, err := getCSISourceFromSpec(m.spec)
if err != nil {
return "", errors.New(log("blockMapper.MapPodDevice failed to get CSI persistent source: %v", err))
}
driverName := csiSource.Driver
skip, err := m.plugin.skipAttach(driverName)
if err != nil {
return "", errors.New(log("blockMapper.MapPodDevice failed to check CSIDriver for %s: %v", driverName, err))
}
var attachment *storage.VolumeAttachment
if !skip {
// Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
nodeName := string(m.plugin.host.GetNodeName())
attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
if err != nil {
return "", errors.New(log("blockMapper.MapPodDevice failed to get volume attachment [id=%v]: %v", attachID, err))
}
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := v1.ReadWriteOnce
if m.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
}
ctx, cancel := createCSIOperationContext(m.spec, csiTimeout)
defer cancel()
csiClient, err := m.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return "", volumetypes.NewTransientOperationFailure(log("blockMapper.MapPodDevice failed to get CSI client: %v", err))
}
// Call NodePublishVolume
publishPath, err := m.publishVolumeForBlock(ctx, csiClient, accessMode, csiSource, attachment)
if err != nil {
return "", err
}
return publishPath, nil
}
var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
var _ volume.CustomBlockVolumeUnmapper = &csiBlockMapper{}
// unpublishVolumeForBlock unpublishes a block volume from publishPath
func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiClient, publishPath string) error {
// Request to unpublish a block volume from publishPath.
// The driver is expected to remove the block volume from publishPath, either by unmounting the
// bind-mounted device file or by deleting the device file.
// The driver is responsible for deleting publishPath itself.
// If the driver doesn't implement NodeUnstageVolume, detaching the block volume from the node may be done here.
if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
return errors.New(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
}
klog.V(4).Info(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
return nil
}
// unstageVolumeForBlock unstages a block volume from stagingPath
func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClient, stagingPath string) error {
// Check whether "STAGE_UNSTAGE_VOLUME" is set
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return errors.New(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
}
if !stageUnstageSet {
klog.Info(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
return nil
}
// Request to unstage a block volume from stagingPath.
// The expected driver implementation is to remove the driver-specific resource in stagingPath and
// detach the block volume from the node.
if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
return errors.New(log("blockMapper.unstageVolumeForBlock failed: %v", err))
}
klog.V(4).Info(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
// Remove stagingPath directory and its contents
if err := os.RemoveAll(stagingPath); err != nil {
return errors.New(log("blockMapper.unstageVolumeForBlock failed to remove staging path after NodeUnstageVolume() error [%s]: %v", stagingPath, err))
}
return nil
}
// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
ctx, cancel := createCSIOperationContext(m.spec, csiTimeout)
defer cancel()
csiClient, err := m.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("blockMapper.TearDownDevice failed to get CSI client: %v", err))
}
// Call NodeUnstageVolume
stagingPath := m.GetStagingPath()
if _, err := os.Stat(stagingPath); err != nil {
if os.IsNotExist(err) {
klog.V(4).Info(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
} else {
return err
}
} else {
err := m.unstageVolumeForBlock(ctx, csiClient, stagingPath)
if err != nil {
return err
}
}
if err = m.cleanupOrphanDeviceFiles(); err != nil {
// V(4) because this is a not-so-serious error
klog.V(4).Infof("Failed to clean up block volume directory: %v", err)
}
return nil
}
// Clean up any orphan files / directories when a block volume is being unstaged.
// At this point we can be sure that there is no pod using the volume and all
// files are indeed orphaned.
func (m *csiBlockMapper) cleanupOrphanDeviceFiles() error {
// Remove artifacts of NodePublish.
// publishDir: xxx/plugins/kubernetes.io/csi/volumeDevices/publish/<volume name>
// Each PublishVolume() created a subdirectory there. Since everything should
// already be unpublished at this point, the directory should be empty by now.
publishDir := m.getPublishDir()
if err := os.Remove(publishDir); err != nil && !os.IsNotExist(err) {
return errors.New(log("failed to remove publish directory [%s]: %v", publishDir, err))
}
// Remove artifacts of NodeStage.
// stagingPath: xxx/plugins/kubernetes.io/csi/volumeDevices/staging/<volume name>
stagingPath := m.GetStagingPath()
if err := os.Remove(stagingPath); err != nil && !os.IsNotExist(err) {
return errors.New(log("failed to delete volume staging path [%s]: %v", stagingPath, err))
}
// Remove everything under xxx/plugins/kubernetes.io/csi/volumeDevices/<volume name>.
// At this point it contains only "data/vol_data.json" and empty "dev/".
volumeDir := getVolumePluginDir(m.specName, m.plugin.host)
mounter := m.plugin.host.GetMounter()
if err := removeall.RemoveAllOneFilesystem(mounter, volumeDir); err != nil {
return err
}
return nil
}
// UnmapPodDevice unmaps the block device path.
func (m *csiBlockMapper) UnmapPodDevice() error {
publishPath := m.getPublishPath()
csiClient, err := m.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("blockMapper.UnmapPodDevice failed to get CSI client: %v", err))
}
ctx, cancel := createCSIOperationContext(m.spec, csiTimeout)
defer cancel()
// Call NodeUnpublishVolume.
// Call NodeUnpublishVolume even if publishPath does not exist - a previous NodePublish may have
// timed out, and Kubernetes must make sure that the operation is finished.
return m.unpublishVolumeForBlock(ctx, csiClient, publishPath)
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"errors"
"fmt"
"io"
"net"
"strconv"
"sync"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
type csiClient interface {
NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error)
// The caller is responsible for checking whether the driver supports
// applying FSGroup by calling NodeSupportsVolumeMountGroup().
// If the driver does not, fsGroup must be set to nil.
NodePublishVolume(
ctx context.Context,
volumeid string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
fsGroup *int64,
) error
NodeExpandVolume(ctx context.Context, rsOpts csiResizeOptions) (resource.Quantity, error)
NodeUnpublishVolume(
ctx context.Context,
volID string,
targetPath string,
) error
// The caller is responsible for checking whether the driver supports
// applying FSGroup by calling NodeSupportsVolumeMountGroup().
// If the driver does not, fsGroup must be set to nil.
NodeStageVolume(ctx context.Context,
volID string,
publishVolumeInfo map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
mountOptions []string,
fsGroup *int64,
) error
NodeGetVolumeStats(
ctx context.Context,
volID string,
targetPath string,
) (*volume.Metrics, error)
NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
NodeSupportsStageUnstage(ctx context.Context) (bool, error)
NodeSupportsNodeExpand(ctx context.Context) (bool, error)
NodeSupportsVolumeStats(ctx context.Context) (bool, error)
NodeSupportsSingleNodeMultiWriterAccessMode(ctx context.Context) (bool, error)
NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error)
}
// Strongly typed address
type csiAddr string
// Strongly typed driver name
type csiDriverName string
// csiDriverClient implements csiClient and encapsulates all CSI plugin methods
type csiDriverClient struct {
driverName csiDriverName
addr csiAddr
metricsManager *MetricsManager
nodeV1ClientCreator nodeV1ClientCreator
}
type csiResizeOptions struct {
volumeID string
volumePath string
stagingTargetPath string
fsType string
accessMode api.PersistentVolumeAccessMode
newSize resource.Quantity
mountOptions []string
secrets map[string]string
}
var _ csiClient = &csiDriverClient{}
type nodeV1ClientCreator func(addr csiAddr, metricsManager *MetricsManager) (
nodeClient csipbv1.NodeClient,
closer io.Closer,
err error,
)
type nodeV1AccessModeMapper func(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode
// newV1NodeClient creates a new NodeClient with the internally used gRPC
// connection set up. It also returns a closer which must be called to close
// the gRPC connection when the NodeClient is not used anymore.
// This is the default implementation for the nodeV1ClientCreator, used in
// newCsiDriverClient.
func newV1NodeClient(addr csiAddr, metricsManager *MetricsManager) (nodeClient csipbv1.NodeClient, closer io.Closer, err error) {
var conn *grpc.ClientConn
conn, err = newGrpcConn(addr, metricsManager)
if err != nil {
return nil, nil, err
}
nodeClient = csipbv1.NewNodeClient(conn)
return nodeClient, conn, nil
}
func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) {
if driverName == "" {
return nil, fmt.Errorf("driver name is empty")
}
existingDriver, driverExists := csiDrivers.Get(string(driverName))
if !driverExists {
return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
}
nodeV1ClientCreator := newV1NodeClient
return &csiDriverClient{
driverName: driverName,
addr: csiAddr(existingDriver.endpoint),
nodeV1ClientCreator: nodeV1ClientCreator,
metricsManager: NewCSIMetricsManager(string(driverName)),
}, nil
}
func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
klog.V(4).InfoS(log("calling NodeGetInfo rpc"))
var getNodeInfoError error
nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError = c.nodeGetInfoV1(ctx)
if getNodeInfoError != nil {
klog.InfoS("Error calling CSI NodeGetInfo()", "err", getNodeInfoError.Error())
}
return nodeID, maxVolumePerNode, accessibleTopology, getNodeInfoError
}
func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error) {
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return "", 0, nil, err
}
defer closer.Close()
res, err := nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
if err != nil {
return "", 0, nil, err
}
topology := res.GetAccessibleTopology()
if topology != nil {
accessibleTopology = topology.Segments
}
return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}
func (c *csiDriverClient) NodePublishVolume(
ctx context.Context,
volID string,
readOnly bool,
stagingTargetPath string,
targetPath string,
accessMode api.PersistentVolumeAccessMode,
publishContext map[string]string,
volumeContext map[string]string,
secrets map[string]string,
fsType string,
mountOptions []string,
fsGroup *int64,
) error {
klog.V(4).InfoS(log("calling NodePublishVolume rpc"), "volID", volID, "targetPath", targetPath)
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if c.nodeV1ClientCreator == nil {
return errors.New("failed to call NodePublishVolume. nodeV1ClientCreator is nil")
}
accessModeMapper, err := c.getNodeV1AccessModeMapper(ctx)
if err != nil {
return err
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv1.NodePublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
Readonly: readOnly,
PublishContext: publishContext,
VolumeContext: volumeContext,
Secrets: secrets,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: accessModeMapper(accessMode),
},
},
}
if stagingTargetPath != "" {
req.StagingTargetPath = stagingTargetPath
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
mountVolume := &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
}
if fsGroup != nil {
mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
}
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: mountVolume,
}
}
_, err = nodeClient.NodePublishVolume(ctx, req)
if err != nil && !isFinalError(err) {
return volumetypes.NewUncertainProgressError(err.Error())
}
return err
}
func (c *csiDriverClient) NodeExpandVolume(ctx context.Context, opts csiResizeOptions) (resource.Quantity, error) {
if c.nodeV1ClientCreator == nil {
return opts.newSize, fmt.Errorf("version of CSI driver does not support volume expansion")
}
if opts.volumeID == "" {
return opts.newSize, errors.New("missing volume id")
}
if opts.volumePath == "" {
return opts.newSize, errors.New("missing volume path")
}
if opts.newSize.Value() < 0 {
return opts.newSize, errors.New("size can not be less than 0")
}
accessModeMapper, err := c.getNodeV1AccessModeMapper(ctx)
if err != nil {
return opts.newSize, err
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return opts.newSize, err
}
defer closer.Close()
req := &csipbv1.NodeExpandVolumeRequest{
VolumeId: opts.volumeID,
VolumePath: opts.volumePath,
CapacityRange: &csipbv1.CapacityRange{RequiredBytes: opts.newSize.Value()},
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: accessModeMapper(opts.accessMode),
},
},
Secrets: opts.secrets,
}
// Not all CSI drivers support NodeStage/NodeUnstage, hence StagingTargetPath
// should only be set when available.
if opts.stagingTargetPath != "" {
req.StagingTargetPath = opts.stagingTargetPath
}
if opts.fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: opts.fsType,
MountFlags: opts.mountOptions,
},
}
}
resp, err := nodeClient.NodeExpandVolume(ctx, req)
if err != nil {
if !isFinalError(err) {
return opts.newSize, volumetypes.NewUncertainProgressError(err.Error())
}
return opts.newSize, err
}
updatedQuantity := resource.NewQuantity(resp.CapacityBytes, resource.BinarySI)
return *updatedQuantity, nil
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
klog.V(4).InfoS(log("calling NodeUnpublishVolume rpc"), "volID", volID, "targetPath", targetPath)
if volID == "" {
return errors.New("missing volume id")
}
if targetPath == "" {
return errors.New("missing target path")
}
if c.nodeV1ClientCreator == nil {
return errors.New("nodeV1ClientCreate is nil")
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv1.NodeUnpublishVolumeRequest{
VolumeId: volID,
TargetPath: targetPath,
}
_, err = nodeClient.NodeUnpublishVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
volID string,
publishContext map[string]string,
stagingTargetPath string,
fsType string,
accessMode api.PersistentVolumeAccessMode,
secrets map[string]string,
volumeContext map[string]string,
mountOptions []string,
fsGroup *int64,
) error {
klog.V(4).InfoS(log("calling NodeStageVolume rpc"), "volID", volID, "stagingTargetPath", stagingTargetPath)
if volID == "" {
return errors.New("missing volume id")
}
if stagingTargetPath == "" {
return errors.New("missing staging target path")
}
if c.nodeV1ClientCreator == nil {
return errors.New("nodeV1ClientCreate is nil")
}
accessModeMapper, err := c.getNodeV1AccessModeMapper(ctx)
if err != nil {
return err
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv1.NodeStageVolumeRequest{
VolumeId: volID,
PublishContext: publishContext,
StagingTargetPath: stagingTargetPath,
VolumeCapability: &csipbv1.VolumeCapability{
AccessMode: &csipbv1.VolumeCapability_AccessMode{
Mode: accessModeMapper(accessMode),
},
},
Secrets: secrets,
VolumeContext: volumeContext,
}
if fsType == fsTypeBlockName {
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
mountVolume := &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
}
if fsGroup != nil {
mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
}
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: mountVolume,
}
}
_, err = nodeClient.NodeStageVolume(ctx, req)
if err != nil && !isFinalError(err) {
return volumetypes.NewUncertainProgressError(err.Error())
}
return err
}
func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
klog.V(4).InfoS(log("calling NodeUnstageVolume rpc"), "volID", volID, "stagingTargetPath", stagingTargetPath)
if volID == "" {
return errors.New("missing volume id")
}
if stagingTargetPath == "" {
return errors.New("missing staging target path")
}
if c.nodeV1ClientCreator == nil {
return errors.New("nodeV1ClientCreate is nil")
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return err
}
defer closer.Close()
req := &csipbv1.NodeUnstageVolumeRequest{
VolumeId: volID,
StagingTargetPath: stagingTargetPath,
}
_, err = nodeClient.NodeUnstageVolume(ctx, req)
return err
}
func (c *csiDriverClient) NodeSupportsNodeExpand(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME)
}
func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME)
}
func (c *csiDriverClient) getNodeV1AccessModeMapper(ctx context.Context) (nodeV1AccessModeMapper, error) {
supported, err := c.NodeSupportsSingleNodeMultiWriterAccessMode(ctx)
if err != nil {
return nil, err
}
if supported {
return asSingleNodeMultiWriterCapableCSIAccessModeV1, nil
}
return asCSIAccessModeV1, nil
}
func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.ReadOnlyMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
// This mapping exists to enable CSI drivers that lack the
// SINGLE_NODE_MULTI_WRITER capability to work with the
// ReadWriteOncePod access mode.
case api.ReadWriteOncePod:
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
}
return csipbv1.VolumeCapability_AccessMode_UNKNOWN
}
func asSingleNodeMultiWriterCapableCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode {
switch am {
case api.ReadWriteOnce:
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER
case api.ReadOnlyMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.ReadWriteMany:
return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
case api.ReadWriteOncePod:
return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER
}
return csipbv1.VolumeCapability_AccessMode_UNKNOWN
}
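// Example (illustrative): the two mappers differ only for the single-node
// modes. With the SINGLE_NODE_MULTI_WRITER capability, ReadWriteOncePod gets
// its own CSI mode; without it, it falls back to SINGLE_NODE_WRITER:
//
//	asCSIAccessModeV1(api.ReadWriteOncePod)                              // SINGLE_NODE_WRITER
//	asSingleNodeMultiWriterCapableCSIAccessModeV1(api.ReadWriteOncePod)  // SINGLE_NODE_SINGLE_WRITER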
func newGrpcConn(addr csiAddr, metricsManager *MetricsManager) (*grpc.ClientConn, error) {
network := "unix"
klog.V(4).InfoS(log("creating new gRPC connection"), "protocol", network, "endpoint", addr)
return grpc.Dial(
string(addr),
grpc.WithAuthority("localhost"),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, target)
}),
grpc.WithChainUnaryInterceptor(metricsManager.RecordMetricsInterceptor),
)
}
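// Example (illustrative; the socket path is a hypothetical placeholder): a CSI
// endpoint is a unix socket path registered by the driver, dialed directly by
// the custom dialer above:
//
//	conn, err := newGrpcConn(csiAddr("/var/lib/kubelet/plugins/ebs.csi.example.com/csi.sock"), metricsManager)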
// CSI client getter with cache.
// This provides a method to initialize a CSI client by driver name and caches
// it for later use. When the CSI client has not been discovered yet (e.g.
// on kubelet restart), client initialization will fail. Users of the CSI client (e.g.
// the mounter manager and block mapper) can use this to delay CSI client
// initialization until it is needed.
type csiClientGetter struct {
sync.RWMutex
csiClient csiClient
driverName csiDriverName
}
func (c *csiClientGetter) Get() (csiClient, error) {
c.RLock()
if c.csiClient != nil {
c.RUnlock()
return c.csiClient, nil
}
c.RUnlock()
c.Lock()
defer c.Unlock()
// Double-checked locking: re-check after acquiring the write lock.
if c.csiClient != nil {
return c.csiClient, nil
}
csi, err := newCsiDriverClient(c.driverName)
if err != nil {
return nil, err
}
c.csiClient = csi
return c.csiClient, nil
}
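// Example (hypothetical usage): mounters and block mappers embed csiClientGetter
// and resolve the client lazily, so construction succeeds even before the
// driver has registered its endpoint:
//
//	var getter csiClientGetter
//	getter.driverName = csiDriverName("ebs.csi.example.com")
//	client, err := getter.Get() // errors until the driver registers; caches on success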
func (c *csiDriverClient) NodeSupportsVolumeStats(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_GET_VOLUME_STATS)
}
func (c *csiDriverClient) NodeSupportsSingleNodeMultiWriterAccessMode(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER)
}
func (c *csiDriverClient) NodeGetVolumeStats(ctx context.Context, volID string, targetPath string) (*volume.Metrics, error) {
klog.V(4).InfoS(log("calling NodeGetVolumeStats rpc"), "volID", volID, "targetPath", targetPath)
if volID == "" {
return nil, errors.New("missing volume id")
}
if targetPath == "" {
return nil, errors.New("missing target path")
}
if c.nodeV1ClientCreator == nil {
return nil, errors.New("nodeV1ClientCreate is nil")
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return nil, err
}
defer closer.Close()
req := &csipbv1.NodeGetVolumeStatsRequest{
VolumeId: volID,
VolumePath: targetPath,
}
resp, err := nodeClient.NodeGetVolumeStats(ctx, req)
if err != nil {
return nil, err
}
metrics := &volume.Metrics{
Used: resource.NewQuantity(int64(0), resource.BinarySI),
Capacity: resource.NewQuantity(int64(0), resource.BinarySI),
Available: resource.NewQuantity(int64(0), resource.BinarySI),
InodesUsed: resource.NewQuantity(int64(0), resource.BinarySI),
Inodes: resource.NewQuantity(int64(0), resource.BinarySI),
InodesFree: resource.NewQuantity(int64(0), resource.BinarySI),
}
var isSupportNodeVolumeCondition bool
if utilfeature.DefaultFeatureGate.Enabled(features.CSIVolumeHealth) {
isSupportNodeVolumeCondition, err = c.nodeSupportsVolumeCondition(ctx)
if err != nil {
return nil, err
}
if isSupportNodeVolumeCondition {
abnormal, message := resp.VolumeCondition.GetAbnormal(), resp.VolumeCondition.GetMessage()
metrics.Abnormal, metrics.Message = &abnormal, &message
}
}
usages := resp.GetUsage()
// If the driver does not support volume condition and usages is nil, return an error
if !isSupportNodeVolumeCondition && usages == nil {
return nil, fmt.Errorf("failed to get usage from response. usage is nil")
}
for _, usage := range usages {
if usage == nil {
continue
}
unit := usage.GetUnit()
switch unit {
case csipbv1.VolumeUsage_BYTES:
metrics.Available = resource.NewQuantity(usage.GetAvailable(), resource.BinarySI)
metrics.Capacity = resource.NewQuantity(usage.GetTotal(), resource.BinarySI)
metrics.Used = resource.NewQuantity(usage.GetUsed(), resource.BinarySI)
case csipbv1.VolumeUsage_INODES:
metrics.InodesFree = resource.NewQuantity(usage.GetAvailable(), resource.BinarySI)
metrics.Inodes = resource.NewQuantity(usage.GetTotal(), resource.BinarySI)
metrics.InodesUsed = resource.NewQuantity(usage.GetUsed(), resource.BinarySI)
default:
klog.ErrorS(nil, "unknown unit in VolumeUsage", "unit", unit.String())
}
}
return metrics, nil
}
func (c *csiDriverClient) nodeSupportsVolumeCondition(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_CONDITION)
}
func (c *csiDriverClient) NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP)
}
func (c *csiDriverClient) nodeSupportsCapability(ctx context.Context, capabilityType csipbv1.NodeServiceCapability_RPC_Type) (bool, error) {
klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if the node service has %s capability", capabilityType))
capabilities, err := c.nodeGetCapabilities(ctx)
if err != nil {
return false, err
}
for _, capability := range capabilities {
if capability == nil || capability.GetRpc() == nil {
continue
}
if capability.GetRpc().GetType() == capabilityType {
return true, nil
}
}
return false, nil
}
func (c *csiDriverClient) nodeGetCapabilities(ctx context.Context) ([]*csipbv1.NodeServiceCapability, error) {
if c.nodeV1ClientCreator == nil {
return []*csipbv1.NodeServiceCapability{}, errors.New("nodeV1ClientCreate is nil")
}
nodeClient, closer, err := c.nodeV1ClientCreator(c.addr, c.metricsManager)
if err != nil {
return []*csipbv1.NodeServiceCapability{}, err
}
defer closer.Close()
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return []*csipbv1.NodeServiceCapability{}, err
}
return resp.GetCapabilities(), nil
}
func isFinalError(err error) bool {
// Sources:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
// https://github.com/container-storage-interface/spec/blob/master/spec.md
st, ok := status.FromError(err)
if !ok {
// This is not a gRPC error. The operation must have failed before the gRPC
// method was called, otherwise we would get a gRPC error.
// We don't know whether any previous volume operation is in progress, so be on the safe side.
return false
}
switch st.Code() {
case codes.Canceled, // gRPC: Client Application cancelled the request
codes.DeadlineExceeded, // gRPC: Timeout
codes.Unavailable, // gRPC: Server shutting down, TCP connection broken - previous volume operation may be still in progress.
codes.ResourceExhausted, // gRPC: Server temporarily out of resources - previous volume operation may be still in progress.
codes.Aborted: // CSI: Operation pending for volume
return false
}
// All other errors mean that the operation either did not
// even start or has failed. It is for sure not in progress.
return true
}
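// Example (illustrative): a deadline expiry is treated as non-final because the
// driver may still be working on the volume, while an InvalidArgument response
// is definitely final:
//
//	isFinalError(status.Error(codes.DeadlineExceeded, "timed out"))  // false
//	isFinalError(status.Error(codes.InvalidArgument, "bad request")) // true
//	isFinalError(errors.New("not a gRPC status error"))              // false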
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"sync"
utilversion "k8s.io/apimachinery/pkg/util/version"
)
// Driver is a description of a CSI Driver, defined by an endpoint and the
// highest CSI version supported
type Driver struct {
endpoint string
highestSupportedVersion *utilversion.Version
}
// DriversStore holds a list of CSI Drivers
type DriversStore struct {
store
sync.RWMutex
}
type store map[string]Driver
// Get lets you retrieve a CSI Driver by name.
// This method is protected by a mutex.
func (s *DriversStore) Get(driverName string) (Driver, bool) {
s.RLock()
defer s.RUnlock()
driver, ok := s.store[driverName]
return driver, ok
}
// Set lets you save a CSI Driver to the list and give it a specific name.
// This method is protected by a mutex.
func (s *DriversStore) Set(driverName string, driver Driver) {
s.Lock()
defer s.Unlock()
if s.store == nil {
s.store = store{}
}
s.store[driverName] = driver
}
// Delete lets you delete a CSI Driver by name.
// This method is protected by a mutex.
func (s *DriversStore) Delete(driverName string) {
s.Lock()
defer s.Unlock()
delete(s.store, driverName)
}
// Clear deletes all entries in the store.
// This method is protected by a mutex.
func (s *DriversStore) Clear() {
s.Lock()
defer s.Unlock()
s.store = store{}
}
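// Example (hypothetical usage): driver registration stores a Driver under its
// name, and clients later look it up by the same name:
//
//	var drivers DriversStore
//	drivers.Set("ebs.csi.example.com", Driver{endpoint: "/var/lib/kubelet/plugins/ebs.csi.example.com/csi.sock"})
//	if d, ok := drivers.Get("ebs.csi.example.com"); ok {
//		_ = d.endpoint // dial the driver's socket
//	}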
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"fmt"
"time"
"google.golang.org/grpc"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
var _ volume.MetricsProvider = &metricsCsi{}
// metricsCsi represents a MetricsProvider that calculates the used, free, and
// capacity information for a volume using the volume path.
type metricsCsi struct {
// the directory path the volume is mounted to.
targetPath string
// Volume handle or id
volumeID string
// csiClient with cache
csiClientGetter
}
// NewMetricsCsi creates a new metricsCsi with the Volume ID and path.
func NewMetricsCsi(volumeID string, targetPath string, driverName csiDriverName) volume.MetricsProvider {
mc := &metricsCsi{volumeID: volumeID, targetPath: targetPath}
mc.csiClientGetter.driverName = driverName
return mc
}
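// Example (hypothetical usage, with placeholder volume ID, path, and driver):
//
//	mp := NewMetricsCsi("vol-0123", "/var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~csi/<pvName>/mount", csiDriverName("ebs.csi.example.com"))
//	metrics, err := mp.GetMetrics() // NodeGetVolumeStats under the hood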
func (mc *metricsCsi) GetMetrics() (*volume.Metrics, error) {
startTime := time.Now()
defer servermetrics.CollectVolumeStatCalDuration(string(mc.csiClientGetter.driverName), startTime)
currentTime := metav1.Now()
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
// Get CSI client
csiClient, err := mc.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return nil, volumetypes.NewTransientOperationFailure(err.Error())
}
// Check whether "GET_VOLUME_STATS" is set
volumeStatsSet, err := csiClient.NodeSupportsVolumeStats(ctx)
if err != nil {
return nil, err
}
// If the plugin does not support volume stats, return.
if !volumeStatsSet {
return nil, volume.NewNotSupportedErrorWithDriverName(
string(mc.csiClientGetter.driverName))
}
// Get volume stats
metrics, err := csiClient.NodeGetVolumeStats(ctx, mc.volumeID, mc.targetPath)
if err != nil {
return nil, err
}
if metrics == nil {
return nil, fmt.Errorf("csi.NodeGetVolumeStats returned nil metrics for volume %s", mc.volumeID)
}
// Set the recorded time
metrics.Time = currentTime
return metrics, nil
}
// MetricsManager defines the metrics manager for CSI operation
type MetricsManager struct {
driverName string
}
// NewCSIMetricsManager creates a new MetricsManager object for the given driver
func NewCSIMetricsManager(driverName string) *MetricsManager {
cmm := MetricsManager{
driverName: driverName,
}
return &cmm
}
type additionalInfo struct {
Migrated string
}
type additionalInfoKeyType struct{}
var additionalInfoKey additionalInfoKeyType
// RecordMetricsInterceptor is a grpc interceptor that is used to
// record the CSI operation latency
func (cmm *MetricsManager) RecordMetricsInterceptor(
ctx context.Context,
method string,
req, reply interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption) error {
start := time.Now()
err := invoker(ctx, method, req, reply, cc, opts...)
duration := time.Since(start)
// Check whether this is a migrated operation
additionalInfoVal := ctx.Value(additionalInfoKey)
migrated := "false"
if additionalInfoVal != nil {
additionalInfoVal, ok := additionalInfoVal.(additionalInfo)
if !ok {
return err
}
migrated = additionalInfoVal.Migrated
}
// Record the metric latency
volumeutil.RecordCSIOperationLatencyMetrics(cmm.driverName, method, err, duration, migrated)
return err
}
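// Example (illustrative sketch): a caller can mark an RPC as belonging to a
// migrated in-tree volume by attaching additionalInfo to the context before the
// interceptor runs; the latency metric is then recorded with migrated="true":
//
//	ctx := context.WithValue(context.Background(), additionalInfoKey, additionalInfo{Migrated: "true"})
//	// Use ctx for the CSI RPC; RecordMetricsInterceptor reads the value back.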
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
authenticationv1 "k8s.io/api/authentication/v1"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
utilstrings "k8s.io/utils/strings"
)
// TODO (vladimirvivien) move this to a central location later
var (
volDataKey = struct {
specVolID,
volHandle,
driverName,
nodeName,
attachmentID,
volumeLifecycleMode,
seLinuxMountContext string
}{
"specVolID",
"volumeHandle",
"driverName",
"nodeName",
"attachmentID",
"volumeLifecycleMode",
"seLinuxMountContext",
}
)
type csiMountMgr struct {
csiClientGetter
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
volumeLifecycleMode storage.VolumeLifecycleMode
volumeID string
specVolumeID string
readOnly bool
needSELinuxRelabel bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
publishContext map[string]string
kubeVolHost volume.KubeletVolumeHost
volume.MetricsProvider
}
// volume.Volume methods
var _ volume.Volume = &csiMountMgr{}
func (c *csiMountMgr) GetPath() string {
dir := GetCSIMounterPath(filepath.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host)))
klog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
return dir
}
func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string {
specVolID := utilstrings.EscapeQualifiedName(specVolumeID)
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(CSIPluginName), specVolID)
}
// volume.Mounter methods
var _ volume.Mounter = &csiMountMgr{}
func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error {
return c.SetUpAt(c.GetPath(), mounterArgs)
}
func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
klog.V(4).Info(log("Mounter.SetUpAt(%s)", dir))
csi, err := c.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get CSI client: %v", err))
}
ctx, cancel := createCSIOperationContext(c.spec, csiTimeout)
defer cancel()
volSrc, pvSrc, err := getSourceFromSpec(c.spec)
if err != nil {
return errors.New(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
}
// Check CSIDriver.Spec.Mode to ensure that the CSI driver
// supports the current volumeLifecycleMode.
if err := c.supportsVolumeLifecycleMode(); err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
}
fsGroupPolicy, err := c.getFSGroupPolicy()
if err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check fsGroup policy: %s", err))
}
driverName := c.driverName
volumeHandle := c.volumeID
readOnly := c.readOnly
accessMode := api.ReadWriteOnce
var (
fsType string
volAttribs map[string]string
nodePublishSecrets map[string]string
publishContext map[string]string
mountOptions []string
deviceMountPath string
secretRef *api.SecretReference
)
switch {
case volSrc != nil:
if c.volumeLifecycleMode != storage.VolumeLifecycleEphemeral {
return fmt.Errorf("unexpected volume mode: %s", c.volumeLifecycleMode)
}
if volSrc.FSType != nil {
fsType = *volSrc.FSType
}
volAttribs = volSrc.VolumeAttributes
if volSrc.NodePublishSecretRef != nil {
secretName := volSrc.NodePublishSecretRef.Name
ns := c.pod.Namespace
secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
}
case pvSrc != nil:
if c.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
return fmt.Errorf("unexpected driver mode: %s", c.volumeLifecycleMode)
}
fsType = pvSrc.FSType
volAttribs = pvSrc.VolumeAttributes
if pvSrc.NodePublishSecretRef != nil {
secretRef = pvSrc.NodePublishSecretRef
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
if c.spec.PersistentVolume.Spec.AccessModes != nil {
accessMode = c.spec.PersistentVolume.Spec.AccessModes[0]
}
mountOptions = c.spec.PersistentVolume.Spec.MountOptions
// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
if err != nil {
return errors.New(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capability: %v", err))
}
if stageUnstageSet {
deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
if err != nil {
return errors.New(log("mounter.SetUpAt failed to make device mount path: %v", err))
}
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.publishContext == nil {
nodeName := string(c.plugin.host.GetNodeName())
c.publishContext, err = c.plugin.getPublishContext(c.k8s, volumeHandle, string(driverName), nodeName)
if err != nil {
// we could have a transient error associated with fetching publish context
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to fetch publishContext: %v", err))
}
publishContext = c.publishContext
}
default:
return fmt.Errorf("volume source not found in volume.Spec")
}
// Create the target dir before calling NodePublishVolume
parentDir := filepath.Dir(dir)
if err := os.MkdirAll(parentDir, 0750); err != nil {
return errors.New(log("mounter.SetUpAt failed to create dir %#v: %v", parentDir, err))
}
klog.V(4).Info(log("created target path successfully [%s]", parentDir))
nodePublishSecrets = map[string]string{}
if secretRef != nil {
nodePublishSecrets, err = getCredentialsFromSecret(c.k8s, secretRef)
if err != nil {
return volumetypes.NewTransientOperationFailure(fmt.Sprintf("fetching NodePublishSecretRef %s/%s failed: %v",
secretRef.Namespace, secretRef.Name, err))
}
}
// Inject pod information into volume_attributes
podInfoEnabled, err := c.plugin.podInfoEnabled(string(c.driverName))
if err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to assemble volume attributes: %v", err))
}
if podInfoEnabled {
volAttribs = mergeMap(volAttribs, getPodInfoAttrs(c.pod, c.volumeLifecycleMode))
}
// Inject pod service account token into volume attributes
serviceAccountTokenAttrs, err := c.podServiceAccountTokenAttrs()
if err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get service accoount token attributes: %v", err))
}
volAttribs = mergeMap(volAttribs, serviceAccountTokenAttrs)
driverSupportsCSIVolumeMountGroup := false
var nodePublishFSGroupArg *int64
driverSupportsCSIVolumeMountGroup, err = csi.NodeSupportsVolumeMountGroup(ctx)
if err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to determine if the node service has VOLUME_MOUNT_GROUP capability: %v", err))
}
if driverSupportsCSIVolumeMountGroup {
klog.V(3).Infof("Driver %s supports applying FSGroup (has VOLUME_MOUNT_GROUP node capability). Delegating FSGroup application to the driver through NodePublishVolume.", c.driverName)
nodePublishFSGroupArg = mounterArgs.FsGroup
}
var selinuxLabelMount bool
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
support, err := c.plugin.SupportsSELinuxContextMount(c.spec)
if err != nil {
return errors.New(log("failed to query for SELinuxMount support: %s", err))
}
if support && mounterArgs.SELinuxLabel != "" {
mountOptions = util.AddSELinuxMountOption(mountOptions, mounterArgs.SELinuxLabel)
selinuxLabelMount = true
}
}
// Save volume info data in the pod dir so it can be used for teardown
nodeName := string(c.plugin.host.GetNodeName())
volData := map[string]string{
volDataKey.specVolID: c.spec.Name(),
volDataKey.volHandle: volumeHandle,
volDataKey.driverName: string(c.driverName),
volDataKey.nodeName: nodeName,
volDataKey.volumeLifecycleMode: string(c.volumeLifecycleMode),
volDataKey.attachmentID: getAttachmentName(volumeHandle, string(c.driverName), nodeName),
}
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) && selinuxLabelMount {
volData[volDataKey.seLinuxMountContext] = mounterArgs.SELinuxLabel
}
err = saveVolumeData(parentDir, volDataFileName, volData)
if err != nil {
errorMsg := log("mounter.SetUpAt failed to save volume info data: %v", err)
klog.Error(errorMsg)
if removeerr := removeMountDir(c.plugin, dir); removeerr != nil {
klog.Error(log("mounter.SetUpAt failed to remove mount dir after error [%s]: %v", dir, removeerr))
}
return err
}
csiRPCError := csi.NodePublishVolume(
ctx,
volumeHandle,
readOnly,
deviceMountPath,
dir,
accessMode,
publishContext,
volAttribs,
nodePublishSecrets,
fsType,
mountOptions,
nodePublishFSGroupArg,
)
if csiRPCError != nil {
// If operation finished with error then we can remove the mount directory.
if volumetypes.IsOperationFinishedError(csiRPCError) {
if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
klog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
}
}
return csiRPCError
}
if !selinuxLabelMount {
c.needSELinuxRelabel, err = c.kubeVolHost.GetHostUtil().GetSELinuxSupport(dir)
if err != nil {
// The volume is mounted. Return UncertainProgressError, so kubelet will unmount it when user deletes the pod.
return volumetypes.NewUncertainProgressError(fmt.Sprintf("error checking for SELinux support: %s", err))
}
}
if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, fsGroupPolicy) {
// Driver doesn't support applying FSGroup. Kubelet must apply it instead.
var ownershipChanger volume.VolumeOwnershipChanger
if mounterArgs.VolumeOwnershipApplicator != nil {
ownershipChanger = mounterArgs.VolumeOwnershipApplicator
} else {
ownershipChanger = volume.NewVolumeOwnership(c, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec))
}
// The FSGroupCompleteHook above uses the full plugin name to distinguish between different CSI drivers
ownershipChanger.AddProgressNotifier(c.pod, mounterArgs.Recorder)
err = ownershipChanger.ChangePermissions()
if err != nil {
// At this point the mount operation has succeeded:
// 1. The volume cannot be used by the pod because of invalid permissions, so we must return an error.
// 2. Because the mount itself succeeded, we must record the volume as mounted in an uncertain state,
//    so it can be cleaned up.
return volumetypes.NewUncertainProgressError(fmt.Sprintf("applyFSGroup failed for vol %s: %v", c.volumeID, err))
}
klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *mounterArgs.FsGroup, c.volumeID))
}
klog.V(4).Info(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
if c.plugin.serviceAccountTokenGetter == nil {
return nil, errors.New("ServiceAccountTokenGetter is nil")
}
csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
if err != nil {
if apierrors.IsNotFound(err) {
klog.V(5).Info(log("CSIDriver %q not found, not adding service account token information", c.driverName))
return nil, nil
}
return nil, err
}
if len(csiDriver.Spec.TokenRequests) == 0 {
return nil, nil
}
outputs := map[string]authenticationv1.TokenRequestStatus{}
for _, tokenRequest := range csiDriver.Spec.TokenRequests {
audience := tokenRequest.Audience
audiences := []string{audience}
if audience == "" {
audiences = []string{}
}
tr, err := c.plugin.serviceAccountTokenGetter(c.pod.Namespace, c.pod.Spec.ServiceAccountName, &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: audiences,
ExpirationSeconds: tokenRequest.ExpirationSeconds,
BoundObjectRef: &authenticationv1.BoundObjectReference{
APIVersion: "v1",
Kind: "Pod",
Name: c.pod.Name,
UID: c.pod.UID,
},
},
})
if err != nil {
return nil, err
}
outputs[audience] = tr.Status
}
klog.V(4).Info(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
tokens, _ := json.Marshal(outputs)
return map[string]string{
"csi.storage.k8s.io/serviceAccount.tokens": string(tokens),
}, nil
}
func (c *csiMountMgr) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: c.readOnly,
Managed: !c.readOnly,
SELinuxRelabel: c.needSELinuxRelabel,
}
}
// volume.Unmounter methods
var _ volume.Unmounter = &csiMountMgr{}
func (c *csiMountMgr) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *csiMountMgr) TearDownAt(dir string) error {
klog.V(4).Info(log("Unmounter.TearDownAt(%s)", dir))
volID := c.volumeID
csi, err := c.csiClientGetter.Get()
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return volumetypes.NewTransientOperationFailure(log("Unmounter.TearDownAt failed to get CSI client: %v", err))
}
// c.spec is nil for unmounters, so we cannot tell whether this is a migrated
// operation; createCSIOperationContext treats a nil spec as not migrated.
ctx, cancel := createCSIOperationContext(c.spec, csiTimeout)
defer cancel()
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
return errors.New(log("Unmounter.TearDownAt failed: %v", err))
}
// Removal of target_path provided in the NodePublish RPC call
// (in this case location `dir`) MUST be done by the CSI plugin according
// to the spec.
//
// Kubelet should only be responsible for removal of json data files it
// creates and parent directories.
//
// However, some CSI plugins maybe buggy and don't adhere to the standard,
// so we still need to remove the target_path here if it's unmounted and
// empty.
if err := removeMountDir(c.plugin, dir); err != nil {
return errors.New(log("Unmounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
}
klog.V(4).Info(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
return nil
}
func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolicy storage.FSGroupPolicy) bool {
if fsGroup == nil || driverPolicy == storage.NoneFSGroupPolicy || c.readOnly {
return false
}
if driverPolicy == storage.FileFSGroupPolicy {
return true
}
if fsType == "" {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided"))
return false
}
if c.spec.PersistentVolume != nil {
if c.spec.PersistentVolume.Spec.AccessModes == nil {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided"))
return false
}
if !hasReadWriteOnce(c.spec.PersistentVolume.Spec.AccessModes) {
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode"))
return false
}
return true
} else if c.spec.Volume != nil && c.spec.Volume.CSI != nil {
// Inline CSI volumes are always mounted with RWO AccessMode by SetUpAt
return true
}
klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, unsupported volume type"))
return false
}
// getFSGroupPolicy returns the FSGroupPolicy from the driver's CSIDriver
// object, or the default (ReadWriteOnceWithFSType) when no CSIDriver exists.
// An error indicates the policy could not be determined.
func (c *csiMountMgr) getFSGroupPolicy() (storage.FSGroupPolicy, error) {
// Retrieve CSIDriver. It's not an error if the driver isn't found
// (CSIDriver is optional)
var csiDriver *storage.CSIDriver
driver := string(c.driverName)
if c.plugin.csiDriverLister != nil {
c, err := c.plugin.getCSIDriver(driver)
if err != nil && !apierrors.IsNotFound(err) {
// Some internal error.
return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, err
}
csiDriver = c
}
// If the csiDriver isn't defined, return the default behavior
if csiDriver == nil {
return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, nil
}
// If the csiDriver exists but the fsGroupPolicy isn't defined, return an error
if csiDriver.Spec.FSGroupPolicy == nil || *csiDriver.Spec.FSGroupPolicy == "" {
return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, errors.New(log("expected valid fsGroupPolicy, received nil value or empty string"))
}
return *csiDriver.Spec.FSGroupPolicy, nil
}
// supportsVolumeLifecycleMode checks whether the CSI driver supports a volume in the given lifecycle mode.
// An error indicates that it isn't supported and explains why.
func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
// Retrieve CSIDriver. It's not an error if the driver isn't found
// (CSIDriver is optional), but then only persistent volumes are supported.
var csiDriver *storage.CSIDriver
driver := string(c.driverName)
if c.plugin.csiDriverLister != nil {
c, err := c.plugin.getCSIDriver(driver)
if err != nil && !apierrors.IsNotFound(err) {
// Some internal error.
return err
}
csiDriver = c
}
// The right response depends on whether we have information
// about the driver and the volume mode.
switch {
case csiDriver == nil && c.volumeLifecycleMode == storage.VolumeLifecyclePersistent:
// No information, but that's okay for persistent volumes (and only those).
return nil
case csiDriver == nil:
return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", c.volumeLifecycleMode, driver)
case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, c.volumeLifecycleMode):
// Explicitly listed.
return nil
default:
return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", c.volumeLifecycleMode, driver, csiDriver.Spec.VolumeLifecycleModes)
}
}
// containsVolumeMode checks whether the given volume mode is listed.
func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// isDirMounted returns true if dir is likely a mount point (the negation of the IsLikelyNotMountPoint check)
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter()
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
klog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
return false, err
}
return !notMnt, nil
}
// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir
func removeMountDir(plug *csiPlugin, mountPath string) error {
klog.V(4).Info(log("removing mount path [%s]", mountPath))
mnt, err := isDirMounted(plug, mountPath)
if err != nil {
return err
}
if !mnt {
klog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
return errors.New(log("failed to remove dir [%s]: %v", mountPath, err))
}
// remove volume data file as well
volPath := filepath.Dir(mountPath)
dataFile := filepath.Join(volPath, volDataFileName)
klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
return errors.New(log("failed to delete volume data file [%s]: %v", dataFile, err))
}
// remove volume path
klog.V(4).Info(log("deleting volume path [%s]", volPath))
if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) {
return errors.New(log("failed to delete volume path [%s]: %v", volPath, err))
}
}
return nil
}
// makeVolumeHandle returns csi-<sha256(podUID,volSourceSpecName)>
func makeVolumeHandle(podUID, volSourceSpecName string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s%s", podUID, volSourceSpecName)))
return fmt.Sprintf("csi-%x", result)
}
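// Illustrative sketch (not part of the original source): because the handle is
// a pure function of the pod UID and the volume's spec name, NewMounter can
// regenerate the identical handle for an inline volume on every call:
//
//	makeVolumeHandle("pod-uid-1", "data") // "csi-" followed by 64 hex chars, stable across calls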
func mergeMap(first, second map[string]string) map[string]string {
if first == nil {
return second
}
for k, v := range second {
first[k] = v
}
return first
}
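// Illustrative sketch (not part of the original source): mergeMap mutates and
// returns its first argument, with values from the second map winning on key
// collisions; a nil first map simply yields the second map:
//
//	first := map[string]string{"a": "1", "b": "2"}
//	merged := mergeMap(first, map[string]string{"b": "3"})
//	// merged["b"] == "3" and first was modified in place
//	// mergeMap(nil, second) returns second unchanged (possibly nil)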
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"fmt"
"sync"
"time"
v1 "k8s.io/api/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
// csiNodeUpdater watches for changes to CSIDriver objects and manages the lifecycle
// of per-driver goroutines that periodically update CSINodeDriver.Allocatable information
// based on the NodeAllocatableUpdatePeriodSeconds setting.
type csiNodeUpdater struct {
// Informer for CSIDriver objects
driverInformer cache.SharedIndexInformer
// Map of driver names to stop channels for update goroutines
driverUpdaters sync.Map
// Ensures the updater is only started once
once sync.Once
}
// NewCSINodeUpdater creates a new csiNodeUpdater
func NewCSINodeUpdater(driverInformer cache.SharedIndexInformer) (*csiNodeUpdater, error) {
if driverInformer == nil {
return nil, fmt.Errorf("driverInformer must not be nil")
}
return &csiNodeUpdater{
driverInformer: driverInformer,
driverUpdaters: sync.Map{},
}, nil
}
// Run starts the csiNodeUpdater by registering event handlers.
func (u *csiNodeUpdater) Run() {
u.once.Do(func() {
_, err := u.driverInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: u.onDriverAdd,
UpdateFunc: u.onDriverUpdate,
DeleteFunc: u.onDriverDelete,
})
if err != nil {
klog.ErrorS(err, "Failed to add event handler for CSI driver informer")
return
}
klog.V(4).InfoS("csiNodeUpdater initialized successfully")
})
}
// onDriverAdd handles the addition of a new CSIDriver object.
func (u *csiNodeUpdater) onDriverAdd(obj interface{}) {
driver, ok := obj.(*v1.CSIDriver)
if !ok {
return
}
klog.V(7).InfoS("onDriverAdd event", "driver", driver.Name)
u.syncDriverUpdater(driver.Name)
}
// onDriverUpdate handles updates to CSIDriver objects.
func (u *csiNodeUpdater) onDriverUpdate(oldObj, newObj interface{}) {
oldDriver, ok := oldObj.(*v1.CSIDriver)
if !ok {
return
}
newDriver, ok := newObj.(*v1.CSIDriver)
if !ok {
return
}
// Only reconfigure if the NodeAllocatableUpdatePeriodSeconds field is updated.
oldPeriod := getNodeAllocatableUpdatePeriod(oldDriver)
newPeriod := getNodeAllocatableUpdatePeriod(newDriver)
if oldPeriod != newPeriod {
klog.V(4).InfoS("NodeAllocatableUpdatePeriodSeconds updated", "driver", newDriver.Name, "oldPeriod", oldPeriod, "newPeriod", newPeriod)
u.syncDriverUpdater(newDriver.Name)
}
}
// onDriverDelete handles deletion of CSIDriver objects.
func (u *csiNodeUpdater) onDriverDelete(obj interface{}) {
driver, ok := obj.(*v1.CSIDriver)
if !ok {
return
}
klog.V(7).InfoS("onDriverDelete event", "driver", driver.Name)
u.syncDriverUpdater(driver.Name)
}
// syncDriverUpdater re-evaluates whether the periodic updater for a given driver should run.
// It is invoked from informer events (Add/Update/Delete) and from plugin registration/deregistration.
func (u *csiNodeUpdater) syncDriverUpdater(driverName string) {
// Check if the CSI plugin is installed on this node.
if !isDriverInstalled(driverName) {
klog.V(4).InfoS("Driver not installed; stopping csiNodeUpdater", "driver", driverName)
u.unregisterDriver(driverName)
return
}
// Get the CSIDriver object from the informer cache.
obj, exists, err := u.driverInformer.GetStore().GetByKey(driverName)
if err != nil {
u.unregisterDriver(driverName)
klog.ErrorS(err, "Error retrieving CSIDriver from store", "driver", driverName)
return
}
if !exists {
klog.InfoS("CSIDriver object not found; stopping csiNodeUpdater", "driver", driverName)
u.unregisterDriver(driverName)
return
}
driver, ok := obj.(*v1.CSIDriver)
if !ok {
klog.ErrorS(fmt.Errorf("invalid CSIDriver object type"), "failed to cast CSIDriver object", "driver", driverName)
return
}
// Get the update period.
period := getNodeAllocatableUpdatePeriod(driver)
if period == 0 {
klog.V(7).InfoS("NodeAllocatableUpdatePeriodSeconds is not configured; disabling updates", "driver", driverName)
u.unregisterDriver(driverName)
return
}
newStopCh := make(chan struct{})
prevStopCh, loaded := u.driverUpdaters.Swap(driverName, newStopCh)
// If an updater is already running, stop it so we can reconfigure.
if loaded && prevStopCh != nil {
if stopCh, ok := prevStopCh.(chan struct{}); ok {
close(stopCh)
}
}
// Start the periodic update goroutine.
go u.runPeriodicUpdate(driverName, period, newStopCh)
}
// unregisterDriver stops any running periodic update goroutine for the given driver.
func (u *csiNodeUpdater) unregisterDriver(driverName string) {
prev, loaded := u.driverUpdaters.LoadAndDelete(driverName)
if loaded && prev != nil {
if stopCh, ok := prev.(chan struct{}); ok {
close(stopCh)
}
}
}
// runPeriodicUpdate runs the periodic update loop for a driver.
func (u *csiNodeUpdater) runPeriodicUpdate(driverName string, period time.Duration, stopCh <-chan struct{}) {
ticker := time.NewTicker(period)
defer ticker.Stop()
klog.V(7).InfoS("Starting periodic updates for driver", "driver", driverName, "period", period)
for {
select {
case <-ticker.C:
if err := updateCSIDriver(driverName); err != nil {
klog.ErrorS(err, "Failed to update CSIDriver", "driver", driverName)
}
case <-stopCh:
klog.V(4).InfoS("Stopping periodic updates for driver", "driver", driverName, "period", period)
return
}
}
}
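// Illustrative sketch (not part of the original source): the updater lifecycle
// relies on a swap-and-close idiom over sync.Map. Each reconfiguration swaps in
// a fresh stop channel and closes the previous one, so at most one update
// goroutine per driver is ever live (updaters and runLoop below are stand-ins
// for the driverUpdaters field and runPeriodicUpdate):
//
//	newStopCh := make(chan struct{})
//	if prev, loaded := updaters.Swap(driverName, newStopCh); loaded && prev != nil {
//		close(prev.(chan struct{})) // the old goroutine observes <-stopCh and returns
//	}
//	go runLoop(driverName, newStopCh)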
// isDriverInstalled checks if the CSI driver is installed on the node by checking the global csiDrivers map
func isDriverInstalled(driverName string) bool {
_, ok := csiDrivers.Get(driverName)
return ok
}
// getNodeAllocatableUpdatePeriod returns the NodeAllocatableUpdatePeriodSeconds value from the CSIDriver
func getNodeAllocatableUpdatePeriod(driver *v1.CSIDriver) time.Duration {
if driver == nil || driver.Spec.NodeAllocatableUpdatePeriodSeconds == nil {
return 0
}
return time.Duration(*driver.Spec.NodeAllocatableUpdatePeriodSeconds) * time.Second
}
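// Illustrative sketch (not part of the original source): a CSIDriver whose
// NodeAllocatableUpdatePeriodSeconds is 60 yields a one-minute ticker period,
// while an unset field (or a nil driver) disables periodic updates:
//
//	sixty := int64(60)
//	d := &v1.CSIDriver{Spec: v1.CSIDriverSpec{NodeAllocatableUpdatePeriodSeconds: &sixty}}
//	getNodeAllocatableUpdatePeriod(d)   // 60 * time.Second
//	getNodeAllocatableUpdatePeriod(nil) // 0, interpreted as "disabled"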
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"google.golang.org/grpc/codes"
"k8s.io/klog/v2"
authenticationv1 "k8s.io/api/authentication/v1"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
csitranslationplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
const (
// CSIPluginName is the name of the in-tree CSI Plugin
CSIPluginName = "kubernetes.io/csi"
csiTimeout = 2 * time.Minute
volNameSep = "^"
volDataFileName = "vol_data.json"
fsTypeBlockName = "block"
// CsiResyncPeriod is the default resync period duration
// TODO: increase to something useful
CsiResyncPeriod = time.Minute
)
type csiPlugin struct {
host volume.VolumeHost
csiDriverLister storagelisters.CSIDriverLister
csiDriverInformer cache.SharedIndexInformer
serviceAccountTokenGetter func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
volumeAttachmentLister storagelisters.VolumeAttachmentLister
}
// ProbeVolumePlugins returns implemented plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
p := &csiPlugin{
host: nil,
}
return []volume.VolumePlugin{p}
}
// volume.VolumePlugin methods
var _ volume.VolumePlugin = &csiPlugin{}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct {
csiPlugin *csiPlugin
}
// TODO (verult) consider using a struct instead of global variables
// csiDrivers map keeps track of all registered CSI drivers on the node and their
// corresponding sockets
var csiDrivers = &DriversStore{}
var nim nodeinfomanager.Interface
var csiNodeUpdaterVar *csiNodeUpdater
// PluginHandler is the plugin registration handler interface passed to the
// pluginwatcher module in kubelet
var PluginHandler = &RegistrationHandler{}
// ValidatePlugin is called by kubelet's plugin watcher upon detection
// of a new registration socket opened by the CSI driver registrar sidecar.
func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
klog.Info(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
pluginName, endpoint, strings.Join(versions, ",")))
_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
if err != nil {
return fmt.Errorf("validation failed for CSI Driver %s at endpoint %s: %v", pluginName, endpoint, err)
}
return err
}
// RegisterPlugin is called when a plugin can be registered
func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error {
klog.Info(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
if err != nil {
return err
}
// Store the endpoint of the newly registered CSI driver in the map, keyed by
// the driver name, so that all other CSI components can look up a driver's socket by name.
csiDrivers.Set(pluginName, Driver{
endpoint: endpoint,
highestSupportedVersion: highestSupportedVersion,
})
// Get node info from the driver.
csi, err := newCsiDriverClient(csiDriverName(pluginName))
if err != nil {
return err
}
var timeout time.Duration
if pluginClientTimeout == nil {
timeout = csiTimeout
} else {
timeout = *pluginClientTimeout
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx)
if err != nil {
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr))
}
return err
}
err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology)
if err != nil {
if unregErr := unregisterDriver(pluginName); unregErr != nil {
klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr))
}
return err
}
if csiNodeUpdaterVar != nil {
csiNodeUpdaterVar.syncDriverUpdater(pluginName)
}
return nil
}
func updateCSIDriver(pluginName string) error {
csi, err := newCsiDriverClient(csiDriverName(pluginName))
if err != nil {
return fmt.Errorf("failed to create CSI client for driver %q: %w", pluginName, err)
}
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx)
if err != nil {
return fmt.Errorf("failed to get NodeGetInfo from driver %q: %w", pluginName, err)
}
if err := nim.UpdateCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology); err != nil {
return fmt.Errorf("failed to update driver %q: %w", pluginName, err)
}
return nil
}
func (p *csiPlugin) VerifyExhaustedResource(spec *volume.Spec) bool {
if spec == nil || spec.PersistentVolume == nil || spec.PersistentVolume.Spec.CSI == nil {
klog.ErrorS(nil, "Invalid volume spec for CSI")
return false
}
pluginName := spec.PersistentVolume.Spec.CSI.Driver
driver, err := p.getCSIDriver(pluginName)
if err != nil {
klog.ErrorS(err, "Failed to retrieve CSIDriver", "pluginName", pluginName)
return false
}
period := getNodeAllocatableUpdatePeriod(driver)
if period == 0 {
return false
}
volumeHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
attachmentName := getAttachmentName(volumeHandle, pluginName, string(p.host.GetNodeName()))
kubeClient := p.host.GetKubeClient()
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
defer cancel()
attachment, err := kubeClient.StorageV1().VolumeAttachments().Get(ctx, attachmentName, meta.GetOptions{})
if err != nil {
klog.ErrorS(err, "Failed to get volume attachment", "attachmentName", attachmentName)
return false
}
if isResourceExhaustError(attachment) {
klog.V(4).InfoS("Detected ResourceExhausted error for volume", "pluginName", pluginName, "volumeHandle", volumeHandle)
if err := updateCSIDriver(pluginName); err != nil {
klog.ErrorS(err, "Failed to update CSIDriver", "pluginName", pluginName)
}
return true
}
return false
}
func isResourceExhaustError(attachment *storage.VolumeAttachment) bool {
if attachment == nil || attachment.Status.AttachError == nil {
return false
}
return attachment.Status.AttachError.ErrorCode != nil &&
*attachment.Status.AttachError.ErrorCode == int32(codes.ResourceExhausted)
}
func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) {
if len(versions) == 0 {
return nil, errors.New(log("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName))
}
// Validate version
// CSI currently only has versions 0.x and 1.x (see https://github.com/container-storage-interface/spec/releases).
// Therefore any driver claiming version 2.x+ is rejected as an unsupported version.
// Future 1.x versions of CSI are supposed to be backwards compatible so this version of Kubernetes will work with any 1.x driver
// (or 0.x), but it may not work with 2.x drivers (because 2.x does not have to be backwards compatible with 1.x).
// CSI v0.x is no longer supported as of Kubernetes v1.17 in accordance with deprecation policy set out in Kubernetes v1.13.
newDriverHighestVersion, err := utilversion.HighestSupportedVersion(versions)
if err != nil {
return nil, errors.New(log("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err))
}
existingDriver, driverExists := csiDrivers.Get(pluginName)
if driverExists {
if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) {
return nil, errors.New(log("%s for CSI driver %q failed. Another driver with the same name is already registered with a higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion))
}
}
return newDriverHighestVersion, nil
}
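// Illustrative sketch (not part of the original source): for a registrar
// advertising both a 0.x and a 1.x version,
//
//	h.validateVersions("RegisterPlugin", "hypothetical.csi.driver", ep, []string{"0.3.0", "1.2.0"})
//
// is expected to return 1.2.0: v0.x support was dropped in Kubernetes v1.17,
// 2.x+ would be rejected, and registration fails only if another driver with
// the same name already registered an equal or higher version.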
// DeRegisterPlugin is called when a plugin has removed its socket, signaling
// that it is no longer available
func (h *RegistrationHandler) DeRegisterPlugin(pluginName, endpoint string) {
klog.Info(log("registrationHandler.DeRegisterPlugin request for plugin %s, endpoint %s", pluginName, endpoint))
if err := unregisterDriver(pluginName); err != nil {
klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err))
}
if csiNodeUpdaterVar != nil {
csiNodeUpdaterVar.syncDriverUpdater(pluginName)
}
}
func (p *csiPlugin) Init(host volume.VolumeHost) error {
p.host = host
csiClient := host.GetKubeClient()
if csiClient == nil {
klog.Warning(log("kubeclient not set, assuming standalone kubelet"))
} else {
// set CSIDriverLister and volumeAttachmentLister
seLinuxHost, ok := host.(volume.CSIDriverVolumeHost)
if ok {
p.csiDriverLister = seLinuxHost.CSIDriverLister()
if p.csiDriverLister == nil {
klog.Error(log("CSIDriverLister not found on CSIDriverVolumeHost"))
}
}
adcHost, ok := host.(volume.AttachDetachVolumeHost)
if ok {
p.volumeAttachmentLister = adcHost.VolumeAttachmentLister()
if p.volumeAttachmentLister == nil {
klog.Error(log("VolumeAttachmentLister not found on AttachDetachVolumeHost"))
}
}
kletHost, ok := host.(volume.KubeletVolumeHost)
if ok {
p.csiDriverLister = kletHost.CSIDriverLister()
if p.csiDriverLister == nil {
klog.Error(log("CSIDriverLister not found on KubeletVolumeHost"))
}
p.serviceAccountTokenGetter = host.GetServiceAccountTokenFunc()
if p.serviceAccountTokenGetter == nil {
klog.Error(log("ServiceAccountTokenGetter not found on KubeletVolumeHost"))
}
// We don't run the volumeAttachmentLister in the kubelet context
p.volumeAttachmentLister = nil
informerFactory := kletHost.GetInformerFactory()
if informerFactory == nil {
klog.Error(log("InformerFactory not found on KubeletVolumeHost"))
} else {
p.csiDriverInformer = informerFactory.Storage().V1().CSIDrivers().Informer()
}
}
}
var migratedPlugins = map[string](func() bool){
csitranslationplugins.GCEPDInTreePluginName: func() bool {
return true
},
csitranslationplugins.AWSEBSInTreePluginName: func() bool {
return true
},
csitranslationplugins.CinderInTreePluginName: func() bool {
return true
},
csitranslationplugins.AzureDiskInTreePluginName: func() bool {
return true
},
csitranslationplugins.AzureFileInTreePluginName: func() bool {
return true
},
csitranslationplugins.VSphereInTreePluginName: func() bool {
return true
},
csitranslationplugins.PortworxVolumePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx)
},
}
// Initialize the node info manager, which manages the CSINode object and node labels for migrated plugins
nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host, migratedPlugins)
PluginHandler.csiPlugin = p
// This call prevents the kubelet from posting Ready status until the CSINode
// object is both installed and initialized
if err := initializeCSINode(host, p.csiDriverInformer); err != nil {
return errors.New(log("failed to initialize CSINode: %v", err))
}
return nil
}
func initializeCSINode(host volume.VolumeHost, csiDriverInformer cache.SharedIndexInformer) error {
kvh, ok := host.(volume.KubeletVolumeHost)
if !ok {
klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINode initialization, not running on kubelet")
return nil
}
kubeClient := host.GetKubeClient()
if kubeClient == nil {
// Kubelet running in standalone mode. Skip CSINode initialization
klog.Warning("Skipping CSINode initialization, kubelet running in standalone mode")
return nil
}
kvh.SetKubeletError(errors.New("CSINode is not yet initialized"))
go func() {
defer utilruntime.HandleCrash()
// First wait indefinitely to talk to Kube APIServer
nodeName := host.GetNodeName()
err := waitForAPIServerForever(kubeClient, nodeName)
if err != nil {
klog.Fatalf("Failed to initialize CSINode while waiting for API server to report ok: %v", err)
}
// Backoff parameters tuned to retry over 140 seconds. Will fail and restart the Kubelet
// after max retry steps.
initBackoff := wait.Backoff{
Steps: 6,
Duration: 15 * time.Millisecond,
Factor: 6.0,
Jitter: 0.1,
}
err = wait.ExponentialBackoff(initBackoff, func() (bool, error) {
klog.V(4).Infof("Initializing migrated drivers on CSINode")
err := nim.InitializeCSINodeWithAnnotation()
if err != nil {
kvh.SetKubeletError(fmt.Errorf("failed to initialize CSINode: %v", err))
klog.Errorf("Failed to initialize CSINode: %v", err)
return false, nil
}
// Successfully initialized drivers, allow Kubelet to post Ready
kvh.SetKubeletError(nil)
return true, nil
})
if err != nil {
// 2 releases after CSIMigration and all CSIMigrationX (where X is a volume plugin)
// are permanently enabled the apiserver/controllers can assume that the kubelet is
// using CSI for all Migrated volume plugins. Then all the CSINode initialization
// code can be dropped from Kubelet.
// Kill the Kubelet process and allow it to restart to retry initialization
klog.Fatalf("Failed to initialize CSINode after retrying: %v", err)
}
}()
if utilfeature.DefaultFeatureGate.Enabled(features.MutableCSINodeAllocatableCount) && csiNodeUpdaterVar == nil {
if csiDriverInformer != nil {
var err error
csiNodeUpdaterVar, err = NewCSINodeUpdater(csiDriverInformer)
if err != nil {
klog.ErrorS(err, "Failed to create CSINodeUpdater")
} else {
go csiNodeUpdaterVar.Run()
}
}
}
return nil
}
func (p *csiPlugin) GetPluginName() string {
return CSIPluginName
}
// GetVolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSep>CSIVolumeSource.VolumeHandle
// That string value is used in Detach() to extract the driver name and volume handle.
func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
csi, err := getPVSourceFromSpec(spec)
if err != nil {
return "", errors.New(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err))
}
// return driverName<separator>volumeHandle
return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil
}
func (p *csiPlugin) CanSupport(spec *volume.Spec) bool {
// TODO (vladimirvivien) CanSupport should also take into account
// the availability/registration of specified Driver in the volume source
if spec == nil {
return false
}
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil) ||
(spec.Volume != nil && spec.Volume.CSI != nil)
}
func (p *csiPlugin) RequiresRemount(spec *volume.Spec) bool {
if p.csiDriverLister == nil {
return false
}
driverName, err := GetCSIDriverName(spec)
if err != nil {
klog.V(5).Info(log("Failed to mark %q as republish required, err: %v", spec.Name(), err))
return false
}
csiDriver, err := p.getCSIDriver(driverName)
if err != nil {
klog.V(5).Info(log("Failed to mark %q as republish required, err: %v", spec.Name(), err))
return false
}
// RequiresRepublish is defaulted by the API server; guard against a nil pointer anyway.
return csiDriver.Spec.RequiresRepublish != nil && *csiDriver.Spec.RequiresRepublish
}
func (p *csiPlugin) NewMounter(
spec *volume.Spec,
pod *api.Pod) (volume.Mounter, error) {
volSrc, pvSrc, err := getSourceFromSpec(spec)
if err != nil {
return nil, err
}
var (
driverName string
volumeHandle string
readOnly bool
)
switch {
case volSrc != nil:
volumeHandle = makeVolumeHandle(string(pod.UID), spec.Name())
driverName = volSrc.Driver
if volSrc.ReadOnly != nil {
readOnly = *volSrc.ReadOnly
}
case pvSrc != nil:
driverName = pvSrc.Driver
volumeHandle = pvSrc.VolumeHandle
readOnly = spec.ReadOnly
default:
return nil, errors.New(log("volume source not found in volume.Spec"))
}
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return nil, err
}
k8s := p.host.GetKubeClient()
if k8s == nil {
return nil, errors.New(log("failed to get a kubernetes client"))
}
kvh, ok := p.host.(volume.KubeletVolumeHost)
if !ok {
return nil, errors.New(log("cast from VolumeHost to KubeletVolumeHost failed"))
}
mounter := &csiMountMgr{
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(driverName),
volumeLifecycleMode: volumeLifecycleMode,
volumeID: volumeHandle,
specVolumeID: spec.Name(),
readOnly: readOnly,
kubeVolHost: kvh,
}
mounter.csiClientGetter.driverName = csiDriverName(driverName)
dir := mounter.GetPath()
mounter.MetricsProvider = NewMetricsCsi(volumeHandle, dir, csiDriverName(driverName))
klog.V(4).Info(log("mounter created successfully"))
return mounter, nil
}
func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
klog.V(4).Info(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
kvh, ok := p.host.(volume.KubeletVolumeHost)
if !ok {
return nil, errors.New(log("cast from VolumeHost to KubeletVolumeHost failed"))
}
unmounter := &csiMountMgr{
plugin: p,
podUID: podUID,
specVolumeID: specName,
kubeVolHost: kvh,
}
// load volume info from file
dir := unmounter.GetPath()
dataDir := filepath.Dir(dir) // strip the trailing /mount
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
return nil, errors.New(log("unmounter failed to load volume data file [%s]: %v", dir, err))
}
unmounter.driverName = csiDriverName(data[volDataKey.driverName])
unmounter.volumeID = data[volDataKey.volHandle]
unmounter.csiClientGetter.driverName = unmounter.driverName
return unmounter, nil
}
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
volData, err := loadVolumeData(mountPath, volDataFileName)
if err != nil {
return volume.ReconstructedVolume{}, errors.New(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
}
klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
var ret volume.ReconstructedVolume
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
ret.SELinuxMountContext = volData[volDataKey.seLinuxMountContext]
}
// If the mode is VolumeLifecycleEphemeral, use constructVolSourceSpec to
// construct a volume source spec. If the mode is VolumeLifecyclePersistent,
// use constructPVSourceSpec to construct a persistent volume source spec.
if storage.VolumeLifecycleMode(volData[volDataKey.volumeLifecycleMode]) == storage.VolumeLifecycleEphemeral {
ret.Spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
return ret, nil
}
ret.Spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
return ret, nil
}
// constructVolSourceSpec constructs volume.Spec with CSIVolumeSource
func (p *csiPlugin) constructVolSourceSpec(volSpecName, driverName string) *volume.Spec {
vol := &api.Volume{
Name: volSpecName,
VolumeSource: api.VolumeSource{
CSI: &api.CSIVolumeSource{
Driver: driverName,
},
},
}
return volume.NewSpecFromVolume(vol)
}
// constructPVSourceSpec constructs volume.Spec with CSIPersistentVolumeSource
func (p *csiPlugin) constructPVSourceSpec(volSpecName, driverName, volumeHandle string) *volume.Spec {
fsMode := api.PersistentVolumeFilesystem
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volSpecName,
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volumeHandle,
},
},
VolumeMode: &fsMode,
},
}
return volume.NewSpecFromPersistentVolume(pv, false)
}
func (p *csiPlugin) SupportsMountOption() bool {
// TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags
// to probe for the result for this method
// (bswartz) Until the CSI spec supports probing, our only option is to
// make plugins register their support for mount options or lack thereof
// directly with kubernetes.
return true
}
func (p *csiPlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
driver, err := GetCSIDriverName(spec)
if err != nil {
return false, err
}
csiDriver, err := p.getCSIDriver(driver)
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
if csiDriver.Spec.SELinuxMount != nil {
return *csiDriver.Spec.SELinuxMount, nil
}
return false, nil
}
return false, nil
}
// volume.AttachableVolumePlugin methods
var _ volume.AttachableVolumePlugin = &csiPlugin{}
var _ volume.DeviceMountableVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewAttacher() (volume.Attacher, error) {
return p.newAttacherDetacher()
}
func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return p.NewAttacher()
}
func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
return p.newAttacherDetacher()
}
func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return false, err
}
if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
pvSrc, err := getCSISourceFromSpec(spec)
if err != nil {
return false, err
}
driverName := pvSrc.Driver
skipAttach, err := p.skipAttach(driverName)
if err != nil {
return false, err
}
return !skipAttach, nil
}
// CanDeviceMount returns true if the spec supports device mount
func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return false, err
}
if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
// Persistent volumes support device mount.
return true, nil
}
func (p *csiPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return p.NewDetacher()
}
func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := p.host.GetMounter()
return m.GetMountRefs(deviceMountPath)
}
// BlockVolumePlugin methods
var _ volume.BlockVolumePlugin = &csiPlugin{}
func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod) (volume.BlockVolumeMapper, error) {
pvSource, err := getCSISourceFromSpec(spec)
if err != nil {
return nil, err
}
readOnly, err := getReadOnlyFromSpec(spec)
if err != nil {
return nil, err
}
klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver))
k8s := p.host.GetKubeClient()
if k8s == nil {
return nil, errors.New(log("failed to get a kubernetes client"))
}
mapper := &csiBlockMapper{
k8s: k8s,
plugin: p,
volumeID: pvSource.VolumeHandle,
driverName: csiDriverName(pvSource.Driver),
readOnly: readOnly,
spec: spec,
specName: spec.Name(),
pod: podRef,
podUID: podRef.UID,
}
mapper.csiClientGetter.driverName = csiDriverName(pvSource.Driver)
// Save volume info in pod dir
dataDir := getVolumeDeviceDataDir(spec.Name(), p.host)
if err := os.MkdirAll(dataDir, 0750); err != nil {
return nil, errors.New(log("failed to create data dir %s: %v", dataDir, err))
}
klog.V(4).Info(log("created path successfully [%s]", dataDir))
blockPath, err := mapper.GetGlobalMapPath(spec)
if err != nil {
return nil, errors.New(log("failed to get device path: %v", err))
}
mapper.MetricsProvider = NewMetricsCsi(pvSource.VolumeHandle, blockPath+"/"+string(podRef.UID), csiDriverName(pvSource.Driver))
// persist volume info data for teardown
node := string(p.host.GetNodeName())
attachID := getAttachmentName(pvSource.VolumeHandle, pvSource.Driver, node)
volData := map[string]string{
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: pvSource.VolumeHandle,
volDataKey.driverName: pvSource.Driver,
volDataKey.nodeName: node,
volDataKey.attachmentID: attachID,
}
err = saveVolumeData(dataDir, volDataFileName, volData)
defer func() {
// Remove the directory only if there was an error and the volume
// operation is considered finished.
if err != nil && volumetypes.IsOperationFinishedError(err) {
// attempt to cleanup volume mount dir.
if err = removeMountDir(p, dataDir); err != nil {
klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", dataDir, err))
}
}
}()
if err != nil {
errorMsg := log("csi.NewBlockVolumeMapper failed to save volume info data: %v", err)
klog.Error(errorMsg)
return nil, errors.New(errorMsg)
}
return mapper, nil
}
func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
klog.V(4).Info(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
unmapper := &csiBlockMapper{
plugin: p,
podUID: podUID,
specName: volName,
}
// load volume info from file
dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host)
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
return nil, errors.New(log("unmapper failed to load volume data file [%s]: %v", dataDir, err))
}
unmapper.driverName = csiDriverName(data[volDataKey.driverName])
unmapper.volumeID = data[volDataKey.volHandle]
unmapper.csiClientGetter.driverName = unmapper.driverName
return unmapper, nil
}
func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapPath string) (*volume.Spec, error) {
klog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath)
dataDir := getVolumeDeviceDataDir(specVolName, p.host)
volData, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
return nil, errors.New(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err))
}
klog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData))
blockMode := api.PersistentVolumeBlock
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volData[volDataKey.specVolID],
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: volData[volDataKey.driverName],
VolumeHandle: volData[volDataKey.volHandle],
},
},
VolumeMode: &blockMode,
},
}
return volume.NewSpecFromPersistentVolume(pv, false), nil
}
// skipAttach looks up CSIDriver object associated with driver name
// to determine if driver requires attachment volume operation
func (p *csiPlugin) skipAttach(driver string) (bool, error) {
csiDriver, err := p.getCSIDriver(driver)
if err != nil {
if apierrors.IsNotFound(err) {
// Don't skip attach if CSIDriver does not exist
return false, nil
}
return false, err
}
if csiDriver.Spec.AttachRequired != nil && *csiDriver.Spec.AttachRequired == false {
return true, nil
}
return false, nil
}
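// Illustrative sketch (not part of the original source): attach is skipped
// only when a CSIDriver object exists and explicitly opts out of attachment:
//
//	attachRequired := false
//	d := &storage.CSIDriver{Spec: storage.CSIDriverSpec{AttachRequired: &attachRequired}}
//	// with d in the lister cache: skipAttach(d.Name) -> (true, nil)
//	// with no CSIDriver object:   skipAttach(name)   -> (false, nil), attach proceeds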
func (p *csiPlugin) getCSIDriver(driver string) (*storage.CSIDriver, error) {
kletHost, ok := p.host.(volume.KubeletVolumeHost)
if ok {
if err := kletHost.WaitForCacheSync(); err != nil {
return nil, err
}
}
if p.csiDriverLister == nil {
return nil, errors.New("CSIDriver lister does not exist")
}
csiDriver, err := p.csiDriverLister.Get(driver)
return csiDriver, err
}
// getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}.
// 1) If the mode cannot be determined, it defaults to "persistent".
// 2) If the mode cannot be resolved to either {persistent | ephemeral}, an error is returned.
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/596-csi-inline-volumes/README.md
func (p *csiPlugin) getVolumeLifecycleMode(spec *volume.Spec) (storage.VolumeLifecycleMode, error) {
// 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
// 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
volSrc, _, err := getSourceFromSpec(spec)
if err != nil {
return "", err
}
if volSrc != nil {
return storage.VolumeLifecycleEphemeral, nil
}
return storage.VolumeLifecyclePersistent, nil
}
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
skip, err := p.skipAttach(driver)
if err != nil {
return nil, err
}
if skip {
return nil, nil
}
attachID := getAttachmentName(handle, driver, nodeName)
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
attachment, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
if err != nil {
return nil, err // This err already has enough context ("VolumeAttachment xyz not found")
}
if attachment == nil {
err = errors.New("no existing VolumeAttachment found")
return nil, err
}
return attachment.Status.AttachmentMetadata, nil
}
func (p *csiPlugin) newAttacherDetacher() (*csiAttacher, error) {
k8s := p.host.GetKubeClient()
if k8s == nil {
return nil, errors.New(log("unable to get kubernetes client from host"))
}
return &csiAttacher{
plugin: p,
k8s: k8s,
watchTimeout: csiTimeout,
}, nil
}
// podInfoEnabled check CSIDriver enabled pod info flag
func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
csiDriver, err := p.getCSIDriver(driverName)
if err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).Info(log("CSIDriver %q not found, not adding pod information", driverName))
return false, nil
}
return false, err
}
// If PodInfoOnMount is not set or is false, we do not set pod attributes
if csiDriver.Spec.PodInfoOnMount == nil || !*csiDriver.Spec.PodInfoOnMount {
klog.V(4).Info(log("CSIDriver %q does not require pod information", driverName))
return false, nil
}
return true, nil
}
func unregisterDriver(driverName string) error {
csiDrivers.Delete(driverName)
if err := nim.UninstallCSIDriver(driverName); err != nil {
return errors.New(log("Error uninstalling CSI driver: %v", err))
}
return nil
}
// waitForAPIServerForever waits forever to get a CSINode instance as a proxy
// for a healthy APIServer
func waitForAPIServerForever(client clientset.Interface, nodeName types.NodeName) error {
var lastErr error
// The returned object is discarded, so there is no risk of keeping a stale
// object, while reading from the apiserver cache reduces load on the API server and etcd.
opts := meta.GetOptions{}
util.FromApiserverCache(&opts)
err := wait.PollImmediateInfinite(time.Second, func() (bool, error) {
// Get a CSINode from API server to make sure 1) kubelet can reach API server
// and 2) it has enough permissions. Kubelet may have restricted permissions
// when it's bootstrapping TLS.
// https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/
_, lastErr = client.StorageV1().CSINodes().Get(context.TODO(), string(nodeName), opts)
if lastErr == nil || apierrors.IsNotFound(lastErr) {
// API server contacted
return true, nil
}
klog.V(2).Infof("Failed to contact API server when waiting for CSINode publishing: %s", lastErr)
return false, nil
})
if err != nil {
// In theory this is unreachable, but just in case:
return fmt.Errorf("%v: %v", err, lastErr)
}
return nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume"
utilstrings "k8s.io/utils/strings"
)
const (
// TestInformerSyncPeriod is informer sync period duration for testing
TestInformerSyncPeriod = 100 * time.Millisecond
// TestInformerSyncTimeout is informer timeout duration for testing
TestInformerSyncTimeout = 30 * time.Second
)
func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) {
credentials := map[string]string{}
secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(context.TODO(), secretRef.Name, meta.GetOptions{})
if err != nil {
return credentials, errors.New(log("failed to find the secret %s in the namespace %s with error: %v", secretRef.Name, secretRef.Namespace, err))
}
for key, value := range secret.Data {
credentials[key] = string(value)
}
return credentials, nil
}
// saveVolumeData persists parameter data as json file at the provided location
func saveVolumeData(dir string, fileName string, data map[string]string) error {
dataFilePath := filepath.Join(dir, fileName)
klog.V(4).Info(log("saving volume data file [%s]", dataFilePath))
file, err := os.Create(dataFilePath)
if err != nil {
return errors.New(log("failed to save volume data file %s: %v", dataFilePath, err))
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
return errors.New(log("failed to save volume data file %s: %v", dataFilePath, err))
}
klog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
// loadVolumeData loads volume info from specified json file/location
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
dataFileName := filepath.Join(dir, fileName)
klog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
return nil, fmt.Errorf("%s: %w", log("failed to open volume data file [%s]", dataFileName), err)
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
return nil, errors.New(log("failed to parse volume data file [%s]: %v", dataFileName, err))
}
return data, nil
}
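// Illustrative sketch (not part of the original source): saveVolumeData and
// loadVolumeData round-trip a flat string map through a JSON file, which is
// how the mounter persists vol_data.json for later teardown (map keys below
// are arbitrary placeholders):
//
//	data := map[string]string{"volumeHandle": "vol-1", "driverName": "hypothetical.csi.driver"}
//	_ = saveVolumeData(dir, volDataFileName, data)
//	loaded, _ := loadVolumeData(dir, volDataFileName)
//	// loaded is equal to data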
func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
return getPVSourceFromSpec(spec)
}
func getReadOnlyFromSpec(spec *volume.Spec) (bool, error) {
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return spec.ReadOnly, nil
}
return false, fmt.Errorf("CSIPersistentVolumeSource not defined in spec")
}
// log prepends log string with `kubernetes.io/csi`
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("%s: %s", CSIPluginName, msg), parts...)
}
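// Illustrative sketch (not part of the original source): every message built
// through log carries the plugin-name prefix, e.g.:
//
//	log("failed to save volume data file %s: %v", path, err)
//	// -> "kubernetes.io/csi: failed to save volume data file .../vol_data.json: ..."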
// getVolumePluginDir returns the path where CSI plugin keeps metadata for given volume
func getVolumePluginDir(specVolID string, host volume.VolumeHost) string {
sanitizedSpecVolID := utilstrings.EscapeQualifiedName(specVolID)
return filepath.Join(host.GetVolumeDevicePluginDir(CSIPluginName), sanitizedSpecVolID)
}
// getVolumeDevicePluginDir returns the path where the CSI plugin keeps the
// symlink for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/dev
func getVolumeDevicePluginDir(specVolID string, host volume.VolumeHost) string {
return filepath.Join(getVolumePluginDir(specVolID, host), "dev")
}
// getVolumeDeviceDataDir returns the path where the CSI plugin keeps the
// volume data for a block device associated with a given specVolumeID.
// path: plugins/kubernetes.io/csi/volumeDevices/{specVolumeID}/data
func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
return filepath.Join(getVolumePluginDir(specVolID, host), "data")
}
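// Illustrative sketch (not part of the original source): for a (sanitized)
// specVolID "pv-123", the three helpers above resolve, relative to the kubelet
// root, to:
//
//	getVolumePluginDir("pv-123", host)       // .../plugins/kubernetes.io/csi/volumeDevices/pv-123
//	getVolumeDevicePluginDir("pv-123", host) // .../plugins/kubernetes.io/csi/volumeDevices/pv-123/dev
//	getVolumeDeviceDataDir("pv-123", host)   // .../plugins/kubernetes.io/csi/volumeDevices/pv-123/data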
// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce
func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool {
if modes == nil {
return false
}
for _, mode := range modes {
if mode == api.ReadWriteOnce ||
mode == api.ReadWriteOncePod {
return true
}
}
return false
}
// getSourceFromSpec returns either CSIVolumeSource or CSIPersistentVolumeSource, but not both
func getSourceFromSpec(spec *volume.Spec) (*api.CSIVolumeSource, *api.CSIPersistentVolumeSource, error) {
if spec == nil {
return nil, nil, fmt.Errorf("volume.Spec nil")
}
if spec.Volume != nil && spec.PersistentVolume != nil {
return nil, nil, fmt.Errorf("volume.Spec has both volume and persistent volume sources")
}
if spec.Volume != nil && spec.Volume.CSI != nil {
return spec.Volume.CSI, nil, nil
}
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CSI != nil {
return nil, spec.PersistentVolume.Spec.CSI, nil
}
return nil, nil, fmt.Errorf("volume source not found in volume.Spec")
}
// getPVSourceFromSpec ensures only CSIPersistentVolumeSource is present in volume.Spec
func getPVSourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) {
volSrc, pvSrc, err := getSourceFromSpec(spec)
if err != nil {
return nil, err
}
if volSrc != nil {
return nil, fmt.Errorf("unexpected api.CSIVolumeSource found in volume.Spec")
}
return pvSrc, nil
}
// GetCSIMounterPath returns the mounter path given the base path.
func GetCSIMounterPath(path string) string {
return filepath.Join(path, "/mount")
}
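// For example, GetCSIMounterPath("/var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~csi/pv-1")
// returns that path with a trailing "/mount" appended (the path value is hypothetical).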
// GetCSIDriverName returns the csi driver name
func GetCSIDriverName(spec *volume.Spec) (string, error) {
volSrc, pvSrc, err := getSourceFromSpec(spec)
if err != nil {
return "", err
}
switch {
case volSrc != nil:
return volSrc.Driver, nil
case pvSrc != nil:
return pvSrc.Driver, nil
default:
return "", errors.New(log("volume source not found in volume.Spec"))
}
}
func createCSIOperationContext(volumeSpec *volume.Spec, timeout time.Duration) (context.Context, context.CancelFunc) {
migrated := false
if volumeSpec != nil {
migrated = volumeSpec.Migrated
}
ctx := context.WithValue(context.Background(), additionalInfoKey, additionalInfo{Migrated: strconv.FormatBool(migrated)})
return context.WithTimeout(ctx, timeout)
}
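// A usage sketch (the timeout value is hypothetical; csClient stands for any csiClient):
//
//	ctx, cancel := createCSIOperationContext(volumeSpec, 2*time.Minute)
//	defer cancel()
//	// pass ctx to a csClient call; the additionalInfo value carried in ctx
//	// tells the CSI client whether the volume was migrated from an in-tree plugin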
// getPodInfoAttrs returns pod info for NodePublish
func getPodInfoAttrs(pod *api.Pod, volumeMode storage.VolumeLifecycleMode) map[string]string {
attrs := map[string]string{
"csi.storage.k8s.io/pod.name": pod.Name,
"csi.storage.k8s.io/pod.namespace": pod.Namespace,
"csi.storage.k8s.io/pod.uid": string(pod.UID),
"csi.storage.k8s.io/serviceAccount.name": pod.Spec.ServiceAccountName,
"csi.storage.k8s.io/ephemeral": strconv.FormatBool(volumeMode == storage.VolumeLifecycleEphemeral),
}
return attrs
}
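// For a pod "web-0" in namespace "default" with service account "sa-1" and a
// persistent (non-ephemeral) lifecycle, the resulting map looks like:
//
//	map[string]string{
//		"csi.storage.k8s.io/pod.name":            "web-0",
//		"csi.storage.k8s.io/pod.namespace":       "default",
//		"csi.storage.k8s.io/pod.uid":             "<uid>",
//		"csi.storage.k8s.io/serviceAccount.name": "sa-1",
//		"csi.storage.k8s.io/ephemeral":           "false",
//	}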
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"errors"
"fmt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
api "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
var _ volume.NodeExpandableVolumePlugin = &csiPlugin{}
func (c *csiPlugin) RequiresFSResize() bool {
return true
}
func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
klog.V(4).Info(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
csiSource, err := getCSISourceFromSpec(resizeOptions.VolumeSpec)
if err != nil {
return false, errors.New(log("Expander.NodeExpand failed to get CSI persistent source: %v", err))
}
csClient, err := newCsiDriverClient(csiDriverName(csiSource.Driver))
if err != nil {
// Treat the absence of the CSI driver as a transient error
// See https://github.com/kubernetes/kubernetes/issues/120268
return false, volumetypes.NewTransientOperationFailure(err.Error())
}
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, errors.New(log("Expander.NodeExpand failed to check VolumeMode of source: %v", err))
}
return c.nodeExpandWithClient(resizeOptions, csiSource, csClient, fsVolume)
}
func (c *csiPlugin) nodeExpandWithClient(
resizeOptions volume.NodeResizeOptions,
csiSource *api.CSIPersistentVolumeSource,
csClient csiClient,
fsVolume bool) (bool, error) {
driverName := csiSource.Driver
ctx, cancel := createCSIOperationContext(resizeOptions.VolumeSpec, csiTimeout)
defer cancel()
nodeExpandSet, err := csClient.NodeSupportsNodeExpand(ctx)
if err != nil {
return false, fmt.Errorf("Expander.NodeExpand failed to check if node supports expansion : %v", err)
}
if !nodeExpandSet {
return false, volumetypes.NewOperationNotSupportedError(fmt.Sprintf("NodeExpand is not supported by the CSI driver %s", driverName))
}
pv := resizeOptions.VolumeSpec.PersistentVolume
if pv == nil {
return false, fmt.Errorf("Expander.NodeExpand failed to find associated PersistentVolume for plugin %s", c.GetPluginName())
}
nodeExpandSecrets := map[string]string{}
expandClient := c.host.GetKubeClient()
if csiSource.NodeExpandSecretRef != nil {
nodeExpandSecrets, err = getCredentialsFromSecret(expandClient, csiSource.NodeExpandSecretRef)
if err != nil {
return false, fmt.Errorf("expander.NodeExpand failed to get NodeExpandSecretRef %s/%s: %v",
csiSource.NodeExpandSecretRef.Namespace, csiSource.NodeExpandSecretRef.Name, err)
}
}
opts := csiResizeOptions{
volumePath: resizeOptions.DeviceMountPath,
stagingTargetPath: resizeOptions.DeviceStagePath,
volumeID: csiSource.VolumeHandle,
newSize: resizeOptions.NewSize,
fsType: csiSource.FSType,
accessMode: api.ReadWriteOnce,
mountOptions: pv.Spec.MountOptions,
secrets: nodeExpandSecrets,
}
if !fsVolume {
// for block volumes, the volumePath in the CSI NodeExpandVolumeRequest is
// effectively the same as DevicePath, because block devices are not mounted
// and hence DeviceMountPath does not get populated in resizeOptions.DeviceMountPath
opts.volumePath = resizeOptions.DevicePath
opts.fsType = fsTypeBlockName
}
if pv.Spec.AccessModes != nil {
opts.accessMode = pv.Spec.AccessModes[0]
}
_, err = csClient.NodeExpandVolume(ctx, opts)
if err != nil {
if inUseError(err) {
failedConditionErr := fmt.Errorf("Expander.NodeExpand failed to expand the volume: %w", volumetypes.NewFailedPreconditionError(err.Error()))
return false, failedConditionErr
}
if isInfeasibleError(err) {
infeasibleError := volumetypes.NewInfeasibleError(fmt.Sprintf("Expander.NodeExpand failed to expand the volume: %s", err.Error()))
return false, infeasibleError
}
return false, fmt.Errorf("Expander.NodeExpand failed to expand the volume: %w", err)
}
return true, nil
}
func inUseError(err error) bool {
st, ok := status.FromError(err)
if !ok {
// not a grpc error
return false
}
// A FailedPrecondition error means the driver does not support expansion of
// in-use volumes.
// More info: https://github.com/container-storage-interface/spec/blob/master/spec.md#controllerexpandvolume-errors
return st.Code() == codes.FailedPrecondition
}
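// A driver typically produces such an error through the gRPC status package,
// e.g. (hypothetical message):
//
//	return status.Error(codes.FailedPrecondition, "volume is in use by another node")
//
// which inUseError recognizes, letting the caller wrap it in a
// FailedPreconditionError instead of retrying blindly.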
// isInfeasibleError returns true for gRPC errors that are considered terminal,
// in the sense that they indicate the CSI operation is infeasible.
// This function matches a subset of final errors; all infeasible errors are also final errors.
func isInfeasibleError(err error) bool {
st, ok := status.FromError(err)
if !ok {
// This is not gRPC error. The operation must have failed before gRPC
// method was called, otherwise we would get gRPC error.
// We don't know if any previous volume operation is in progress, be on the safe side.
return false
}
switch st.Code() {
case codes.InvalidArgument,
codes.OutOfRange,
codes.NotFound:
return true
}
// All other errors mean the operation either did not start or failed;
// they are definitely not infeasible errors.
return false
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nodeinfomanager includes internal functions used to add labels to
// and delete labels from Kubernetes nodes for corresponding CSI drivers
package nodeinfomanager
import (
"context"
"encoding/json"
goerrors "errors"
"fmt"
"math"
"strings"
"sync"
"time"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
// annotationKeyNodeID is the name of the Node annotation that contains a JSON map of driver names to node IDs
annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid"
)
var (
nodeKind = v1.SchemeGroupVersion.WithKind("Node")
updateBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
)
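// With these parameters, the sleeps between the four attempts are roughly
// 10ms, 50ms and 250ms (each lengthened by up to 10% jitter).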
// nodeInfoManager contains necessary common dependencies to update node info on both
// the Node and CSINode objects.
type nodeInfoManager struct {
nodeName types.NodeName
volumeHost volume.VolumeHost
migratedPlugins map[string](func() bool)
// lock protects changes to node.
lock sync.Mutex
}
// nodeUpdateFunc applies an update to a Node object. If no update is needed, the function must return the same Node object as the input.
type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error)
// Interface implements an interface for managing labels of a node
type Interface interface {
CreateCSINode() (*storagev1.CSINode, error)
// Updates or Creates the CSINode object with annotations for CSI Migration
InitializeCSINodeWithAnnotation() error
// InstallCSIDriver records in the cluster the given node information from the CSI driver with the given name.
// Concurrent calls to InstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology map[string]string) error
// UpdateCSIDriver updates CSIDrivers field in the CSINode object.
UpdateCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error
// UninstallCSIDriver removes from the cluster the node information from the CSI driver with the given name.
// Concurrent calls to UninstallCSIDriver() are allowed, but they should not be intertwined with calls
// to other methods in this interface.
UninstallCSIDriver(driverName string) error
}
// NewNodeInfoManager initializes nodeInfoManager
func NewNodeInfoManager(
nodeName types.NodeName,
volumeHost volume.VolumeHost,
migratedPlugins map[string](func() bool)) Interface {
return &nodeInfoManager{
nodeName: nodeName,
volumeHost: volumeHost,
migratedPlugins: migratedPlugins,
}
}
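// A construction sketch (node name and driver values are hypothetical; the
// migratedPlugins funcs report whether migration is enabled for an in-tree plugin):
//
//	nim := NewNodeInfoManager(
//		types.NodeName("node-1"),
//		volumeHost,
//		map[string](func() bool){
//			"kubernetes.io/aws-ebs": func() bool { return true },
//		},
//	)
//	err := nim.InstallCSIDriver("ebs.csi.aws.com", "i-0123456789abcdef0", 39, nil)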
// InstallCSIDriver updates the node ID annotation in the Node object and CSIDrivers field in the
// CSINode object. If the CSINode object doesn't yet exist, it will be created.
// If multiple calls to InstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINode update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error {
if driverNodeID == "" {
return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty")
}
nodeUpdateFuncs := []nodeUpdateFunc{
removeMaxAttachLimit(driverName), // remove in 1.35; due to the version skew policy we have to keep it for 3 releases
updateNodeIDInNode(driverName, driverNodeID),
updateTopologyLabels(topology),
}
err := nim.updateNode(nodeUpdateFuncs...)
if err != nil {
return fmt.Errorf("error updating Node object with CSI driver node info: %v", err)
}
err = nim.updateCSINode(driverName, driverNodeID, maxAttachLimit, topology)
if err != nil {
return fmt.Errorf("error updating CSINode object with CSI driver node info: %v", err)
}
return nil
}
// UpdateCSIDriver updates CSIDrivers field in the CSINode object.
func (nim *nodeInfoManager) UpdateCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error {
err := nim.updateCSINode(driverName, driverNodeID, maxAttachLimit, topology)
if err != nil {
return fmt.Errorf("error updating CSINode object with CSI driver node info: %w", err)
}
return nil
}
// UninstallCSIDriver removes the node ID annotation from the Node object and CSIDrivers field from the
// CSINode object. If the CSINodeInfo object contains no CSIDrivers, it will be deleted.
// If multiple calls to UninstallCSIDriver() are made in parallel, some calls might receive Node or
// CSINode update conflicts, which causes the function to retry the corresponding update.
func (nim *nodeInfoManager) UninstallCSIDriver(driverName string) error {
err := nim.uninstallDriverFromCSINode(driverName)
if err != nil {
return fmt.Errorf("error uninstalling CSI driver from CSINode object %v", err)
}
err = nim.updateNode(
removeMaxAttachLimit(driverName), // remove it when this function is removed from nodeUpdateFuncs
removeNodeIDFromNode(driverName),
)
if err != nil {
return fmt.Errorf("error removing CSI driver node info from Node object %v", err)
}
return nil
}
func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error {
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateNode(updateFuncs...); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating node: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
// tryUpdateNode makes a single attempt to update the corresponding node object,
// which is modified by applying the given update functions sequentially; the
// retry loop lives in updateNode above.
// Because updateFuncs are applied sequentially, later updateFuncs should take into account
// the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple
// functions update the same field, updates in the last function are persisted.
func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error {
nim.lock.Lock()
defer nim.lock.Unlock()
// Retrieve the latest version of Node before attempting update, so that
// existing changes are not overwritten.
kubeClient := nim.volumeHost.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("error getting kube client")
}
nodeClient := kubeClient.CoreV1().Nodes()
originalNode, err := nodeClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return err
}
node := originalNode.DeepCopy()
needUpdate := false
for _, update := range updateFuncs {
newNode, updated, err := update(node)
if err != nil {
return err
}
node = newNode
needUpdate = needUpdate || updated
}
if needUpdate {
// PatchNodeStatus can update both node's status and labels or annotations
// Updating status by directly updating node does not work
_, _, updateErr := nodeutil.PatchNodeStatus(kubeClient.CoreV1(), types.NodeName(node.Name), originalNode, node)
return updateErr
}
return nil
}
// buildNodeIDMapFromAnnotation guarantees the returned map is non-nil if no error is returned.
func buildNodeIDMapFromAnnotation(node *v1.Node) (map[string]string, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
var existingDriverMap map[string]string
if previousAnnotationValue != "" {
// Parse previousAnnotationValue as JSON
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
}
if existingDriverMap == nil {
return make(map[string]string), nil
}
return existingDriverMap, nil
}
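// The annotation it parses looks like (driver names and node IDs are hypothetical):
//
//	csi.volume.kubernetes.io/nodeid: '{"ebs.csi.aws.com":"i-0123","smb.csi.example.com":"node-1"}'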
// updateNodeIDInNode returns a function that updates a Node object with the given
// Node ID information.
func updateNodeIDInNode(
csiDriverName string,
csiDriverNodeID string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
existingDriverMap, err := buildNodeIDMapFromAnnotation(node)
if err != nil {
return nil, false, err
}
if val, ok := existingDriverMap[csiDriverName]; ok {
if val == csiDriverNodeID {
// Value already exists in node annotation, nothing more to do
return node, false, nil
}
}
// Add/update annotation value
existingDriverMap[csiDriverName] = csiDriverNodeID
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"error while marshalling node ID map updated with driverName=%q, nodeID=%q: %v",
csiDriverName,
csiDriverNodeID,
err)
}
if node.ObjectMeta.Annotations == nil {
node.ObjectMeta.Annotations = make(map[string]string)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
return node, true, nil
}
}
// removeNodeIDFromNode returns a function that removes node ID information matching the given
// driver name from a Node object.
func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
var previousAnnotationValue string
if node.ObjectMeta.Annotations != nil {
previousAnnotationValue =
node.ObjectMeta.Annotations[annotationKeyNodeID]
}
if previousAnnotationValue == "" {
return node, false, nil
}
// Parse previousAnnotationValue as JSON
existingDriverMap := map[string]string{}
if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil {
return nil, false, fmt.Errorf(
"failed to parse node's %q annotation value (%q) err=%v",
annotationKeyNodeID,
previousAnnotationValue,
err)
}
if _, ok := existingDriverMap[csiDriverName]; !ok {
// Value is already missing in node annotation, nothing more to do
return node, false, nil
}
// Delete annotation value
delete(existingDriverMap, csiDriverName)
if len(existingDriverMap) == 0 {
delete(node.ObjectMeta.Annotations, annotationKeyNodeID)
} else {
jsonObj, err := json.Marshal(existingDriverMap)
if err != nil {
return nil, false, fmt.Errorf(
"failed while trying to remove key %q from node %q annotation. Existing data: %v",
csiDriverName,
annotationKeyNodeID,
previousAnnotationValue)
}
node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj)
}
return node, true, nil
}
}
// updateTopologyLabels returns a function that updates labels of a Node object with the given
// topology information.
func updateTopologyLabels(topology map[string]string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
if len(topology) == 0 {
return node, false, nil
}
for k, v := range topology {
if curVal, exists := node.Labels[k]; exists && curVal != v {
return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal)
}
}
if node.Labels == nil {
node.Labels = make(map[string]string)
}
for k, v := range topology {
node.Labels[k] = v
}
return node, true, nil
}
}
func (nim *nodeInfoManager) updateCSINode(
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetKubeClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateCSINode(csiKubeClient, driverName, driverNodeID, maxAttachLimit, topology); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINode: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUpdateCSINode(
csiKubeClient clientset.Interface,
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {
nim.lock.Lock()
defer nim.lock.Unlock()
nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{})
if nodeInfo == nil || errors.IsNotFound(err) {
nodeInfo, err = nim.CreateCSINode()
}
if err != nil {
return err
}
return nim.installDriverToCSINode(nodeInfo, driverName, driverNodeID, maxAttachLimit, topology)
}
func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error {
csiKubeClient := nim.volumeHost.GetKubeClient()
if csiKubeClient == nil {
return goerrors.New("error getting CSI client")
}
var lastErr error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if lastErr = nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); lastErr != nil {
klog.V(2).Infof("Failed to publish CSINode: %v", lastErr)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, lastErr)
}
return nil
}
func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient clientset.Interface) error {
nim.lock.Lock()
defer nim.lock.Unlock()
nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{})
if nodeInfo == nil || errors.IsNotFound(err) {
// CreateCSINode will set the annotation
_, err = nim.CreateCSINode()
return err
} else if err != nil {
return err
}
annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo)
if annotationModified {
_, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{})
return err
}
return nil
}
func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) {
csiKubeClient := nim.volumeHost.GetKubeClient()
if csiKubeClient == nil {
return nil, fmt.Errorf("error getting CSI client")
}
node, err := csiKubeClient.CoreV1().Nodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{})
if err != nil {
return nil, err
}
nodeInfo := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: string(nim.nodeName),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: nodeKind.Version,
Kind: nodeKind.Kind,
Name: node.Name,
UID: node.UID,
},
},
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{},
},
}
setMigrationAnnotation(nim.migratedPlugins, nodeInfo)
return csiKubeClient.StorageV1().CSINodes().Create(context.TODO(), nodeInfo, metav1.CreateOptions{})
}
func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1.CSINode) (modified bool) {
if migratedPlugins == nil {
return false
}
nodeInfoAnnotations := nodeInfo.GetAnnotations()
if nodeInfoAnnotations == nil {
nodeInfoAnnotations = map[string]string{}
}
var oldAnnotationSet sets.Set[string]
mpa := nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey]
tok := strings.Split(mpa, ",")
if len(mpa) == 0 {
oldAnnotationSet = sets.New[string]()
} else {
oldAnnotationSet = sets.New[string](tok...)
}
newAnnotationSet := sets.New[string]()
for pluginName, migratedFunc := range migratedPlugins {
if migratedFunc() {
newAnnotationSet.Insert(pluginName)
}
}
if oldAnnotationSet.Equal(newAnnotationSet) {
return false
}
nas := strings.Join(sets.List[string](newAnnotationSet), ",")
if len(nas) != 0 {
nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
} else {
delete(nodeInfoAnnotations, v1.MigratedPluginsAnnotationKey)
}
nodeInfo.Annotations = nodeInfoAnnotations
return true
}
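// For example, if the AWS EBS and GCE PD in-tree plugins report migrated, the
// CSINode annotation under v1.MigratedPluginsAnnotationKey becomes the sorted,
// comma-separated value "kubernetes.io/aws-ebs,kubernetes.io/gce-pd".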
// keepAllocatableCount returns true if and only if the new maxAttachLimit doesn't require a CSINode update
func keepAllocatableCount(driverInfoSpec storagev1.CSINodeDriver, maxAttachLimit int64) bool {
if maxAttachLimit == 0 {
return driverInfoSpec.Allocatable == nil || driverInfoSpec.Allocatable.Count == nil
}
return driverInfoSpec.Allocatable != nil && driverInfoSpec.Allocatable.Count != nil && int64(*driverInfoSpec.Allocatable.Count) == maxAttachLimit
}
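// For instance, a driver whose CSINode entry already has Allocatable.Count=39
// and that again reports maxAttachLimit=39 needs no update, whereas reporting
// 25 (or dropping the limit to 0) forces one.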
func (nim *nodeInfoManager) installDriverToCSINode(
nodeInfo *storagev1.CSINode,
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {
csiKubeClient := nim.volumeHost.GetKubeClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
topologyKeys := sets.KeySet[string, string](topology)
specModified := true
// Clone driver list, omitting the driver that matches the given driverName
newDriverSpecs := []storagev1.CSINodeDriver{}
for _, driverInfoSpec := range nodeInfo.Spec.Drivers {
if driverInfoSpec.Name == driverName {
if driverInfoSpec.NodeID == driverNodeID &&
sets.New[string](driverInfoSpec.TopologyKeys...).Equal(topologyKeys) &&
keepAllocatableCount(driverInfoSpec, maxAttachLimit) {
specModified = false
}
} else {
// Omit driverInfoSpec matching given driverName
newDriverSpecs = append(newDriverSpecs, driverInfoSpec)
}
}
annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo)
if !specModified && !annotationModified {
return nil
}
// Append new driver
driverSpec := storagev1.CSINodeDriver{
Name: driverName,
NodeID: driverNodeID,
TopologyKeys: sets.List[string](topologyKeys),
}
if maxAttachLimit > 0 {
if maxAttachLimit > math.MaxInt32 {
klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32)
maxAttachLimit = math.MaxInt32
}
m := int32(maxAttachLimit)
driverSpec.Allocatable = &storagev1.VolumeNodeResources{Count: &m}
} else if maxAttachLimit != 0 {
klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName)
}
newDriverSpecs = append(newDriverSpecs, driverSpec)
nodeInfo.Spec.Drivers = newDriverSpecs
_, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo, metav1.UpdateOptions{})
return err
}
func (nim *nodeInfoManager) uninstallDriverFromCSINode(
csiDriverName string) error {
csiKubeClient := nim.volumeHost.GetKubeClient()
if csiKubeClient == nil {
return fmt.Errorf("error getting CSI client")
}
var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUninstallDriverFromCSINode(csiKubeClient, csiDriverName); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("error updating CSINode: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs))
}
return nil
}
func (nim *nodeInfoManager) tryUninstallDriverFromCSINode(
csiKubeClient clientset.Interface,
csiDriverName string) error {
nim.lock.Lock()
defer nim.lock.Unlock()
nodeInfoClient := csiKubeClient.StorageV1().CSINodes()
nodeInfo, err := nodeInfoClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return nil
} else if err != nil {
return err
}
hasModified := false
// Uninstall CSINodeDriver with name csiDriverName
drivers := nodeInfo.Spec.Drivers[:0]
for _, driver := range nodeInfo.Spec.Drivers {
if driver.Name != csiDriverName {
drivers = append(drivers, driver)
} else {
// Found a driver with name csiDriverName
// Set hasModified to true because it will be removed
hasModified = true
}
}
if !hasModified {
// No changes, don't update
return nil
}
nodeInfo.Spec.Drivers = drivers
_, err = nodeInfoClient.Update(context.TODO(), nodeInfo, metav1.UpdateOptions{})
return err // do not wrap error
}
func removeMaxAttachLimit(driverName string) nodeUpdateFunc {
return func(node *v1.Node) (*v1.Node, bool, error) {
limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
capacityExists := false
if node.Status.Capacity != nil {
_, capacityExists = node.Status.Capacity[limitKey]
}
allocatableExists := false
if node.Status.Allocatable != nil {
_, allocatableExists = node.Status.Allocatable[limitKey]
}
if !capacityExists && !allocatableExists {
return node, false, nil
}
delete(node.Status.Capacity, limitKey)
if len(node.Status.Capacity) == 0 {
node.Status.Capacity = nil
}
delete(node.Status.Allocatable, limitKey)
if len(node.Status.Allocatable) == 0 {
node.Status.Allocatable = nil
}
return node, true, nil
}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csimigration
import (
"errors"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/component-base/featuregate"
csilibplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
)
// PluginNameMapper contains utility methods to retrieve names of plugins
// that support a spec, map in-tree <=> migrated CSI plugin names, etc.
type PluginNameMapper interface {
GetInTreePluginNameFromSpec(pv *v1.PersistentVolume, vol *v1.Volume) (string, error)
GetCSINameFromInTreeName(pluginName string) (string, error)
}
// PluginManager keeps track of migrated state of in-tree plugins
type PluginManager struct {
PluginNameMapper
featureGate featuregate.FeatureGate
}
// NewPluginManager returns a new PluginManager instance
func NewPluginManager(m PluginNameMapper, featureGate featuregate.FeatureGate) PluginManager {
return PluginManager{
PluginNameMapper: m,
featureGate: featureGate,
}
}
// IsMigrationCompleteForPlugin indicates whether CSI migration has been completed
// for a particular storage plugin. A complete migration will need to:
// 1. Enable CSIMigrationXX for the plugin
// 2. Unregister the in-tree plugin by setting the InTreePluginXXUnregister feature gate
func (pm PluginManager) IsMigrationCompleteForPlugin(pluginName string) bool {
// The CSIMigration feature and the plugin-specific InTreePluginUnregister feature flag
// must both be enabled for the plugin-specific migration completion to take effect
if !pm.IsMigrationEnabledForPlugin(pluginName) {
return false
}
switch pluginName {
case csilibplugins.AWSEBSInTreePluginName:
return true
case csilibplugins.GCEPDInTreePluginName:
return true
case csilibplugins.AzureFileInTreePluginName:
return true
case csilibplugins.AzureDiskInTreePluginName:
return true
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.VSphereInTreePluginName:
return true
case csilibplugins.PortworxVolumePluginName:
return pm.featureGate.Enabled(features.InTreePluginPortworxUnregister)
default:
return false
}
}
// IsMigrationEnabledForPlugin indicates whether CSI migration has been enabled
// for a particular storage plugin
func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool {
// The CSIMigration feature should be enabled along with the plugin-specific one.
// CSIMigration is GA and enabled by default.
switch pluginName {
case csilibplugins.AWSEBSInTreePluginName:
return true
case csilibplugins.GCEPDInTreePluginName:
return true
case csilibplugins.AzureFileInTreePluginName:
return true
case csilibplugins.AzureDiskInTreePluginName:
return true
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.VSphereInTreePluginName:
return true
case csilibplugins.PortworxVolumePluginName:
return pm.featureGate.Enabled(features.CSIMigrationPortworx)
default:
return false
}
}
// IsMigratable indicates whether CSI migration has been enabled for a volume
// plugin that the spec refers to
func (pm PluginManager) IsMigratable(spec *volume.Spec) (bool, error) {
if spec == nil {
return false, fmt.Errorf("could not find if plugin is migratable because volume spec is nil")
}
pluginName, _ := pm.GetInTreePluginNameFromSpec(spec.PersistentVolume, spec.Volume)
if pluginName == "" {
return false, nil
}
// found an in-tree plugin that supports the spec
return pm.IsMigrationEnabledForPlugin(pluginName), nil
}
// InTreeToCSITranslator performs translation of Volume sources for PV and Volume objects
// from references to in-tree plugins to migrated CSI plugins
type InTreeToCSITranslator interface {
TranslateInTreePVToCSI(logger klog.Logger, pv *v1.PersistentVolume) (*v1.PersistentVolume, error)
TranslateInTreeInlineVolumeToCSI(logger klog.Logger, volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error)
}
// TranslateInTreeSpecToCSI translates a volume spec (either PV or inline volume)
// supported by an in-tree plugin to CSI
func TranslateInTreeSpecToCSI(logger klog.Logger, spec *volume.Spec, podNamespace string, translator InTreeToCSITranslator) (*volume.Spec, error) {
var csiPV *v1.PersistentVolume
var err error
inlineVolume := false
if spec.PersistentVolume != nil {
csiPV, err = translator.TranslateInTreePVToCSI(logger, spec.PersistentVolume)
} else if spec.Volume != nil {
csiPV, err = translator.TranslateInTreeInlineVolumeToCSI(logger, spec.Volume, podNamespace)
inlineVolume = true
} else {
err = errors.New("not a valid volume spec")
}
if err != nil {
return nil, fmt.Errorf("failed to translate in-tree pv to CSI: %v", err)
}
return &volume.Spec{
Migrated: true,
PersistentVolume: csiPV,
ReadOnly: spec.ReadOnly,
InlineVolumeSpecForCSIMigration: inlineVolume,
}, nil
}
// CheckMigrationFeatureFlags checks that the configuration of feature flags related
// to CSI Migration is valid. It returns whether the migration is complete
// by looking up the pluginUnregister flag
func CheckMigrationFeatureFlags(f featuregate.FeatureGate, pluginMigration,
pluginUnregister featuregate.Feature) (migrationComplete bool, err error) {
// This handles in-tree plugins whose migration has finished
if f.Enabled(pluginMigration) && f.Enabled(pluginUnregister) {
return true, nil
}
return false, nil
}
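// A usage sketch with the Portworx gates referenced above:
//
//	complete, err := CheckMigrationFeatureFlags(featureGate,
//		features.CSIMigrationPortworx, features.InTreePluginPortworxUnregister)
//	if err != nil {
//		klog.Warningf("CSI migration flag check: %v", err)
//	}
//	_ = complete // true only when both gates are enabled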
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostpath
import (
"fmt"
"os"
"regexp"
"k8s.io/klog/v2"
"github.com/opencontainers/selinux/go-selinux"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/mount-utils"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
// The volumeConfig arg provides the ability to configure volume behavior.
// The hostPathPlugin stores the volumeConfig and hands it, when needed, to the func that recycles.
// Tests that exercise recycling should not use this func but instead use ProbeRecyclablePlugins() to override default behavior.
func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
return []volume.VolumePlugin{
&hostPathPlugin{
host: nil,
config: volumeConfig,
},
}
}
func FakeProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
return []volume.VolumePlugin{
&hostPathPlugin{
host: nil,
config: volumeConfig,
noTypeChecker: true,
},
}
}
type hostPathPlugin struct {
host volume.VolumeHost
config volume.VolumeConfig
noTypeChecker bool
}
var _ volume.VolumePlugin = &hostPathPlugin{}
var _ volume.PersistentVolumePlugin = &hostPathPlugin{}
var _ volume.RecyclableVolumePlugin = &hostPathPlugin{}
var _ volume.DeletableVolumePlugin = &hostPathPlugin{}
var _ volume.ProvisionableVolumePlugin = &hostPathPlugin{}
const (
hostPathPluginName = "kubernetes.io/host-path"
)
func (plugin *hostPathPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *hostPathPlugin) GetPluginName() string {
return hostPathPluginName
}
func (plugin *hostPathPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.Path, nil
}
func (plugin *hostPathPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath != nil) ||
(spec.Volume != nil && spec.Volume.HostPath != nil)
}
func (plugin *hostPathPlugin) RequiresRemount(spec *volume.Spec) bool {
return false
}
func (plugin *hostPathPlugin) SupportsMountOption() bool {
return false
}
func (plugin *hostPathPlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return false, nil
}
func (plugin *hostPathPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
hostPathVolumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
path := hostPathVolumeSource.Path
pathType := new(v1.HostPathType)
if hostPathVolumeSource.Type == nil {
*pathType = v1.HostPathUnset
} else {
pathType = hostPathVolumeSource.Type
}
kvh, ok := plugin.host.(volume.KubeletVolumeHost)
if !ok {
return nil, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
}
return &hostPathMounter{
hostPath: &hostPath{path: path, pathType: pathType},
readOnly: readOnly,
mounter: plugin.host.GetMounter(),
hu: kvh.GetHostUtil(),
noTypeChecker: plugin.noTypeChecker,
}, nil
}
func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &hostPathUnmounter{&hostPath{
path: "",
}}, nil
}
// Recycle recycles (scrubs clean) a HostPath volume.
// Recycle blocks until the pod has completed or any error occurs.
// HostPath recycling only works in single node clusters and is meant for testing purposes only.
func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
return fmt.Errorf("spec.PersistentVolume.Spec.HostPath is nil")
}
pod := plugin.config.RecyclerPodTemplate
timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
// overrides
pod.Spec.ActiveDeadlineSeconds = &timeout
pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: spec.PersistentVolume.Spec.HostPath.Path,
},
}
return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
}
func (plugin *hostPathPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return newDeleter(spec, plugin.host)
}
func (plugin *hostPathPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
if !plugin.config.ProvisioningEnabled {
return nil, fmt.Errorf("provisioning in volume plugin %q is disabled", plugin.GetPluginName())
}
return newProvisioner(options, plugin.host, plugin)
}
func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
hostPathVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: volumeName,
},
},
}
return volume.ReconstructedVolume{
Spec: volume.NewSpecFromVolume(hostPathVolume),
}, nil
}
func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
}
path := spec.PersistentVolume.Spec.HostPath.Path
return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
}
func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) {
return &hostPathProvisioner{options: options, host: host, plugin: plugin, basePath: "hostpath_pv"}, nil
}
// HostPath volumes represent a bare host file or directory mount.
// The directory at the specified path is exposed directly to the container.
type hostPath struct {
path string
pathType *v1.HostPathType
volume.MetricsNil
}
func (hp *hostPath) GetPath() string {
return hp.path
}
type hostPathMounter struct {
*hostPath
readOnly bool
mounter mount.Interface
hu hostutil.HostUtils
noTypeChecker bool
}
var _ volume.Mounter = &hostPathMounter{}
func (b *hostPathMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: false,
SELinuxRelabel: false,
}
}
// SetUp validates the path and, unless type checking is disabled, verifies that the host path matches the requested type.
func (b *hostPathMounter) SetUp(mounterArgs volume.MounterArgs) error {
err := validation.ValidatePathNoBacksteps(b.GetPath())
if err != nil {
return fmt.Errorf("invalid HostPath `%s`: %v", b.GetPath(), err)
}
if *b.pathType == v1.HostPathUnset {
return nil
}
if b.noTypeChecker {
return nil
} else {
return checkType(b.GetPath(), b.pathType, b.hu)
}
}
// SetUpAt does not make sense for host paths - probably programmer error.
func (b *hostPathMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
return fmt.Errorf("SetUpAt() does not make sense for host paths")
}
func (b *hostPathMounter) GetPath() string {
return b.path
}
type hostPathUnmounter struct {
*hostPath
}
var _ volume.Unmounter = &hostPathUnmounter{}
// TearDown does nothing.
func (c *hostPathUnmounter) TearDown() error {
return nil
}
// TearDownAt does not make sense for host paths - probably programmer error.
func (c *hostPathUnmounter) TearDownAt(dir string) error {
return fmt.Errorf("TearDownAt() does not make sense for host paths")
}
// hostPathProvisioner implements a Provisioner for the HostPath plugin
// This implementation is meant for testing only and only works in a single node cluster.
type hostPathProvisioner struct {
host volume.VolumeHost
options volume.VolumeOptions
plugin *hostPathPlugin
basePath string
}
// Provision for hostPath simply creates a local /tmp/<basePath>/<uuid> directory as a new PersistentVolume, by default /tmp/hostpath_pv/<uuid>.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if util.CheckPersistentVolumeClaimModeBlock(r.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", r.plugin.GetPluginName())
}
fullpath := fmt.Sprintf("/tmp/%s/%s", r.basePath, uuid.NewUUID())
capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: r.options.PVName,
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
AccessModes: r.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): capacity,
},
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fullpath,
},
},
},
}
if len(r.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
}
if err := os.MkdirAll(pv.Spec.HostPath.Path, 0750); err != nil {
return nil, err
}
if selinux.GetEnabled() {
err := selinux.SetFileLabel(pv.Spec.HostPath.Path, config.KubeletContainersSharedSELinuxLabel)
if err != nil {
return nil, fmt.Errorf("failed to set selinux label for %q: %v", pv.Spec.HostPath.Path, err)
}
}
return pv, nil
}
// hostPathDeleter deletes a hostPath PV from the cluster.
// This deleter only works on a single host cluster and is for testing purposes only.
type hostPathDeleter struct {
name string
path string
host volume.VolumeHost
volume.MetricsNil
}
func (r *hostPathDeleter) GetPath() string {
return r.path
}
// Delete for hostPath removes the local directory so long as it is beneath /tmp/*.
// THIS IS FOR TESTING AND LOCAL DEVELOPMENT ONLY! This message should scare you away from using
// this deleter for anything other than development and testing.
func (r *hostPathDeleter) Delete() error {
regexp := regexp.MustCompile("/tmp/.+")
if !regexp.MatchString(r.GetPath()) {
return fmt.Errorf("host_path deleter only supports /tmp/.+ but received provided %s", r.GetPath())
}
return os.RemoveAll(r.GetPath())
}
func getVolumeSource(spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.HostPath != nil {
return spec.Volume.HostPath, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.HostPath != nil {
return spec.PersistentVolume.Spec.HostPath, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("spec does not reference an HostPath volume type")
}
type hostPathTypeChecker interface {
Exists() bool
IsFile() bool
MakeFile() error
IsDir() bool
MakeDir() error
IsBlock() bool
IsChar() bool
IsSocket() bool
GetPath() string
}
type fileTypeChecker struct {
path string
hu hostutil.HostUtils
}
func (ftc *fileTypeChecker) Exists() bool {
exists, err := ftc.hu.PathExists(ftc.path)
return exists && err == nil
}
func (ftc *fileTypeChecker) IsFile() bool {
if !ftc.Exists() {
return false
}
pathType, err := ftc.hu.GetFileType(ftc.path)
if err != nil {
return false
}
return string(pathType) == string(v1.HostPathFile)
}
func (ftc *fileTypeChecker) MakeFile() error {
return makeFile(ftc.path)
}
func (ftc *fileTypeChecker) IsDir() bool {
if !ftc.Exists() {
return false
}
pathType, err := ftc.hu.GetFileType(ftc.path)
if err != nil {
return false
}
return string(pathType) == string(v1.HostPathDirectory)
}
func (ftc *fileTypeChecker) MakeDir() error {
return makeDir(ftc.path)
}
func (ftc *fileTypeChecker) IsBlock() bool {
blkDevType, err := ftc.hu.GetFileType(ftc.path)
if err != nil {
return false
}
return string(blkDevType) == string(v1.HostPathBlockDev)
}
func (ftc *fileTypeChecker) IsChar() bool {
charDevType, err := ftc.hu.GetFileType(ftc.path)
if err != nil {
return false
}
return string(charDevType) == string(v1.HostPathCharDev)
}
func (ftc *fileTypeChecker) IsSocket() bool {
socketType, err := ftc.hu.GetFileType(ftc.path)
if err != nil {
return false
}
return string(socketType) == string(v1.HostPathSocket)
}
func (ftc *fileTypeChecker) GetPath() string {
return ftc.path
}
func newFileTypeChecker(path string, hu hostutil.HostUtils) hostPathTypeChecker {
return &fileTypeChecker{path: path, hu: hu}
}
// checkType checks whether the given path is the exact pathType
func checkType(path string, pathType *v1.HostPathType, hu hostutil.HostUtils) error {
return checkTypeInternal(newFileTypeChecker(path, hu), pathType)
}
func checkTypeInternal(ftc hostPathTypeChecker, pathType *v1.HostPathType) error {
switch *pathType {
case v1.HostPathDirectoryOrCreate:
if !ftc.Exists() {
return ftc.MakeDir()
}
fallthrough
case v1.HostPathDirectory:
if !ftc.IsDir() {
return fmt.Errorf("hostPath type check failed: %s is not a directory", ftc.GetPath())
}
case v1.HostPathFileOrCreate:
if !ftc.Exists() {
return ftc.MakeFile()
}
fallthrough
case v1.HostPathFile:
if !ftc.IsFile() {
return fmt.Errorf("hostPath type check failed: %s is not a file", ftc.GetPath())
}
case v1.HostPathSocket:
if !ftc.IsSocket() {
return fmt.Errorf("hostPath type check failed: %s is not a socket file", ftc.GetPath())
}
case v1.HostPathCharDev:
if !ftc.IsChar() {
return fmt.Errorf("hostPath type check failed: %s is not a character device", ftc.GetPath())
}
case v1.HostPathBlockDev:
if !ftc.IsBlock() {
return fmt.Errorf("hostPath type check failed: %s is not a block device", ftc.GetPath())
}
default:
return fmt.Errorf("%s is an invalid volume type", *pathType)
}
return nil
}
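// A usage sketch (the path is hypothetical; hu is a hostutil.HostUtils):
//
//	pt := v1.HostPathDirectoryOrCreate
//	if err := checkType("/var/data", &pt, hu); err != nil {
//		return err // the path exists but is not a directory, or MakeDir failed
//	}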
// makeDir creates a new directory.
// If pathname already exists as a directory, no error is returned.
// If pathname already exists as a file, an error is returned.
func makeDir(pathname string) error {
err := os.MkdirAll(pathname, os.FileMode(0755))
if err != nil {
if !os.IsExist(err) {
return err
}
}
return nil
}
// makeFile creates an empty file.
// If pathname already exists, whether a file or directory, no error is returned.
func makeFile(pathname string) error {
f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644))
if f != nil {
f.Close()
}
if err != nil {
if !os.IsExist(err) {
return err
}
}
return nil
}
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"io"
"os"
"runtime"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
)
var _ MetricsProvider = &metricsBlock{}
// metricsBlock represents a MetricsProvider that detects the size of the
// BlockMode Volume.
type metricsBlock struct {
// the device node where the volume is attached to.
device string
}
// NewMetricsBlock creates a new metricsBlock with the device node of the
// Volume.
func NewMetricsBlock(device string) MetricsProvider {
return &metricsBlock{device}
}
// See MetricsProvider.GetMetrics
// GetMetrics detects the size of the BlockMode volume for the device node
// where the Volume is attached.
//
// Note that only the capacity of the device can be detected with standard
// tools. Storage systems may have more information that they can provide by
// going through specialized APIs.
func (mb *metricsBlock) GetMetrics() (*Metrics, error) {
startTime := time.Now()
defer servermetrics.CollectVolumeStatCalDuration("block", startTime)
// TODO: Windows does not yet support VolumeMode=Block
if runtime.GOOS == "windows" {
return nil, NewNotImplementedError("Windows does not support Block volumes")
}
metrics := &Metrics{Time: metav1.Now()}
if mb.device == "" {
return metrics, NewNoPathDefinedError()
}
err := mb.getBlockInfo(metrics)
if err != nil {
return metrics, err
}
return metrics, nil
}
// getBlockInfo fetches metrics.Capacity by opening the device and seeking to
// the end.
func (mb *metricsBlock) getBlockInfo(metrics *Metrics) error {
dev, err := os.Open(mb.device)
if err != nil {
return fmt.Errorf("unable to open device %q: %w", mb.device, err)
}
defer dev.Close()
end, err := dev.Seek(0, io.SeekEnd)
if err != nil {
return fmt.Errorf("failed to detect size of %q: %w", mb.device, err)
}
metrics.Capacity = resource.NewQuantity(end, resource.BinarySI)
return nil
}
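// Seeking to io.SeekEnd is used instead of a platform ioctl (such as Linux's
// BLKGETSIZE64) because it is portable across unix-like systems and requires
// no cgo; the returned offset is the device size in bytes.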
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"sync"
"sync/atomic"
)
var _ MetricsProvider = &cachedMetrics{}
// cachedMetrics represents a MetricsProvider that wraps another provider and
// caches the result.
type cachedMetrics struct {
wrapped MetricsProvider
resultError error
resultMetrics *Metrics
once cacheOnce
}
// NewCachedMetrics creates a new cachedMetrics wrapping another
// MetricsProvider and caching the results.
func NewCachedMetrics(provider MetricsProvider) MetricsProvider {
return &cachedMetrics{wrapped: provider}
}
// GetMetrics runs the wrapped metrics provider's GetMetrics method once and
// caches the result. Will not cache result if there is an error.
// See MetricsProvider.GetMetrics
func (md *cachedMetrics) GetMetrics() (*Metrics, error) {
md.once.cache(func() error {
md.resultMetrics, md.resultError = md.wrapped.GetMetrics()
return md.resultError
})
return md.resultMetrics, md.resultError
}
// Copied from sync.Once but we don't want to cache the results if there is an
// error
type cacheOnce struct {
m sync.Mutex
done uint32
}
// Copied from sync.Once but we don't want to cache the results if there is an
// error
func (o *cacheOnce) cache(f func() error) {
if atomic.LoadUint32(&o.done) == 1 {
return
}
// Slow-path.
o.m.Lock()
defer o.m.Unlock()
if o.done == 0 {
err := f()
if err == nil {
atomic.StoreUint32(&o.done, 1)
}
}
}
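// The difference from sync.Once in a sketch (hypothetical errors): a failed
// first call is retried instead of being cached forever:
//
//	var o cacheOnce
//	o.cache(func() error { return errors.New("transient") }) // f runs, result not cached
//	o.cache(func() error { return nil })                     // f runs again, now cached
//	o.cache(func() error { return nil })                     // no-op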
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsDu{}
// metricsDu represents a MetricsProvider that calculates the used and
// available Volume space by calling fs.DiskUsage() and gathering
// filesystem info for the Volume path.
type metricsDu struct {
// the directory path the volume is mounted to.
path string
}
// NewMetricsDu creates a new metricsDu with the Volume path.
func NewMetricsDu(path string) MetricsProvider {
return &metricsDu{path}
}
// GetMetrics calculates the volume usage and device free space by calling
// fs.DiskUsage() and gathering filesystem info for the Volume path.
// See MetricsProvider.GetMetrics
func (md *metricsDu) GetMetrics() (*Metrics, error) {
metrics := &Metrics{Time: metav1.Now()}
if md.path == "" {
return metrics, NewNoPathDefinedError()
}
err := md.getDiskUsage(metrics)
if err != nil {
return metrics, err
}
err = md.getFsInfo(metrics)
if err != nil {
return metrics, err
}
return metrics, nil
}
// getDiskUsage writes metrics.Used and metric.InodesUsed from fs.DiskUsage
func (md *metricsDu) getDiskUsage(metrics *Metrics) error {
usage, err := fs.DiskUsage(md.path)
if err != nil {
return err
}
metrics.Used = resource.NewQuantity(usage.Bytes, resource.BinarySI)
metrics.InodesUsed = resource.NewQuantity(usage.Inodes, resource.BinarySI)
return nil
}
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
available, capacity, _, inodes, inodesFree, _, err := fs.Info(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}
metrics.Available = resource.NewQuantity(available, resource.BinarySI)
metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
)
const (
// ErrCodeNotSupported code for NotSupported Errors.
ErrCodeNotSupported int = iota + 1
// ErrCodeNoPathDefined code for NoPathDefined Errors.
ErrCodeNoPathDefined
// ErrCodeFsInfoFailed code for FsInfoFailed Errors.
ErrCodeFsInfoFailed
)
// NewNotSupportedError creates a new MetricsError with code NotSupported.
func NewNotSupportedError() *MetricsError {
return &MetricsError{
Code: ErrCodeNotSupported,
Msg: "metrics are not supported for MetricsNil Volumes",
}
}
// NewNotImplementedError creates a new MetricsError with code NotSupported.
func NewNotImplementedError(reason string) *MetricsError {
return &MetricsError{
Code: ErrCodeNotSupported,
Msg: fmt.Sprintf("metrics support is not implemented: %s", reason),
}
}
// NewNotSupportedErrorWithDriverName creates a new MetricsError with code NotSupported.
// driver name is added to the error message.
func NewNotSupportedErrorWithDriverName(name string) *MetricsError {
return &MetricsError{
Code: ErrCodeNotSupported,
Msg: fmt.Sprintf("metrics are not supported for %s volumes", name),
}
}
// NewNoPathDefinedError creates a new MetricsError with code NoPathDefined.
func NewNoPathDefinedError() *MetricsError {
return &MetricsError{
Code: ErrCodeNoPathDefined,
Msg: "no path defined for disk usage metrics.",
}
}
// NewFsInfoFailedError creates a new MetricsError with code FsInfoFailed.
func NewFsInfoFailedError(err error) *MetricsError {
return &MetricsError{
Code: ErrCodeFsInfoFailed,
Msg: fmt.Sprintf("failed to get FsInfo due to error %v", err),
}
}
// MetricsError is used to distinguish different metrics errors.
type MetricsError struct {
Code int
Msg string
}
func (e *MetricsError) Error() string {
return e.Msg
}
// IsNotSupported returns true if and only if err is a NotSupported MetricsError.
func IsNotSupported(err error) bool {
return isErrCode(err, ErrCodeNotSupported)
}
func isErrCode(err error, code int) bool {
if err == nil {
return false
}
if e, ok := err.(*MetricsError); ok {
return e.Code == code
}
return false
}
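// Illustrative sketch (editor's addition, not part of the original source):
// callers typically branch on the error code rather than the message. Only
// IsNotSupported is exported here; the other codes follow the same pattern
// via isErrCode:
//
//	if _, err := provider.GetMetrics(); err != nil {
//		if IsNotSupported(err) {
//			// this volume type exposes no metrics; skip rather than retry
//		} else {
//			// a genuine failure worth surfacing
//		}
//	}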
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
var _ MetricsProvider = &MetricsNil{}
// MetricsNil represents a MetricsProvider that does not support returning
// Metrics. It serves as a placeholder for Volumes that do not yet support
// metrics.
type MetricsNil struct{}
// SupportsMetrics returns false for the MetricsNil type.
func (*MetricsNil) SupportsMetrics() bool {
return false
}
// GetMetrics returns an empty Metrics and an error.
// See MetricsProvider.GetMetrics
func (*MetricsNil) GetMetrics() (*Metrics, error) {
return &Metrics{}, NewNotSupportedError()
}
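// Illustrative sketch (editor's addition, not part of the original source):
// a volume implementation without metrics support can embed MetricsNil to
// satisfy MetricsProvider. The type name below is hypothetical:
//
//	type fooVolume struct {
//		MetricsNil
//		// other fields...
//	}
//
// fooVolume then reports SupportsMetrics() == false, and its GetMetrics
// returns a NotSupported error detectable with IsNotSupported.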
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsStatFS{}
// metricsStatFS represents a MetricsProvider that calculates the used and available
// Volume space by stat'ing and gathering filesystem info for the Volume path.
type metricsStatFS struct {
// the directory path the volume is mounted to.
path string
}
// NewMetricsStatFS creates a new metricsStatFS with the Volume path.
func NewMetricsStatFS(path string) MetricsProvider {
return &metricsStatFS{path}
}
// GetMetrics calculates the volume usage and device free space by calling
// fs.Info() for the Volume path.
// See MetricsProvider.GetMetrics
func (md *metricsStatFS) GetMetrics() (*Metrics, error) {
startTime := time.Now()
defer servermetrics.CollectVolumeStatCalDuration("statfs", startTime)
metrics := &Metrics{Time: metav1.Now()}
if md.path == "" {
return metrics, NewNoPathDefinedError()
}
err := md.getFsInfo(metrics)
if err != nil {
return metrics, err
}
return metrics, nil
}
// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.Info(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}
metrics.Available = resource.NewQuantity(available, resource.BinarySI)
metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
metrics.Used = resource.NewQuantity(usage, resource.BinarySI)
metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
metrics.InodesUsed = resource.NewQuantity(inodesUsed, resource.BinarySI)
return nil
}
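// Editor's note (not part of the original source): metricsStatFS is a single
// statfs call and is cheap, but it reports filesystem-wide numbers, so it is
// best suited to volumes backed by a dedicated filesystem; metricsDu walks
// the directory tree and reflects only that directory's usage. A hypothetical
// selection sketch:
//
//	var provider MetricsProvider
//	if volumeHasDedicatedFilesystem { // hypothetical condition
//		provider = NewMetricsStatFS(path)
//	} else {
//		provider = NewMetricsDu(path)
//	}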
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
)
type noopExpandableVolumePluginInstance struct {
spec *Spec
}
var _ ExpandableVolumePlugin = &noopExpandableVolumePluginInstance{}
func (n *noopExpandableVolumePluginInstance) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
return newSize, nil
}
func (n *noopExpandableVolumePluginInstance) Init(host VolumeHost) error {
return nil
}
func (n *noopExpandableVolumePluginInstance) GetPluginName() string {
return n.spec.KubeletExpandablePluginName()
}
func (n *noopExpandableVolumePluginInstance) GetVolumeName(spec *Spec) (string, error) {
return n.spec.Name(), nil
}
func (n *noopExpandableVolumePluginInstance) CanSupport(spec *Spec) bool {
return true
}
func (n *noopExpandableVolumePluginInstance) RequiresRemount(spec *Spec) bool {
return false
}
func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error) {
return nil, nil
}
func (n *noopExpandableVolumePluginInstance) NewUnmounter(name string, podUID types.UID) (Unmounter, error) {
return nil, nil
}
func (n *noopExpandableVolumePluginInstance) ConstructVolumeSpec(volumeName, mountPath string) (ReconstructedVolume, error) {
return ReconstructedVolume{Spec: n.spec}, nil
}
func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool {
return true
}
func (n *noopExpandableVolumePluginInstance) RequiresFSResize() bool {
return true
}
func (n *noopExpandableVolumePluginInstance) SupportsSELinuxContextMount(spec *Spec) (bool, error) {
return false, nil
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"k8s.io/kubernetes/pkg/volume/util/subpath"
)
// ProbeOperation represents a type of operation for probing volume plugins.
type ProbeOperation uint32
// ProbeEvent represents an event triggered during a volume plugin probe operation.
type ProbeEvent struct {
Plugin VolumePlugin // VolumePlugin that was added/updated/removed. If ProbeEvent.Op is 'ProbeRemove', Plugin should be nil
PluginName string
Op ProbeOperation // The operation to the plugin
}
const (
// VolumeParameterFSType is a common parameter which can be specified in a StorageClass to specify the desired FSType.
// Provisioners SHOULD implement support for this if they are block-device based.
// Must be a filesystem type supported by the host operating system,
// e.g. "ext4", "xfs", "ntfs". The default value depends on the provisioner.
VolumeParameterFSType = "fstype"
// ProbeAddOrUpdate represents an operation where a plugin is added or updated.
ProbeAddOrUpdate ProbeOperation = 1 << iota
// ProbeRemove represents an operation where a previously added plugin is
// removed.
ProbeRemove
)
// ErrNoPluginMatched is used to return when no volume plugin matches the requested type.
var ErrNoPluginMatched = errors.New("no volume plugin matched")
// VolumeOptions contains option information about a volume.
type VolumeOptions struct {
// The attributes below are required by volume.Provisioner
// TODO: refactor all of this out of volumes when an admin can configure
// many kinds of provisioners.
// Reclamation policy for a persistent volume
PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy
// Mount options for a persistent volume
MountOptions []string
// Suggested PV.Name of the PersistentVolume to provision.
// This is a generated name guaranteed to be unique in Kubernetes cluster.
// If you choose not to use it as volume name, ensure uniqueness by either
// combining it with your value or create unique values of your own.
PVName string
// PVC is reference to the claim that lead to provisioning of a new PV.
// Provisioners *must* create a PV that would be matched by this PVC,
// i.e. with required capacity, accessMode, labels matching PVC.Selector and
// so on.
PVC *v1.PersistentVolumeClaim
// Volume provisioning parameters from StorageClass
Parameters map[string]string
}
// NodeResizeOptions contain options to be passed for node expansion.
type NodeResizeOptions struct {
VolumeSpec *Spec
// DevicePath is the location of the actual device on the node. In the case
// of CSI this could simply be the volumeID.
DevicePath string
// DeviceMountPath location where device is mounted on the node. If volume type
// is attachable - this would be global mount path otherwise
// it would be location where volume was mounted for the pod
DeviceMountPath string
// DeviceStagePath stores the location where the volume is staged
DeviceStagePath string
NewSize resource.Quantity
OldSize resource.Quantity
}
// DynamicPluginProber is an interface that defines methods for probing dynamic volume plugins.
type DynamicPluginProber interface {
Init() error
// Probe aggregates events for successful drivers and errors for failed drivers.
Probe() (events []ProbeEvent, err error)
}
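// Illustrative sketch (editor's addition, not part of the original source):
// a minimal DynamicPluginProber that reports a fixed set of plugins exactly
// once. Type and field names are hypothetical:
//
//	type staticProber struct {
//		plugins []VolumePlugin
//		fired   bool
//	}
//
//	func (p *staticProber) Init() error { return nil }
//
//	func (p *staticProber) Probe() ([]ProbeEvent, error) {
//		if p.fired {
//			return nil, nil // each event fires once; see refreshProbedPlugins
//		}
//		p.fired = true
//		events := make([]ProbeEvent, 0, len(p.plugins))
//		for _, plugin := range p.plugins {
//			events = append(events, ProbeEvent{
//				Plugin:     plugin,
//				PluginName: plugin.GetPluginName(),
//				Op:         ProbeAddOrUpdate,
//			})
//		}
//		return events, nil
//	}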
// VolumePlugin is an interface to volume plugins that can be used on a
// kubernetes node (e.g. by kubelet) to instantiate and manage volumes.
type VolumePlugin interface {
// Init initializes the plugin. This will be called exactly once
// before any New* calls are made - implementations of plugins may
// depend on this.
Init(host VolumeHost) error
// Name returns the plugin's name. Plugins must use namespaced names
// such as "example.com/volume" and contain exactly one '/' character.
// The "kubernetes.io" namespace is reserved for plugins which are
// bundled with kubernetes.
GetPluginName() string
// GetVolumeName returns the name/ID to uniquely identify the actual
// backing device, directory, path, etc. referenced by the specified volume
// spec.
// For Attachable volumes, this value must be able to be passed back to
// volume Detach methods to identify the device to act on.
// If the plugin does not support the given spec, this returns an error.
GetVolumeName(spec *Spec) (string, error)
// CanSupport tests whether the plugin supports a given volume
// specification from the API. The spec pointer should be considered
// const.
CanSupport(spec *Spec) bool
// RequiresRemount returns true if this plugin requires mount calls to be
// reexecuted. Atomically updating volumes, like Downward API, depend on
// this to update the contents of the volume.
RequiresRemount(spec *Spec) bool
// NewMounter creates a new volume.Mounter from an API specification.
// Ownership of the spec pointer is *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error)
// NewUnmounter creates a new volume.Unmounter from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
NewUnmounter(name string, podUID types.UID) (Unmounter, error)
// ConstructVolumeSpec constructs a volume spec based on the given volume name
// and volumePath. The spec may have incomplete information due to limited
// information from input. This function is used by volume manager to reconstruct
// volume spec by reading the volume directories from disk
ConstructVolumeSpec(volumeName, volumePath string) (ReconstructedVolume, error)
// SupportsMountOption returns true if volume plugins supports Mount options
SupportsMountOption() bool
// SupportsSELinuxContextMount returns true if volume plugins supports
// mount -o context=XYZ for a given volume.
SupportsSELinuxContextMount(spec *Spec) (bool, error)
}
// PersistentVolumePlugin is an extended interface of VolumePlugin and is used
// by volumes that want to provide long term persistence of data
type PersistentVolumePlugin interface {
VolumePlugin
// GetAccessModes describes the ways a given volume can be accessed/mounted.
GetAccessModes() []v1.PersistentVolumeAccessMode
}
// RecyclableVolumePlugin is an extended interface of VolumePlugin and is used
// by persistent volumes that want to be recycled before being made available
// again to new claims
type RecyclableVolumePlugin interface {
VolumePlugin
// Recycle knows how to reclaim this
// resource after the volume's release from a PersistentVolumeClaim.
// Recycle will use the provided recorder to write any events that might be
// interesting to user. It's expected that caller will pass these events to
// the PV being recycled.
Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error
}
// DeletableVolumePlugin is an extended interface of VolumePlugin and is used
// by persistent volumes that want to be deleted from the cluster after their
// release from a PersistentVolumeClaim.
type DeletableVolumePlugin interface {
VolumePlugin
// NewDeleter creates a new volume.Deleter which knows how to delete this
// resource in accordance with the underlying storage provider after the
// volume's release from a claim
NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
}
// ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
// used to create volumes for the cluster.
type ProvisionableVolumePlugin interface {
VolumePlugin
// NewProvisioner creates a new volume.Provisioner which knows how to
// create PersistentVolumes in accordance with the plugin's underlying
// storage provider
NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error)
}
// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
// to a node before mounting.
type AttachableVolumePlugin interface {
DeviceMountableVolumePlugin
NewAttacher() (Attacher, error)
NewDetacher() (Detacher, error)
// CanAttach tests if provided volume spec is attachable
CanAttach(spec *Spec) (bool, error)
VerifyExhaustedResource(spec *Spec) bool
}
// DeviceMountableVolumePlugin is an extended interface of VolumePlugin and is used
// for volumes that require mounting a device to a node before the volume can
// be bound to a pod.
type DeviceMountableVolumePlugin interface {
VolumePlugin
NewDeviceMounter() (DeviceMounter, error)
NewDeviceUnmounter() (DeviceUnmounter, error)
GetDeviceMountRefs(deviceMountPath string) ([]string, error)
// CanDeviceMount determines if device in volume.Spec is mountable
CanDeviceMount(spec *Spec) (bool, error)
}
// ExpandableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that can be
// expanded via control-plane ExpandVolumeDevice call.
type ExpandableVolumePlugin interface {
VolumePlugin
ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error)
RequiresFSResize() bool
}
// NodeExpandableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that
// require expansion on the node via NodeExpand call.
type NodeExpandableVolumePlugin interface {
VolumePlugin
RequiresFSResize() bool
// NodeExpand expands volume on given deviceMountPath and returns true if resize is successful.
NodeExpand(resizeOptions NodeResizeOptions) (bool, error)
}
// BlockVolumePlugin is an extended interface of VolumePlugin and is used for block volume support.
type BlockVolumePlugin interface {
VolumePlugin
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
// Ownership of the spec pointer is *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod) (BlockVolumeMapper, error)
// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
NewBlockVolumeUnmapper(name string, podUID types.UID) (BlockVolumeUnmapper, error)
// ConstructBlockVolumeSpec constructs a volume spec based on the given
// podUID, volume name and a pod device map path.
// The spec may have incomplete information due to limited information
// from input. This function is used by volume manager to reconstruct
// volume spec by reading the volume directories from disk.
ConstructBlockVolumeSpec(podUID types.UID, volumeName, volumePath string) (*Spec, error)
}
// TODO(#14217)
// As part of the Volume Host refactor we are starting to create Volume Hosts
// for specific hosts. New methods for each specific host can be added here.
// Currently consumers will do type assertions to get the specific type of Volume
// Host; however, the end result should be that specific Volume Hosts are passed
// to the specific functions they are needed in (instead of using a catch-all
// VolumeHost interface)
// KubeletVolumeHost is a Kubelet specific interface that plugins can use to access the kubelet.
type KubeletVolumeHost interface {
// SetKubeletError lets plugins set an error on the Kubelet runtime status
// that will cause the Kubelet to post NotReady status with the error message provided
SetKubeletError(err error)
// GetInformerFactory returns the informer factory for CSIDriverLister
GetInformerFactory() informers.SharedInformerFactory
// CSIDriverLister returns the informer lister for the CSIDriver API Object
CSIDriverLister() storagelistersv1.CSIDriverLister
// CSIDriversSynced returns the informer synced for the CSIDriver API Object
CSIDriversSynced() cache.InformerSynced
// WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
WaitForCacheSync() error
// Returns hostutil.HostUtils
GetHostUtil() hostutil.HostUtils
// Returns trust anchors from the named ClusterTrustBundle.
GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error)
// Returns trust anchors from the ClusterTrustBundles selected by signer
// name and label selector.
GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error)
// Returns the credential bundle for the specified podCertificate projected volume source.
GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error)
}
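// Illustrative sketch (editor's addition, not part of the original source):
// per the TODO above, consumers currently reach host-specific functionality
// through type assertions:
//
//	if kletHost, ok := host.(KubeletVolumeHost); ok {
//		kletHost.SetKubeletError(fmt.Errorf("example: CSI driver not ready"))
//	}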
// CSIDriverVolumeHost is a volume host that has access to CSIDriverLister
type CSIDriverVolumeHost interface {
// CSIDriverLister returns the informer lister for the CSIDriver API Object
CSIDriverLister() storagelistersv1.CSIDriverLister
}
// AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
// to access methods on the Attach Detach Controller.
type AttachDetachVolumeHost interface {
CSIDriverVolumeHost
// CSINodeLister returns the informer lister for the CSINode API Object
CSINodeLister() storagelistersv1.CSINodeLister
// VolumeAttachmentLister returns the informer lister for the VolumeAttachment API Object
VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister
// IsAttachDetachController is an interface marker to strictly tie AttachDetachVolumeHost
// to the attachDetachController
IsAttachDetachController() bool
}
// VolumeHost is an interface that plugins can use to access the kubelet.
type VolumeHost interface {
// GetPluginDir returns the absolute path to a directory under which
// a given plugin may store data. This directory might not actually
// exist on disk yet. For plugin data that is per-pod, see
// GetPodPluginDir().
GetPluginDir(pluginName string) string
// GetVolumeDevicePluginDir returns the absolute path to a directory
// under which a given plugin may store data.
// ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/
GetVolumeDevicePluginDir(pluginName string) string
// GetPodsDir returns the absolute path to a directory where all the pods
// information is stored
GetPodsDir() string
// GetPodVolumeDir returns the absolute path to a directory which
// represents the named volume under the named plugin for the given
// pod. If the specified pod does not exist, the result of this call
// might not exist.
GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string
// GetPodPluginDir returns the absolute path to a directory under which
// a given plugin may store data for a given pod. If the specified pod
// does not exist, the result of this call might not exist. This
// directory might not actually exist on disk yet.
GetPodPluginDir(podUID types.UID, pluginName string) string
// GetPodVolumeDeviceDir returns the absolute path to a directory which
// represents the named plugin for the given pod.
// If the specified pod does not exist, the result of this call
// might not exist.
// ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/
GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string
// GetKubeClient returns a client interface
GetKubeClient() clientset.Interface
// NewWrapperMounter finds an appropriate plugin with which to handle
// the provided spec. This is used to implement volume plugins which
// "wrap" other plugins. For example, the "secret" volume is
// implemented in terms of the "emptyDir" volume.
NewWrapperMounter(volName string, spec Spec, pod *v1.Pod) (Mounter, error)
// NewWrapperUnmounter finds an appropriate plugin with which to handle
// the provided spec. See comments on NewWrapperMounter for more
// context.
NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error)
// Get mounter interface.
GetMounter() mount.Interface
// Returns the hostname of the host kubelet is running on
GetHostName() string
// Returns node allocatable.
GetNodeAllocatable() (v1.ResourceList, error)
// Returns a function that returns a secret.
GetSecretFunc() func(namespace, name string) (*v1.Secret, error)
// Returns a function that returns a configmap.
GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error)
GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
DeleteServiceAccountTokenFunc() func(podUID types.UID)
// Returns the labels on the node
GetNodeLabels() (map[string]string, error)
// Returns the name of the node
GetNodeName() types.NodeName
GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error)
// Returns the event recorder of kubelet.
GetEventRecorder() record.EventRecorder
// Returns an interface that should be used to execute subpath operations
GetSubpather() subpath.Interface
}
// VolumePluginMgr tracks registered plugins.
type VolumePluginMgr struct {
mutex sync.RWMutex
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
Host VolumeHost
}
// Spec is an internal representation of a volume. All API volume types translate to Spec.
type Spec struct {
Volume *v1.Volume
PersistentVolume *v1.PersistentVolume
ReadOnly bool
InlineVolumeSpecForCSIMigration bool
Migrated bool
}
// Name returns the name of either Volume or PersistentVolume, one of which must not be nil.
func (spec *Spec) Name() string {
switch {
case spec.Volume != nil:
return spec.Volume.Name
case spec.PersistentVolume != nil:
return spec.PersistentVolume.Name
default:
return ""
}
}
// IsKubeletExpandable returns true for volume types that can be expanded only by the node
// and not the controller. Currently Flex volume is the only one in this category since
// it is typically not installed on the controller
func (spec *Spec) IsKubeletExpandable() bool {
switch {
case spec.Volume != nil:
return spec.Volume.FlexVolume != nil
case spec.PersistentVolume != nil:
return spec.PersistentVolume.Spec.FlexVolume != nil
default:
return false
}
}
// KubeletExpandablePluginName creates and returns a name for the plugin.
// This is used on the controller, where plugin lookup fails because volume
// expansion on the controller isn't supported, but a plugin name is still
// required.
func (spec *Spec) KubeletExpandablePluginName() string {
switch {
case spec.Volume != nil && spec.Volume.FlexVolume != nil:
return spec.Volume.FlexVolume.Driver
case spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil:
return spec.PersistentVolume.Spec.FlexVolume.Driver
default:
return ""
}
}
// VolumeConfig is how volume plugins receive configuration. An instance
// specific to the plugin will be passed to the plugin's
// ProbeVolumePlugins(config) func. Reasonable defaults will be provided by
// the binary hosting the plugins while allowing override of those default
// values. Those config values are then set to an instance of VolumeConfig
// and passed to the plugin.
//
// Values in VolumeConfig are intended to be relevant to several plugins, but
// not necessarily all plugins. The preference is to leverage strong typing
// in this struct. All config items must have a descriptive but non-specific
// name (i.e., RecyclerMinimumTimeout is OK but RecyclerMinimumTimeoutForNFS is
// !OK). An instance of config will be given directly to the plugin, so
// config names specific to plugins are unneeded and wrongly expose plugins in
// this VolumeConfig struct.
//
// OtherAttributes is a map of string values intended for one-off
// configuration of a plugin or config that is only relevant to a single
// plugin. All values are passed by string and require interpretation by the
// plugin. Passing config as strings is the least desirable option but can be
// used for truly one-off configuration. The binary should still use strong
// typing for this value when binding CLI values before they are passed as
// strings in OtherAttributes.
type VolumeConfig struct {
// RecyclerPodTemplate is pod template that understands how to scrub clean
// a persistent volume after its release. The template is used by plugins
// which override specific properties of the pod in accordance with that
// plugin. See NewPersistentVolumeRecyclerPodTemplate for the properties
// that are expected to be overridden.
RecyclerPodTemplate *v1.Pod
// RecyclerMinimumTimeout is the minimum amount of time in seconds for the
// recycler pod's ActiveDeadlineSeconds attribute. Added to the minimum
// timeout is the increment per Gi of capacity.
RecyclerMinimumTimeout int
// RecyclerTimeoutIncrement is the number of seconds added to the recycler
// pod's ActiveDeadlineSeconds for each Gi of capacity in the persistent
// volume. Example: 5Gi volume x 30s increment = 150s + 30s minimum = 180s
// ActiveDeadlineSeconds for recycler pod
RecyclerTimeoutIncrement int
// PVName is name of the PersistentVolume instance that is being recycled.
// It is used to generate unique recycler pod name.
PVName string
// OtherAttributes stores config as strings. These strings are opaque to
// the system and only understood by the binary hosting the plugin and the
// plugin itself.
OtherAttributes map[string]string
// ProvisioningEnabled configures whether provisioning of this plugin is
// enabled or not. Currently used only in host_path plugin.
ProvisioningEnabled bool
}
// ReconstructedVolume contains information about a volume reconstructed by
// ConstructVolumeSpec().
type ReconstructedVolume struct {
// Spec is the volume spec of a mounted volume
Spec *Spec
// SELinuxMountContext is value of -o context=XYZ mount option.
// If empty, no such mount option is used.
SELinuxMountContext string
}
// NewSpecFromVolume creates a Spec from a v1.Volume
func NewSpecFromVolume(vs *v1.Volume) *Spec {
return &Spec{
Volume: vs,
}
}
// NewSpecFromPersistentVolume creates a Spec from a v1.PersistentVolume
func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec {
return &Spec{
PersistentVolume: pv,
ReadOnly: readOnly,
}
}
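// Illustrative sketch (editor's addition, not part of the original source):
// constructing a Spec from an API object and querying it. The volume literal
// is hypothetical:
//
//	spec := NewSpecFromVolume(&v1.Volume{
//		Name: "config",
//		VolumeSource: v1.VolumeSource{
//			Secret: &v1.SecretVolumeSource{SecretName: "config"},
//		},
//	})
//	_ = spec.Name()                // "config"
//	_ = spec.IsKubeletExpandable() // false: only Flex volumes qualify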
// InitPlugins initializes each plugin. All plugins must have unique names.
// This must be called exactly once before any New* methods are called on any
// plugins.
func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPluginProber, host VolumeHost) error {
pm.mutex.Lock()
defer pm.mutex.Unlock()
pm.Host = host
if prober == nil {
// Use a dummy prober to prevent a nil dereference.
pm.prober = &dummyPluginProber{}
} else {
pm.prober = prober
}
if err := pm.prober.Init(); err != nil {
// Prober init failure should not affect the initialization of other plugins.
klog.ErrorS(err, "Error initializing dynamic plugin prober")
pm.prober = &dummyPluginProber{}
}
if pm.plugins == nil {
pm.plugins = map[string]VolumePlugin{}
}
if pm.probedPlugins == nil {
pm.probedPlugins = map[string]VolumePlugin{}
}
allErrs := []error{}
for _, plugin := range plugins {
name := plugin.GetPluginName()
if errs := validation.IsQualifiedName(name); len(errs) != 0 {
allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
continue
}
if _, found := pm.plugins[name]; found {
allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
continue
}
err := plugin.Init(host)
if err != nil {
klog.ErrorS(err, "Failed to load volume plugin", "pluginName", name)
allErrs = append(allErrs, err)
continue
}
pm.plugins[name] = plugin
klog.V(1).InfoS("Loaded volume plugin", "pluginName", name)
}
pm.refreshProbedPlugins()
return utilerrors.NewAggregate(allErrs)
}
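// Illustrative sketch (editor's addition, not part of the original source):
// wiring up a VolumePluginMgr. The host and plugin list are hypothetical and
// would come from the hosting binary (kubelet or a controller):
//
//	var host VolumeHost // provided by the hosting binary
//	plugins := []VolumePlugin{ /* e.g. each plugin package's ProbeVolumePlugins() */ }
//	pm := &VolumePluginMgr{}
//	if err := pm.InitPlugins(plugins, nil /*prober*/, host); err != nil {
//		// the aggregate error lists plugins that failed to load; the
//		// remaining plugins are still registered and usable
//	}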
func (pm *VolumePluginMgr) initProbedPlugin(probedPlugin VolumePlugin) error {
name := probedPlugin.GetPluginName()
if errs := validation.IsQualifiedName(name); len(errs) != 0 {
return fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";"))
}
err := probedPlugin.Init(pm.Host)
if err != nil {
return fmt.Errorf("failed to load volume plugin %s, error: %s", name, err.Error())
}
klog.V(1).InfoS("Loaded volume plugin", "pluginName", name)
return nil
}
// FindPluginBySpec looks for a plugin that can support a given volume
// specification. If no plugin can support it, or more than one can, an
// error is returned.
func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
if spec == nil {
return nil, fmt.Errorf("could not find plugin because volume spec is nil")
}
var match VolumePlugin
matchedPluginNames := []string{}
for _, v := range pm.plugins {
if v.CanSupport(spec) {
match = v
matchedPluginNames = append(matchedPluginNames, v.GetPluginName())
}
}
pm.refreshProbedPlugins()
for _, plugin := range pm.probedPlugins {
if plugin.CanSupport(spec) {
match = plugin
matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
}
}
if len(matchedPluginNames) == 0 {
return nil, ErrNoPluginMatched
}
if len(matchedPluginNames) > 1 {
return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
}
return match, nil
}
// FindPluginByName fetches a plugin by name. If no plugin is found, returns error.
func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
var match VolumePlugin
if v, found := pm.plugins[name]; found {
match = v
}
pm.refreshProbedPlugins()
if plugin, found := pm.probedPlugins[name]; found {
if match != nil {
return nil, fmt.Errorf("multiple volume plugins matched: %s and %s", match.GetPluginName(), plugin.GetPluginName())
}
match = plugin
}
if match == nil {
return nil, fmt.Errorf("no volume plugin matched name: %s", name)
}
return match, nil
}
// refreshProbedPlugins asks the prober for plugin events and applies them:
// added/updated plugins are initialized and stored in the probed-plugin
// cache; removed plugins are deleted from it.
func (pm *VolumePluginMgr) refreshProbedPlugins() {
events, err := pm.prober.Probe()
if err != nil {
klog.ErrorS(err, "Error dynamically probing plugins")
}
// Because the probe function can return a list of valid plugins even when an
// error is present, we still must add the plugins, or they will be skipped
// because each event only fires once.
for _, event := range events {
if event.Op == ProbeAddOrUpdate {
if err := pm.initProbedPlugin(event.Plugin); err != nil {
klog.ErrorS(err, "Error initializing dynamically probed plugin",
"pluginName", event.Plugin.GetPluginName())
continue
}
pm.probedPlugins[event.Plugin.GetPluginName()] = event.Plugin
} else if event.Op == ProbeRemove {
// Plugin is not available on ProbeRemove event, only PluginName
delete(pm.probedPlugins, event.PluginName)
} else {
klog.ErrorS(nil, "Unknown Operation on PluginName.",
"pluginName", event.PluginName)
}
}
}
// FindPersistentPluginBySpec looks for a persistent volume plugin that can
// support a given volume specification. If no plugin is found, return an
// error
func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, fmt.Errorf("could not find volume plugin for spec: %#v", spec)
}
if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok {
return persistentVolumePlugin, nil
}
return nil, fmt.Errorf("no persistent volume plugin matched")
}
// FindPersistentPluginByName fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok {
return persistentVolumePlugin, nil
}
return nil, fmt.Errorf("no persistent volume plugin matched")
}
// FindRecyclablePluginBySpec fetches a recyclable persistent volume plugin by spec. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if recyclableVolumePlugin, ok := volumePlugin.(RecyclableVolumePlugin); ok {
return recyclableVolumePlugin, nil
}
return nil, fmt.Errorf("no recyclable volume plugin matched")
}
// FindProvisionablePluginByName fetches a provisionable persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
return provisionableVolumePlugin, nil
}
return nil, fmt.Errorf("no provisionable volume plugin matched")
}
// FindDeletablePluginBySpec fetches a deletable persistent volume plugin by spec. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
return deletableVolumePlugin, nil
}
return nil, fmt.Errorf("no deletable volume plugin matched")
}
// FindDeletablePluginByName fetches a deletable persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
return deletableVolumePlugin, nil
}
return nil, fmt.Errorf("no deletable volume plugin matched")
}
// FindAttachablePluginBySpec fetches an attachable volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return an error if no
// plugin is found. All volumes require a mounter and unmounter, but not
// every volume will have an attacher/detacher.
func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if attachableVolumePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok {
if canAttach, err := attachableVolumePlugin.CanAttach(spec); err != nil {
return nil, err
} else if canAttach {
return attachableVolumePlugin, nil
}
}
return nil, nil
}
// FindAttachablePluginByName fetches an attachable volume plugin by name.
// Unlike the other "FindPlugin" methods, this does not return an error if no
// plugin is found. All volumes require a mounter and unmounter, but not
// every volume will have an attacher/detacher.
func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if attachablePlugin, ok := volumePlugin.(AttachableVolumePlugin); ok {
return attachablePlugin, nil
}
return nil, nil
}
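// Illustrative sketch (editor's addition, not part of the original source):
// because the attachable finders return (nil, nil) when no attachable plugin
// exists, callers must check both return values:
//
//	attachablePlugin, err := pm.FindAttachablePluginBySpec(spec)
//	if err != nil {
//		return err
//	}
//	if attachablePlugin == nil {
//		// the volume does not need attach/detach; go straight to mounting
//	}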
// FindDeviceMountablePluginBySpec fetches a devicemountable persistent volume plugin by spec.
func (pm *VolumePluginMgr) FindDeviceMountablePluginBySpec(spec *Spec) (DeviceMountableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if deviceMountableVolumePlugin, ok := volumePlugin.(DeviceMountableVolumePlugin); ok {
if canMount, err := deviceMountableVolumePlugin.CanDeviceMount(spec); err != nil {
return nil, err
} else if canMount {
return deviceMountableVolumePlugin, nil
}
}
return nil, nil
}
// FindDeviceMountablePluginByName fetches a devicemountable persistent volume plugin by name.
func (pm *VolumePluginMgr) FindDeviceMountablePluginByName(name string) (DeviceMountableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if deviceMountableVolumePlugin, ok := volumePlugin.(DeviceMountableVolumePlugin); ok {
return deviceMountableVolumePlugin, nil
}
return nil, nil
}
// FindExpandablePluginBySpec fetches an expandable persistent volume plugin by spec.
func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
if spec.IsKubeletExpandable() {
// for kubelet expandable volumes, return a noop plugin that
// returns success for expand on the controller
klog.V(4).InfoS("FindExpandablePluginBySpec -> returning noopExpandableVolumePluginInstance", "specName", spec.Name())
return &noopExpandableVolumePluginInstance{spec}, nil
}
if errors.Is(err, ErrNoPluginMatched) {
return nil, nil
}
klog.V(4).InfoS("FindExpandablePluginBySpec -> err", "specName", spec.Name(), "err", err)
return nil, err
}
if expandableVolumePlugin, ok := volumePlugin.(ExpandableVolumePlugin); ok {
return expandableVolumePlugin, nil
}
return nil, nil
}
// FindExpandablePluginByName fetches an expandable persistent volume plugin by name.
func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if expandableVolumePlugin, ok := volumePlugin.(ExpandableVolumePlugin); ok {
return expandableVolumePlugin, nil
}
return nil, nil
}
// FindMapperPluginBySpec fetches a block volume plugin by spec.
func (pm *VolumePluginMgr) FindMapperPluginBySpec(spec *Spec) (BlockVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok {
return blockVolumePlugin, nil
}
return nil, nil
}
// FindMapperPluginByName fetches a block volume plugin by name.
func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok {
return blockVolumePlugin, nil
}
return nil, nil
}
// FindNodeExpandablePluginBySpec fetches a node expandable persistent volume plugin by spec
func (pm *VolumePluginMgr) FindNodeExpandablePluginBySpec(spec *Spec) (NodeExpandableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if fsResizablePlugin, ok := volumePlugin.(NodeExpandableVolumePlugin); ok {
return fsResizablePlugin, nil
}
return nil, nil
}
// FindNodeExpandablePluginByName fetches a node expandable persistent volume plugin by name
func (pm *VolumePluginMgr) FindNodeExpandablePluginByName(name string) (NodeExpandableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if fsResizablePlugin, ok := volumePlugin.(NodeExpandableVolumePlugin); ok {
return fsResizablePlugin, nil
}
return nil, nil
}
// Run starts the volume plugin manager, initializing and running the necessary
// tasks for managing volume plugins. This method is typically called to begin
// the plugin management lifecycle.
func (pm *VolumePluginMgr) Run(stopCh <-chan struct{}) {
kletHost, ok := pm.Host.(KubeletVolumeHost)
if ok {
// start informer for CSIDriver
informerFactory := kletHost.GetInformerFactory()
informerFactory.Start(stopCh)
informerFactory.WaitForCacheSync(stopCh)
}
}
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
// pod. By default, a recycler pod deletes everything under a volume and
// tests for emptiness. Most attributes of the template will be correct for most
// plugin implementations. The following attributes can be overridden per
// plugin via configuration:
//
// 1. pod.Spec.Volumes[0].VolumeSource must be overridden. Recycler
// implementations without a valid VolumeSource will fail.
// 2. pod.GenerateName helps distinguish recycler pods by name. Recommended.
// Default is "pv-recycler-".
// 3. pod.Spec.ActiveDeadlineSeconds gives the recycler pod a maximum timeout
// before failing. Recommended. Default is 60 seconds.
//
// See HostPath and NFS for working recycler examples
func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
timeout := int64(60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pv-recycler-",
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
ActiveDeadlineSeconds: &timeout,
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "vol",
// IMPORTANT! All plugins using this template MUST
// override pod.Spec.Volumes[0].VolumeSource. Recycler
// implementations without a valid VolumeSource will fail.
VolumeSource: v1.VolumeSource{},
},
},
Containers: []v1.Container{
{
Name: "pv-recycler",
Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.4",
Command: []string{"/bin/sh"},
Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"},
VolumeMounts: []v1.VolumeMount{
{
Name: "vol",
MountPath: "/scrub",
},
},
},
},
},
}
return pod
}
// ValidateRecyclerPodTemplate checks the validity of a recycler pod template.
// List of checks:
// - at least one volume is defined in the recycler pod template
// If successful, returns nil; if unsuccessful, returns an error.
func ValidateRecyclerPodTemplate(pod *v1.Pod) error {
if len(pod.Spec.Volumes) < 1 {
return fmt.Errorf("does not contain any volume(s)")
}
return nil
}
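// Illustrative sketch (editor's addition, not part of the original source):
// a plugin customizing the recycler template as described above, using a
// HostPath source as one valid override:
//
//	pod := NewPersistentVolumeRecyclerPodTemplate()
//	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
//		HostPath: &v1.HostPathVolumeSource{Path: "/some/path"},
//	}
//	if err := ValidateRecyclerPodTemplate(pod); err != nil {
//		// the template is unusable as configured
//	}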
type dummyPluginProber struct{}
func (*dummyPluginProber) Init() error { return nil }
func (*dummyPluginProber) Probe() ([]ProbeEvent, error) { return nil, nil }
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secret
import (
"errors"
"fmt"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// ProbeVolumePlugins is the entry point for plugin detection in a package.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&secretPlugin{}}
}
const (
secretPluginName = "kubernetes.io/secret"
)
// secretPlugin implements the VolumePlugin interface.
type secretPlugin struct {
host volume.VolumeHost
getSecret func(namespace, name string) (*v1.Secret, error)
}
var _ volume.VolumePlugin = &secretPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}},
}
}
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(secretPluginName), volName)
}
func (plugin *secretPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.getSecret = host.GetSecretFunc()
return nil
}
func (plugin *secretPlugin) GetPluginName() string {
return secretPluginName
}
func (plugin *secretPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _ := getVolumeSource(spec)
if volumeSource == nil {
return "", fmt.Errorf("Spec does not reference a Secret volume type")
}
return volumeSource.SecretName, nil
}
func (plugin *secretPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.Secret != nil
}
func (plugin *secretPlugin) RequiresRemount(spec *volume.Spec) bool {
return true
}
func (plugin *secretPlugin) SupportsMountOption() bool {
return false
}
func (plugin *secretPlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return false, nil
}
func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
return &secretVolumeMounter{
secretVolume: &secretVolume{
spec.Name(),
pod.UID,
plugin,
plugin.host.GetMounter(),
volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
},
source: *spec.Volume.Secret,
pod: *pod,
getSecret: plugin.getSecret,
}, nil
}
func (plugin *secretPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &secretVolumeUnmounter{
&secretVolume{
volName,
podUID,
plugin,
plugin.host.GetMounter(),
volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
},
}, nil
}
func (plugin *secretPlugin) ConstructVolumeSpec(volName, mountPath string) (volume.ReconstructedVolume, error) {
secretVolume := &v1.Volume{
Name: volName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: volName,
},
},
}
return volume.ReconstructedVolume{
Spec: volume.NewSpecFromVolume(secretVolume),
}, nil
}
type secretVolume struct {
volName string
podUID types.UID
plugin *secretPlugin
mounter mount.Interface
volume.MetricsProvider
}
var _ volume.Volume = &secretVolume{}
func (sv *secretVolume) GetPath() string {
return getPath(sv.podUID, sv.volName, sv.plugin.host)
}
// secretVolumeMounter handles retrieving secrets from the API server
// and placing them into the volume on the host.
type secretVolumeMounter struct {
*secretVolume
source v1.SecretVolumeSource
pod v1.Pod
getSecret func(namespace, name string) (*v1.Secret, error)
}
var _ volume.Mounter = &secretVolumeMounter{}
func (sv *secretVolume) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: true,
Managed: true,
SELinuxRelabel: true,
}
}
func (b *secretVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
return b.SetUpAt(b.GetPath(), mounterArgs)
}
func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
// Wrap EmptyDir, let it do the setup.
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod)
if err != nil {
return err
}
optional := b.source.Optional != nil && *b.source.Optional
secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
if err != nil {
if !(apierrors.IsNotFound(err) && optional) {
klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err)
return err
}
secret = &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: b.pod.Namespace,
Name: b.source.SecretName,
},
}
}
totalBytes := totalSecretBytes(secret)
klog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes",
b.pod.Namespace,
b.source.SecretName,
len(secret.Data),
totalBytes)
payload, err := MakePayload(b.source.Items, secret, b.source.DefaultMode, optional)
if err != nil {
return err
}
setupSuccess := false
if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
return err
}
if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
return err
}
defer func() {
// Clean up directories if setup fails
if !setupSuccess {
unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
if unmountCreateErr != nil {
klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
return
}
tearDownErr := unmounter.TearDown()
if tearDownErr != nil {
klog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr)
}
}
}()
writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
if err != nil {
klog.Errorf("Error creating atomic writer: %v", err)
return err
}
setPerms := func(_ string) error {
// This may be the first time writing and new files get created outside the timestamp subdirectory:
// change the permissions on the whole volume and not only in the timestamp directory.
ownershipChanger := volume.NewVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil))
return ownershipChanger.ChangePermissions()
}
err = writer.Write(payload, setPerms)
if err != nil {
klog.Errorf("Error writing payload to dir: %v", err)
return err
}
setupSuccess = true
return nil
}
// MakePayload function is exported so that it can be called from the projection volume driver
func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) {
if defaultMode == nil {
return nil, fmt.Errorf("defaultMode is required but was not provided")
}
payload := make(map[string]volumeutil.FileProjection, len(secret.Data))
var fileProjection volumeutil.FileProjection
if len(mappings) == 0 {
for name, data := range secret.Data {
fileProjection.Data = []byte(data)
fileProjection.Mode = *defaultMode
payload[name] = fileProjection
}
} else {
for _, ktp := range mappings {
content, ok := secret.Data[ktp.Key]
if !ok {
if optional {
continue
}
errMsg := fmt.Sprintf("references non-existent secret key: %s", ktp.Key)
klog.Error(errMsg)
return nil, errors.New(errMsg)
}
fileProjection.Data = []byte(content)
if ktp.Mode != nil {
fileProjection.Mode = *ktp.Mode
} else {
fileProjection.Mode = *defaultMode
}
payload[ktp.Path] = fileProjection
}
}
return payload, nil
}
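// Illustrative sketch (editor's addition, not part of the original source):
// projecting a single secret key to a custom path with the default mode.
// All values are hypothetical:
//
//	mode := int32(0644)
//	secret := &v1.Secret{Data: map[string][]byte{"token": []byte("s3cr3t")}}
//	mappings := []v1.KeyToPath{{Key: "token", Path: "auth/token"}}
//	payload, err := MakePayload(mappings, secret, &mode, false /*optional*/)
//	// on success, payload["auth/token"] carries Data "s3cr3t" and Mode 0644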
func totalSecretBytes(secret *v1.Secret) int {
totalSize := 0
for _, bytes := range secret.Data {
totalSize += len(bytes)
}
return totalSize
}
// secretVolumeUnmounter handles cleaning up secret volumes.
type secretVolumeUnmounter struct {
*secretVolume
}
var _ volume.Unmounter = &secretVolumeUnmounter{}
func (c *secretVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}
func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.SecretVolumeSource
if spec.Volume != nil && spec.Volume.Secret != nil {
volumeSource = spec.Volume.Secret
readOnly = spec.ReadOnly
}
return volumeSource, readOnly
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"os"
"path/filepath"
goruntime "runtime"
"strings"
"sync"
"testing"
"time"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/exec"
testingexec "k8s.io/utils/exec/testing"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
const (
// A hook specified in storage class to indicate that its provisioning
// is expected to fail.
ExpectProvisionFailureKey = "expect-provision-failure"
// The node is marked as uncertain. The attach operation will fail and return timeout error
// for the first attach call. The following call will return successfully.
UncertainAttachNode = "uncertain-attach-node"
// The detach operation will keep failing on the node.
FailDetachNode = "fail-detach-node"
// The node is marked as timeout. The attach operation will always fail and return a timeout error,
// but the operation actually succeeds.
TimeoutAttachNode = "timeout-attach-node"
// The node is marked as multi-attach which means it is allowed to attach the volume to multiple nodes.
MultiAttachNode = "multi-attach-node"
// TimeoutOnSetupVolumeName will cause Setup call to timeout but volume will finish mounting.
TimeoutOnSetupVolumeName = "timeout-setup-volume"
// FailOnSetupVolumeName will cause setup call to fail
FailOnSetupVolumeName = "fail-setup-volume"
// TimeoutAndFailOnSetupVolumeName will first time out and then fail the setup
TimeoutAndFailOnSetupVolumeName = "timeout-and-fail-setup-volume"
// SuccessAndTimeoutSetupVolumeName will cause first mount operation to succeed but subsequent attempts to timeout
SuccessAndTimeoutSetupVolumeName = "success-and-timeout-setup-volume-name"
// SuccessAndFailOnSetupVolumeName will cause first mount operation to succeed but subsequent attempts to fail
SuccessAndFailOnSetupVolumeName = "success-and-failed-setup-device-name"
// TimeoutOnMountDeviceVolumeName will cause MountDevice call to timeout but Setup will finish.
TimeoutOnMountDeviceVolumeName = "timeout-mount-device-volume"
// TimeoutAndFailOnMountDeviceVolumeName will cause first MountDevice call to timeout but second call will fail
TimeoutAndFailOnMountDeviceVolumeName = "timeout-and-fail-mount-device-name"
// FailMountDeviceVolumeName will cause MountDevice operation on volume to fail
FailMountDeviceVolumeName = "fail-mount-device-volume-name"
// SuccessAndTimeoutDeviceName will cause first mount operation to succeed but subsequent attempts to timeout
SuccessAndTimeoutDeviceName = "success-and-timeout-device-name"
// SuccessAndFailOnMountDeviceName will cause first mount operation to succeed but subsequent attempts to fail
SuccessAndFailOnMountDeviceName = "success-and-failed-mount-device-name"
// FailWithInUseVolumeName will cause NodeExpandVolume to result in FailedPrecondition error
FailWithInUseVolumeName = "fail-expansion-in-use"
FailWithUnSupportedVolumeName = "fail-expansion-unsupported"
FailVolumeExpansion = "fail-expansion-test"
InfeasibleNodeExpansion = "infeasible-fail-node-expansion"
OtherFinalNodeExpansionError = "other-final-node-expansion-error"
deviceNotMounted = "deviceNotMounted"
deviceMountUncertain = "deviceMountUncertain"
deviceMounted = "deviceMounted"
volumeNotMounted = "volumeNotMounted"
volumeMountUncertain = "volumeMountUncertain"
volumeMounted = "volumeMounted"
FailNewMounter = "fail-new-mounter"
)
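// The magic volume and node names above drive failure injection: a test opts
// into a behavior simply by giving its volume (or node) one of these names.
// The following is an illustrative sketch, not part of this file's API; it
// assumes fakePlugin is a *FakeVolumePlugin and pod is a *v1.Pod:
//
//	spec := &volume.Spec{Volume: &v1.Volume{Name: TimeoutOnSetupVolumeName}}
//	mounter, _ := fakePlugin.NewMounter(spec, pod)
//	err := mounter.SetUp(volume.MounterArgs{})
//	// err is an uncertain-progress error and the mount state is recorded
//	// as uncertain, mimicking a timed-out setup.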
// CommandScript is used to pre-configure a command that will be executed and
// optionally set its output (stdout and stderr combined) and return code.
type CommandScript struct {
// Cmd is the command to execute, e.g. "ls"
Cmd string
// Args is a slice of arguments to pass to the command, e.g. "-a"
Args []string
// Output is the combined stdout and stderr of the command to return
Output string
// ReturnCode is the exit code for the command. Setting this to non-zero will
// cause the command to return an error with this exit code set.
ReturnCode int
}
// ScriptCommands configures fe, the FakeExec, with a pre-configured list of
// commands to expect. Calling more commands through fe than were scripted will
// result in a panic. By default, fe does not enforce command argument checking
// or ordering -- if you have given an Output to a command, the first scripted
// command will return its output on the first call, even if the command called
// is different from the one scripted. This is mostly useful to make sure that
// the right number of commands were called. If you want to check that the exact
// commands and arguments were called, set fe.ExactOrder to true.
func ScriptCommands(fe *testingexec.FakeExec, scripts []CommandScript) {
fe.DisableScripts = false
for _, script := range scripts {
fakeCmd := &testingexec.FakeCmd{}
cmdAction := makeFakeCmd(fakeCmd, script.Cmd, script.Args...)
outputAction := makeFakeOutput(script.Output, script.ReturnCode)
fakeCmd.CombinedOutputScript = append(fakeCmd.CombinedOutputScript, outputAction)
fe.CommandScript = append(fe.CommandScript, cmdAction)
}
}
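// An illustrative use of ScriptCommands (a sketch, not part of the helpers
// themselves): script a single "du" invocation and require exact argument
// matching via ExactOrder.
//
//	fe := &testingexec.FakeExec{ExactOrder: true}
//	ScriptCommands(fe, []CommandScript{
//		{Cmd: "du", Args: []string{"-s", "/tmp"}, Output: "42\t/tmp", ReturnCode: 0},
//	})
//	out, err := fe.Command("du", "-s", "/tmp").CombinedOutput()
//	// out == []byte("42\t/tmp") and err == nil; a second, unscripted call
//	// would panic.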
func makeFakeCmd(fakeCmd *testingexec.FakeCmd, cmd string, args ...string) testingexec.FakeCommandAction {
fc := fakeCmd
c := cmd
a := args
return func(cmd string, args ...string) exec.Cmd {
command := testingexec.InitFakeCmd(fc, c, a...)
return command
}
}
func makeFakeOutput(output string, rc int) testingexec.FakeAction {
o := output
var e error
if rc != 0 {
e = testingexec.FakeExitError{Status: rc}
}
return func() ([]byte, []byte, error) {
return []byte(o), nil, e
}
}
func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
if _, ok := config.OtherAttributes["fake-property"]; ok {
return []volume.VolumePlugin{
&FakeVolumePlugin{
PluginName: "fake-plugin",
Host: nil,
// SomeFakeProperty: config.OtherAttributes["fake-property"] -- string, may require parsing by plugin
},
}
}
return []volume.VolumePlugin{&FakeVolumePlugin{PluginName: "fake-plugin"}}
}
// FakeVolumePlugin is useful for testing. It tries to be a fully compliant
// plugin, but all it does is make empty directories.
// Use it by including it in the plugin list passed to
// VolumePluginMgr.InitPlugins, for example via ProbeVolumePlugins above.
type FakeVolumePlugin struct {
sync.RWMutex
PluginName string
Host volume.VolumeHost
Config volume.VolumeConfig
LastProvisionerOptions volume.VolumeOptions
LastResizeOptions volume.NodeResizeOptions
NewAttacherCallCount int
NewDetacherCallCount int
NodeExpandCallCount int
VolumeLimits map[string]int64
VolumeLimitsError error
LimitKey string
ProvisionDelaySeconds int
SupportsRemount bool
SupportsSELinux bool
DisableNodeExpansion bool
CanSupportFn func(*volume.Spec) bool
VerifyExhaustedEnabled bool
// Defaults to false, which means the plugin is attachable by default.
NonAttachable bool
// Add callbacks as needed
WaitForAttachHook func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
UnmountDeviceHook func(globalMountPath string) error
Mounters []*FakeVolume
Unmounters []*FakeVolume
Attachers []*FakeVolume
Detachers []*FakeVolume
BlockVolumeMappers []*FakeVolume
BlockVolumeUnmappers []*FakeVolume
}
var _ volume.VolumePlugin = &FakeVolumePlugin{}
var _ volume.BlockVolumePlugin = &FakeVolumePlugin{}
var _ volume.RecyclableVolumePlugin = &FakeVolumePlugin{}
var _ volume.DeletableVolumePlugin = &FakeVolumePlugin{}
var _ volume.ProvisionableVolumePlugin = &FakeVolumePlugin{}
var _ volume.AttachableVolumePlugin = &FakeVolumePlugin{}
var _ volume.DeviceMountableVolumePlugin = &FakeVolumePlugin{}
var _ volume.NodeExpandableVolumePlugin = &FakeVolumePlugin{}
func (plugin *FakeVolumePlugin) getFakeVolume(list *[]*FakeVolume) *FakeVolume {
if list != nil {
volumeList := *list
if len(volumeList) > 0 {
volume := volumeList[0]
volume.Lock()
defer volume.Unlock()
volume.WaitForAttachHook = plugin.WaitForAttachHook
volume.UnmountDeviceHook = plugin.UnmountDeviceHook
return volume
}
}
volume := &FakeVolume{
WaitForAttachHook: plugin.WaitForAttachHook,
UnmountDeviceHook: plugin.UnmountDeviceHook,
}
volume.VolumesAttached = make(map[string]sets.Set[string])
volume.DeviceMountState = make(map[string]string)
volume.VolumeMountState = make(map[string]string)
if list != nil {
*list = append(*list, volume)
}
return volume
}
func (plugin *FakeVolumePlugin) Init(host volume.VolumeHost) error {
plugin.Lock()
defer plugin.Unlock()
plugin.Host = host
return nil
}
func (plugin *FakeVolumePlugin) GetPluginName() string {
plugin.RLock()
defer plugin.RUnlock()
return plugin.PluginName
}
func (plugin *FakeVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
var volumeName string
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
volumeName = spec.Volume.GCEPersistentDisk.PDName
} else if spec.Volume != nil && spec.Volume.RBD != nil {
volumeName = spec.Volume.RBD.RBDImage
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
volumeName = spec.PersistentVolume.Spec.GCEPersistentDisk.PDName
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
volumeName = spec.PersistentVolume.Spec.RBD.RBDImage
} else if spec.Volume != nil && spec.Volume.CSI != nil {
volumeName = spec.Volume.CSI.Driver
}
if volumeName == "" {
volumeName = spec.Name()
}
return volumeName, nil
}
func (plugin *FakeVolumePlugin) CanSupport(spec *volume.Spec) bool {
if plugin.CanSupportFn != nil {
return plugin.CanSupportFn(spec)
}
return true
}
func (plugin *FakeVolumePlugin) RequiresRemount(spec *volume.Spec) bool {
return plugin.SupportsRemount
}
func (plugin *FakeVolumePlugin) SupportsMountOption() bool {
return true
}
func (plugin *FakeVolumePlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return plugin.SupportsSELinux, nil
}
func (plugin *FakeVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
plugin.Lock()
defer plugin.Unlock()
if spec.Name() == FailNewMounter {
return nil, fmt.Errorf("AlwaysFailNewMounter")
}
fakeVolume := plugin.getFakeVolume(&plugin.Mounters)
fakeVolume.Lock()
defer fakeVolume.Unlock()
fakeVolume.PodUID = pod.UID
fakeVolume.VolName = spec.Name()
fakeVolume.Plugin = plugin
fakeVolume.MetricsNil = volume.MetricsNil{}
return fakeVolume, nil
}
func (plugin *FakeVolumePlugin) GetMounters() (Mounters []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.Mounters
}
func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
plugin.Lock()
defer plugin.Unlock()
fakeVolume := plugin.getFakeVolume(&plugin.Unmounters)
fakeVolume.Lock()
defer fakeVolume.Unlock()
fakeVolume.PodUID = podUID
fakeVolume.VolName = volName
fakeVolume.Plugin = plugin
fakeVolume.MetricsNil = volume.MetricsNil{}
return fakeVolume, nil
}
func (plugin *FakeVolumePlugin) GetUnmounters() (Unmounters []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.Unmounters
}
// Block volume support
func (plugin *FakeVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod) (volume.BlockVolumeMapper, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.BlockVolumeMappers)
volume.Lock()
defer volume.Unlock()
if pod != nil {
volume.PodUID = pod.UID
}
volume.VolName = spec.Name()
volume.Plugin = plugin
return volume, nil
}
// Block volume support
func (plugin *FakeVolumePlugin) GetBlockVolumeMapper() (BlockVolumeMappers []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.BlockVolumeMappers
}
// Block volume support
func (plugin *FakeVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.BlockVolumeUnmappers)
volume.Lock()
defer volume.Unlock()
volume.PodUID = podUID
volume.VolName = volName
volume.Plugin = plugin
return volume, nil
}
// Block volume support
func (plugin *FakeVolumePlugin) GetBlockVolumeUnmapper() (BlockVolumeUnmappers []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.BlockVolumeUnmappers
}
func (plugin *FakeVolumePlugin) NewAttacher() (volume.Attacher, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.NewAttacherCallCount = plugin.NewAttacherCallCount + 1
return plugin.getFakeVolume(&plugin.Attachers), nil
}
func (plugin *FakeVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *FakeVolumePlugin) GetAttachers() (Attachers []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.Attachers
}
func (plugin *FakeVolumePlugin) GetNewAttacherCallCount() int {
plugin.RLock()
defer plugin.RUnlock()
return plugin.NewAttacherCallCount
}
func (plugin *FakeVolumePlugin) NewDetacher() (volume.Detacher, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.NewDetacherCallCount = plugin.NewDetacherCallCount + 1
detacher := plugin.getFakeVolume(&plugin.Detachers)
attacherList := plugin.Attachers
if len(attacherList) > 0 {
detacherList := plugin.Detachers
if len(detacherList) > 0 {
detacherList[0].VolumesAttached = attacherList[0].VolumesAttached
}
}
return detacher, nil
}
func (plugin *FakeVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (plugin *FakeVolumePlugin) GetDetachers() (Detachers []*FakeVolume) {
plugin.RLock()
defer plugin.RUnlock()
return plugin.Detachers
}
func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int {
plugin.RLock()
defer plugin.RUnlock()
return plugin.NewDetacherCallCount
}
func (plugin *FakeVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
return !plugin.NonAttachable, nil
}
func (plugin *FakeVolumePlugin) VerifyExhaustedResource(spec *volume.Spec) bool {
return plugin.VerifyExhaustedEnabled
}
func (plugin *FakeVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}
func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
return nil
}
func (plugin *FakeVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return &FakeDeleter{"/attributesTransferredFromSpec", volume.MetricsNil{}}, nil
}
func (plugin *FakeVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.LastProvisionerOptions = options
return &FakeProvisioner{options, plugin.Host, plugin.ProvisionDelaySeconds}, nil
}
func (plugin *FakeVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{}
}
func (plugin *FakeVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
return volume.ReconstructedVolume{
Spec: &volume.Spec{
Volume: &v1.Volume{
Name: volumeName,
},
},
}, nil
}
// Block volume support
func (plugin *FakeVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*volume.Spec, error) {
return &volume.Spec{
Volume: &v1.Volume{
Name: volumeName,
},
}, nil
}
func (plugin *FakeVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
return []string{}, nil
}
// Expandable volume support
func (plugin *FakeVolumePlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, nil
}
func (plugin *FakeVolumePlugin) RequiresFSResize() bool {
return !plugin.DisableNodeExpansion
}
func (plugin *FakeVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
plugin.NodeExpandCallCount++
plugin.LastResizeOptions = resizeOptions
if resizeOptions.VolumeSpec.Name() == FailWithInUseVolumeName {
return false, volumetypes.NewFailedPreconditionError("volume-in-use")
}
if resizeOptions.VolumeSpec.Name() == FailWithUnSupportedVolumeName {
return false, volumetypes.NewOperationNotSupportedError("volume-unsupported")
}
if resizeOptions.VolumeSpec.Name() == InfeasibleNodeExpansion {
return false, volumetypes.NewInfeasibleError("infeasible-expansion")
}
if resizeOptions.VolumeSpec.Name() == OtherFinalNodeExpansionError {
return false, fmt.Errorf("other-final-node-expansion-error")
}
if resizeOptions.VolumeSpec.Name() == FailVolumeExpansion {
return false, fmt.Errorf("fail volume expansion for volume: %s", FailVolumeExpansion)
}
return true, nil
}
func (plugin *FakeVolumePlugin) GetVolumeLimits() (map[string]int64, error) {
return plugin.VolumeLimits, plugin.VolumeLimitsError
}
func (plugin *FakeVolumePlugin) VolumeLimitKey(spec *volume.Spec) string {
return plugin.LimitKey
}
// FakeBasicVolumePlugin implements a basic volume plugin. It wraps
// FakeVolumePlugin but implements only the VolumePlugin interface.
// It is useful for testing logic that involves plugin interfaces.
type FakeBasicVolumePlugin struct {
Plugin FakeVolumePlugin
}
func (f *FakeBasicVolumePlugin) GetPluginName() string {
return f.Plugin.GetPluginName()
}
func (f *FakeBasicVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
return f.Plugin.GetVolumeName(spec)
}
// CanSupport tests whether the plugin supports a given volume specification by
// checking whether the volume spec name begins with the plugin name.
// This is useful for choosing a plugin by volume name in tests.
func (f *FakeBasicVolumePlugin) CanSupport(spec *volume.Spec) bool {
return strings.HasPrefix(spec.Name(), f.GetPluginName())
}
func (f *FakeBasicVolumePlugin) ConstructVolumeSpec(name, mountPath string) (volume.ReconstructedVolume, error) {
return f.Plugin.ConstructVolumeSpec(name, mountPath)
}
func (f *FakeBasicVolumePlugin) Init(host volume.VolumeHost) error {
return f.Plugin.Init(host)
}
func (f *FakeBasicVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod) (volume.Mounter, error) {
return f.Plugin.NewMounter(spec, pod)
}
func (f *FakeBasicVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return f.Plugin.NewUnmounter(volName, podUID)
}
func (f *FakeBasicVolumePlugin) RequiresRemount(spec *volume.Spec) bool {
return f.Plugin.RequiresRemount(spec)
}
func (f *FakeBasicVolumePlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return f.Plugin.SupportsSELinuxContextMount(spec)
}
func (f *FakeBasicVolumePlugin) SupportsMountOption() bool {
return f.Plugin.SupportsMountOption()
}
var _ volume.VolumePlugin = &FakeBasicVolumePlugin{}
// FakeDeviceMountableVolumePlugin implements a device mountable plugin based on FakeBasicVolumePlugin.
type FakeDeviceMountableVolumePlugin struct {
FakeBasicVolumePlugin
}
func (f *FakeDeviceMountableVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}
func (f *FakeDeviceMountableVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return f.Plugin.NewDeviceMounter()
}
func (f *FakeDeviceMountableVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return f.Plugin.NewDeviceUnmounter()
}
func (f *FakeDeviceMountableVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
return f.Plugin.GetDeviceMountRefs(deviceMountPath)
}
var _ volume.VolumePlugin = &FakeDeviceMountableVolumePlugin{}
var _ volume.DeviceMountableVolumePlugin = &FakeDeviceMountableVolumePlugin{}
// FakeAttachableVolumePlugin implements an attachable plugin based on FakeDeviceMountableVolumePlugin.
type FakeAttachableVolumePlugin struct {
FakeDeviceMountableVolumePlugin
}
func (f *FakeAttachableVolumePlugin) NewAttacher() (volume.Attacher, error) {
return f.Plugin.NewAttacher()
}
func (f *FakeAttachableVolumePlugin) NewDetacher() (volume.Detacher, error) {
return f.Plugin.NewDetacher()
}
func (f *FakeAttachableVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
return true, nil
}
func (f *FakeAttachableVolumePlugin) VerifyExhaustedResource(spec *volume.Spec) bool {
return false
}
var _ volume.VolumePlugin = &FakeAttachableVolumePlugin{}
var _ volume.AttachableVolumePlugin = &FakeAttachableVolumePlugin{}
type FakeFileVolumePlugin struct {
}
func (plugin *FakeFileVolumePlugin) Init(host volume.VolumeHost) error {
return nil
}
func (plugin *FakeFileVolumePlugin) GetPluginName() string {
return "fake-file-plugin"
}
func (plugin *FakeFileVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
return "", nil
}
func (plugin *FakeFileVolumePlugin) CanSupport(spec *volume.Spec) bool {
return true
}
func (plugin *FakeFileVolumePlugin) RequiresRemount(spec *volume.Spec) bool {
return false
}
func (plugin *FakeFileVolumePlugin) SupportsMountOption() bool {
return false
}
func (plugin *FakeFileVolumePlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return false, nil
}
func (plugin *FakeFileVolumePlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod) (volume.Mounter, error) {
return nil, nil
}
func (plugin *FakeFileVolumePlugin) NewUnmounter(name string, podUID types.UID) (volume.Unmounter, error) {
return nil, nil
}
func (plugin *FakeFileVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
return volume.ReconstructedVolume{}, nil
}
func NewFakeFileVolumePlugin() []volume.VolumePlugin {
return []volume.VolumePlugin{&FakeFileVolumePlugin{}}
}
type FakeVolume struct {
sync.RWMutex
PodUID types.UID
VolName string
Plugin *FakeVolumePlugin
volume.MetricsNil
VolumesAttached map[string]sets.Set[string]
DeviceMountState map[string]string
VolumeMountState map[string]string
// Add callbacks as needed
WaitForAttachHook func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
UnmountDeviceHook func(globalMountPath string) error
SetUpCallCount int
TearDownCallCount int
AttachCallCount int
DetachCallCount int
WaitForAttachCallCount int
MountDeviceCallCount int
UnmountDeviceCallCount int
GetDeviceMountPathCallCount int
SetUpDeviceCallCount int
TearDownDeviceCallCount int
MapPodDeviceCallCount int
UnmapPodDeviceCallCount int
GlobalMapPathCallCount int
PodDeviceMapPathCallCount int
}
func getUniqueVolumeName(spec *volume.Spec) (string, error) {
var volumeName string
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
volumeName = spec.Volume.GCEPersistentDisk.PDName
} else if spec.Volume != nil && spec.Volume.RBD != nil {
volumeName = spec.Volume.RBD.RBDImage
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
volumeName = spec.PersistentVolume.Spec.GCEPersistentDisk.PDName
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.RBD != nil {
volumeName = spec.PersistentVolume.Spec.RBD.RBDImage
}
if volumeName == "" {
volumeName = spec.Name()
}
return volumeName, nil
}
func (_ *FakeVolume) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: false,
Managed: true,
SELinuxRelabel: true,
}
}
func (fv *FakeVolume) SetUp(mounterArgs volume.MounterArgs) error {
fv.Lock()
defer fv.Unlock()
err := fv.setupInternal(mounterArgs)
fv.SetUpCallCount++
return err
}
func (fv *FakeVolume) setupInternal(mounterArgs volume.MounterArgs) error {
if fv.VolName == TimeoutOnSetupVolumeName {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return volumetypes.NewUncertainProgressError("time out on setup")
}
if fv.VolName == FailOnSetupVolumeName {
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return fmt.Errorf("mounting volume failed")
}
if fv.VolName == TimeoutAndFailOnSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if !ok {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return volumetypes.NewUncertainProgressError("time out on setup")
}
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return fmt.Errorf("mounting volume failed")
}
if fv.VolName == SuccessAndFailOnSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if ok {
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return fmt.Errorf("mounting volume failed")
}
}
if fv.VolName == SuccessAndTimeoutSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if ok {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return volumetypes.NewUncertainProgressError("time out on setup")
}
}
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return fv.SetUpAt(fv.getPath(), mounterArgs)
}
func (fv *FakeVolume) GetSetUpCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.SetUpCallCount
}
func (fv *FakeVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
return os.MkdirAll(dir, 0750)
}
func (fv *FakeVolume) GetPath() string {
fv.RLock()
defer fv.RUnlock()
return fv.getPath()
}
func (fv *FakeVolume) getPath() string {
return filepath.Join(fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, utilstrings.EscapeQualifiedName(fv.Plugin.PluginName), fv.VolName))
}
func (fv *FakeVolume) TearDown() error {
fv.Lock()
defer fv.Unlock()
fv.TearDownCallCount++
return fv.TearDownAt(fv.getPath())
}
func (fv *FakeVolume) GetTearDownCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.TearDownCallCount
}
func (fv *FakeVolume) TearDownAt(dir string) error {
return os.RemoveAll(dir)
}
// Block volume support
func (fv *FakeVolume) SetUpDevice() (string, error) {
fv.Lock()
defer fv.Unlock()
if fv.VolName == TimeoutOnMountDeviceVolumeName {
fv.DeviceMountState[fv.VolName] = deviceMountUncertain
return "", volumetypes.NewUncertainProgressError("mount failed")
}
if fv.VolName == FailMountDeviceVolumeName {
fv.DeviceMountState[fv.VolName] = deviceNotMounted
return "", fmt.Errorf("error mapping disk: %s", fv.VolName)
}
if fv.VolName == TimeoutAndFailOnMountDeviceVolumeName {
_, ok := fv.DeviceMountState[fv.VolName]
if !ok {
fv.DeviceMountState[fv.VolName] = deviceMountUncertain
return "", volumetypes.NewUncertainProgressError("timed out mounting error")
}
fv.DeviceMountState[fv.VolName] = deviceNotMounted
return "", fmt.Errorf("error mapping disk: %s", fv.VolName)
}
if fv.VolName == SuccessAndTimeoutDeviceName {
_, ok := fv.DeviceMountState[fv.VolName]
if ok {
fv.DeviceMountState[fv.VolName] = deviceMountUncertain
return "", volumetypes.NewUncertainProgressError("error mounting state")
}
}
if fv.VolName == SuccessAndFailOnMountDeviceName {
_, ok := fv.DeviceMountState[fv.VolName]
if ok {
return "", fmt.Errorf("error mapping disk: %s", fv.VolName)
}
}
fv.DeviceMountState[fv.VolName] = deviceMounted
fv.SetUpDeviceCallCount++
return "", nil
}
func (fv *FakeVolume) GetStagingPath() string {
return filepath.Join(fv.Plugin.Host.GetVolumeDevicePluginDir(utilstrings.EscapeQualifiedName(fv.Plugin.PluginName)), "staging", fv.VolName)
}
// Block volume support
func (fv *FakeVolume) GetSetUpDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.SetUpDeviceCallCount
}
// Block volume support
func (fv *FakeVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.GlobalMapPathCallCount++
return fv.getGlobalMapPath()
}
// Block volume support
func (fv *FakeVolume) getGlobalMapPath() (string, error) {
return filepath.Join(fv.Plugin.Host.GetVolumeDevicePluginDir(utilstrings.EscapeQualifiedName(fv.Plugin.PluginName)), "pluginDependentPath"), nil
}
// Block volume support
func (fv *FakeVolume) GetGlobalMapPathCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.GlobalMapPathCallCount
}
// Block volume support
func (fv *FakeVolume) GetPodDeviceMapPath() (string, string) {
// Take the write lock: the call count below is mutated.
fv.Lock()
defer fv.Unlock()
fv.PodDeviceMapPathCallCount++
return fv.getPodDeviceMapPath()
}
// Block volume support
func (fv *FakeVolume) getPodDeviceMapPath() (string, string) {
return filepath.Join(fv.Plugin.Host.GetPodVolumeDeviceDir(fv.PodUID, utilstrings.EscapeQualifiedName(fv.Plugin.PluginName))), fv.VolName
}
// Block volume support
func (fv *FakeVolume) GetPodDeviceMapPathCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.PodDeviceMapPathCallCount
}
// Block volume support
func (fv *FakeVolume) TearDownDevice(mapPath string, devicePath string) error {
fv.Lock()
defer fv.Unlock()
fv.TearDownDeviceCallCount++
return nil
}
// Block volume support
func (fv *FakeVolume) GetTearDownDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.TearDownDeviceCallCount
}
// Block volume support
func (fv *FakeVolume) UnmapPodDevice() error {
fv.Lock()
defer fv.Unlock()
fv.UnmapPodDeviceCallCount++
return nil
}
// Block volume support
func (fv *FakeVolume) GetUnmapPodDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.UnmapPodDeviceCallCount
}
// Block volume support
func (fv *FakeVolume) MapPodDevice() (string, error) {
fv.Lock()
defer fv.Unlock()
if fv.VolName == TimeoutOnSetupVolumeName {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return "", volumetypes.NewUncertainProgressError("time out on setup")
}
if fv.VolName == FailOnSetupVolumeName {
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return "", fmt.Errorf("mounting volume failed")
}
if fv.VolName == TimeoutAndFailOnSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if !ok {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return "", volumetypes.NewUncertainProgressError("time out on setup")
}
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return "", fmt.Errorf("mounting volume failed")
}
if fv.VolName == SuccessAndFailOnSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if ok {
fv.VolumeMountState[fv.VolName] = volumeNotMounted
return "", fmt.Errorf("mounting volume failed")
}
}
if fv.VolName == SuccessAndTimeoutSetupVolumeName {
_, ok := fv.VolumeMountState[fv.VolName]
if ok {
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
return "", volumetypes.NewUncertainProgressError("time out on setup")
}
}
fv.VolumeMountState[fv.VolName] = volumeMounted
fv.MapPodDeviceCallCount++
return "", nil
}
// Block volume support
func (fv *FakeVolume) GetMapPodDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.MapPodDeviceCallCount
}
func (fv *FakeVolume) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.AttachCallCount++
volumeName, err := getUniqueVolumeName(spec)
if err != nil {
return "", err
}
volumeNodes, exist := fv.VolumesAttached[volumeName]
if exist {
if nodeName == UncertainAttachNode {
return "/dev/vdb-test", nil
}
// Even if the volume previously timed out while attaching, keep returning an
// error so the reconciler cannot confirm this volume as attached.
if nodeName == TimeoutAttachNode {
return "", fmt.Errorf("timed out to attach volume %q to node %q", volumeName, nodeName)
}
if volumeNodes.Has(string(nodeName)) || volumeNodes.Has(MultiAttachNode) || nodeName == MultiAttachNode {
volumeNodes.Insert(string(nodeName))
return "/dev/vdb-test", nil
}
return "", fmt.Errorf("volume %q trying to attach to node %q is already attached to node %q", volumeName, nodeName, volumeNodes)
}
fv.VolumesAttached[volumeName] = sets.New[string](string(nodeName))
if nodeName == UncertainAttachNode || nodeName == TimeoutAttachNode {
return "", fmt.Errorf("timed out to attach volume %q to node %q", volumeName, nodeName)
}
return "/dev/vdb-test", nil
}
func (fv *FakeVolume) GetAttachCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.AttachCallCount
}
func (fv *FakeVolume) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.WaitForAttachCallCount++
if fv.WaitForAttachHook != nil {
return fv.WaitForAttachHook(spec, devicePath, pod, spectimeout)
}
return "/dev/sdb", nil
}
func (fv *FakeVolume) GetWaitForAttachCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.WaitForAttachCallCount
}
func (fv *FakeVolume) GetDeviceMountPath(spec *volume.Spec) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.GetDeviceMountPathCallCount++
return "", nil
}
func (fv *FakeVolume) mountDeviceInternal(spec *volume.Spec, devicePath string, deviceMountPath string) error {
fv.Lock()
defer fv.Unlock()
if spec.Name() == TimeoutOnMountDeviceVolumeName {
fv.DeviceMountState[spec.Name()] = deviceMountUncertain
return volumetypes.NewUncertainProgressError("mount failed")
}
if spec.Name() == FailMountDeviceVolumeName {
fv.DeviceMountState[spec.Name()] = deviceNotMounted
return fmt.Errorf("error mounting disk: %s", devicePath)
}
if spec.Name() == TimeoutAndFailOnMountDeviceVolumeName {
_, ok := fv.DeviceMountState[spec.Name()]
if !ok {
fv.DeviceMountState[spec.Name()] = deviceMountUncertain
return volumetypes.NewUncertainProgressError("timed out mounting error")
}
fv.DeviceMountState[spec.Name()] = deviceNotMounted
return fmt.Errorf("error mounting disk: %s", devicePath)
}
if spec.Name() == SuccessAndTimeoutDeviceName {
_, ok := fv.DeviceMountState[spec.Name()]
if ok {
fv.DeviceMountState[spec.Name()] = deviceMountUncertain
return volumetypes.NewUncertainProgressError("error mounting state")
}
}
if spec.Name() == SuccessAndFailOnMountDeviceName {
_, ok := fv.DeviceMountState[spec.Name()]
if ok {
return fmt.Errorf("error mounting disk: %s", devicePath)
}
}
fv.DeviceMountState[spec.Name()] = deviceMounted
fv.MountDeviceCallCount++
return nil
}
func (fv *FakeVolume) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
return fv.mountDeviceInternal(spec, devicePath, deviceMountPath)
}
func (fv *FakeVolume) GetMountDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.MountDeviceCallCount
}
func (fv *FakeVolume) GetUnmountDeviceCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.UnmountDeviceCallCount
}
func (fv *FakeVolume) Detach(volumeName string, nodeName types.NodeName) error {
fv.Lock()
defer fv.Unlock()
node := string(nodeName)
volumeNodes, exist := fv.VolumesAttached[volumeName]
if !exist || !volumeNodes.Has(node) {
return fmt.Errorf("trying to detach volume %q that is not attached to the node %q", volumeName, node)
}
fv.DetachCallCount++
if nodeName == FailDetachNode {
return fmt.Errorf("fail to detach volume %q to node %q", volumeName, nodeName)
}
volumeNodes.Delete(node)
if volumeNodes.Len() == 0 {
delete(fv.VolumesAttached, volumeName)
}
return nil
}
func (fv *FakeVolume) VolumesAreAttached(spec []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
fv.Lock()
defer fv.Unlock()
return nil, nil
}
func (fv *FakeVolume) GetDetachCallCount() int {
fv.RLock()
defer fv.RUnlock()
return fv.DetachCallCount
}
func (fv *FakeVolume) UnmountDevice(globalMountPath string) error {
fv.Lock()
defer fv.Unlock()
fv.UnmountDeviceCallCount++
if fv.UnmountDeviceHook != nil {
return fv.UnmountDeviceHook(globalMountPath)
}
return nil
}
type FakeDeleter struct {
path string
volume.MetricsNil
}
func (fd *FakeDeleter) Delete() error {
// nil is success, else error
return nil
}
func (fd *FakeDeleter) GetPath() string {
return fd.path
}
type FakeProvisioner struct {
Options volume.VolumeOptions
Host volume.VolumeHost
ProvisionDelaySeconds int
}
func (fc *FakeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
// Add provision failure hook
if fc.Options.Parameters != nil {
if _, ok := fc.Options.Parameters[ExpectProvisionFailureKey]; ok {
return nil, fmt.Errorf("expected error")
}
}
fullpath := filepath.Join(os.TempDir(), "hostpath_pv", string(uuid.NewUUID()))
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: fc.Options.PVName,
Annotations: map[string]string{
"kubernetes.io/createdby": "fakeplugin-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
AccessModes: fc.Options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
},
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fullpath,
},
},
},
}
if fc.ProvisionDelaySeconds > 0 {
time.Sleep(time.Duration(fc.ProvisionDelaySeconds) * time.Second)
}
return pv, nil
}
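// An illustrative provisioning round trip with the fake plugin (a sketch, not
// part of this file; fakePlugin is assumed to come from GetTestVolumePluginMgr
// below):
//
//	opts := volume.VolumeOptions{
//		PVName: "pv-1",
//		PVC:    CreateTestPVC("5Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
//	}
//	prov, _ := fakePlugin.NewProvisioner(klog.Background(), opts)
//	pv, _ := prov.Provision(nil /* selectedNode */, nil /* allowedTopologies */)
//	// pv is a HostPath PersistentVolume named "pv-1" whose capacity mirrors
//	// the PVC's storage request.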
var _ volumepathhandler.BlockVolumePathHandler = &FakeVolumePathHandler{}
// NewBlockVolumePathHandler returns a fake BlockVolumePathHandler implementation for tests.
func NewBlockVolumePathHandler() volumepathhandler.BlockVolumePathHandler {
return &FakeVolumePathHandler{}
}
type FakeVolumePathHandler struct {
sync.RWMutex
}
func (fv *FakeVolumePathHandler) MapDevice(devicePath string, mapDir string, linkName string, bindMount bool) error {
// nil is success, else error
return nil
}
func (fv *FakeVolumePathHandler) UnmapDevice(mapDir string, linkName string, bindMount bool) error {
// nil is success, else error
return nil
}
func (fv *FakeVolumePathHandler) RemoveMapPath(mapPath string) error {
// nil is success, else error
return nil
}
func (fv *FakeVolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
// nil is success, else error
return true, nil
}
func (fv *FakeVolumePathHandler) IsDeviceBindMountExist(mapPath string) (bool, error) {
// nil is success, else error
return true, nil
}
func (fv *FakeVolumePathHandler) GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error) {
// nil is success, else error
return []string{}, nil
}
func (fv *FakeVolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
// nil is success, else error
return "", nil
}
func (fv *FakeVolumePathHandler) AttachFileDevice(path string) (string, error) {
// nil is success, else error
return "", nil
}
func (fv *FakeVolumePathHandler) DetachFileDevice(path string) error {
// nil is success, else error
return nil
}
func (fv *FakeVolumePathHandler) GetLoopDevice(path string) (string, error) {
// nil is success, else error
return "/dev/loop1", nil
}
// FindEmptyDirectoryUsageOnTmpfs finds the expected usage of an empty directory existing on
// a tmpfs filesystem on this system.
func FindEmptyDirectoryUsageOnTmpfs() (*resource.Quantity, error) {
// The command below does not exist on Windows. Additionally, empty folders have size 0 on Windows.
if goruntime.GOOS == "windows" {
used, err := resource.ParseQuantity("0")
return &used, err
}
tmpDir, err := utiltesting.MkTmpdir("metrics_du_test")
if err != nil {
return nil, err
}
defer os.RemoveAll(tmpDir)
out, err := exec.New().Command("nice", "-n", "19", "du", "-x", "-s", "-B", "1", tmpDir).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed command 'du' on %s with error %v", tmpDir, err)
}
used, err := resource.ParseQuantity(strings.Fields(string(out))[0])
if err != nil {
return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, err)
}
used.Format = resource.BinarySI
return &used, nil
}
// VerifyAttachCallCount ensures that at least one of the Attachers for this
// plugin has the expectedAttachCallCount number of calls. Otherwise it returns
// an error.
func VerifyAttachCallCount(
expectedAttachCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetAttachCallCount()
if actualCallCount >= expectedAttachCallCount {
return nil
}
}
return fmt.Errorf(
"No attachers have expected AttachCallCount. Expected: <%v>.",
expectedAttachCallCount)
}
// VerifyZeroAttachCalls ensures that all of the Attachers for this plugin have
// a zero AttachCallCount. Otherwise it returns an error.
func VerifyZeroAttachCalls(fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetAttachCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one attacher has non-zero AttachCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyWaitForAttachCallCount ensures that at least one of the Attachers for
// this plugin has the expectedWaitForAttachCallCount number of calls. Otherwise
// it returns an error.
func VerifyWaitForAttachCallCount(
expectedWaitForAttachCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetWaitForAttachCallCount()
if actualCallCount >= expectedWaitForAttachCallCount {
return nil
}
}
return fmt.Errorf(
"No Attachers have expected WaitForAttachCallCount. Expected: <%v>.",
expectedWaitForAttachCallCount)
}
// VerifyZeroWaitForAttachCallCount ensures that all Attachers for this plugin
// have a zero WaitForAttachCallCount. Otherwise it returns an error.
func VerifyZeroWaitForAttachCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetWaitForAttachCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one attacher has non-zero WaitForAttachCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyMountDeviceCallCount ensures that at least one of the Attachers for
// this plugin has the expectedMountDeviceCallCount number of calls. Otherwise
// it returns an error.
func VerifyMountDeviceCallCount(
expectedMountDeviceCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetMountDeviceCallCount()
if actualCallCount >= expectedMountDeviceCallCount {
return nil
}
}
return fmt.Errorf(
"No Attachers have expected MountDeviceCallCount. Expected: <%v>.",
expectedMountDeviceCallCount)
}
func VerifyUnmountDeviceCallCount(expectedCallCount int, fakeVolumePlugin *FakeVolumePlugin) error {
detachers := fakeVolumePlugin.GetDetachers()
if len(detachers) == 0 && (expectedCallCount == 0) {
return nil
}
actualCallCount := 0
for _, detacher := range detachers {
actualCallCount = detacher.GetUnmountDeviceCallCount()
if expectedCallCount == 0 && actualCallCount == expectedCallCount {
return nil
}
if (expectedCallCount > 0) && (actualCallCount >= expectedCallCount) {
return nil
}
}
return fmt.Errorf(
"Expected DeviceUnmount Call %d, got %d",
expectedCallCount, actualCallCount)
}
// VerifyZeroMountDeviceCallCount ensures that all Attachers for this plugin
// have a zero MountDeviceCallCount. Otherwise it returns an error.
func VerifyZeroMountDeviceCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, attacher := range fakeVolumePlugin.GetAttachers() {
actualCallCount := attacher.GetMountDeviceCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one attacher has non-zero MountDeviceCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifySetUpCallCount ensures that at least one of the Mounters for this
// plugin has the expectedSetUpCallCount number of calls. Otherwise it returns
// an error.
func VerifySetUpCallCount(
expectedSetUpCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, mounter := range fakeVolumePlugin.GetMounters() {
actualCallCount := mounter.GetSetUpCallCount()
if actualCallCount >= expectedSetUpCallCount {
return nil
}
}
return fmt.Errorf(
"No Mounters have expected SetUpCallCount. Expected: <%v>.",
expectedSetUpCallCount)
}
// VerifyZeroSetUpCallCount ensures that all Mounters for this plugin have a
// zero SetUpCallCount. Otherwise it returns an error.
func VerifyZeroSetUpCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, mounter := range fakeVolumePlugin.GetMounters() {
actualCallCount := mounter.GetSetUpCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one mounter has non-zero SetUpCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyTearDownCallCount ensures that at least one of the Unmounters for this
// plugin has the expectedTearDownCallCount number of calls. Otherwise it
// returns an error.
func VerifyTearDownCallCount(
expectedTearDownCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
unmounters := fakeVolumePlugin.GetUnmounters()
if len(unmounters) == 0 && (expectedTearDownCallCount == 0) {
return nil
}
for _, unmounter := range unmounters {
actualCallCount := unmounter.GetTearDownCallCount()
if expectedTearDownCallCount == 0 && actualCallCount == expectedTearDownCallCount {
return nil
}
if (expectedTearDownCallCount > 0) && (actualCallCount >= expectedTearDownCallCount) {
return nil
}
}
return fmt.Errorf(
"No Unmounters have expected SetUpCallCount. Expected: <%v>.",
expectedTearDownCallCount)
}
// VerifyZeroTearDownCallCount ensures that all Mounters for this plugin have a
// zero TearDownCallCount. Otherwise it returns an error.
func VerifyZeroTearDownCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, mounter := range fakeVolumePlugin.GetMounters() {
actualCallCount := mounter.GetTearDownCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one mounter has non-zero TearDownCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyDetachCallCount ensures that at least one of the Attachers for this
// plugin has the expectedDetachCallCount number of calls. Otherwise it returns
// an error.
func VerifyDetachCallCount(
expectedDetachCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, detacher := range fakeVolumePlugin.GetDetachers() {
actualCallCount := detacher.GetDetachCallCount()
if actualCallCount == expectedDetachCallCount {
return nil
}
}
return fmt.Errorf(
"No Detachers have expected DetachCallCount. Expected: <%v>.",
expectedDetachCallCount)
}
// VerifyZeroDetachCallCount ensures that all Detachers for this plugin have a
// zero DetachCallCount. Otherwise it returns an error.
func VerifyZeroDetachCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, detacher := range fakeVolumePlugin.GetDetachers() {
actualCallCount := detacher.GetDetachCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one detacher has non-zero DetachCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyTearDownDeviceCallCount ensures that at least one of the Unmappers for this
// plugin has the expectedTearDownDeviceCallCount number of calls. Otherwise it
// returns an error.
func VerifyTearDownDeviceCallCount(
expectedTearDownDeviceCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, unmapper := range fakeVolumePlugin.GetBlockVolumeUnmapper() {
actualCallCount := unmapper.GetTearDownDeviceCallCount()
if actualCallCount >= expectedTearDownDeviceCallCount {
return nil
}
}
return fmt.Errorf(
"No Unmapper have expected TearDownDeviceCallCount. Expected: <%v>.",
expectedTearDownDeviceCallCount)
}
// VerifyZeroTearDownDeviceCallCount ensures that all Unmappers for this plugin have a
// zero TearDownDeviceCallCount. Otherwise it returns an error.
func VerifyZeroTearDownDeviceCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, unmapper := range fakeVolumePlugin.GetBlockVolumeUnmapper() {
actualCallCount := unmapper.GetTearDownDeviceCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one unmapper has non-zero TearDownDeviceCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyUnmapPodDeviceCallCount ensures that at least one of the Unmappers for this
// plugin has the expected number of UnmapPodDevice calls. Otherwise it
// returns an error.
func VerifyUnmapPodDeviceCallCount(
expectedUnmapPodDeviceCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, unmapper := range fakeVolumePlugin.GetBlockVolumeUnmapper() {
actualCallCount := unmapper.GetUnmapPodDeviceCallCount()
if actualCallCount >= expectedUnmapPodDeviceCallCount {
return nil
}
}
return fmt.Errorf(
"No Unmapper have expected UnmapPodDeviceCallCount. Expected: <%v>.",
expectedUnmapPodDeviceCallCount)
}
// VerifyZeroUnmapPodDeviceCallCount ensures that all Unmappers for this plugin
// have zero UnmapPodDevice calls. Otherwise it returns an error.
func VerifyZeroUnmapPodDeviceCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
for _, unmapper := range fakeVolumePlugin.GetBlockVolumeUnmapper() {
actualCallCount := unmapper.GetUnmapPodDeviceCallCount()
if actualCallCount != 0 {
return fmt.Errorf(
"At least one unmapper has non-zero UnmapPodDeviceCallCount: <%v>.",
actualCallCount)
}
}
return nil
}
// VerifyGetGlobalMapPathCallCount ensures that at least one of the Mappers for this
// plugin has the expectedGlobalMapPathCallCount number of calls. Otherwise it returns
// an error.
func VerifyGetGlobalMapPathCallCount(
expectedGlobalMapPathCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, mapper := range fakeVolumePlugin.GetBlockVolumeMapper() {
actualCallCount := mapper.GetGlobalMapPathCallCount()
if actualCallCount == expectedGlobalMapPathCallCount {
return nil
}
}
return fmt.Errorf(
"No Mappers have expected GetGlobalMapPathCallCount. Expected: <%v>.",
expectedGlobalMapPathCallCount)
}
// VerifyGetPodDeviceMapPathCallCount ensures that at least one of the Mappers for this
// plugin has the expectedPodDeviceMapPathCallCount number of calls. Otherwise it returns
// an error.
func VerifyGetPodDeviceMapPathCallCount(
expectedPodDeviceMapPathCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, mapper := range fakeVolumePlugin.GetBlockVolumeMapper() {
actualCallCount := mapper.GetPodDeviceMapPathCallCount()
if actualCallCount == expectedPodDeviceMapPathCallCount {
return nil
}
}
return fmt.Errorf(
"No Mappers have expected GetPodDeviceMapPathCallCount. Expected: <%v>.",
expectedPodDeviceMapPathCallCount)
}
// VerifyGetMapPodDeviceCallCount ensures that at least one of the Mappers for this
// plugin has the expectedMapPodDeviceCallCount number of calls. Otherwise it
// returns an error.
func VerifyGetMapPodDeviceCallCount(
expectedMapPodDeviceCallCount int,
fakeVolumePlugin *FakeVolumePlugin) error {
for _, mapper := range fakeVolumePlugin.GetBlockVolumeMapper() {
actualCallCount := mapper.GetMapPodDeviceCallCount()
if actualCallCount >= expectedMapPodDeviceCallCount {
return nil
}
}
return fmt.Errorf(
"No Mapper have expected MapPodDeviceCallCount. Expected: <%v>.",
expectedMapPodDeviceCallCount)
}
// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
// manager and fake volume plugin using a fake volume host.
func GetTestVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
v := NewFakeVolumeHost(
t,
t.TempDir(), /* rootDir */
nil, /* kubeClient */
plugins, /* plugins */
)
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
}
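// An illustrative test setup using GetTestVolumePluginMgr (a sketch, not part
// of this file's API):
//
//	func TestSomething(t *testing.T) {
//		pluginMgr, fakePlugin := GetTestVolumePluginMgr(t)
//		plug, err := pluginMgr.FindPluginByName(fakePlugin.GetPluginName())
//		if err != nil || plug == nil {
//			t.Fatalf("can't find the plugin by name: %v", err)
//		}
//	}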
func GetTestKubeletVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
v := NewFakeKubeletVolumeHost(
t,
t.TempDir(), /* rootDir */
nil, /* kubeClient */
plugins, /* plugins */
)
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
}
func GetTestKubeletVolumePluginMgrWithNode(t *testing.T, node *v1.Node) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
v := NewFakeKubeletVolumeHost(
t,
t.TempDir(), /* rootDir */
nil, /* kubeClient */
plugins, /* plugins */
)
v.WithNode(node)
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
}
func GetTestKubeletVolumePluginMgrWithNodeAndRoot(t *testing.T, node *v1.Node, rootDir string) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
v := NewFakeKubeletVolumeHost(
t,
rootDir, /* rootDir */
nil, /* kubeClient */
plugins, /* plugins */
)
v.WithNode(node)
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
}
// CreateTestPVC returns a provisionable PVC for tests
func CreateTestPVC(capacity string, accessModes []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "dummy",
Namespace: "default",
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: accessModes,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
},
},
},
}
return &claim
}
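// For example (illustrative):
//
//	claim := CreateTestPVC("5Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
//	// claim requests 5Gi of storage with ReadWriteOnce access in namespace
//	// "default".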
func MetricsEqualIgnoreTimestamp(a *volume.Metrics, b *volume.Metrics) bool {
available := a.Available == b.Available
capacity := a.Capacity == b.Capacity
used := a.Used == b.Used
inodes := a.Inodes == b.Inodes
inodesFree := a.InodesFree == b.InodesFree
inodesUsed := a.InodesUsed == b.InodesUsed
return available && capacity && used && inodes && inodesFree && inodesUsed
}
func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"bytes"
"context"
"fmt"
"path/filepath"
"strings"
"sync"
"testing"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
csilibplugins "k8s.io/csi-translation-lib/plugins"
. "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/mount-utils"
)
type FakeVolumeHost interface {
VolumeHost
GetPluginMgr() *VolumePluginMgr
}
// fakeVolumeHost is useful for testing volume plugins.
// TODO: Extract fields specific to fakeKubeletVolumeHost and fakeAttachDetachVolumeHost.
type fakeVolumeHost struct {
rootDir string
kubeClient clientset.Interface
pluginMgr *VolumePluginMgr
mounter mount.Interface
hostUtil hostutil.HostUtils
nodeLabels map[string]string
nodeName string
subpather subpath.Interface
node *v1.Node
csiDriverLister storagelistersv1.CSIDriverLister
volumeAttachmentLister storagelistersv1.VolumeAttachmentLister
informerFactory informers.SharedInformerFactory
kubeletErr error
mux sync.Mutex
}
var _ VolumeHost = &fakeVolumeHost{}
var _ FakeVolumeHost = &fakeVolumeHost{}
func NewFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) FakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, "", nil, nil)
}
func NewFakeVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) FakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, "", nil, nil)
}
func NewFakeVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) FakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nodeName, driverLister, volumeAttachLister)
}
func newFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) FakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, nodeName: nodeName, csiDriverLister: driverLister, volumeAttachmentLister: volumeAttachLister}
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeVolumeHost) GetPluginDir(pluginName string) string {
return filepath.Join(f.rootDir, "plugins", pluginName)
}
func (f *fakeVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return filepath.Join(f.rootDir, "plugins", pluginName, "volumeDevices")
}
func (f *fakeVolumeHost) GetPodsDir() string {
return filepath.Join(f.rootDir, "pods")
}
func (f *fakeVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumes", pluginName, volumeName)
}
func (f *fakeVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumeDevices", pluginName)
}
func (f *fakeVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "plugins", pluginName)
}
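// Taken together, the path helpers above mirror the kubelet's on-disk layout
// under rootDir. For a pod UID "uid", plugin name "kubernetes.io~fake", and
// volume "vol", they produce paths of the form:
//
//	<rootDir>/plugins/kubernetes.io~fake
//	<rootDir>/pods/uid/volumes/kubernetes.io~fake/vol
//	<rootDir>/pods/uid/volumeDevices/kubernetes.io~fake
//	<rootDir>/pods/uid/plugins/kubernetes.io~fake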
func (f *fakeVolumeHost) GetKubeClient() clientset.Interface {
return f.kubeClient
}
func (f *fakeVolumeHost) GetMounter() mount.Interface {
return f.mounter
}
func (f *fakeVolumeHost) GetSubpather() subpath.Interface {
return f.subpather
}
func (f *fakeVolumeHost) GetPluginMgr() *VolumePluginMgr {
return f.pluginMgr
}
func (f *fakeVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
return map[v1.UniqueVolumeName]string{}, nil
}
func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *v1.Pod) (Mounter, error) {
// The name of the wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewMounter(&spec, pod)
}
func (f *fakeVolumeHost) NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) {
// The name of the wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewUnmounter(spec.Name(), podUID)
}
// GetHostName returns the hostname of the host the kubelet is running on.
func (f *fakeVolumeHost) GetHostName() string {
return "fakeHostName"
}
func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}
func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return func(namespace, name string) (*v1.Secret, error) {
return f.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(namespace, name string) (*v1.ConfigMap, error) {
return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
}
}
func (f *fakeVolumeHost) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {}
}
func (f *fakeVolumeHost) GetNodeLabels() (map[string]string, error) {
if f.nodeLabels == nil {
f.nodeLabels = map[string]string{"test-label": "test-value"}
}
return f.nodeLabels, nil
}
func (f *fakeVolumeHost) GetNodeName() types.NodeName {
return types.NodeName(f.nodeName)
}
func (f *fakeVolumeHost) GetEventRecorder() record.EventRecorder {
return nil
}
func (f *fakeVolumeHost) WaitForKubeletErrNil() error {
return wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) {
f.mux.Lock()
defer f.mux.Unlock()
return f.kubeletErr == nil, nil
})
}
type fakeAttachDetachVolumeHost struct {
fakeVolumeHost
}
var _ AttachDetachVolumeHost = &fakeAttachDetachVolumeHost{}
var _ FakeVolumeHost = &fakeAttachDetachVolumeHost{}
func NewFakeAttachDetachVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) FakeVolumeHost {
return newFakeAttachDetachVolumeHost(t, rootDir, kubeClient, plugins, nil, nodeName, driverLister, volumeAttachLister)
}
func newFakeAttachDetachVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) FakeVolumeHost {
host := &fakeAttachDetachVolumeHost{}
host.rootDir = rootDir
host.kubeClient = kubeClient
host.nodeName = nodeName
host.csiDriverLister = driverLister
host.volumeAttachmentLister = volumeAttachLister
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeAttachDetachVolumeHost) CSINodeLister() storagelistersv1.CSINodeLister {
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{Name: f.nodeName},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{},
},
}
enableMigrationOnNode(csiNode, csilibplugins.GCEPDInTreePluginName)
return getFakeCSINodeLister(csiNode)
}
func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
nodeInfoAnnotations := csiNode.GetAnnotations()
if nodeInfoAnnotations == nil {
nodeInfoAnnotations = map[string]string{}
}
newAnnotationSet := sets.New[string]()
newAnnotationSet.Insert(pluginName)
nas := strings.Join(sets.List(newAnnotationSet), ",")
nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
csiNode.Annotations = nodeInfoAnnotations
}
func (f *fakeAttachDetachVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
return f.csiDriverLister
}
func (f *fakeAttachDetachVolumeHost) VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister {
return f.volumeAttachmentLister
}
func (f *fakeAttachDetachVolumeHost) IsAttachDetachController() bool {
return true
}
type fakeKubeletVolumeHost struct {
fakeVolumeHost
}
var _ KubeletVolumeHost = &fakeKubeletVolumeHost{}
var _ FakeVolumeHost = &fakeKubeletVolumeHost{}
func NewFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, "", nil, nil)
}
func NewFakeKubeletVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nodeName, driverLister, volumeAttachLister)
}
func NewFakeKubeletVolumeHostWithMounterFSType(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, pathToTypeMap, "", nil, nil)
}
func newFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeKubeletVolumeHost {
host := &fakeKubeletVolumeHost{}
host.rootDir = rootDir
host.kubeClient = kubeClient
host.nodeName = nodeName
host.csiDriverLister = driverLister
host.volumeAttachmentLister = volumeAttachLister
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeKubeletVolumeHost) WithNode(node *v1.Node) *fakeKubeletVolumeHost {
f.node = node
return f
}
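// Illustrative sketch (not part of the original source): a typical way a
// plugin unit test might construct this fake host. fake.NewSimpleClientset
// (from k8s.io/client-go/kubernetes/fake) and ProbeVolumePlugins are
// assumptions standing in for whatever client and plugin set the test uses:
//
// func TestMyPlugin(t *testing.T) {
// client := fake.NewSimpleClientset()
// host := NewFakeKubeletVolumeHost(t, t.TempDir(), client, ProbeVolumePlugins())
// _ = host.GetPluginMgr()
// }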
type CSINodeLister []storagev1.CSINode
// Get returns a fake CSINode object.
func (n CSINodeLister) Get(name string) (*storagev1.CSINode, error) {
for _, cn := range n {
if cn.Name == name {
return &cn, nil
}
}
return nil, fmt.Errorf("csiNode %q not found", name)
}
// List is not implemented for this fake lister.
func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1.CSINode, err error) {
return nil, fmt.Errorf("not implemented")
}
func getFakeCSINodeLister(csiNode *storagev1.CSINode) CSINodeLister {
csiNodeLister := CSINodeLister{}
if csiNode != nil {
csiNodeLister = append(csiNodeLister, *csiNode.DeepCopy())
}
return csiNodeLister
}
func (f *fakeKubeletVolumeHost) SetKubeletError(err error) {
f.mux.Lock()
defer f.mux.Unlock()
f.kubeletErr = err
}
func (f *fakeKubeletVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
return f.informerFactory
}
func (f *fakeKubeletVolumeHost) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
result := map[v1.UniqueVolumeName]string{}
if f.node != nil {
for _, av := range f.node.Status.VolumesAttached {
result[av.Name] = av.DevicePath
}
}
return result, nil
}
func (f *fakeKubeletVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
return f.csiDriverLister
}
func (f *fakeKubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
// not needed for testing
return nil
}
func (f *fakeKubeletVolumeHost) WaitForCacheSync() error {
return nil
}
func (f *fakeKubeletVolumeHost) GetHostUtil() hostutil.HostUtils {
return f.hostUtil
}
func (f *fakeKubeletVolumeHost) GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error) {
ctb, err := f.kubeClient.CertificatesV1beta1().ClusterTrustBundles().Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("while getting ClusterTrustBundle %s: %w", name, err)
}
return []byte(ctb.Spec.TrustBundle), nil
}
// Note: we do none of the deduplication and sorting that the real deal should do.
func (f *fakeKubeletVolumeHost) GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error) {
ctbList, err := f.kubeClient.CertificatesV1beta1().ClusterTrustBundles().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("while listing all ClusterTrustBundles: %w", err)
}
fullSet := bytes.Buffer{}
for i, ctb := range ctbList.Items {
fullSet.WriteString(ctb.Spec.TrustBundle)
if i != len(ctbList.Items)-1 {
fullSet.WriteString("\n")
}
}
return fullSet.Bytes(), nil
}
func (f *fakeKubeletVolumeHost) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
return []byte("key\n"), []byte("cert\n"), nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
maxFileNameLength = 255
maxPathLength = 4096
)
// AtomicWriter handles atomically projecting content for a set of files into
// a target directory.
//
// Note:
//
// 1. AtomicWriter reserves the set of pathnames starting with `..`.
// 2. AtomicWriter offers no concurrency guarantees and must be synchronized
// by the caller.
//
// The visible files in this volume are symlinks to files in the writer's data
// directory. Actual files are stored in a hidden timestamped directory which
// is symlinked to by the data directory. The timestamped directory and
// data directory symlink are created in the writer's target dir. This scheme
// allows the files to be atomically updated by changing the target of the
// data directory symlink.
//
// Consumers of the target directory can monitor the ..data symlink using
// inotify or fanotify to receive events when the content in the volume is
// updated.
type AtomicWriter struct {
targetDir string
logContext string
}
// FileProjection contains file Data and access Mode
type FileProjection struct {
Data []byte
Mode int32
FsUser *int64
}
// NewAtomicWriter creates a new AtomicWriter configured to write to the given
// target directory, or returns an error if the target directory does not exist.
func NewAtomicWriter(targetDir string, logContext string) (*AtomicWriter, error) {
_, err := os.Stat(targetDir)
if os.IsNotExist(err) {
return nil, err
}
return &AtomicWriter{targetDir: targetDir, logContext: logContext}, nil
}
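// exampleProjectPayload is an illustrative sketch, not part of the original
// source: it shows how a caller might atomically project two files into a
// target directory. The file names and contents are hypothetical.
func exampleProjectPayload(targetDir string) error {
w, err := NewAtomicWriter(targetDir, "demo-volume")
if err != nil {
return err
}
payload := map[string]FileProjection{
"podName": {Data: []byte("mypod"), Mode: 0644},
"user/labels": {Data: []byte("app=demo"), Mode: 0644},
}
// A nil setPerms callback skips the optional permission hook.
return w.Write(payload, nil)
}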
const (
dataDirName = "..data"
newDataDirName = "..data_tmp"
)
// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
// setPerms is an optional pointer to a function that caller can provide to set the
// permissions of the newly created files before they are published. The function is
// passed subPath which is the name of the timestamped directory that was created
// under target directory.
//
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
//
// 2. The current timestamped directory is detected by reading the data directory
// symlink
//
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
//
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update to data directory is required.
//
// 5. A new timestamped dir is created if an update is required.
//
// 6. The payload is written to the new timestamped directory.
//
// 7. Permissions are set (if setPerms is not nil) on the new timestamped directory and files.
//
// 8. A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory.
//
// 9. The new data directory symlink is renamed to the data directory; rename is atomic.
//
// 10. Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
// <target-dir>/user/labels
// <target-dir>/k8s/annotations
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/user -> ..data/user
// <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// NOTE(claudiub): We need to create these symlinks AFTER we've finished creating and
// linking everything else. On Windows, if a target does not exist, the created symlink
// will not work properly if the target ends up being a directory.
//
// 11. Old paths are removed from the user-visible portion of the target directory.
//
// 12. The previous timestamped directory is removed, if it exists.
func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(subPath string) error) error {
// (1)
cleanPayload, err := validatePayload(payload)
if err != nil {
klog.Errorf("%s: invalid payload: %v", w.logContext, err)
return err
}
// (2)
dataDirPath := filepath.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
if !os.IsNotExist(err) {
klog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
// empty oldTsDir indicates that it didn't exist
oldTsDir = ""
}
oldTsPath := filepath.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.Set[string]
shouldWrite := true
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
klog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
klog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
klog.V(4).Infof("%s: write not required for data directory %v", w.logContext, oldTsDir)
// data directory is already up to date, but we need to make sure that
// the user-visible symlinks are created.
// See https://github.com/kubernetes/kubernetes/issues/121472 for more details.
// Reset oldTsDir to empty string to avoid removing the data directory.
shouldWrite = false
oldTsDir = ""
} else {
klog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
if shouldWrite {
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
klog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
klog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
}
klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
// (7)
if setPerms != nil {
if err := setPerms(tsDirName); err != nil {
klog.Errorf("%s: error applying ownership settings: %v", w.logContext, err)
return err
}
}
// (8)
newDataDirPath := filepath.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
if err := os.RemoveAll(tsDir); err != nil {
klog.Errorf("%s: error removing new ts directory %s: %v", w.logContext, tsDir, err)
}
klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err)
return err
}
// (9)
if runtime.GOOS == "windows" {
if err := os.Remove(dataDirPath); err != nil {
klog.Errorf("%s: error removing data dir directory %s: %v", w.logContext, dataDirPath, err)
}
err = os.Symlink(tsDirName, dataDirPath)
if err := os.Remove(newDataDirPath); err != nil {
klog.Errorf("%s: error removing new data dir directory %s: %v", w.logContext, newDataDirPath, err)
}
} else {
err = os.Rename(newDataDirPath, dataDirPath)
}
if err != nil {
if err := os.Remove(newDataDirPath); err != nil && !os.IsNotExist(err) {
klog.Errorf("%s: error removing new data dir directory %s: %v", w.logContext, newDataDirPath, err)
}
if err := os.RemoveAll(tsDir); err != nil {
klog.Errorf("%s: error removing new ts directory %s: %v", w.logContext, tsDir, err)
}
klog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err)
return err
}
}
// (10)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (11)
if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
return err
}
// (12)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(oldTsPath); err != nil {
klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
}
return nil
}
// validatePayload returns an error if any path in the payload is invalid;
// otherwise it returns a copy of the payload with the paths cleaned.
func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) {
cleanPayload := make(map[string]FileProjection)
for k, content := range payload {
if err := validatePath(k); err != nil {
return nil, err
}
cleanPayload[filepath.Clean(k)] = content
}
return cleanPayload, nil
}
// validatePath validates a single path, returning an error if the path is
// invalid. paths may not:
//
// 1. be absolute
// 2. contain '..' as an element
// 3. start with '..'
// 4. contain filenames larger than 255 characters
// 5. be longer than 4096 characters
func validatePath(targetPath string) error {
// TODO: somehow unify this with the similar api validation,
// validateVolumeSourcePath; the error semantics are just different enough
// from this that it was time-prohibitive trying to find the right
// refactoring to re-use.
if targetPath == "" {
return fmt.Errorf("invalid path: must not be empty: %q", targetPath)
}
if path.IsAbs(targetPath) {
return fmt.Errorf("invalid path: must be relative path: %s", targetPath)
}
if len(targetPath) > maxPathLength {
return fmt.Errorf("invalid path: must be less than or equal to %d characters", maxPathLength)
}
items := strings.Split(targetPath, string(os.PathSeparator))
for _, item := range items {
if item == ".." {
return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
}
if len(item) > maxFileNameLength {
return fmt.Errorf("invalid path: filenames must be less than or equal to %d characters", maxFileNameLength)
}
}
if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
return fmt.Errorf("invalid path: must not start with '..': %s", targetPath)
}
return nil
}
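// exampleValidatePath is an illustrative sketch, not part of the original
// source, showing which payload keys pass validation.
func exampleValidatePath() {
_ = validatePath("user/labels") // nil: relative, no '..' elements
_ = validatePath("/etc/passwd") // error: absolute path
_ = validatePath("../escape") // error: contains '..' as an element
_ = validatePath("..data") // error: starts with '..'
}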
// shouldWritePayload returns whether the payload should be written to disk.
func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
for userVisiblePath, fileProjection := range payload {
shouldWrite, err := shouldWriteFile(filepath.Join(oldTsDir, userVisiblePath), fileProjection.Data)
if err != nil {
return false, err
}
if shouldWrite {
return true, nil
}
}
return false, nil
}
// shouldWriteFile returns whether a new version of a file should be written to disk.
func shouldWriteFile(path string, content []byte) (bool, error) {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
return true, nil
}
contentOnFs, err := os.ReadFile(path)
if err != nil {
return false, err
}
return !bytes.Equal(content, contentOnFs), nil
}
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
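//
// For example (illustrative): if the old timestamped directory contains
// "foo", "foo/bar", and "baz", and the new payload contains only "foo/bar",
// the result is {"baz"}; "foo" is kept because it is a parent of a payload path.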
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTSDir string) (sets.Set[string], error) {
paths := sets.New[string]()
visitor := func(path string, info os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, oldTSDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if relativePath == "" {
return nil
}
paths.Insert(relativePath)
return nil
}
err := filepath.Walk(oldTSDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, sets.List(paths))
newPaths := sets.New[string]()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
for subPath := file; subPath != ""; {
newPaths.Insert(subPath)
subPath, _ = filepath.Split(subPath)
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, sets.List(newPaths))
result := paths.Difference(newPaths)
klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
return result, nil
}
// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := os.MkdirTemp(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
klog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
}
// 0755 permissions are needed to allow 'group' and 'other' to recurse the
// directory tree. Do a chmod here to ensure that permissions are set correctly
// regardless of the process' umask.
err = os.Chmod(tsDir, 0755)
if err != nil {
klog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err)
return "", err
}
return tsDir, nil
}
// writePayloadToDir writes the given payload to the given directory. The
// directory must exist.
func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir string) error {
for userVisiblePath, fileProjection := range payload {
content := fileProjection.Data
mode := os.FileMode(fileProjection.Mode)
fullPath := filepath.Join(dir, userVisiblePath)
baseDir, _ := filepath.Split(fullPath)
if err := os.MkdirAll(baseDir, os.ModePerm); err != nil {
klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
return err
}
if err := os.WriteFile(fullPath, content, mode); err != nil {
klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
// Chmod is needed because os.WriteFile() ends up calling
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
if err := os.Chmod(fullPath, mode); err != nil {
klog.Errorf("%s: unable to change file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
if fileProjection.FsUser == nil {
continue
}
if err := w.chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err)
return err
}
}
return nil
}
// createUserVisibleFiles creates the relative symlinks for all the
// files configured in the payload. If the directory in a file path does not
// exist, it is created.
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks are created:
// bar -> ..data/bar
// foo -> ..data/foo
// baz -> ..data/baz
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
for userVisiblePath := range payload {
slashpos := strings.Index(userVisiblePath, string(os.PathSeparator))
if slashpos == -1 {
slashpos = len(userVisiblePath)
}
linkname := userVisiblePath[:slashpos]
_, err := os.Readlink(filepath.Join(w.targetDir, linkname))
if err != nil && os.IsNotExist(err) {
// The link into the data directory for this path doesn't exist; create it
visibleFile := filepath.Join(w.targetDir, linkname)
dataDirFile := filepath.Join(dataDirName, linkname)
err = os.Symlink(dataDirFile, visibleFile)
if err != nil {
return err
}
}
}
return nil
}
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.Set[string]) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {
// only remove symlinks from the volume root directory (i.e. items that don't contain '/')
if strings.Contains(p, ps) {
continue
}
if err := os.Remove(filepath.Join(w.targetDir, p)); err != nil {
klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
return lasterr
}
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import "os"
// chown changes the numeric uid and gid of the named file.
func (w *AtomicWriter) chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"crypto/sha1"
"encoding/hex"
)
// This file is a common placeholder for volume limit utility constants
// shared between the volume package and the scheduler
const (
// CSIAttachLimitPrefix defines prefix used for CSI volumes
CSIAttachLimitPrefix = "attachable-volumes-csi-"
// ResourceNameLengthLimit stores maximum allowed Length for a ResourceName
ResourceNameLengthLimit = 63
)
// GetCSIAttachLimitKey returns limit key used for CSI volumes
func GetCSIAttachLimitKey(driverName string) string {
csiPrefixLength := len(CSIAttachLimitPrefix)
totalKeyLength := csiPrefixLength + len(driverName)
if totalKeyLength >= ResourceNameLengthLimit {
charsFromDriverName := driverName[:23]
hash := sha1.New()
hash.Write([]byte(driverName))
hashed := hex.EncodeToString(hash.Sum(nil))
hashed = hashed[:16]
return CSIAttachLimitPrefix + charsFromDriverName + hashed
}
return CSIAttachLimitPrefix + driverName
}
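// exampleAttachLimitKeys is an illustrative sketch, not part of the original
// source; the driver names are hypothetical.
func exampleAttachLimitKeys() (string, string) {
// Short name: the key stays under ResourceNameLengthLimit (63) and the
// driver name is used as-is.
short := GetCSIAttachLimitKey("ebs.csi.aws.com") // "attachable-volumes-csi-ebs.csi.aws.com"
// Long name: the key would exceed the limit, so only the first 23
// characters of the driver name are kept, followed by a 16-character
// SHA-1 hex suffix derived from the full name.
long := GetCSIAttachLimitKey("very.long.csi.driver.name.example.storage.io")
return short, long
}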
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// DeviceUtil is a util for common device methods
type DeviceUtil interface {
FindMultipathDeviceForDevice(disk string) string
FindSlaveDevicesOnMultipath(disk string) []string
GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error)
FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error)
}
type deviceHandler struct {
getIo IoUtil
}
// NewDeviceHandler creates a new DeviceUtil implementation backed by the given IoUtil.
func NewDeviceHandler(io IoUtil) DeviceUtil {
return &deviceHandler{getIo: io}
}
//go:build linux
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"k8s.io/klog/v2"
)
// FindMultipathDeviceForDevice, given a device name like /dev/sdx, finds the devicemapper parent.
// If called with a device that is already a devicemapper device, it returns the device unchanged.
func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string {
if strings.HasPrefix(device, "/dev/dm-") {
return device
}
io := handler.getIo
disk, err := findDeviceForPath(device, io)
if err != nil {
return ""
}
sysPath := "/sys/block/"
if dirs, err := io.ReadDir(sysPath); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
if _, err1 := io.Lstat(sysPath + name + "/slaves/" + disk); err1 == nil {
return "/dev/" + name
}
}
}
}
return ""
}
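// exampleMultipathLookup is an illustrative sketch, not part of the original
// source. It assumes NewIOHandler (this package's production IoUtil
// constructor) and a hypothetical /dev/sdb enslaved to a multipath device.
func exampleMultipathLookup() string {
handler := NewDeviceHandler(NewIOHandler())
// Returns e.g. "/dev/dm-1" if /dev/sdb is part of a multipath device,
// or "" if it is not.
return handler.FindMultipathDeviceForDevice("/dev/sdb")
}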
// findDeviceForPath finds the underlying disk for a linked path such as
// /dev/disk/by-path/XXXX or /dev/mapper/XXXX. It returns sdX or hdX etc.;
// if /dev/sdX is passed in, sdX is returned.
func findDeviceForPath(path string, io IoUtil) (string, error) {
devicePath, err := io.EvalSymlinks(path)
if err != nil {
return "", err
}
// if path /dev/hdX split into "", "dev", "hdX" then we will
// return just the last part
parts := strings.Split(devicePath, "/")
if len(parts) == 3 && strings.HasPrefix(parts[1], "dev") {
return parts[2], nil
}
return "", errors.New("Illegal path for device " + devicePath)
}
// FindSlaveDevicesOnMultipath given a dm name like /dev/dm-1, find all devices
// which are managed by the devicemapper dm-1.
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string {
var devices []string
io := handler.getIo
// Split path /dev/dm-1 into "", "dev", "dm-1"
parts := strings.Split(dm, "/")
if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") {
return devices
}
disk := parts[2]
slavesPath := filepath.Join("/sys/block/", disk, "/slaves/")
if files, err := io.ReadDir(slavesPath); err == nil {
for _, f := range files {
devices = append(devices, filepath.Join("/dev/", f.Name()))
}
}
return devices
}
// GetISCSIPortalHostMapForTarget given a target iqn, find all the scsi hosts logged into
// that target. Returns a map of iSCSI portals (string) to SCSI host numbers (integers).
//
// For example: {
// "192.168.30.7:3260": 2,
// "192.168.30.8:3260": 3,
// }
func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) {
portalHostMap := make(map[string]int)
io := handler.getIo
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
if os.IsNotExist(err) {
return portalHostMap, nil
}
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
klog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
sessionPath := devicePath + "/" + sessionName
// Read the target name for the iSCSI session
targetNamePath := sessionPath + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
klog.Infof("Failed to process session %s, assuming this session is unavailable: %s", sessionName, err)
continue
}
// Ignore hosts that don't match the target we are looking for.
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// Iterate over the children of the iSCSI session looking
// for the iSCSI connection.
dirs2, err := io.ReadDir(sessionPath)
if err != nil {
klog.Infof("Failed to process session %s, assuming this session is unavailable: %s", sessionName, err)
continue
}
for _, dir2 := range dirs2 {
// Skip over files that aren't the connection
// Connections are of the format "connection%d:%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
dirName := dir2.Name()
if !strings.HasPrefix(dirName, "connection") {
continue
}
connectionPath := sessionPath + "/" + dirName + "/iscsi_connection/" + dirName
// Read the current and persistent portal information for the connection.
addrPath := connectionPath + "/address"
addr, err := io.ReadFile(addrPath)
if err != nil {
klog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err)
continue
}
portPath := connectionPath + "/port"
port, err := io.ReadFile(portPath)
if err != nil {
klog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err)
continue
}
persistentAddrPath := connectionPath + "/persistent_address"
persistentAddr, err := io.ReadFile(persistentAddrPath)
if err != nil {
klog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err)
continue
}
persistentPortPath := connectionPath + "/persistent_port"
persistentPort, err := io.ReadFile(persistentPortPath)
if err != nil {
klog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err)
continue
}
// Add entries to the map for both the current and persistent portals
// pointing to the SCSI host for those connections
// JoinHostPort will add `[]` around IPv6 addresses.
portal := net.JoinHostPort(strings.TrimSpace(string(addr)), strings.TrimSpace(string(port)))
portalHostMap[portal] = hostNumber
persistentPortal := net.JoinHostPort(strings.TrimSpace(string(persistentAddr)), strings.TrimSpace(string(persistentPort)))
portalHostMap[persistentPortal] = hostNumber
}
}
}
return portalHostMap, nil
}
// FindDevicesForISCSILun, given an iqn and a LUN number, finds all the devices
// corresponding to that LUN.
func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) {
devices := make([]string, 0)
io := handler.getIo
// Iterate over all the iSCSI hosts in sysfs
sysPath := "/sys/class/iscsi_host"
hostDirs, err := io.ReadDir(sysPath)
if err != nil {
return nil, err
}
for _, hostDir := range hostDirs {
// iSCSI hosts are always of the format "host%d"
// See drivers/scsi/hosts.c in Linux
hostName := hostDir.Name()
if !strings.HasPrefix(hostName, "host") {
continue
}
hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host"))
if err != nil {
klog.Errorf("Could not get number from iSCSI host: %s", hostName)
continue
}
// Iterate over the children of the iscsi_host device
// We are looking for the associated session
devicePath := sysPath + "/" + hostName + "/device"
deviceDirs, err := io.ReadDir(devicePath)
if err != nil {
return nil, err
}
for _, deviceDir := range deviceDirs {
// Skip over files that aren't the session
// Sessions are of the format "session%u"
// See drivers/scsi/scsi_transport_iscsi.c in Linux
sessionName := deviceDir.Name()
if !strings.HasPrefix(sessionName, "session") {
continue
}
// Read the target name for the iSCSI session
targetNamePath := devicePath + "/" + sessionName + "/iscsi_session/" + sessionName + "/targetname"
targetName, err := io.ReadFile(targetNamePath)
if err != nil {
return nil, err
}
// Only if the session matches the target we were looking for,
// add it to the map
if strings.TrimSpace(string(targetName)) != targetIqn {
continue
}
// The list of block devices on the scsi bus will be in a
// directory called "target%d:%d:%d".
// See drivers/scsi/scsi_scan.c in Linux
// We assume the channel/bus and device/controller are always zero for iSCSI
targetPath := devicePath + "/" + sessionName + fmt.Sprintf("/target%d:0:0", hostNumber)
// The block device for a given lun will be "%d:%d:%d:%d" --
// host:channel:bus:LUN
blockDevicePath := targetPath + fmt.Sprintf("/%d:0:0:%d", hostNumber, lun)
// If the LUN doesn't exist on this bus, continue on
_, err = io.Lstat(blockDevicePath)
if err != nil {
continue
}
// Read the block directory, there should only be one child --
// the block device "sd*"
path := blockDevicePath + "/block"
dirs, err := io.ReadDir(path)
if err != nil {
return nil, err
}
if len(dirs) > 0 {
devices = append(devices, dirs[0].Name())
}
}
}
return devices, nil
}
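// exampleISCSILookup is an illustrative sketch, not part of the original
// source; the IQN is hypothetical.
func exampleISCSILookup(handler DeviceUtil) ([]string, error) {
iqn := "iqn.2016-01.com.example:target"
// Map each logged-in portal ("ip:port") to its SCSI host number.
if _, err := handler.GetISCSIPortalHostMapForTarget(iqn); err != nil {
return nil, err
}
// List the block devices (e.g. "sdb") backing LUN 0 of the target.
return handler.FindDevicesForISCSILun(iqn, 0)
}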
//go:build linux || darwin
// +build linux darwin
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"golang.org/x/sys/unix"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
"k8s.io/kubernetes/pkg/volume/util/fsquota"
)
type UsageInfo struct {
Bytes int64
Inodes int64
}
// Info returns (available bytes, byte capacity, byte usage, total inodes,
// inodes free, inodes used, error) for the filesystem that path resides upon.
func Info(path string) (int64, int64, int64, int64, int64, int64, error) {
statfs := &unix.Statfs_t{}
err := unix.Statfs(path, statfs)
if err != nil {
return 0, 0, 0, 0, 0, 0, err
}
// Available is blocks available * fragment size
available := int64(statfs.Bavail) * int64(statfs.Bsize)
// Capacity is total block count * fragment size
capacity := int64(statfs.Blocks) * int64(statfs.Bsize)
// Usage is blocks being used * fragment size (aka block size).
usage := (int64(statfs.Blocks) - int64(statfs.Bfree)) * int64(statfs.Bsize)
inodes := int64(statfs.Files)
inodesFree := int64(statfs.Ffree)
inodesUsed := inodes - inodesFree
return available, capacity, usage, inodes, inodesFree, inodesUsed, nil
}
// DiskUsage calculates the number of inodes and disk usage for a given directory
func DiskUsage(path string) (UsageInfo, error) {
var usage UsageInfo
if path == "" {
return usage, fmt.Errorf("invalid directory")
}
// First check whether the quota system knows about this directory
// A nil quantity or error means that the path does not support quotas
// or xfs_quota tool is missing and we should use other mechanisms.
startTime := time.Now()
consumption, _ := fsquota.GetConsumption(path)
if consumption != nil {
usage.Bytes = consumption.Value()
defer servermetrics.CollectVolumeStatCalDuration("fsquota", startTime)
} else {
defer servermetrics.CollectVolumeStatCalDuration("du", startTime)
}
inodes, _ := fsquota.GetInodes(path)
if inodes != nil {
usage.Inodes = inodes.Value()
}
if inodes != nil && consumption != nil {
return usage, nil
}
topLevelStat := &unix.Stat_t{}
err := unix.Stat(path, topLevelStat)
if err != nil {
return usage, err
}
// dedupedInodes stores inodes that could be duplicates (nlink > 1)
dedupedInodes := make(map[uint64]struct{})
err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
// ignore files that have been deleted after directory was read
if os.IsNotExist(err) {
return nil
}
if err != nil {
return fmt.Errorf("unable to count inodes for %s: %s", path, err)
}
// according to the docs, Sys can be nil
if info.Sys() == nil {
return fmt.Errorf("fileinfo Sys is nil")
}
s, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("unsupported fileinfo; could not convert to stat_t")
}
if s.Dev != topLevelStat.Dev {
// don't descend into directories on other devices
return filepath.SkipDir
}
// Dedupe hardlinks
if s.Nlink > 1 {
if _, ok := dedupedInodes[s.Ino]; !ok {
dedupedInodes[s.Ino] = struct{}{}
} else {
return nil
}
}
if consumption == nil {
usage.Bytes += int64(s.Blocks) * int64(512) // blocksize in bytes
}
if inodes == nil {
usage.Inodes++
}
return nil
})
return usage, err
}
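// exampleFsStats is an illustrative sketch, not part of the original source;
// the paths are hypothetical.
func exampleFsStats() {
available, capacity, usage, inodes, inodesFree, inodesUsed, err := Info("/var/lib/kubelet")
if err != nil {
return
}
fmt.Printf("avail=%d cap=%d used=%d inodes=%d free=%d inodesUsed=%d\n",
available, capacity, usage, inodes, inodesFree, inodesUsed)
// DiskUsage prefers quota-based accounting and falls back to walking the tree.
if du, err := DiskUsage("/var/lib/kubelet/pods"); err == nil {
fmt.Printf("bytes=%d inodes=%d\n", du.Bytes, du.Inodes)
}
}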
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"k8s.io/klog/v2"
)
var quotaCmd string
var quotaCmdInitialized bool
var quotaCmdLock sync.RWMutex
// If we later get a filesystem that uses project quota semantics other than
// XFS, we'll need to change this.
// Higher levels don't need to know what's inside
type linuxFilesystemType struct {
name string
typeMagic int64 // Filesystem magic number, per statfs(2)
maxQuota int64
allowEmptyOutput bool // Accept empty output from "quota" command
}
const (
bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
)
var (
linuxSupportedFilesystems = []linuxFilesystemType{
{
name: "XFS",
typeMagic: 0x58465342,
maxQuota: 1<<(bitsPerWord-1) - 1,
allowEmptyOutput: true, // XFS filesystems report nothing if a quota is not present
}, {
name: "ext4fs",
typeMagic: 0xef53,
maxQuota: (1<<(bitsPerWord-1) - 1) & (1<<58 - 1),
allowEmptyOutput: false, // ext4 filesystems always report something even if a quota is not present
},
}
)
// VolumeProvider supplies a quota applier to the generic code.
type VolumeProvider struct {
}
var quotaCmds = []string{"/sbin/xfs_quota",
"/usr/sbin/xfs_quota",
"/bin/xfs_quota"}
var quotaParseRegexp = regexp.MustCompilePOSIX("^[^ \t]*[ \t]*([0-9]+)")
var lsattrCmd = "/usr/bin/lsattr"
var lsattrParseRegexp = regexp.MustCompilePOSIX("^ *([0-9]+) [^ ]+ (.*)$")
// GetQuotaApplier returns a quota applier if the backing device supports
// quotas that can be applied to directories, and nil otherwise.
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier {
for _, fsType := range linuxSupportedFilesystems {
if isFilesystemOfType(mountpoint, backingDev, fsType.typeMagic) {
return linuxVolumeQuotaApplier{mountpoint: mountpoint,
maxQuota: fsType.maxQuota,
allowEmptyOutput: fsType.allowEmptyOutput,
}
}
}
return nil
}
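// exampleGetQuotaApplier is an illustrative sketch, not part of the original
// source; the mountpoint and backing device are hypothetical.
func exampleGetQuotaApplier() LinuxVolumeQuotaApplier {
var provider VolumeProvider
// The result is non-nil only if the mountpoint is a supported filesystem
// (XFS or ext4) with quota accounting enabled.
return provider.GetQuotaApplier("/var/lib/kubelet", "/dev/sda1")
}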
type linuxVolumeQuotaApplier struct {
mountpoint string
maxQuota int64
allowEmptyOutput bool
}
func getXFSQuotaCmd() (string, error) {
quotaCmdLock.Lock()
defer quotaCmdLock.Unlock()
if quotaCmdInitialized {
return quotaCmd, nil
}
for _, program := range quotaCmds {
fileinfo, err := os.Stat(program)
if err == nil && ((fileinfo.Mode().Perm() & (1 << 6)) != 0) {
klog.V(3).Infof("Found xfs_quota program %s", program)
quotaCmd = program
quotaCmdInitialized = true
return quotaCmd, nil
}
}
quotaCmdInitialized = true
return "", fmt.Errorf("no xfs_quota program found")
}
func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string, error) {
quotaCmd, err := getXFSQuotaCmd()
if err != nil {
return "", err
}
// We're using numeric project IDs directly; no need to scan
// /etc/projects or /etc/projid
klog.V(4).Infof("runXFSQuotaCommand %s -t %s -P/dev/null -D/dev/null -x -f %s -c %s", quotaCmd, mountsFile, mountpoint, command)
cmd := exec.Command(quotaCmd, "-t", mountsFile, "-P/dev/null", "-D/dev/null", "-x", "-f", mountpoint, "-c", command)
data, err := cmd.Output()
if err != nil {
return "", err
}
klog.V(4).Infof("runXFSQuotaCommand output %q", string(data))
return string(data), nil
}
// Extract the mountpoint we care about into a temporary mounts file so that xfs_quota does
// not attempt to scan every mount on the filesystem, which could hang if e.g.
// a stuck NFS mount is present.
// See https://bugzilla.redhat.com/show_bug.cgi?id=237120 for an example
// of the problem that could be caused if this were to happen.
func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
tmpMounts, err := os.CreateTemp("", "mounts")
if err != nil {
return "", fmt.Errorf("cannot create temporary mount file: %v", err)
}
tmpMountsFileName := tmpMounts.Name()
defer tmpMounts.Close()
defer os.Remove(tmpMountsFileName)
mounts, err := os.Open(MountsFile)
if err != nil {
return "", fmt.Errorf("cannot open mounts file %s: %v", MountsFile, err)
}
defer mounts.Close()
scanner := bufio.NewScanner(mounts)
for scanner.Scan() {
match := MountParseRegexp.FindStringSubmatch(scanner.Text())
if match != nil {
mount := match[2]
if mount == mountpoint {
if _, err := tmpMounts.WriteString(fmt.Sprintf("%s\n", scanner.Text())); err != nil {
return "", fmt.Errorf("cannot write temporary mounts file: %v", err)
}
if err := tmpMounts.Sync(); err != nil {
return "", fmt.Errorf("cannot sync temporary mounts file: %v", err)
}
return doRunXFSQuotaCommand(mountpoint, tmpMountsFileName, command)
}
}
}
return "", fmt.Errorf("cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
}
// SupportsQuotas determines whether the filesystem supports quotas.
func SupportsQuotas(mountpoint string, qType QuotaType) (bool, error) {
data, err := runXFSQuotaCommand(mountpoint, "state -p")
if err != nil {
return false, err
}
if qType == FSQuotaEnforcing {
return strings.Contains(data, "Enforcement: ON"), nil
}
return strings.Contains(data, "Accounting: ON"), nil
}
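// exampleSupportsQuotas is an illustrative sketch, not part of the original
// source; the mountpoint is hypothetical.
func exampleSupportsQuotas() bool {
enforcing, err := SupportsQuotas("/var/lib/kubelet", FSQuotaEnforcing)
return err == nil && enforcing
}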
func isFilesystemOfType(mountpoint string, backingDev string, typeMagic int64) bool {
var buf syscall.Statfs_t
err := syscall.Statfs(mountpoint, &buf)
if err != nil {
klog.Warningf("Warning: Unable to statfs %s: %v", mountpoint, err)
return false
}
if int64(buf.Type) != typeMagic {
return false
}
if answer, _ := SupportsQuotas(mountpoint, FSQuotaAccounting); answer {
return true
}
return false
}
// GetQuotaOnDir retrieves the quota ID (if any) associated with the specified directory
// If we can't make system calls, all we can say is that we don't know whether
// it has a quota, and higher levels have to make the call.
func (v linuxVolumeQuotaApplier) GetQuotaOnDir(path string) (QuotaID, error) {
cmd := exec.Command(lsattrCmd, "-pd", path)
data, err := cmd.Output()
if err != nil {
return BadQuotaID, fmt.Errorf("cannot run lsattr: %v", err)
}
match := lsattrParseRegexp.FindStringSubmatch(string(data))
if match == nil {
return BadQuotaID, fmt.Errorf("unable to parse lsattr -pd %s output %s", path, string(data))
}
if match[2] != path {
return BadQuotaID, fmt.Errorf("mismatch between supplied and returned path (%s != %s)", path, match[2])
}
projid, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return BadQuotaID, fmt.Errorf("unable to parse project ID from %s (%v)", match[1], err)
}
return QuotaID(projid), nil
}
// SetQuotaOnDir applies a quota to the specified directory under the specified mountpoint.
func (v linuxVolumeQuotaApplier) SetQuotaOnDir(path string, id QuotaID, bytes int64) error {
if bytes < 0 || bytes > v.maxQuota {
bytes = v.maxQuota
}
_, err := runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("limit -p bhard=%v bsoft=%v %v", bytes, bytes, id))
if err != nil {
return err
}
_, err = runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("project -s -p %s %v", path, id))
return err
}
func getQuantity(mountpoint string, id QuotaID, xfsQuotaArg string, multiplier int64, allowEmptyOutput bool) (int64, error) {
data, err := runXFSQuotaCommand(mountpoint, fmt.Sprintf("quota -p -N -n -v %s %v", xfsQuotaArg, id))
if err != nil {
return 0, fmt.Errorf("unable to run xfs_quota: %v", err)
}
if data == "" && allowEmptyOutput {
return 0, nil
}
match := quotaParseRegexp.FindStringSubmatch(data)
if match == nil {
return 0, fmt.Errorf("unable to parse quota output '%s'", data)
}
size, err := strconv.ParseInt(match[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("unable to parse data size '%s' from '%s': %v", match[1], data, err)
}
klog.V(4).Infof("getQuantity %s %d %s %d => %d %v", mountpoint, id, xfsQuotaArg, multiplier, size, err)
return size * multiplier, nil
}
// GetConsumption returns the consumption in bytes if available via quotas
func (v linuxVolumeQuotaApplier) GetConsumption(_ string, id QuotaID) (int64, error) {
return getQuantity(v.mountpoint, id, "-b", 1024, v.allowEmptyOutput)
}
// GetInodes returns the inodes in use if available via quotas
func (v linuxVolumeQuotaApplier) GetInodes(_ string, id QuotaID) (int64, error) {
return getQuantity(v.mountpoint, id, "-i", 1, v.allowEmptyOutput)
}
// QuotaIDIsInUse checks whether the specified quota ID is in use on the specified
// filesystem
func (v linuxVolumeQuotaApplier) QuotaIDIsInUse(id QuotaID) (bool, error) {
bytes, err := v.GetConsumption(v.mountpoint, id)
if err != nil {
return false, err
}
if bytes > 0 {
return true, nil
}
inodes, err := v.GetInodes(v.mountpoint, id)
return inodes > 0, err
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fsquota
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"golang.org/x/sys/unix"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)
var projectsFile = "/etc/projects"
var projidFile = "/etc/projid"
var projectsParseRegexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
var projidParseRegexp = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")
var quotaIDLock sync.RWMutex
const maxUnusedQuotasToSearch = 128 // Don't go into an infinite loop searching for an unused quota
type projectType struct {
isValid bool // False if we need to remove this line
id common.QuotaID
data string // Project name (projid) or directory (projects)
line string
}
type projectsList struct {
projects []projectType
projid []projectType
}
func projFilesAreOK() error {
if sf, err := os.Lstat(projectsFile); err != nil || sf.Mode().IsRegular() {
if sf, err := os.Lstat(projidFile); err != nil || sf.Mode().IsRegular() {
return nil
}
return fmt.Errorf("%s exists but is not a plain file, cannot continue", projidFile)
}
return fmt.Errorf("%s exists but is not a plain file, cannot continue", projectsFile)
}
func lockFile(file *os.File) error {
return unix.Flock(int(file.Fd()), unix.LOCK_EX)
}
func unlockFile(file *os.File) error {
return unix.Flock(int(file.Fd()), unix.LOCK_UN)
}
// openAndLockProjectFiles opens /etc/projects and /etc/projid locked.
// Creates them if they don't exist
func openAndLockProjectFiles() (*os.File, *os.File, error) {
// Make sure neither project-related file is a symlink!
if err := projFilesAreOK(); err != nil {
return nil, nil, fmt.Errorf("system project files failed verification: %v", err)
}
// We don't actually modify the original files; we create temporaries and
// move them over the originals
fProjects, err := os.OpenFile(projectsFile, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
err = fmt.Errorf("unable to open %s: %v", projectsFile, err)
return nil, nil, err
}
fProjid, err := os.OpenFile(projidFile, os.O_RDONLY|os.O_CREATE, 0644)
if err == nil {
// Check once more, to ensure nothing got changed out from under us
if err = projFilesAreOK(); err == nil {
err = lockFile(fProjects)
if err == nil {
err = lockFile(fProjid)
if err == nil {
return fProjects, fProjid, nil
}
// Nothing useful we can do if we get an error here
err = fmt.Errorf("unable to lock %s: %v", projidFile, err)
unlockFile(fProjects)
} else {
err = fmt.Errorf("unable to lock %s: %v", projectsFile, err)
}
} else {
err = fmt.Errorf("system project files failed re-verification: %v", err)
}
fProjid.Close()
} else {
err = fmt.Errorf("unable to open %s: %v", projidFile, err)
}
fProjects.Close()
return nil, nil, err
}
func closeProjectFiles(fProjects *os.File, fProjid *os.File) error {
// Nothing useful we can do if either of these fail,
// but we have to close (and thereby unlock) the files anyway.
var err error
var err1 error
if fProjid != nil {
err = fProjid.Close()
}
if fProjects != nil {
err1 = fProjects.Close()
}
if err == nil {
return err1
}
return err
}
func parseProject(l string) projectType {
if match := projectsParseRegexp.FindStringSubmatch(l); match != nil {
i, err := strconv.Atoi(match[1])
if err == nil {
return projectType{true, common.QuotaID(i), match[2], l}
}
}
return projectType{true, common.BadQuotaID, "", l}
}
func parseProjid(l string) projectType {
if match := projidParseRegexp.FindStringSubmatch(l); match != nil {
i, err := strconv.Atoi(match[2])
if err == nil {
return projectType{true, common.QuotaID(i), match[1], l}
}
}
return projectType{true, common.BadQuotaID, "", l}
}
func parseProjFile(f *os.File, parser func(l string) projectType) []projectType {
var answer []projectType
scanner := bufio.NewScanner(f)
for scanner.Scan() {
answer = append(answer, parser(scanner.Text()))
}
return answer
}
func readProjectFiles(projects *os.File, projid *os.File) projectsList {
return projectsList{parseProjFile(projects, parseProject), parseProjFile(projid, parseProjid)}
}
// findAvailableQuota finds the next available quota ID starting from common.FirstQuota.
// It returns an error if QuotaIDIsInUse fails while checking whether an ID is in use,
// and it searches at most maxUnusedQuotasToSearch (128) unused IDs before giving up.
func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
unusedQuotasSearched := 0
for id := common.FirstQuota; true; id++ {
if _, ok := idMap[id]; !ok {
isInUse, err := getApplier(path).QuotaIDIsInUse(id)
if err != nil {
return common.BadQuotaID, err
} else if !isInUse {
return id, nil
}
unusedQuotasSearched++
if unusedQuotasSearched > maxUnusedQuotasToSearch {
break
}
}
}
return common.BadQuotaID, fmt.Errorf("cannot find available quota ID")
}
func addDirToProject(path string, id common.QuotaID, list *projectsList) (common.QuotaID, bool, error) {
idMap := make(map[common.QuotaID]bool)
for _, project := range list.projects {
if project.data == path {
if id != common.BadQuotaID && id != project.id {
return common.BadQuotaID, false, fmt.Errorf("attempt to reassign project ID for %s", path)
}
// Trying to reassign a directory to the project it's
// already in. Maybe this should be an error, but for
// now treat it as an idempotent operation
return project.id, false, nil
}
idMap[project.id] = true
}
var needToAddProjid = true
for _, projid := range list.projid {
idMap[projid.id] = true
if projid.id == id && id != common.BadQuotaID {
needToAddProjid = false
}
}
var err error
if id == common.BadQuotaID {
id, err = findAvailableQuota(path, idMap)
if err != nil {
return common.BadQuotaID, false, err
}
needToAddProjid = true
}
if needToAddProjid {
name := fmt.Sprintf("volume%v", id)
line := fmt.Sprintf("%s:%v", name, id)
list.projid = append(list.projid, projectType{true, id, name, line})
}
line := fmt.Sprintf("%v:%s", id, path)
list.projects = append(list.projects, projectType{true, id, path, line})
return id, needToAddProjid, nil
}
func removeDirFromProject(path string, id common.QuotaID, list *projectsList) (bool, error) {
if id == common.BadQuotaID {
return false, fmt.Errorf("attempt to remove invalid quota ID from %s", path)
}
foundAt := -1
countByID := make(map[common.QuotaID]int)
for i, project := range list.projects {
if project.data == path {
if id != project.id {
return false, fmt.Errorf("attempting to remove quota ID %v from path %s, but expecting ID %v", id, path, project.id)
} else if foundAt != -1 {
return false, fmt.Errorf("found multiple quota IDs for path %s", path)
}
// Faster and easier than deleting an element
list.projects[i].isValid = false
foundAt = i
}
countByID[project.id]++
}
if foundAt == -1 {
return false, fmt.Errorf("cannot find quota associated with path %s", path)
}
if countByID[id] <= 1 {
// Removing the last entry means that we're no longer using
// the quota ID, so remove that as well
for i, projid := range list.projid {
if projid.id == id {
list.projid[i].isValid = false
}
}
return true, nil
}
return false, nil
}
func writeProjectFile(base *os.File, projects []projectType) (string, error) {
oname := base.Name()
stat, err := base.Stat()
if err != nil {
return "", err
}
mode := stat.Mode() & os.ModePerm
f, err := os.CreateTemp(filepath.Dir(oname), filepath.Base(oname))
if err != nil {
return "", err
}
filename := f.Name()
if err := os.Chmod(filename, mode); err != nil {
return "", err
}
for _, proj := range projects {
if proj.isValid {
if _, err := f.WriteString(fmt.Sprintf("%s\n", proj.line)); err != nil {
f.Close()
os.Remove(filename)
return "", err
}
}
}
if err := f.Close(); err != nil {
os.Remove(filename)
return "", err
}
return filename, nil
}
func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, list projectsList) error {
tmpProjects, err := writeProjectFile(fProjects, list.projects)
if err == nil {
// Ensure that both files are written before we try to rename either.
if writeProjid {
tmpProjid, err := writeProjectFile(fProjid, list.projid)
if err == nil {
err = os.Rename(tmpProjid, fProjid.Name())
if err != nil {
os.Remove(tmpProjid)
}
}
}
if err == nil {
err = os.Rename(tmpProjects, fProjects.Name())
if err == nil {
return nil
}
// We're in a bit of trouble here; at this
// point we've successfully renamed tmpProjid
// to the real thing, but renaming tmpProjects
// to the real file failed. There's not much we
// can do in this position. Anything we could do
// to try to undo it would itself be likely to fail.
}
os.Remove(tmpProjects)
}
return fmt.Errorf("unable to write project files: %v", err)
}
// createProjectID assigns a project ID to the directory. If ID is
// common.BadQuotaID, a new project ID is generated, unless the directory
// already belongs to a project.
func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
quotaIDLock.Lock()
defer quotaIDLock.Unlock()
fProjects, fProjid, err := openAndLockProjectFiles()
if err == nil {
defer closeProjectFiles(fProjects, fProjid)
list := readProjectFiles(fProjects, fProjid)
var writeProjid bool
ID, writeProjid, err = addDirToProject(path, ID, &list)
if err == nil && ID != common.BadQuotaID {
if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
return ID, nil
}
}
}
return common.BadQuotaID, fmt.Errorf("createProjectID %s %v failed %v", path, ID, err)
}
func removeProjectID(path string, ID common.QuotaID) error {
if ID == common.BadQuotaID {
return fmt.Errorf("attempting to remove invalid quota ID %v", ID)
}
quotaIDLock.Lock()
defer quotaIDLock.Unlock()
fProjects, fProjid, err := openAndLockProjectFiles()
if err == nil {
defer closeProjectFiles(fProjects, fProjid)
list := readProjectFiles(fProjects, fProjid)
var writeProjid bool
writeProjid, err = removeDirFromProject(path, ID, &list)
if err == nil {
if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
return nil
}
}
}
return fmt.Errorf("removeProjectID %s %v failed %v", path, ID, err)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fsquota
import (
"k8s.io/mount-utils"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)
// Interface -- quota interface
type Interface interface {
// GetQuotaOnDir gets the quota ID (if any) that applies to
// this directory
GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error)
// Does the path provided support quotas, and if so, what types
SupportsQuotas(m mount.Interface, path string) (bool, error)
// Assign a quota (picked by the quota mechanism) to a path,
// and return it.
AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error
// Get the quota-based storage consumption for the path
GetConsumption(path string) (*resource.Quantity, error)
// Get the quota-based inode consumption for the path
GetInodes(path string) (*resource.Quantity, error)
// Remove the quota from a path
// Implementations may assume that any data covered by the
// quota has already been removed.
ClearQuota(m mount.Interface, path string) error
}
func enabledQuotasForMonitoring() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fsquota
import (
"bufio"
"fmt"
"os"
"path/filepath"
"sync"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)
// Pod -> External Pod UID
var podUidMap = make(map[types.UID]types.UID)
// Pod -> ID
var podQuotaMap = make(map[types.UID]common.QuotaID)
// Dir -> ID (for convenience)
var dirQuotaMap = make(map[string]common.QuotaID)
// ID -> pod
var quotaPodMap = make(map[common.QuotaID]types.UID)
// Directory -> pod
var dirPodMap = make(map[string]types.UID)
// Backing device -> applier
// This is *not* cleaned up; its size will be bounded.
var devApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)
// Directory -> applier
var dirApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)
var dirApplierLock sync.RWMutex
// Pod -> refcount
var podDirCountMap = make(map[types.UID]int)
// ID -> size
var quotaSizeMap = make(map[common.QuotaID]int64)
var quotaLock sync.RWMutex
var supportsQuotasMap = make(map[string]bool)
var supportsQuotasLock sync.RWMutex
// Directory -> backingDev
var backingDevMap = make(map[string]string)
var backingDevLock sync.RWMutex
var mountpointMap = make(map[string]string)
var mountpointLock sync.RWMutex
var providers = []common.LinuxVolumeQuotaProvider{
&common.VolumeProvider{},
}
// Separate the innards for ease of testing
func detectBackingDevInternal(mountpoint string, mounts string) (string, error) {
file, err := os.Open(mounts)
if err != nil {
return "", err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := common.MountParseRegexp.FindStringSubmatch(scanner.Text())
if match != nil {
device := match[1]
mount := match[2]
if mount == mountpoint {
return device, nil
}
}
}
return "", fmt.Errorf("couldn't find backing device for %s", mountpoint)
}
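// For illustration only: given a hypothetical mounts-file line such as
//
//	/dev/sdb1 /var/lib/kubelet xfs rw,noatime 0 0
//
// common.MountParseRegexp captures "/dev/sdb1" as the device (match[1]) and
// "/var/lib/kubelet" as the mount point (match[2]).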
// detectBackingDev assumes that the mount point provided is valid
func detectBackingDev(_ mount.Interface, mountpoint string) (string, error) {
return detectBackingDevInternal(mountpoint, common.MountsFile)
}
func clearBackingDev(path string) {
backingDevLock.Lock()
defer backingDevLock.Unlock()
delete(backingDevMap, path)
}
// Assumes that the path has been fully canonicalized
// Breaking this up helps with testing
func detectMountpointInternal(m mount.Interface, path string) (string, error) {
for path != "" && path != "/" {
// per k8s.io/mount-utils/mount_linux this detects all but
// a bind mount from one part of a mount to another.
// For our purposes that's fine; we simply want the "true"
// mount point
//
// IsNotMountPoint proved much more troublesome; it actually
// scans the mounts, and when a lot of mount/unmount
// activity takes place, it is not able to get a consistent
// view of /proc/self/mounts, causing it to time out and
// report incorrectly.
isNotMount, err := m.IsLikelyNotMountPoint(path)
if err != nil {
return "/", err
}
if !isNotMount {
return path, nil
}
path = filepath.Dir(path)
}
return "/", nil
}
func detectMountpoint(m mount.Interface, path string) (string, error) {
xpath, err := filepath.Abs(path)
if err != nil {
return "/", err
}
xpath, err = filepath.EvalSymlinks(xpath)
if err != nil {
return "/", err
}
if xpath, err = detectMountpointInternal(m, xpath); err == nil {
return xpath, nil
}
return "/", err
}
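// For illustration only, with hypothetical paths: for
// /var/lib/kubelet/pods/p/volumes/v on a filesystem mounted at
// /var/lib/kubelet, detectMountpointInternal tests each ancestor in turn and
// returns /var/lib/kubelet, the first ancestor that IsLikelyNotMountPoint
// reports as a mount point.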
func clearMountpoint(path string) {
mountpointLock.Lock()
defer mountpointLock.Unlock()
delete(mountpointMap, path)
}
// getFSInfo returns the mountpoint and backing device for the given path,
// caching both so that later lookups do not rescan the mount table.
func getFSInfo(m mount.Interface, path string) (string, string, error) {
mountpointLock.Lock()
defer mountpointLock.Unlock()
backingDevLock.Lock()
defer backingDevLock.Unlock()
var err error
mountpoint, okMountpoint := mountpointMap[path]
if !okMountpoint {
mountpoint, err = detectMountpoint(m, path)
if err != nil {
return "", "", fmt.Errorf("cannot determine mountpoint for %s: %v", path, err)
}
}
backingDev, okBackingDev := backingDevMap[path]
if !okBackingDev {
backingDev, err = detectBackingDev(m, mountpoint)
if err != nil {
return "", "", fmt.Errorf("cannot determine backing device for %s: %v", path, err)
}
}
mountpointMap[path] = mountpoint
backingDevMap[path] = backingDev
return mountpoint, backingDev, nil
}
func clearFSInfo(path string) {
clearMountpoint(path)
clearBackingDev(path)
}
func getApplier(path string) common.LinuxVolumeQuotaApplier {
dirApplierLock.Lock()
defer dirApplierLock.Unlock()
return dirApplierMap[path]
}
func setApplier(path string, applier common.LinuxVolumeQuotaApplier) {
dirApplierLock.Lock()
defer dirApplierLock.Unlock()
dirApplierMap[path] = applier
}
func clearApplier(path string) {
dirApplierLock.Lock()
defer dirApplierLock.Unlock()
delete(dirApplierMap, path)
}
func setQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
return getApplier(path).SetQuotaOnDir(path, id, bytes)
}
func GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
_, _, err := getFSInfo(m, path)
if err != nil {
return common.BadQuotaID, err
}
return getApplier(path).GetQuotaOnDir(path)
}
func clearQuotaOnDir(m mount.Interface, path string, userNamespacesEnabled bool) error {
// Since we may be called without path being in the map,
// we explicitly have to check in this case.
klog.V(4).Infof("clearQuotaOnDir %s", path)
supportsQuotas, err := SupportsQuotas(m, path, userNamespacesEnabled)
if err != nil {
// Log-and-continue instead of returning an error for now
// due to unspecified backwards-compatibility concerns (subject to revision)
klog.V(3).Infof("Attempt to check for quota support failed: %v", err)
}
if !supportsQuotas {
return nil
}
projid, err := GetQuotaOnDir(m, path)
if err == nil && projid != common.BadQuotaID {
// We have a quota on the directory. Try to clear it;
// if we can't clear it, that's not good.
err = setQuotaOnDir(path, projid, 0)
if err != nil {
klog.V(3).Infof("Attempt to clear quota failed: %v", err)
}
// Even if clearing the quota failed, we still need to
// try to remove the project ID, or that may be left dangling.
err1 := removeProjectID(path, projid)
if err1 != nil {
klog.V(3).Infof("Attempt to remove quota ID from system files failed: %v", err1)
}
clearFSInfo(path)
if err != nil {
return err
}
return err1
}
// If we couldn't get a quota, that's fine -- there may
// never have been one, and we have no way to know otherwise
klog.V(3).Infof("clearQuotaOnDir fails %v", err)
return nil
}
// SupportsQuotas -- Does the path support quotas
// Cache the applier for paths that support quotas. For paths that don't,
// don't cache the result because nothing will clean it up.
// However, do cache the device->applier map; the number of devices
// is bounded.
// User namespaces prevent changes to project IDs on the filesystem,
// ensuring xfs-quota metrics' reliability; hence, userNamespacesEnabled is checked.
func SupportsQuotas(m mount.Interface, path string, userNamespacesEnabled bool) (bool, error) {
if !enabledQuotasForMonitoring() {
klog.V(3).Info("SupportsQuotas called, but quotas disabled")
return false, nil
}
if !userNamespacesEnabled {
klog.V(3).Info("SupportQuotas called and LocalStorageCapacityIsolationFSQuotaMonitoring enabled, but pod is not in a user namespace")
return false, nil
}
supportsQuotasLock.Lock()
defer supportsQuotasLock.Unlock()
if supportsQuotas, ok := supportsQuotasMap[path]; ok {
return supportsQuotas, nil
}
mount, dev, err := getFSInfo(m, path)
if err != nil {
return false, err
}
// Do we know about this device?
applier, ok := devApplierMap[mount]
if !ok {
for _, provider := range providers {
if applier = provider.GetQuotaApplier(mount, dev); applier != nil {
devApplierMap[mount] = applier
break
}
}
}
if applier != nil {
supportsQuotasMap[path] = true
setApplier(path, applier)
return true, nil
}
delete(backingDevMap, path)
delete(mountpointMap, path)
return false, nil
}
// AssignQuota -- assign a quota to the specified directory.
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity, userNamespacesEnabled bool) error { //nolint:staticcheck
if bytes == nil {
return fmt.Errorf("attempting to assign null quota to %s", path)
}
ibytes := bytes.Value()
if ok, err := SupportsQuotas(m, path, userNamespacesEnabled); !ok {
return fmt.Errorf("quotas not supported on %s: %v", path, err)
}
quotaLock.Lock()
defer quotaLock.Unlock()
// Current policy is to set individual quotas on each volume:
// for each new volume we generate a random UUID and use that as
// the internal pod uid.
// From the fsquota point of view, each volume is attached to a
// single unique pod.
// If we decide later that we want to assign one quota for all
// volumes in a pod, we can simply use the poduid parameter directly.
// If and when we decide permanently that we're going to adopt
// one quota per volume, we can rip all of the pod code out.
externalPodUid := poduid
internalPodUid, ok := dirPodMap[path]
if ok {
if podUidMap[internalPodUid] != externalPodUid {
return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, podUidMap[internalPodUid], externalPodUid)
}
} else {
internalPodUid = types.UID(uuid.NewUUID())
}
oid, ok := podQuotaMap[internalPodUid]
if ok {
if quotaSizeMap[oid] != ibytes {
return fmt.Errorf("requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
}
if _, ok := dirPodMap[path]; ok {
return nil
}
} else {
oid = common.BadQuotaID
}
id, err := createProjectID(path, oid)
if err == nil {
if oid != common.BadQuotaID && oid != id {
return fmt.Errorf("attempt to reassign quota %v to %v", oid, id)
}
// Quotas are currently used only for monitoring, so replace any positive
// limit with -1 (no enforcement on the filesystem). Once quota enforcement
// is implemented, this override will be conditioned on enforcement being
// disabled as well.
fsbytes := ibytes
if fsbytes > 0 {
fsbytes = -1
}
if err = setQuotaOnDir(path, id, fsbytes); err == nil {
quotaPodMap[id] = internalPodUid
quotaSizeMap[id] = ibytes
podQuotaMap[internalPodUid] = id
dirQuotaMap[path] = id
dirPodMap[path] = internalPodUid
podUidMap[internalPodUid] = externalPodUid
podDirCountMap[internalPodUid]++
klog.V(4).Infof("Assigning quota ID %d (request limit %d, actual limit %d) to %s", id, ibytes, fsbytes, path)
return nil
}
removeProjectID(path, id)
}
return fmt.Errorf("assign quota FAILED %v", err)
}
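// For illustration only, a minimal sketch of the expected call sequence from a
// caller (paths, pod UID, and size are hypothetical):
//
//	m := mount.New("")
//	limit := resource.NewQuantity(1<<30, resource.BinarySI) // 1 GiB request
//	if err := AssignQuota(m, volDir, poduid, limit, true); err != nil {
//	    // quotas unavailable; fall back to walking the directory
//	}
//	used, _ := GetConsumption(volDir) // nil if volDir is not under quota management
//	_ = ClearQuota(m, volDir, true)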
// GetConsumption -- retrieve the consumption (in bytes) of the directory
func GetConsumption(path string) (*resource.Quantity, error) {
// Note that we actually need to hold the lock at least through
// running the quota command, so it can't get recycled behind our back
quotaLock.Lock()
defer quotaLock.Unlock()
applier := getApplier(path)
// No applier means directory is not under quota management
if applier == nil {
return nil, nil
}
ibytes, err := applier.GetConsumption(path, dirQuotaMap[path])
if err != nil {
return nil, err
}
return resource.NewQuantity(ibytes, resource.DecimalSI), nil
}
// GetInodes -- retrieve the number of inodes in use under the directory
func GetInodes(path string) (*resource.Quantity, error) {
// Note that we actually need to hold the lock at least through
// running the quota command, so it can't get recycled behind our back
quotaLock.Lock()
defer quotaLock.Unlock()
applier := getApplier(path)
// No applier means directory is not under quota management
if applier == nil {
return nil, nil
}
inodes, err := applier.GetInodes(path, dirQuotaMap[path])
if err != nil {
return nil, err
}
return resource.NewQuantity(inodes, resource.DecimalSI), nil
}
// ClearQuota -- remove the quota assigned to a directory
func ClearQuota(m mount.Interface, path string, userNamespacesEnabled bool) error {
klog.V(3).Infof("ClearQuota %s", path)
if !enabledQuotasForMonitoring() {
return fmt.Errorf("clearQuota called, but quotas disabled")
}
quotaLock.Lock()
defer quotaLock.Unlock()
poduid, ok := dirPodMap[path]
if !ok {
// Nothing in the map either means that there was no
// quota to begin with or that we're clearing a
// stale directory, so if we find a quota, just remove it.
// The process of clearing the quota requires that an applier
// be found, which needs to be cleaned up.
defer delete(supportsQuotasMap, path)
defer clearApplier(path)
return clearQuotaOnDir(m, path, userNamespacesEnabled)
}
_, ok = podQuotaMap[poduid]
if !ok {
return fmt.Errorf("clearQuota: No quota available for %s", path)
}
projid, err := GetQuotaOnDir(m, path)
if err != nil {
// Log-and-continue instead of returning an error for now
// due to unspecified backwards-compatibility concerns (subject to revision)
klog.V(3).Infof("Attempt to check quota ID %v on dir %s failed: %v", dirQuotaMap[path], path, err)
}
if projid != dirQuotaMap[path] {
return fmt.Errorf("expected quota ID %v on dir %s does not match actual %v", dirQuotaMap[path], path, projid)
}
count, ok := podDirCountMap[poduid]
if count <= 1 || !ok {
err = clearQuotaOnDir(m, path, userNamespacesEnabled)
// This error should be noted; we still need to clean up
// and otherwise handle in the same way.
if err != nil {
klog.V(3).Infof("Unable to clear quota %v %s: %v", dirQuotaMap[path], path, err)
}
delete(quotaSizeMap, podQuotaMap[poduid])
delete(quotaPodMap, podQuotaMap[poduid])
delete(podDirCountMap, poduid)
delete(podQuotaMap, poduid)
delete(podUidMap, poduid)
} else {
err = removeProjectID(path, projid)
podDirCountMap[poduid]--
klog.V(4).Infof("Not clearing quota for pod %s; still %v dirs outstanding", poduid, podDirCountMap[poduid])
}
delete(dirPodMap, path)
delete(dirQuotaMap, path)
delete(supportsQuotasMap, path)
clearApplier(path)
if err != nil {
return fmt.Errorf("unable to clear quota for %s: %v", path, err)
}
return nil
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostutil
import (
"errors"
"os"
"sync"
"k8s.io/mount-utils"
)
// FakeHostUtil is a fake HostUtils implementation for testing
type FakeHostUtil struct {
MountPoints []mount.MountPoint
Filesystem map[string]FileType
mutex sync.Mutex
}
// NewFakeHostUtil returns a struct that implements the HostUtils interface
// for testing
// TODO: no callers were initializing the struct with any MountPoints. Check
// if those are still being used by any callers and if MountPoints still need
// to be a part of the struct.
func NewFakeHostUtil(fs map[string]FileType) *FakeHostUtil {
return &FakeHostUtil{
Filesystem: fs,
}
}
// Compile-time check to make sure FakeHostUtil implements interface
var _ HostUtils = &FakeHostUtil{}
// DeviceOpened checks if the block device referenced by pathname is in use
// by checking whether it is listed as a device in the in-memory mountpoint table.
func (hu *FakeHostUtil) DeviceOpened(pathname string) (bool, error) {
hu.mutex.Lock()
defer hu.mutex.Unlock()
for _, mp := range hu.MountPoints {
if mp.Device == pathname {
return true, nil
}
}
return false, nil
}
// PathIsDevice always returns true
func (hu *FakeHostUtil) PathIsDevice(pathname string) (bool, error) {
return true, nil
}
// GetDeviceNameFromMount given a mount point, find the volume id
func (hu *FakeHostUtil) GetDeviceNameFromMount(mounter mount.Interface, mountPath, pluginMountDir string) (string, error) {
return getDeviceNameFromMount(mounter, mountPath, pluginMountDir)
}
// MakeRShared checks if path is shared and bind-mounts it as rshared if needed.
// No-op for testing
func (hu *FakeHostUtil) MakeRShared(path string) error {
return nil
}
// GetFileType checks for file/directory/socket/block/character devices.
// Defaults to Directory if otherwise unspecified.
func (hu *FakeHostUtil) GetFileType(pathname string) (FileType, error) {
if t, ok := hu.Filesystem[pathname]; ok {
return t, nil
}
return FileType("Directory"), nil
}
// PathExists checks if pathname exists.
func (hu *FakeHostUtil) PathExists(pathname string) (bool, error) {
if _, ok := hu.Filesystem[pathname]; ok {
return true, nil
}
return false, nil
}
// EvalHostSymlinks returns the path name after evaluating symlinks.
// No-op for testing
func (hu *FakeHostUtil) EvalHostSymlinks(pathname string) (string, error) {
return pathname, nil
}
// GetOwner returns the integer ID for the user and group of the given path
// Not implemented for testing
func (hu *FakeHostUtil) GetOwner(pathname string) (int64, int64, error) {
return -1, -1, errors.New("GetOwner not implemented")
}
// GetSELinuxSupport tests if pathname is on a mount that supports SELinux.
// Always returns false for testing.
func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) {
return false, nil
}
// GetMode returns permissions of pathname.
// Not implemented for testing
func (hu *FakeHostUtil) GetMode(pathname string) (os.FileMode, error) {
return 0, errors.New("not implemented")
}
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
// given mount point.
func (hu *FakeHostUtil) GetSELinuxMountContext(pathname string) (string, error) {
// This pretends the OS does not support SELinux.
return "", nil
}
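// For illustration only, a hypothetical test can seed the fake with a canned
// filesystem and exercise code that depends on HostUtils:
//
//	hu := NewFakeHostUtil(map[string]FileType{
//	    "/dev/sdc":  FileTypeBlockDev,
//	    "/mnt/data": FileTypeDirectory,
//	})
//	ft, _ := hu.GetFileType("/dev/sdc") // FileTypeBlockDev
//	ok, _ := hu.PathExists("/missing")  // false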
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostutil
import (
"fmt"
"os"
)
// FileType enumerates the known set of possible file types.
type FileType string
const (
// FileTypeBlockDev defines a constant for the block device FileType.
FileTypeBlockDev FileType = "BlockDevice"
// FileTypeCharDev defines a constant for the character device FileType.
FileTypeCharDev FileType = "CharDevice"
// FileTypeDirectory defines a constant for the directory FileType.
FileTypeDirectory FileType = "Directory"
// FileTypeFile defines a constant for the file FileType.
FileTypeFile FileType = "File"
// FileTypeSocket defines a constant for the socket FileType.
FileTypeSocket FileType = "Socket"
// FileTypeUnknown defines a constant for an unknown FileType.
FileTypeUnknown FileType = ""
)
var (
errUnknownFileType = fmt.Errorf("only recognise file, directory, socket, block device and character device")
)
// HostUtils defines the set of methods for interacting with paths on a host.
type HostUtils interface {
// DeviceOpened determines if the device (e.g. /dev/sdc) is in use elsewhere
// on the system, i.e. still mounted.
DeviceOpened(pathname string) (bool, error)
// PathIsDevice determines if a path is a device.
PathIsDevice(pathname string) (bool, error)
// MakeRShared checks that given path is on a mount with 'rshared' mount
// propagation. If not, it bind-mounts the path as rshared.
MakeRShared(path string) error
// GetFileType checks for file/directory/socket/block/character devices.
GetFileType(pathname string) (FileType, error)
// PathExists tests if the given path already exists
// Error is returned on any other error than "file not found".
PathExists(pathname string) (bool, error)
// EvalHostSymlinks returns the path name after evaluating symlinks.
EvalHostSymlinks(pathname string) (string, error)
// GetOwner returns the integer ID for the user and group of the given path
GetOwner(pathname string) (int64, int64, error)
// GetSELinuxSupport returns true if given path is on a mount that supports
// SELinux.
GetSELinuxSupport(pathname string) (bool, error)
// GetMode returns permissions of the path.
GetMode(pathname string) (os.FileMode, error)
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
// given mount point.
GetSELinuxMountContext(pathname string) (string, error)
}
// Compile-time check to ensure all HostUtil implementations satisfy
// the Interface.
var _ HostUtils = &HostUtil{}
// getFileType checks for file/directory/socket and block/character devices.
func getFileType(pathname string) (FileType, error) {
var pathType FileType
info, err := os.Stat(pathname)
if os.IsNotExist(err) {
return pathType, fmt.Errorf("path %q does not exist", pathname)
}
// err in call to os.Stat
if err != nil {
return pathType, err
}
// isSpecificMode checks whether mode contains all of the bits of targetMode.
isSpecificMode := func(mode, targetMode os.FileMode) bool {
return mode&targetMode == targetMode
}
mode := info.Mode()
if mode.IsDir() {
return FileTypeDirectory, nil
} else if mode.IsRegular() {
return FileTypeFile, nil
} else if isSpecificMode(mode, os.ModeSocket) {
return FileTypeSocket, nil
} else if isSpecificMode(mode, os.ModeDevice) {
if isSpecificMode(mode, os.ModeCharDevice) {
return FileTypeCharDev, nil
}
return FileTypeBlockDev, nil
}
return pathType, errUnknownFileType
}
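// For illustration only: isSpecificMode requires every bit of targetMode to be
// set. In Go, a character device carries both os.ModeDevice and
// os.ModeCharDevice, while a block device carries os.ModeDevice alone, e.g.:
//
//	char := os.ModeDevice | os.ModeCharDevice // -> FileTypeCharDev
//	block := os.ModeDevice                    // -> FileTypeBlockDev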
//go:build linux
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostutil
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/opencontainers/selinux/go-selinux"
"golang.org/x/sys/unix"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilpath "k8s.io/utils/path"
)
const (
// Location of the mountinfo file
procMountInfoPath = "/proc/self/mountinfo"
)
// HostUtil implements HostUtils for Linux platforms.
type HostUtil struct {
}
// NewHostUtil returns a struct that implements the HostUtils interface on
// linux platforms
func NewHostUtil() *HostUtil {
return &HostUtil{}
}
// DeviceOpened checks if a block device is in use by calling Open with the O_EXCL flag.
// If pathname is not a device, log and return false with a nil error.
// If open returns errno EBUSY, return true with a nil error.
// If open returns nil, return false with a nil error.
// Otherwise, return false with the error.
func (hu *HostUtil) DeviceOpened(pathname string) (bool, error) {
return ExclusiveOpenFailsOnDevice(pathname)
}
// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
// to a device.
func (hu *HostUtil) PathIsDevice(pathname string) (bool, error) {
pathType, err := hu.GetFileType(pathname)
isDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev
return isDevice, err
}
// ExclusiveOpenFailsOnDevice checks if a block device is in use by calling Open with the O_EXCL flag.
func ExclusiveOpenFailsOnDevice(pathname string) (bool, error) {
var isDevice bool
finfo, err := os.Stat(pathname)
if os.IsNotExist(err) {
isDevice = false
}
// err in call to os.Stat
if err != nil {
return false, fmt.Errorf(
"PathIsDevice failed for path %q: %v",
pathname,
err)
}
// path refers to a device
if finfo.Mode()&os.ModeDevice != 0 {
isDevice = true
}
if !isDevice {
klog.Errorf("Path %q is not referring to a device.", pathname)
return false, nil
}
fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL|unix.O_CLOEXEC, 0)
// If the device is in use, open will return an invalid fd.
// When this happens, it is expected that Close will fail and return an error.
defer unix.Close(fd)
if errno == nil {
// device not in use
return false, nil
} else if errno == unix.EBUSY {
// device is in use
return true, nil
}
// error during call to Open
return false, errno
}
// GetDeviceNameFromMount: given a mount point, find the device name from its global mount point.
func (hu *HostUtil) GetDeviceNameFromMount(mounter mount.Interface, mountPath, pluginMountDir string) (string, error) {
return getDeviceNameFromMount(mounter, mountPath, pluginMountDir)
}
// getDeviceNameFromMount finds the device name from /proc/self/mountinfo, in which
// the mount path reference should match the given plugin mount directory. If no
// mount path reference matches, it returns the volume name taken from the given mountPath.
func getDeviceNameFromMount(mounter mount.Interface, mountPath, pluginMountDir string) (string, error) {
refs, err := mounter.GetMountRefs(mountPath)
if err != nil {
klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
return "", err
}
if len(refs) == 0 {
klog.V(4).Infof("Directory %s is not mounted", mountPath)
return "", fmt.Errorf("directory %s is not mounted", mountPath)
}
for _, ref := range refs {
if strings.HasPrefix(ref, pluginMountDir) {
volumeID, err := filepath.Rel(pluginMountDir, ref)
if err != nil {
klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
return "", err
}
return volumeID, nil
}
}
return path.Base(mountPath), nil
}
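// For illustration only, with hypothetical paths: if pluginMountDir is
// /var/lib/kubelet/plugins/kubernetes.io/csi/pv and a mount ref is
// /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-1234/globalmount,
// filepath.Rel yields "pvc-1234/globalmount" as the volume ID. If no ref has
// the prefix, path.Base(mountPath) is returned as a best-effort volume name.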
// MakeRShared checks that given path is on a mount with 'rshared' mount
// propagation. If not, it bind-mounts the path as rshared.
func (hu *HostUtil) MakeRShared(path string) error {
return DoMakeRShared(path, procMountInfoPath)
}
// GetFileType checks for file/directory/socket/block/character devices.
func (hu *HostUtil) GetFileType(pathname string) (FileType, error) {
return getFileType(pathname)
}
// PathExists tests if the given path already exists
// Error is returned on any other error than "file not found".
func (hu *HostUtil) PathExists(pathname string) (bool, error) {
return utilpath.Exists(utilpath.CheckFollowSymlink, pathname)
}
// EvalHostSymlinks returns the path name after evaluating symlinks.
func (hu *HostUtil) EvalHostSymlinks(pathname string) (string, error) {
return filepath.EvalSymlinks(pathname)
}
// FindMountInfo returns the mount info on the given path.
func (hu *HostUtil) FindMountInfo(path string) (mount.MountInfo, error) {
return findMountInfo(path, procMountInfoPath)
}
// isShared returns true, if given path is on a mount point that has shared
// mount propagation.
func isShared(mount string, mountInfoPath string) (bool, error) {
info, err := findMountInfo(mount, mountInfoPath)
if err != nil {
return false, err
}
// parse optional parameters
for _, opt := range info.OptionalFields {
if strings.HasPrefix(opt, "shared:") {
return true, nil
}
}
return false, nil
}
func findMountInfo(path, mountInfoPath string) (mount.MountInfo, error) {
infos, err := mount.ParseMountInfo(mountInfoPath)
if err != nil {
return mount.MountInfo{}, err
}
// process /proc/xxx/mountinfo in backward order and find the first mount
// point that is prefix of 'path' - that's the mount where path resides
var info *mount.MountInfo
for i := len(infos) - 1; i >= 0; i-- {
if mount.PathWithinBase(path, infos[i].MountPoint) {
info = &infos[i]
break
}
}
if info == nil {
return mount.MountInfo{}, fmt.Errorf("cannot find mount point for %q", path)
}
return *info, nil
}
// DoMakeRShared is the common implementation of MakeRShared on Linux. It checks
// whether the path is on a shared mount and, if not, bind-mounts it onto itself
// and marks it rshared (the equivalent of 'mount --bind <path> <path>' followed
// by 'mount --make-rshared <path>').
func DoMakeRShared(path string, mountInfoFilename string) error {
shared, err := isShared(path, mountInfoFilename)
if err != nil {
return err
}
if shared {
klog.V(4).Infof("Directory %s is already on a shared mount", path)
return nil
}
klog.V(2).Infof("Bind-mounting %q with shared mount propagation", path)
// mount --bind /var/lib/kubelet /var/lib/kubelet
if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_BIND, "" /*data*/); err != nil {
return fmt.Errorf("failed to bind-mount %s: %v", path, err)
}
// mount --make-rshared /var/lib/kubelet
if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_SHARED|syscall.MS_REC, "" /*data*/); err != nil {
return fmt.Errorf("failed to make %s rshared: %v", path, err)
}
return nil
}
// seLinuxEnabledFunc matches the signature of selinux.GetEnabled so that it can be stubbed out in unit tests
type seLinuxEnabledFunc func() bool
// GetSELinux is the common implementation of GetSELinuxSupport on Linux.
func GetSELinux(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (bool, error) {
// Skip /proc/mounts parsing if SELinux is disabled.
if !selinuxEnabled() {
return false, nil
}
info, err := findMountInfo(path, mountInfoFilename)
if err != nil {
return false, err
}
// "seclabel" can be both in mount options and super options.
for _, opt := range info.SuperOptions {
if opt == "seclabel" {
return true, nil
}
}
for _, opt := range info.MountOptions {
if opt == "seclabel" {
return true, nil
}
}
return false, nil
}
// GetSELinuxSupport returns true if given path is on a mount that supports
// SELinux.
func (hu *HostUtil) GetSELinuxSupport(pathname string) (bool, error) {
return GetSELinux(pathname, procMountInfoPath, selinux.GetEnabled)
}
// GetOwner returns the integer ID for the user and group of the given path
func (hu *HostUtil) GetOwner(pathname string) (int64, int64, error) {
realpath, err := filepath.EvalSymlinks(pathname)
if err != nil {
return -1, -1, err
}
return GetOwnerLinux(realpath)
}
// GetMode returns permissions of the path.
func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) {
return GetModeLinux(pathname)
}
// GetOwnerLinux returns the integer ID for the user and group of the given path.
// pathname must already be evaluated for symlinks.
func GetOwnerLinux(pathname string) (int64, int64, error) {
info, err := os.Stat(pathname)
if err != nil {
return -1, -1, err
}
stat := info.Sys().(*syscall.Stat_t)
return int64(stat.Uid), int64(stat.Gid), nil
}
// GetModeLinux returns permissions of the pathname.
func GetModeLinux(pathname string) (os.FileMode, error) {
info, err := os.Stat(pathname)
if err != nil {
return 0, err
}
return info.Mode(), nil
}
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
// given mount point.
func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
return getSELinuxMountContext(pathname, procMountInfoPath, selinux.GetEnabled)
}
// getSELinuxMountContext is the common implementation of GetSELinuxMountContext
// on Linux. Using an extra function for unit tests.
func getSELinuxMountContext(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (string, error) {
// Skip /proc/mounts parsing if SELinux is disabled.
if !selinuxEnabled() {
return "", nil
}
info, err := findMountInfo(path, mountInfoFilename)
if err != nil {
return "", err
}
for _, opt := range info.SuperOptions {
if !strings.HasPrefix(opt, "context=") {
continue
}
// Remove context=
context := strings.TrimPrefix(opt, "context=")
// Remove double quotes
context = strings.Trim(context, "\"")
return context, nil
}
return "", nil
}
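// For illustration only: given a hypothetical super-options entry
//
//	context="system_u:object_r:container_file_t:s0"
//
// the "context=" prefix and the surrounding double quotes are stripped,
// returning system_u:object_r:container_file_t:s0.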
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"path/filepath"
)
// IoUtil is a mockable util for common IO operations
type IoUtil interface {
ReadFile(filename string) ([]byte, error)
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
}
type osIOHandler struct{}
// NewIOHandler creates a new IoUtil implementation
func NewIOHandler() IoUtil {
return &osIOHandler{}
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
return os.ReadFile(filename)
}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
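// For illustration only, a hypothetical in-memory fake lets IoUtil consumers
// be tested without touching the filesystem:
//
//	type fakeIO struct{ files map[string][]byte }
//
//	func (f *fakeIO) ReadFile(name string) ([]byte, error)     { return f.files[name], nil }
//	func (f *fakeIO) ReadDir(string) ([]os.FileInfo, error)    { return nil, nil }
//	func (f *fakeIO) Lstat(string) (os.FileInfo, error)        { return nil, os.ErrNotExist }
//	func (f *fakeIO) EvalSymlinks(path string) (string, error) { return path, nil }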
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"strconv"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
statusSuccess = "success"
statusFailUnknown = "fail-unknown"
)
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level (https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var StorageOperationMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Name: "storage_operation_duration_seconds",
Help: "Storage operation duration",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600},
StabilityLevel: metrics.ALPHA,
},
[]string{"volume_plugin", "operation_name", "status", "migrated"},
)
var storageOperationEndToEndLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Name: "volume_operation_total_seconds",
Help: "Storage operation end to end duration in seconds",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600},
StabilityLevel: metrics.ALPHA,
},
[]string{"plugin_name", "operation_name"},
)
var csiOperationsLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: "csi",
Name: "operations_seconds",
Help: "Container Storage Interface operation duration with gRPC error code status total",
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600},
StabilityLevel: metrics.ALPHA,
},
[]string{"driver_name", "method_name", "grpc_status_code", "migrated"},
)
func init() {
registerMetrics()
}
func registerMetrics() {
// legacyregistry is the internal k8s wrapper around the prometheus
// global registry, used specifically for metric stability enforcement
legacyregistry.MustRegister(StorageOperationMetric)
legacyregistry.MustRegister(storageOperationEndToEndLatencyMetric)
legacyregistry.MustRegister(csiOperationsLatencyMetric)
}
// OperationCompleteHook returns a hook to call when an operation is completed
func OperationCompleteHook(plugin, operationName string) func(types.CompleteFuncParam) {
requestTime := time.Now()
opComplete := func(c types.CompleteFuncParam) {
timeTaken := time.Since(requestTime).Seconds()
// Create metric with operation name and plugin name
status := statusSuccess
if *c.Err != nil {
// TODO: Establish well-known error codes to be able to distinguish
// user configuration errors from system errors.
status = statusFailUnknown
}
migrated := false
if c.Migrated != nil {
migrated = *c.Migrated
}
StorageOperationMetric.WithLabelValues(plugin, operationName, status, strconv.FormatBool(migrated)).Observe(timeTaken)
}
return opComplete
}
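// For illustration only, a sketch of the intended usage (names hypothetical):
// capture the hook before the operation starts, then invoke it with a pointer
// to the named return error so the final status and duration are recorded.
//
//	func attachVolume(pluginName string) (err error) {
//	    opComplete := OperationCompleteHook(pluginName, "volume_attach")
//	    defer func() { opComplete(types.CompleteFuncParam{Err: &err}) }()
//	    // ... perform the operation, assigning err ...
//	    return err
//	}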
// FSGroupCompleteHook returns a hook to call when volume recursive permission is changed
func FSGroupCompleteHook(plugin volume.VolumePlugin, spec *volume.Spec) func(types.CompleteFuncParam) {
return OperationCompleteHook(GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), spec), "volume_apply_access_control")
}
// GetFullQualifiedPluginNameForVolume returns full qualified plugin name for
// given volume. For CSI plugin, it appends plugin driver name at the end of
// plugin name, e.g. kubernetes.io/csi:csi-hostpath. It helps to distinguish
// between metrics emitted for CSI volumes which may be handled by different
// CSI plugin drivers.
func GetFullQualifiedPluginNameForVolume(pluginName string, spec *volume.Spec) string {
if spec != nil {
if spec.Volume != nil && spec.Volume.CSI != nil {
return fmt.Sprintf("%s:%s", pluginName, spec.Volume.CSI.Driver)
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil {
return fmt.Sprintf("%s:%s", pluginName, spec.PersistentVolume.Spec.CSI.Driver)
}
}
return pluginName
}
// RecordOperationLatencyMetric records the end to end latency for certain operation
// into metric volume_operation_total_seconds
func RecordOperationLatencyMetric(plugin, operationName string, secondsTaken float64) {
storageOperationEndToEndLatencyMetric.WithLabelValues(plugin, operationName).Observe(secondsTaken)
}
// RecordCSIOperationLatencyMetrics records the CSI operation latency and gRPC status
// into the metric csi_operations_seconds
func RecordCSIOperationLatencyMetrics(driverName string,
operationName string,
operationErr error,
operationDuration time.Duration,
migrated string) {
csiOperationsLatencyMetric.WithLabelValues(driverName, operationName, getErrorCode(operationErr), migrated).Observe(operationDuration.Seconds())
}
func getErrorCode(err error) string {
if err == nil {
return codes.OK.String()
}
st, ok := status.FromError(err)
if !ok {
// This is not a gRPC error. The operation must have failed before the gRPC
// method was called, otherwise we would get a gRPC error.
return "unknown-non-grpc"
}
return st.Code().String()
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// getNestedMountpoints returns a list of mountpoint directories that should be created
// for the volume indicated by name.
// note: the returned list is relative to baseDir
func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
var retval []string
checkContainer := func(container *v1.Container) error {
var allMountPoints []string // all mount points in this container
var myMountPoints []string // mount points that match name
for _, vol := range container.VolumeMounts {
cleaned := filepath.Clean(vol.MountPath)
allMountPoints = append(allMountPoints, cleaned)
if vol.Name == name {
myMountPoints = append(myMountPoints, cleaned)
}
}
sort.Strings(allMountPoints)
parentPrefix := ".." + string(os.PathSeparator)
// Examine each place where this volume is mounted
for _, myMountPoint := range myMountPoints {
if strings.HasPrefix(myMountPoint, parentPrefix) {
// Don't let a container trick us into creating directories outside of its rootfs
return fmt.Errorf("invalid container mount point %v", myMountPoint)
}
myMPSlash := myMountPoint + string(os.PathSeparator)
// The previously found nested mountpoints.
// NOTE: We can't simply rely on sort.Strings to have all the mountpoints sorted and
// grouped. For example, the following strings are sorted in this exact order:
// /dir/nested, /dir/nested-vol, /dir/nested.vol, /dir/nested/double, /dir/nested2
// The issue is a bit worse for Windows paths, since the \'s value is higher than /'s:
// \dir\nested, \dir\nested-vol, \dir\nested.vol, \dir\nested2, \dir\nested\double
// Because of this, we should use a list of previously mounted mountpoints, rather than only one.
prevNestedMPs := []string{}
// examine each mount point to see if it's nested beneath this volume
// (but skip any that are double-nested beneath this volume)
// For example, if this volume is mounted as /dir and other volumes are mounted
// as /dir/nested and /dir/nested/other, only create /dir/nested.
for _, mp := range allMountPoints {
if !strings.HasPrefix(mp, myMPSlash) {
continue // skip -- not nested beneath myMountPoint
}
isNested := false
for _, prevNestedMP := range prevNestedMPs {
if strings.HasPrefix(mp, prevNestedMP) {
isNested = true
break
}
}
if isNested {
continue // skip -- double nested beneath myMountPoint
}
// since this mount point is nested, remember it so that we can check that following ones aren't nested beneath this one
prevNestedMPs = append(prevNestedMPs, mp+string(os.PathSeparator))
retval = append(retval, mp[len(myMPSlash):])
}
}
return nil
}
var retErr error
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
retErr = checkContainer(c)
return retErr == nil
})
if retErr != nil {
return nil, retErr
}
return retval, nil
}
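// For illustration only, a hypothetical container mounting volume "data" at
// /dir, with other volumes at /dir/nested, /dir/nested/double, and /other:
// getNestedMountpoints returns only "nested" (relative to /dir), skipping
// /dir/nested/double as double-nested and /other as not beneath /dir.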
// MakeNestedMountpoints creates mount points in baseDir for volumes mounted beneath name
func MakeNestedMountpoints(name, baseDir string, pod v1.Pod) error {
dirs, err := getNestedMountpoints(name, baseDir, pod)
if err != nil {
return err
}
for _, dir := range dirs {
err := os.MkdirAll(filepath.Join(baseDir, dir), 0755)
if err != nil {
return fmt.Errorf("unable to create nested volume mountpoints: %v", err)
}
}
return nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package nestedpendingoperations is a modified implementation of
pkg/util/goroutinemap. It implements a data structure for managing go routines
by volume/pod name. It prevents the creation of new go routines if an existing
go routine for the volume already exists. It also allows multiple operations to
execute in parallel for the same volume as long as they are operating on
different pods.
*/
package nestedpendingoperations
import (
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
const (
// EmptyUniquePodName is a UniquePodName for empty string.
EmptyUniquePodName volumetypes.UniquePodName = volumetypes.UniquePodName("")
// EmptyUniqueVolumeName is a UniqueVolumeName for empty string
EmptyUniqueVolumeName v1.UniqueVolumeName = v1.UniqueVolumeName("")
// EmptyNodeName is a NodeName for empty string
EmptyNodeName types.NodeName = types.NodeName("")
)
// NestedPendingOperations defines the supported set of operations.
type NestedPendingOperations interface {
// Run adds the concatenation of volumeName, podName, and nodeName to the list
// of running operations and spawns a new go routine to run
// generatedOperations.
// volumeName, podName, and nodeName collectively form the operation key.
// The following forms of operation keys are supported (two keys "match"
// when their operations must be serialized):
// - volumeName empty, podName and nodeName could be anything
// This key does not match with any keys.
// - volumeName exists, podName empty, nodeName empty
// This key matches all other keys with the same volumeName.
// - volumeName exists, podName exists, nodeName empty
// This key matches with:
// - the same volumeName and podName
// - the same volumeName, but empty podName
// - volumeName exists, podName empty, nodeName exists
// This key matches with:
// - the same volumeName and nodeName
// - the same volumeName but empty nodeName
// If there is no operation with a matching key, the operation is allowed to
// proceed.
// If an operation with a matching key exists and the previous operation is
// running, an AlreadyExists error is returned.
// If an operation with a matching key exists and the previous operation
// failed:
// - If the previous operation has the same
// generatedOperations.operationName:
// - If the full exponential backoff period is satisfied, the operation is
// allowed to proceed.
// - Otherwise, an ExponentialBackoff error is returned.
// - Otherwise, exponential backoff is reset and operation is allowed to
// proceed.
// Once the operation is complete, the go routine is terminated. If the
// operation succeeded, its corresponding key is removed from the list of
// executing operations, allowing a new operation to be started with the key
// without error. If it failed, the key remains and the exponential
// backoff status is updated.
Run(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName,
generatedOperations volumetypes.GeneratedOperations) error
// Wait blocks until all operations are completed. This is typically
// necessary during tests - the test should wait until all operations finish
// and evaluate results after that.
Wait()
// IsOperationPending returns true if an operation for the given volumeName
// and one of podName or nodeName is pending, otherwise it returns false
IsOperationPending(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName) bool
// IsOperationSafeToRetry returns false if an operation for the given volumeName
// and one of podName or nodeName is pending or in exponential backoff, otherwise it returns true
IsOperationSafeToRetry(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName, operationName string) bool
}
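// For illustration only, hypothetical keys under the matching rules above: a
// pending operation with key {vol1, pod1, ""} blocks a new Run with the same
// key and also one with {vol1, "", ""} (an empty podName matches any), while
// {vol1, pod2, ""} may proceed because both pod names are non-empty and differ.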
// NewNestedPendingOperations returns a new instance of NestedPendingOperations.
func NewNestedPendingOperations(exponentialBackOffOnError bool) NestedPendingOperations {
g := &nestedPendingOperations{
operations: []operation{},
exponentialBackOffOnError: exponentialBackOffOnError,
}
g.cond = sync.NewCond(&g.lock)
return g
}
type nestedPendingOperations struct {
operations []operation
exponentialBackOffOnError bool
cond *sync.Cond
lock sync.RWMutex
}
type operation struct {
key operationKey
operationName string
operationPending bool
expBackoff exponentialbackoff.ExponentialBackoff
}
func (grm *nestedPendingOperations) Run(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName,
generatedOperations volumetypes.GeneratedOperations) error {
grm.lock.Lock()
defer grm.lock.Unlock()
opKey := operationKey{volumeName, podName, nodeName}
opExists, previousOpIndex := grm.isOperationExists(opKey)
if opExists {
previousOp := grm.operations[previousOpIndex]
// Operation already exists
if previousOp.operationPending {
// Operation is pending
return NewAlreadyExistsError(opKey)
}
backOffErr := previousOp.expBackoff.SafeToRetry(fmt.Sprintf("%+v", opKey))
if backOffErr != nil {
if previousOp.operationName == generatedOperations.OperationName {
return backOffErr
}
// The previous operation and the new operation differ; reset the operation name and exponential backoff.
grm.operations[previousOpIndex].operationName = generatedOperations.OperationName
grm.operations[previousOpIndex].expBackoff = exponentialbackoff.ExponentialBackoff{}
}
// Update existing operation to mark as pending.
grm.operations[previousOpIndex].operationPending = true
grm.operations[previousOpIndex].key = opKey
} else {
// Create a new operation
grm.operations = append(grm.operations,
operation{
key: opKey,
operationPending: true,
operationName: generatedOperations.OperationName,
expBackoff: exponentialbackoff.ExponentialBackoff{},
})
}
go func() (eventErr, detailedErr error) {
// Handle unhandled panics (very unlikely)
defer k8sRuntime.HandleCrash()
// Handle completion of, and any error from, operationFunc()
defer grm.operationComplete(opKey, &detailedErr)
return generatedOperations.Run()
}()
return nil
}
func (grm *nestedPendingOperations) IsOperationSafeToRetry(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName,
operationName string) bool {
grm.lock.RLock()
defer grm.lock.RUnlock()
opKey := operationKey{volumeName, podName, nodeName}
exist, previousOpIndex := grm.isOperationExists(opKey)
if !exist {
return true
}
previousOp := grm.operations[previousOpIndex]
if previousOp.operationPending {
return false
}
backOffErr := previousOp.expBackoff.SafeToRetry(fmt.Sprintf("%+v", opKey))
if backOffErr != nil {
if previousOp.operationName == operationName {
return false
}
}
return true
}
func (grm *nestedPendingOperations) IsOperationPending(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName) bool {
grm.lock.RLock()
defer grm.lock.RUnlock()
opKey := operationKey{volumeName, podName, nodeName}
exist, previousOpIndex := grm.isOperationExists(opKey)
if exist && grm.operations[previousOpIndex].operationPending {
return true
}
return false
}
// This is an internal function; the caller must acquire and release the lock
func (grm *nestedPendingOperations) isOperationExists(key operationKey) (bool, int) {
// If volumeName is empty, operation can be executed concurrently
if key.volumeName == EmptyUniqueVolumeName {
return false, -1
}
opIndex := -1
for previousOpIndex, previousOp := range grm.operations {
volumeNameMatch := previousOp.key.volumeName == key.volumeName
podNameMatch := previousOp.key.podName == EmptyUniquePodName ||
key.podName == EmptyUniquePodName ||
previousOp.key.podName == key.podName
podNameExactMatch := previousOp.key.podName == key.podName
nodeNameMatch := previousOp.key.nodeName == EmptyNodeName ||
key.nodeName == EmptyNodeName ||
previousOp.key.nodeName == key.nodeName
nodeNameExactMatch := previousOp.key.nodeName == key.nodeName
if volumeNameMatch && podNameMatch && nodeNameMatch {
// nonExactMatch pending first
if previousOp.operationPending {
return true, previousOpIndex
}
// No pending match: remember the first non-exact match as opIndex,
// but let an exact match override it
if opIndex == -1 || (podNameExactMatch && nodeNameExactMatch) {
opIndex = previousOpIndex
}
}
}
return opIndex != -1, opIndex
}
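// matchSketch is an illustrative helper added for exposition, not part of
// the original file. It demonstrates the wildcard semantics of
// isOperationExists: an empty pod or node name on either side matches
// anything, so with one operation stored under {volume: "v", pod: "p1"},
// a lookup using the empty pod name still reports a conflict. The caller
// must hold grm.lock, as for isOperationExists itself.
func matchSketch(grm *nestedPendingOperations) bool {
    exists, _ := grm.isOperationExists(operationKey{
        volumeName: "v",                // hypothetical volume name
        podName:    EmptyUniquePodName, // wildcard: matches any stored podName
        nodeName:   EmptyNodeName,      // wildcard: matches any stored nodeName
    })
    return exists
}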
func (grm *nestedPendingOperations) getOperation(key operationKey) (uint, error) {
// Assumes lock has been acquired by caller.
for i, op := range grm.operations {
if op.key.volumeName == key.volumeName &&
op.key.podName == key.podName &&
op.key.nodeName == key.nodeName {
return uint(i), nil
}
}
return 0, fmt.Errorf("operation %+v not found", key)
}
func (grm *nestedPendingOperations) deleteOperation(key operationKey) {
// Assumes lock has been acquired by caller.
opIndex := -1
for i, op := range grm.operations {
if op.key.volumeName == key.volumeName &&
op.key.podName == key.podName &&
op.key.nodeName == key.nodeName {
opIndex = i
break
}
}
if opIndex < 0 {
return
}
// Delete index without preserving order
grm.operations[opIndex] = grm.operations[len(grm.operations)-1]
grm.operations = grm.operations[:len(grm.operations)-1]
}
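// removeUnordered is an illustrative sketch, not part of the original file:
// the same O(1) swap-delete used by deleteOperation above, shown standalone.
// It is the standard idiom for removing a slice element when order does not
// matter: overwrite the target with the last element, then shrink the slice.
func removeUnordered(ops []operation, i int) []operation {
    ops[i] = ops[len(ops)-1]
    return ops[:len(ops)-1]
}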
func (grm *nestedPendingOperations) operationComplete(key operationKey, err *error) {
// Deferred functions are executed in last-in, first-out order. In this case
// the lock is acquired first when operationComplete begins, and is
// released when the method finishes; only after the lock is released is cond
// signaled to wake the waiting goroutine.
defer grm.cond.Signal()
grm.lock.Lock()
defer grm.lock.Unlock()
if *err == nil || !grm.exponentialBackOffOnError {
// Operation completed without error, or exponentialBackOffOnError disabled
grm.deleteOperation(key)
if *err != nil {
// Log error
klog.Errorf("operation %+v failed with: %v", key, *err)
}
return
}
// Operation completed with error and exponentialBackOffOnError Enabled
existingOpIndex, getOpErr := grm.getOperation(key)
if getOpErr != nil {
// Failed to find existing operation
klog.Errorf("Operation %+v completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
key,
*err)
return
}
grm.operations[existingOpIndex].expBackoff.Update(err)
grm.operations[existingOpIndex].operationPending = false
// Log error
klog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
GenerateNoRetriesPermittedMsg(fmt.Sprintf("%+v", key)))
}
func (grm *nestedPendingOperations) Wait() {
grm.lock.Lock()
defer grm.lock.Unlock()
for len(grm.operations) > 0 {
grm.cond.Wait()
}
}
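// drainSketch is an illustrative sketch, not part of the original file. It
// shows the intended use of Wait, typically in tests or during teardown:
// block until every started operation has completed and been deleted, each
// completion signaling the condition variable that Wait checks.
func drainSketch(grm *nestedPendingOperations) {
    grm.Wait()
}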
type operationKey struct {
volumeName v1.UniqueVolumeName
podName volumetypes.UniquePodName
nodeName types.NodeName
}
// NewAlreadyExistsError returns a new instance of AlreadyExists error.
func NewAlreadyExistsError(key operationKey) error {
return alreadyExistsError{key}
}
// IsAlreadyExists returns true if an error returned from
// NestedPendingOperations indicates a new operation cannot be started because
// an operation with the same operation name is already executing.
func IsAlreadyExists(err error) bool {
switch err.(type) {
case alreadyExistsError:
return true
default:
return false
}
}
// alreadyExistsError is the error returned by NestedPendingOperations when a
// new operation cannot be started because an operation with the same operation
// name is already executing.
type alreadyExistsError struct {
operationKey operationKey
}
var _ error = alreadyExistsError{}
func (err alreadyExistsError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %+v. An operation with that name is already executing.",
err.operationKey)
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operationexecutor
import (
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// fakeOGCounter is a simple OperationGenerator which counts the number of times a function
// has been called
type fakeOGCounter struct {
// calledFuncs stores name and count of functions
calledFuncs map[string]int
opFunc func() volumetypes.OperationContext
}
var _ OperationGenerator = &fakeOGCounter{}
// NewFakeOGCounter returns an OperationGenerator
func NewFakeOGCounter(opFunc func() volumetypes.OperationContext) OperationGenerator {
return &fakeOGCounter{
calledFuncs: map[string]int{},
opFunc: opFunc,
}
}
func (f *fakeOGCounter) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations {
return f.recordFuncCall("GenerateMountVolumeFunc")
}
func (f *fakeOGCounter) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateUnmountVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
return f.recordFuncCall("GenerateAttachVolumeFunc")
}
func (f *fakeOGCounter) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateDetachVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateVolumesAreAttachedFunc"), nil
}
func (f *fakeOGCounter) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, hu hostutil.HostUtils) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateUnmountDeviceFunc"), nil
}
func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateVerifyControllerAttachedVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateMapVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateUnmapVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, hu hostutil.HostUtils) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateUnmapDeviceFunc"), nil
}
func (f *fakeOGCounter) GetVolumePluginMgr() *volume.VolumePluginMgr {
return nil
}
func (f *fakeOGCounter) GenerateExpandVolumeFunc(*v1.PersistentVolumeClaim, *v1.PersistentVolume) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateExpandVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateExpandAndRecoverVolumeFunc(*v1.PersistentVolumeClaim, *v1.PersistentVolume, string) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateExpandVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateExpandInUseVolumeFunc(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateExpandInUseVolumeFunc"), nil
}
func (f *fakeOGCounter) recordFuncCall(name string) volumetypes.GeneratedOperations {
// Incrementing through the map's zero value also records the first call for
// a name that has not been seen yet.
f.calledFuncs[name]++
ops := volumetypes.GeneratedOperations{
OperationName: name,
OperationFunc: f.opFunc,
}
return ops
}
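// fakeOGCounterSketch is an illustrative sketch, not part of the original
// file: it shows how a test might construct the fake generator with a no-op
// operation. It assumes the volumetypes.NewOperationContext(eventErr,
// detailedErr, migrated) helper from pkg/volume/util/types; if that helper
// is unavailable, a volumetypes.OperationContext literal would serve the
// same purpose.
func fakeOGCounterSketch() OperationGenerator {
    noop := func() volumetypes.OperationContext {
        return volumetypes.NewOperationContext(nil, nil, false)
    }
    return NewFakeOGCounter(noop)
}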
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operationexecutor
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/storage"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/volume/util"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
type NodeExpander struct {
nodeResizeOperationOpts
kubeClient clientset.Interface
recorder record.EventRecorder
// computed via precheck
pvcStatusCap resource.Quantity
resizeStatus v1.ClaimResourceStatus
// indicates that, if volume expansion failed on the node, the current expansion should be marked
// as infeasible so that the controller can reconcile the resizing operation using the new user-requested size.
markExpansionInfeasibleOnFailure bool
// pvcAlreadyUpdated, if true, indicates that although we are calling NodeExpandVolume on the kubelet,
// the PVC has already been updated - possibly because expansion already succeeded on a different node.
// This can happen when a RWX PVC is expanded.
pvcAlreadyUpdated bool
// testStatus is used for testing purposes only.
testStatus testResponseData
}
func newNodeExpander(resizeOp nodeResizeOperationOpts, client clientset.Interface, recorder record.EventRecorder) *NodeExpander {
return &NodeExpander{
kubeClient: client,
nodeResizeOperationOpts: resizeOp,
recorder: recorder,
}
}
// testResponseData is used for sanity checks in unit tests
type testResponseData struct {
// indicates that resize operation was called on underlying volume driver
// mainly useful for testing.
resizeCalledOnPlugin bool
// Indicates whether kubelet should treat the resize operation as finished.
// For kubelet, a resize operation may be treated as finished even if the
// actual resizing is *not* finished. This can happen either because certain
// prechecks failed and kubelet should not retry expansion, or because the
// resize operation is genuinely finished.
assumeResizeFinished bool
}
// runPreCheck performs sanity checks before expansion can be performed on the PVC.
// It returns true only if node expansion is allowed to proceed,
// and false otherwise.
func (ne *NodeExpander) runPreCheck() bool {
ne.pvcStatusCap = ne.pvc.Status.Capacity[v1.ResourceStorage]
allocatedResourceStatus := ne.pvc.Status.AllocatedResourceStatuses
if currentStatus, ok := allocatedResourceStatus[v1.ResourceStorage]; ok {
ne.resizeStatus = currentStatus
}
pvcSpecCap := ne.pvc.Spec.Resources.Requests[v1.ResourceStorage]
// Usually when we are performing node expansion, we expect the PV size and the PVC spec size
// to be the same; but if the user has edited the PVC since then and volume expansion failed
// with a final error, we should let the controller reconcile this state by marking the entire
// node expansion as infeasible.
if pvcSpecCap.Cmp(ne.pluginResizeOpts.NewSize) != 0 &&
ne.actualStateOfWorld.CheckVolumeInFailedExpansionWithFinalErrors(ne.vmt.VolumeName) {
ne.markExpansionInfeasibleOnFailure = true
}
if ne.pvcStatusCap.Cmp(ne.pluginResizeOpts.NewSize) >= 0 && ne.resizeStatus == "" {
ne.pvcAlreadyUpdated = true
}
// If the volume is already expanded, but the volume is of type RWX and
// the PVC doesn't have an annotation indicating that node expansion is not required,
// then we should allow node expansion to proceed, even though the volume is already expanded.
//
// This special case is needed because, for RWX volumes, volume expansion
// should be performed on all nodes, even if the volume is already expanded.
if ne.pvcAlreadyUpdated &&
storage.ContainsAccessMode(ne.pvc.Spec.AccessModes, v1.ReadWriteMany) &&
!metav1.HasAnnotation(ne.pvc.ObjectMeta, volumetypes.NodeExpansionNotRequired) {
return true
}
// Recovery features only work with newer versions of the resize controller
if ne.resizeStatus == "" {
return false
}
resizeStatusVal := ne.resizeStatus
// If resizeStatus is NodeResizePending or NodeResizeInProgress, we should
// allow volume expansion on the node to proceed.
if resizeStatusVal == v1.PersistentVolumeClaimNodeResizePending ||
resizeStatusVal == v1.PersistentVolumeClaimNodeResizeInProgress {
return true
}
return false
}
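// expandOnPlugin drives the node-side expansion. The following summary is
// added for exposition: it returns (resizeFinished, size, err), where
// resizeFinished reports whether kubelet may treat the expansion as done
// (in some precheck-failure cases even when no resize happened), size is the
// volume size to record in the actual state of the world, and a non-nil err
// indicates the operation should be retried with exponential backoff.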
func (ne *NodeExpander) expandOnPlugin() (bool, resource.Quantity, error) {
allowExpansion := ne.runPreCheck()
if !allowExpansion {
if ne.pvcAlreadyUpdated {
// If the PVC is already updated, we could be here because the size stored in ASOW is smaller, the controller did a full
// expansion, and hence no node expansion is needed.
// This will stop the reconciler from retrying expansion on the node.
ne.testStatus = testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: false}
return true, ne.pluginResizeOpts.NewSize, nil
}
klog.V(3).Infof("NodeExpandVolume is not allowed to proceed for volume %s with resizeStatus %s", ne.vmt.VolumeName, ne.resizeStatus)
ne.testStatus = testResponseData{false /* resizeCalledOnPlugin */, true /* assumeResizeFinished */}
return false, ne.pluginResizeOpts.OldSize, nil
}
var err error
nodeName := ne.vmt.Pod.Spec.NodeName
if !ne.pvcAlreadyUpdated {
ne.pvc, err = util.MarkNodeExpansionInProgress(ne.pvc, ne.kubeClient)
if err != nil {
msg := ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion in progress", err)
klog.Error(msg.Error())
ne.testStatus = testResponseData{}
return false, ne.pluginResizeOpts.OldSize, err
}
}
_, resizeErr := ne.volumePlugin.NodeExpand(ne.pluginResizeOpts)
if resizeErr != nil {
// In order to support node volume expansion for RWX volumes on different nodes,
// we bypass the check for VolumeExpansionPendingOnNode state during the pre-check
// and then directly call the NodeExpandVolume method on the plugin.
//
// However, this does not make sense when the CSI driver does not support node expansion;
// we should not treat that as a failure. This is a workaround for the following issue:
// https://github.com/kubernetes/kubernetes/issues/131381.
//
// For other access modes, we should not hit this state, because we will wait for
// VolumeExpansionPendingOnNode before trying to expand volume in kubelet.
// See runPreCheck() above.
//
// If volume is already expanded, then we should not retry expansion on the node if
// driver returns OperationNotSupportedError.
if volumetypes.IsOperationNotSupportedError(resizeErr) && ne.pvcAlreadyUpdated {
klog.V(4).InfoS(ne.vmt.GenerateMsgDetailed("MountVolume.NodeExpandVolume failed", "NodeExpandVolume not supported"), "pod", klog.KObj(ne.vmt.Pod))
ne.testStatus = testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: false}
return true, ne.pluginResizeOpts.NewSize, nil
}
if volumetypes.IsOperationFinishedError(resizeErr) {
var markFailedError error
ne.actualStateOfWorld.MarkVolumeExpansionFailedWithFinalError(ne.vmt.VolumeName)
if volumetypes.IsInfeasibleError(resizeErr) || ne.markExpansionInfeasibleOnFailure {
ne.pvc, markFailedError = util.MarkNodeExpansionInfeasible(ne.pvc, ne.kubeClient, resizeErr)
if markFailedError != nil {
klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion as failed", markFailedError).Error())
}
} else {
ne.pvc, markFailedError = util.MarkNodeExpansionFailedCondition(ne.pvc, ne.kubeClient, resizeErr)
if markFailedError != nil {
klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion as failed", markFailedError).Error())
}
}
}
// if driver returned FailedPrecondition error that means
// volume expansion should not be retried on this node but
// expansion operation should not block mounting
if volumetypes.IsFailedPreconditionError(resizeErr) {
ne.actualStateOfWorld.MarkForInUseExpansionError(ne.vmt.VolumeName)
klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed", resizeErr).Error())
ne.testStatus = testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
return false, ne.pluginResizeOpts.OldSize, nil
}
ne.testStatus = testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
return false, ne.pluginResizeOpts.OldSize, resizeErr
}
simpleMsg, detailedMsg := ne.vmt.GenerateMsg("MountVolume.NodeExpandVolume succeeded", nodeName)
ne.recorder.Eventf(ne.vmt.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
ne.recorder.Eventf(ne.pvc, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
klog.InfoS(detailedMsg, "pod", klog.KObj(ne.vmt.Pod))
ne.testStatus = testResponseData{true /* resizeCalledOnPlugin */, true /* assumeResizeFinished */}
// no need to update PVC object if we already updated it
if ne.pvcAlreadyUpdated {
return true, ne.pluginResizeOpts.NewSize, nil
}
// File system resize succeeded, now update the PVC's Capacity to match the PV's
ne.pvc, err = util.MarkNodeExpansionFinishedWithRecovery(ne.pvc, ne.pluginResizeOpts.NewSize, ne.kubeClient)
if err != nil {
return true, ne.pluginResizeOpts.NewSize, fmt.Errorf("mountVolume.NodeExpandVolume update pvc status failed: %w", err)
}
return true, ne.pluginResizeOpts.NewSize, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package operationexecutor implements interfaces that enable execution of
// attach, detach, mount, and unmount operations with a
// nestedpendingoperations so that no more than one operation is ever
// triggered on the same volume for the same pod.
package operationexecutor
import (
"errors"
"fmt"
"time"
"github.com/go-logr/logr"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)
// OperationExecutor defines a set of operations for attaching, detaching,
// mounting, or unmounting a volume that are executed with a NestedPendingOperations, which
// prevents more than one operation from being triggered on the same volume.
//
// These operations should be idempotent (for example, AttachVolume should
// still succeed if the volume is already attached to the node, etc.). However,
// they depend on the volume plugins to implement this behavior.
//
// Once an operation completes successfully, the actualStateOfWorld is updated
// to indicate the volume is attached/detached/mounted/unmounted.
//
// If the OperationExecutor fails to start the operation because, for example,
// an operation with the same UniqueVolumeName is already pending, a non-nil
// error is returned.
//
// Once the operation is started, since it is executed asynchronously,
// errors are simply logged and the goroutine is terminated without updating
// actualStateOfWorld (callers are responsible for retrying as needed).
//
// Some of these operations may result in calls to the API server; callers are
// responsible for rate limiting on errors.
type OperationExecutor interface {
// AttachVolume attaches the volume to the node specified in volumeToAttach.
// It then updates the actual state of the world to reflect that.
AttachVolume(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// VerifyVolumesAreAttachedPerNode verifies the given list of volumes to see whether they are still attached to the node.
// If any volume is not attached right now, it will update the actual state of the world to reflect that.
// Note that this operation may run concurrently with other attach/detach operations.
// In theory (but very unlikely in practice), a race condition among these operations might mark a volume as detached
// even though it is attached. But the reconciler can correct this within a short period of time.
VerifyVolumesAreAttachedPerNode(AttachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// VerifyVolumesAreAttached verifies the volumes in use across the entire cluster and checks whether they are still attached to their nodes.
// If any volume is not attached right now, it will update the actual state of the world to reflect that.
VerifyVolumesAreAttached(volumesToVerify map[types.NodeName][]AttachedVolume, actualStateOfWorld ActualStateOfWorldAttacherUpdater)
// DetachVolume detaches the volume from the node specified in
// volumeToDetach, and updates the actual state of the world to reflect
// that. If verifySafeToDetach is set, a call is made to fetch the node
// object, which is used to verify that the volume does not exist in the node's
// Status.VolumesInUse list (the operation fails with an error if it does).
DetachVolume(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// If a volume has 'Filesystem' volumeMode, MountVolume mounts the
// volume to the pod specified in volumeToMount.
// Specifically it will:
// * Wait for the device to finish attaching (for attachable volumes only).
// * Mount device to global mount path (for attachable volumes only).
// * Update actual state of world to reflect volume is globally mounted (for
// attachable volumes only).
// * Mount the volume to the pod specific path.
// * Update actual state of world to reflect volume is mounted to the pod
// path.
// The parameter "isRemount" is informational and used to adjust logging
// verbosity. An initial mount is more log-worthy than a remount, for
// example.
//
// For 'Block' volumeMode, this method creates a symbolic link to
// the volume from both the pod specified in volumeToMount and global map path.
// Specifically it will:
// * Wait for the device to finish attaching (for attachable volumes only).
// * Update actual state of world to reflect volume is globally mounted/mapped.
// * Map volume to global map path using symbolic link.
// * Map the volume to the pod device map path using symbolic link.
// * Update actual state of world to reflect volume is mounted/mapped to the pod path.
MountVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error
// If a volume has 'Filesystem' volumeMode, UnmountVolume unmounts the
// volume from the pod specified in volumeToUnmount and updates the actual
// state of the world to reflect that.
//
// For 'Block' volumeMode, this method unmaps symbolic link to the volume
// from both the pod device map path in volumeToUnmount and global map path.
// And then, updates the actual state of the world to reflect that.
UnmountVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) error
// If a volume has 'Filesystem' volumeMode, UnmountDevice unmounts the
// volume's global mount path from the device (for attachable volumes only),
// freeing it for detach. It then updates the actual state of the world to
// reflect that.
//
// For 'Block' volumeMode, this method checks the number of symbolic links
// under the global map path. If the reference count is zero, it removes the
// global map path directory, freeing the volume for detach.
// It then updates the actual state of the world to reflect that.
UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, hostutil hostutil.HostUtils) error
// VerifyControllerAttachedVolume checks whether the specified volume is
// present in the specified node's AttachedVolumes status field. It uses
// kubeClient to fetch the node object.
// If the volume is found, the actual state of the world is updated to mark
// the volume as attached.
// If the volume does not implement the attacher interface, it is assumed to
// be attached and the actual state of the world is updated accordingly.
// If the volume is not found or there is an error (fetching the node
// object, for example) then an error is returned which triggers exponential
// back off on retries.
VerifyControllerAttachedVolume(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// IsOperationPending returns true if an operation for the given volumeName
// and one of podName or nodeName is pending, otherwise it returns false
IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, nodeName types.NodeName) bool
// IsOperationSafeToRetry returns false if an operation for the given volumeName
// and one of podName or nodeName is pending or in exponential backoff, otherwise it returns true
IsOperationSafeToRetry(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, nodeName types.NodeName, operationName string) bool
// ExpandInUseVolume will resize volume's file system to expected size without unmounting the volume.
ExpandInUseVolume(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) error
// ReconstructVolumeOperation constructs a new volumeSpec from the mount path, as created by the plugin, and returns it
ReconstructVolumeOperation(volumeMode v1.PersistentVolumeMode, plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, volumePath string, pluginName string) (volume.ReconstructedVolume, error)
}
// NewOperationExecutor returns a new instance of OperationExecutor.
func NewOperationExecutor(
operationGenerator OperationGenerator) OperationExecutor {
return &operationExecutor{
pendingOperations: nestedpendingoperations.NewNestedPendingOperations(
true /* exponentialBackOffOnError */),
operationGenerator: operationGenerator,
}
}
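// newExecutorSketch is an illustrative sketch, not part of the original
// file: it wires an OperationExecutor from an existing OperationGenerator.
// Callers such as kubelet's volume manager or the attach/detach controller
// then invoke methods like MountVolume or AttachVolume, and the embedded
// NestedPendingOperations serializes conflicting operations and applies
// exponential backoff on error.
func newExecutorSketch(og OperationGenerator) OperationExecutor {
    return NewOperationExecutor(og)
}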
// MarkVolumeOpts is a struct to pass arguments to MountVolume functions
type MarkVolumeOpts struct {
PodName volumetypes.UniquePodName
PodUID types.UID
VolumeName v1.UniqueVolumeName
Mounter volume.Mounter
BlockVolumeMapper volume.BlockVolumeMapper
VolumeGIDVolume string
VolumeSpec *volume.Spec
VolumeMountState VolumeMountState
SELinuxMountContext string
}
// ActualStateOfWorldMounterUpdater defines a set of operations updating the actual
// state of the world cache after successful mount/unmount.
type ActualStateOfWorldMounterUpdater interface {
// Marks the specified volume as mounted to the specified pod
MarkVolumeAsMounted(markVolumeOpts MarkVolumeOpts) error
// Marks the specified volume as unmounted from the specified pod
MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error
// MarkVolumeMountAsUncertain marks the state of the volume mount for the pod as uncertain
MarkVolumeMountAsUncertain(markVolumeOpts MarkVolumeOpts) error
// Marks the specified volume as having been globally mounted.
MarkDeviceAsMounted(volumeName v1.UniqueVolumeName, devicePath, deviceMountPath, seLinuxMountContext string) error
// MarkDeviceAsUncertain marks device state in global mount path as uncertain
MarkDeviceAsUncertain(volumeName v1.UniqueVolumeName, devicePath, deviceMountPath, seLinuxMountContext string) error
// Marks the specified volume as having its global mount unmounted.
MarkDeviceAsUnmounted(volumeName v1.UniqueVolumeName) error
// Marks the specified volume's file system resize request as finished.
MarkVolumeAsResized(volumeName v1.UniqueVolumeName, claimSize resource.Quantity) bool
// GetDeviceMountState returns mount state of the device in global path
GetDeviceMountState(volumeName v1.UniqueVolumeName) DeviceMountState
// GetVolumeMountState returns mount state of the volume for the Pod
GetVolumeMountState(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) VolumeMountState
// IsVolumeMountedElsewhere returns whether the supplied volume is mounted in a Pod other than the supplied one
IsVolumeMountedElsewhere(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool
// MarkForInUseExpansionError marks the volume to have in-use error during expansion.
// volume expansion must not be retried for this volume
MarkForInUseExpansionError(volumeName v1.UniqueVolumeName)
// CheckAndMarkVolumeAsUncertainViaReconstruction adds the volume to the actual
// state of the world only if the volume was not already there. This avoids
// overwriting any previously stored state. It returns an error if adding the
// volume to ASOW failed, and returns true if this operation resulted in the
// volume being added to ASOW, false otherwise.
CheckAndMarkVolumeAsUncertainViaReconstruction(opts MarkVolumeOpts) (bool, error)
// CheckAndMarkDeviceUncertainViaReconstruction only adds device to actual state of the world
// if device was not already there. This avoids overwriting any previously stored
// state. We only supply deviceMountPath because devicePath is already determined from
// VerifyControllerAttachedVolume function.
CheckAndMarkDeviceUncertainViaReconstruction(volumeName v1.UniqueVolumeName, deviceMountPath string) bool
// IsVolumeReconstructed returns true if volume currently added to actual state of the world
// was found during reconstruction.
IsVolumeReconstructed(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool
// IsVolumeDeviceReconstructed returns true if volume device identified by volumeName has been
// found during reconstruction.
IsVolumeDeviceReconstructed(volumeName v1.UniqueVolumeName) bool
// MarkVolumeExpansionFailedWithFinalError marks the volume as failed with a final error, so that
// this state doesn't have to be recorded in the API server
MarkVolumeExpansionFailedWithFinalError(volumeName v1.UniqueVolumeName)
// RemoveVolumeFromFailedWithFinalErrors removes the volume from the list that
// indicates the volume has failed expansion with a final error
RemoveVolumeFromFailedWithFinalErrors(volumeName v1.UniqueVolumeName)
// CheckVolumeInFailedExpansionWithFinalErrors verifies if volume expansion has failed with a final
// error
CheckVolumeInFailedExpansionWithFinalErrors(volumeName v1.UniqueVolumeName) bool
}
// ActualStateOfWorldAttacherUpdater defines a set of operations updating the
// actual state of the world cache after successful attach/detach/mount/unmount.
type ActualStateOfWorldAttacherUpdater interface {
// Marks the specified volume as attached to the specified node. If the
// volume name is supplied, that volume name will be used. If not, the
// volume name is computed using the result from querying the plugin.
//
// TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable
// volumes. See issue 29695.
MarkVolumeAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// Marks the specified volume as *possibly* attached to the specified node.
// If an attach operation fails, the attach/detach controller does not know for certain if the volume is attached or not.
// If the volume name is supplied, that volume name will be used. If not, the
// volume name is computed using the result from querying the plugin.
MarkVolumeAsUncertain(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error
// Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// Marks the desire to detach the specified volume (remove the volume from the node's
// volumesToReportAsAttached list)
RemoveVolumeFromReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) error
// Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportAsAttached list)
AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// InitializeClaimSize sets pvc claim size by reading pvc.Status.Capacity
InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize resource.Quantity)
GetClaimSize(volumeName v1.UniqueVolumeName) resource.Quantity
}
// VolumeLogger defines a set of operations for generating volume-related logging and error msgs
type VolumeLogger interface {
// Creates a detailed msg that can be used in logs
// The msg format follows the pattern "<prefixMsg> <volume details> <suffixMsg>",
// where each implementation provides the volume details
GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string)
// Creates a detailed error that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <volume details>: <err> ",
GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error)
// Creates a simple msg that is user friendly and a detailed msg that can be used in logs
// The msg format follows the pattern "<prefixMsg> <volume details> <suffixMsg>",
// where each implementation provides the volume details
GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string)
// Creates a simple error that is user friendly and a detailed error that can be used in logs.
// The msg format follows the pattern "<prefixMsg> <volume details>: <err> ",
GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error)
}
// Generates an error string with the format ": <err>" if err exists
func errSuffix(err error) string {
errStr := ""
if err != nil {
errStr = fmt.Sprintf(": %v", err)
}
return errStr
}
// Generate a detailed error msg for logs
func generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeName, details string) (detailedMsg string) {
return fmt.Sprintf("%v for volume %q %v %v", prefixMsg, volumeName, details, suffixMsg)
}
// Generate a simplified error msg for events and a detailed error msg for logs
func generateVolumeMsg(prefixMsg, suffixMsg, volumeName, details string) (simpleMsg, detailedMsg string) {
simpleMsg = fmt.Sprintf("%v for volume %q %v", prefixMsg, volumeName, suffixMsg)
return simpleMsg, generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeName, details)
}
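// messageFormatSketch is an illustrative sketch with hypothetical values,
// not part of the original file: it shows the shapes produced by
// generateVolumeMsg. simpleMsg omits the details string, while detailedMsg
// carries it before the suffix:
//
//	simpleMsg:   MountVolume.SetUp failed for volume "pvc-123" : timed out
//	detailedMsg: MountVolume.SetUp failed for volume "pvc-123" (UniqueName: "u") pod "p" : timed out
func messageFormatSketch() (string, string) {
    return generateVolumeMsg(
        "MountVolume.SetUp failed", ": timed out", "pvc-123", `(UniqueName: "u") pod "p"`)
}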
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
// MultiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to prevent the error from being reported more than once for a given volume.
MultiAttachErrorReported bool
// VolumeName is the unique identifier for the volume that should be
// attached.
VolumeName v1.UniqueVolumeName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume should be
// attached to.
NodeName types.NodeName
// ScheduledPods is the list of pods that reference this volume and are
// scheduled to the underlying node. Each entry is a pod object containing
// more information about the pod.
ScheduledPods []*v1.Pod
}
// GenerateMsgDetailed returns detailed msgs for volumes to attach
func (volume *VolumeToAttach) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) from node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for volumes to attach
func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) from node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for volumes to attach
func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for volumes to attach
func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return errors.New(simpleMsg), errors.New(detailedMsg)
}
// String combines key fields of the volume for logging in text format.
func (volume *VolumeToAttach) String() string {
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return fmt.Sprintf("%s (UniqueName: %s) from node %s", volumeSpecName, volume.VolumeName, volume.NodeName)
}
// MarshalLog combines key fields of the volume for logging in a structured format.
func (volume *VolumeToAttach) MarshalLog() interface{} {
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return struct {
VolumeName, UniqueName, NodeName string
}{
VolumeName: volumeSpecName,
UniqueName: string(volume.VolumeName),
NodeName: string(volume.NodeName),
}
}
var _ fmt.Stringer = &VolumeToAttach{}
var _ logr.Marshaler = &VolumeToAttach{}
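// logSketch is an illustrative sketch, not part of the original file.
// Because VolumeToAttach implements both fmt.Stringer and logr.Marshaler,
// a value can be passed to a logger directly: text output uses String(),
// while structured (JSON) output uses MarshalLog().
func logSketch(logger klog.Logger, volumeToAttach *VolumeToAttach) {
    logger.V(4).Info("Starting attach", "volume", volumeToAttach)
}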
// VolumeToMount represents a volume that should be attached to this node and
// mounted to the PodName.
type VolumeToMount struct {
// VolumeName is the unique identifier for the volume that should be
// mounted.
VolumeName v1.UniqueVolumeName
// PodName is the unique identifier for the pod that the volume should be
// mounted to after it is attached.
PodName volumetypes.UniquePodName
// VolumeSpec is a volume spec containing the specification for the volume
// that should be mounted. Used to create NewMounter. Used to generate
// InnerVolumeSpecName.
VolumeSpec *volume.Spec
// OuterVolumeSpecNames are the podSpec.Volume[x].Name values of the volume.
OuterVolumeSpecNames []string
// Pod to mount the volume to. Used to create NewMounter.
Pod *v1.Pod
// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
PluginIsAttachable bool
// PluginIsDeviceMountable indicates that the plugin for this volume implements
// the volume.DeviceMounter interface
PluginIsDeviceMountable bool
// VolumeGIDValue contains the value of the GID annotation, if present.
VolumeGIDValue string
// DevicePath contains the path on the node where the volume is attached.
// For non-attachable volumes this is empty.
DevicePath string
// ReportedInUse indicates that the volume was successfully added to the
// VolumesInUse field in the node's status.
ReportedInUse bool
// DesiredSizeLimit indicates the desired upper bound on the size of the volume
// (if so implemented)
DesiredSizeLimit *resource.Quantity
// time at which volume was requested to be mounted
MountRequestTime time.Time
// DesiredPersistentVolumeSize stores the desired size of the volume.
// Usually this is the size of pv.Spec.Capacity.
DesiredPersistentVolumeSize resource.Quantity
// SELinux label that should be used to mount.
// The label is set when:
// * SELinuxMountReadWriteOncePod feature gate is enabled and the volume is RWOP and kubelet knows the SELinux label.
// * Or, SELinuxMount feature gate is enabled and kubelet knows the SELinux label.
SELinuxLabel string
}
// DeviceMountState represents device mount state in a global path.
type DeviceMountState string
const (
// DeviceGloballyMounted means the device has been globally mounted successfully
DeviceGloballyMounted DeviceMountState = "DeviceGloballyMounted"
// DeviceMountUncertain means the device may not be mounted, but a mount
// operation may be in progress that could still cause the device mount to succeed.
DeviceMountUncertain DeviceMountState = "DeviceMountUncertain"
// DeviceNotMounted means the device has not been mounted globally.
DeviceNotMounted DeviceMountState = "DeviceNotMounted"
)
// VolumeMountState represents volume mount state in a path local to the pod.
type VolumeMountState string
const (
// VolumeMounted means the volume has been mounted in the pod's local path
VolumeMounted VolumeMountState = "VolumeMounted"
// VolumeMountUncertain means the volume may or may not be mounted in the pod's local path
VolumeMountUncertain VolumeMountState = "VolumeMountUncertain"
// VolumeNotMounted means the volume has not been mounted in the pod's local path
VolumeNotMounted VolumeMountState = "VolumeNotMounted"
)
type MountPreConditionFailed struct {
msg string
}
func (err *MountPreConditionFailed) Error() string {
return err.msg
}
func NewMountPreConditionFailedError(msg string) *MountPreConditionFailed {
return &MountPreConditionFailed{msg: msg}
}
func IsMountFailedPreconditionError(err error) bool {
var failedPreconditionError *MountPreConditionFailed
return errors.As(err, &failedPreconditionError)
}
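// preconditionSketch is an illustrative sketch, not part of the original
// file: it produces and detects a MountPreConditionFailed error. Since
// IsMountFailedPreconditionError uses errors.As, detection survives
// wrapping with %w.
func preconditionSketch() bool {
    err := NewMountPreConditionFailedError("volume is not yet attached")
    wrapped := fmt.Errorf("MountVolume.SetUp failed: %w", err)
    return IsMountFailedPreconditionError(wrapped) // true
}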
// GenerateMsgDetailed returns detailed msgs for volumes to mount
func (volume *VolumeToMount) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.Pod.Name, volume.Pod.UID)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for volumes to mount
func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.Pod.Name, volume.Pod.UID)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for volumes to mount
func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for volumes to mount
func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return errors.New(simpleMsg), errors.New(detailedMsg)
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
// VolumeName is the unique identifier for the volume that is attached.
VolumeName v1.UniqueVolumeName
// VolumeSpec is the volume spec containing the specification for the
// volume that is attached.
VolumeSpec *volume.Spec
// NodeName is the identifier for the node that the volume is attached to.
NodeName types.NodeName
// PluginIsAttachable indicates that the plugin for this volume implements
// the volume.Attacher interface
PluginIsAttachable bool
// DevicePath contains the path on the node where the volume is attached.
// For non-attachable volumes this is empty.
DevicePath string
// DeviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
DeviceMountPath string
// PluginName is the Unescaped Qualified name of the volume plugin used to
// attach and mount this volume.
PluginName string
SELinuxMountContext string
}
// GenerateMsgDetailed returns detailed msgs for attached volumes
func (volume *AttachedVolume) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) on node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateMsg returns simple and detailed msgs for attached volumes
func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) on node %q", volume.VolumeName, volume.NodeName)
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return generateVolumeMsg(prefixMsg, suffixMsg, volumeSpecName, detailedStr)
}
// GenerateErrorDetailed returns detailed errors for attached volumes
func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for attached volumes
func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return errors.New(simpleMsg), errors.New(detailedMsg)
}
// String combines key fields of the volume for logging in text format.
func (volume *AttachedVolume) String() string {
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return fmt.Sprintf("%s (UniqueName: %s) from node %s", volumeSpecName, volume.VolumeName, volume.NodeName)
}
// MarshalLog combines key fields of the volume for logging in a structured format.
func (volume *AttachedVolume) MarshalLog() interface{} {
volumeSpecName := "nil"
if volume.VolumeSpec != nil {
volumeSpecName = volume.VolumeSpec.Name()
}
return struct {
VolumeName, UniqueName, NodeName string
}{
VolumeName: volumeSpecName,
UniqueName: string(volume.VolumeName),
NodeName: string(volume.NodeName),
}
}
var _ fmt.Stringer = &AttachedVolume{}
var _ logr.Marshaler = &AttachedVolume{}
// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
// PodName is the unique identifier of the pod mounted to.
PodName volumetypes.UniquePodName
// VolumeName is the unique identifier of the volume mounted to the pod.
VolumeName v1.UniqueVolumeName
// InnerVolumeSpecName is the volume.Spec.Name() of the volume. If the
// volume was referenced through a persistent volume claim, this contains
// the name of the bound persistent volume object.
// It is the name that plugins use in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{innerVolumeSpecName}/
// PVC example,
// apiVersion: v1
// kind: PersistentVolume
// metadata:
// name: pv0003 <- InnerVolumeSpecName
// spec:
// capacity:
// storage: 5Gi
// accessModes:
// - ReadWriteOnce
// persistentVolumeReclaimPolicy: Recycle
// nfs:
// path: /tmp
// server: 172.17.0.2
// Non-PVC example:
// apiVersion: v1
// kind: Pod
// metadata:
// name: test-pd
// spec:
// containers:
// - image: registry.k8s.io/test-webserver
// name: test-container
// volumeMounts:
// - mountPath: /test-pd
// name: test-volume
// volumes:
// - name: test-volume <- InnerVolumeSpecName
// gcePersistentDisk:
// pdName: my-data-disk
// fsType: ext4
InnerVolumeSpecName string
// PluginName is the "Unescaped Qualified" name of the volume plugin used to
// mount and unmount this volume. It can be used to fetch the volume plugin
// to unmount with, on demand. It is also the name that plugins use, though
// escaped, in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
PluginName string
// PodUID is the UID of the pod mounted to. It is also the string used by
// plugins in their pod mount path, i.e.
// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
PodUID types.UID
// Mounter is the volume mounter used to mount this volume. It is required
// by kubelet to create container.VolumeMap.
// Mounter is only required for file system volumes and not required for block volumes.
Mounter volume.Mounter
// BlockVolumeMapper is the volume mapper used to map this volume. It is required
// by kubelet to create container.VolumeMap.
// BlockVolumeMapper is only required for block volumes and not required for file system volumes.
BlockVolumeMapper volume.BlockVolumeMapper
// VolumeGIDValue contains the value of the GID annotation, if present.
VolumeGIDValue string
// VolumeSpec is a volume spec containing the specification for the volume
// that should be mounted.
VolumeSpec *volume.Spec
// DeviceMountPath contains the path on the node where the device should
// be mounted after it is attached.
DeviceMountPath string
// SELinuxMountContext is value of mount option 'mount -o context=XYZ'.
// If empty, no such mount option was used.
SELinuxMountContext string
}
// GenerateMsgDetailed returns detailed msgs for mounted volumes
func (volume *MountedVolume) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID)
return generateVolumeMsgDetailed(prefixMsg, suffixMsg, string(volume.VolumeName), detailedStr)
}
// GenerateMsg returns simple and detailed msgs for mounted volumes
func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) {
detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID)
return generateVolumeMsg(prefixMsg, suffixMsg, string(volume.VolumeName), detailedStr)
}
// GenerateErrorDetailed returns simple and detailed errors for mounted volumes
func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
}
// GenerateError returns simple and detailed errors for mounted volumes
func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
return errors.New(simpleMsg), errors.New(detailedMsg)
}
type operationExecutor struct {
// pendingOperations keeps track of pending attach and detach operations so
// multiple operations are not started on the same volume
pendingOperations nestedpendingoperations.NestedPendingOperations
// operationGenerator is an interface that provides implementations for
// generating volume function
operationGenerator OperationGenerator
}
func (oe *operationExecutor) IsOperationPending(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName) bool {
return oe.pendingOperations.IsOperationPending(volumeName, podName, nodeName)
}
func (oe *operationExecutor) IsOperationSafeToRetry(
volumeName v1.UniqueVolumeName,
podName volumetypes.UniquePodName,
nodeName types.NodeName,
operationName string) bool {
return oe.pendingOperations.IsOperationSafeToRetry(volumeName, podName, nodeName, operationName)
}
func (oe *operationExecutor) AttachVolume(
logger klog.Logger,
volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations :=
oe.operationGenerator.GenerateAttachVolumeFunc(logger, volumeToAttach, actualStateOfWorld)
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
return oe.pendingOperations.Run(
volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName, generatedOperations)
}
return oe.pendingOperations.Run(
volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) DetachVolume(
logger klog.Logger,
volumeToDetach AttachedVolume,
verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateDetachVolumeFunc(logger, volumeToDetach, verifySafeToDetach, actualStateOfWorld)
if err != nil {
return err
}
if util.IsMultiAttachAllowed(volumeToDetach.VolumeSpec) {
return oe.pendingOperations.Run(
volumeToDetach.VolumeName, "" /* podName */, volumeToDetach.NodeName, generatedOperations)
}
return oe.pendingOperations.Run(
volumeToDetach.VolumeName, "" /* podName */, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) VerifyVolumesAreAttached(
attachedVolumes map[types.NodeName][]AttachedVolume,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) {
for node, nodeAttachedVolumes := range attachedVolumes {
nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld)
if nodeError != nil {
klog.Errorf("VerifyVolumesAreAttached failed for volumes %v, node %q with error %v", nodeAttachedVolumes, node, nodeError)
}
}
}
func (oe *operationExecutor) VerifyVolumesAreAttachedPerNode(
attachedVolumes []AttachedVolume,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateVolumesAreAttachedFunc(attachedVolumes, nodeName, actualStateOfWorld)
if err != nil {
return err
}
// Give an empty UniqueVolumeName so that this operation can be executed concurrently.
return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) MountVolume(
waitForAttachTimeout time.Duration,
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
isRemount bool) error {
fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Mount/remount a volume when a volume is attached
generatedOperations = oe.operationGenerator.GenerateMountVolumeFunc(
waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount)
} else {
// Block volume case
// Map the volume to the device if the volume is attached
generatedOperations, err = oe.operationGenerator.GenerateMapVolumeFunc(
waitForAttachTimeout, volumeToMount, actualStateOfWorld)
}
if err != nil {
return err
}
// Avoid executing mount/map from multiple pods referencing the
// same volume in parallel
podName := nestedpendingoperations.EmptyUniquePodName
// TODO: remove this -- not necessary
if !volumeToMount.PluginIsAttachable && !volumeToMount.PluginIsDeviceMountable {
// Volume plugins that are non-attachable and non-device-mountable can execute mount for multiple pods
// referencing the same volume in parallel
podName = util.GetUniquePodName(volumeToMount.Pod)
}
// TODO mount_device
return oe.pendingOperations.Run(
volumeToMount.VolumeName, podName, "" /* nodeName */, generatedOperations)
}
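// mountKeySketch is an illustrative sketch, not part of the original file.
// It makes MountVolume's serialization granularity explicit by returning the
// pod component of the pending-operations key: attachable or device-mountable
// plugins serialize per volume (empty pod key), while all other plugins
// serialize per (volume, pod), so different pods may mount the same volume in
// parallel.
func mountKeySketch(volumeToMount VolumeToMount) volumetypes.UniquePodName {
    if volumeToMount.PluginIsAttachable || volumeToMount.PluginIsDeviceMountable {
        return nestedpendingoperations.EmptyUniquePodName
    }
    return util.GetUniquePodName(volumeToMount.Pod)
}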
func (oe *operationExecutor) UnmountVolume(
volumeToUnmount MountedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
podsDir string) error {
fsVolume, err := util.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Unmount a volume if a volume is mounted
generatedOperations, err = oe.operationGenerator.GenerateUnmountVolumeFunc(
volumeToUnmount, actualStateOfWorld, podsDir)
} else {
// Block volume case
// Unmap a volume if a volume is mapped
generatedOperations, err = oe.operationGenerator.GenerateUnmapVolumeFunc(
volumeToUnmount, actualStateOfWorld)
}
if err != nil {
return err
}
// All volume plugins can execute unmount/unmap for multiple pods referencing the
// same volume in parallel
podName := volumetypes.UniquePodName(volumeToUnmount.PodUID)
return oe.pendingOperations.Run(
volumeToUnmount.VolumeName, podName, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) UnmountDevice(
deviceToDetach AttachedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
hostutil hostutil.HostUtils) error {
fsVolume, err := util.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec)
if err != nil {
return err
}
var generatedOperations volumetypes.GeneratedOperations
if fsVolume {
// Filesystem volume case
// Unmount and detach a device if a volume isn't referenced
generatedOperations, err = oe.operationGenerator.GenerateUnmountDeviceFunc(
deviceToDetach, actualStateOfWorld, hostutil)
} else {
// Block volume case
// Detach a device and remove loopback if a volume isn't referenced
generatedOperations, err = oe.operationGenerator.GenerateUnmapDeviceFunc(
deviceToDetach, actualStateOfWorld, hostutil)
}
if err != nil {
return err
}
// Avoid executing unmount/unmap device from multiple pods referencing
// the same volume in parallel
podName := nestedpendingoperations.EmptyUniquePodName
return oe.pendingOperations.Run(
deviceToDetach.VolumeName, podName, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) ExpandInUseVolume(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) error {
generatedOperations, err := oe.operationGenerator.GenerateExpandInUseVolumeFunc(volumeToMount, actualStateOfWorld, currentSize)
if err != nil {
return err
}
return oe.pendingOperations.Run(volumeToMount.VolumeName, "" /* podName */, "" /* nodeName */, generatedOperations)
}
func (oe *operationExecutor) VerifyControllerAttachedVolume(
logger klog.Logger,
volumeToMount VolumeToMount,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(logger, volumeToMount, nodeName, actualStateOfWorld)
if err != nil {
return err
}
return oe.pendingOperations.Run(
volumeToMount.VolumeName, "" /* podName */, "" /* nodeName */, generatedOperations)
}
// ReconstructVolumeOperation reconstructs a volumeSpec from the given mount path
func (oe *operationExecutor) ReconstructVolumeOperation(
volumeMode v1.PersistentVolumeMode,
plugin volume.VolumePlugin,
mapperPlugin volume.BlockVolumePlugin,
uid types.UID,
podName volumetypes.UniquePodName,
volumeSpecName string,
volumePath string,
pluginName string) (volume.ReconstructedVolume, error) {
// Filesystem Volume case
if volumeMode == v1.PersistentVolumeFilesystem {
// Create volumeSpec from mount path
klog.V(5).Infof("Starting operationExecutor.ReconstructVolume for file volume on pod %q", podName)
reconstructed, err := plugin.ConstructVolumeSpec(volumeSpecName, volumePath)
if err != nil {
return volume.ReconstructedVolume{}, err
}
return reconstructed, nil
}
// Block Volume case
// Create volumeSpec from mount path
klog.V(5).Infof("Starting operationExecutor.ReconstructVolume for block volume on pod %q", podName)
// volumePath contains the volumeName in the path. In the block volume case, {volumeName} is a symbolic link
// corresponding to the raw block device.
// e.g. volumePath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
volumeSpec, err := mapperPlugin.ConstructBlockVolumeSpec(uid, volumeSpecName, volumePath)
if err != nil {
return volume.ReconstructedVolume{}, err
}
return volume.ReconstructedVolume{
Spec: volumeSpec,
}, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operationexecutor
import (
"context"
goerrors "errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
volerr "k8s.io/cloud-provider/volume/errors"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kevents "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
const (
unknownVolumePlugin string = "UnknownVolumePlugin"
unknownAttachableVolumePlugin string = "UnknownAttachableVolumePlugin"
DetachOperationName string = "volume_detach"
VerifyControllerAttachedVolumeOpName string = "verify_controller_attached_volume"
)
var _ OperationGenerator = &operationGenerator{}
type operationGenerator struct {
// Used to fetch objects from the API server like Node in the
// VerifyControllerAttachedVolume operation.
kubeClient clientset.Interface
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
// recorder is used to record events in the API server
recorder record.EventRecorder
// blkUtil provides volume path related operations for block volume
blkUtil volumepathhandler.BlockVolumePathHandler
}
type inTreeResizeResponse struct {
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
err error
// indicates that resize operation was called on underlying volume driver
// mainly useful for testing.
resizeCalled bool
}
// NewOperationGenerator returns a new instance of operationGenerator
func NewOperationGenerator(kubeClient clientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
recorder record.EventRecorder,
blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator {
return &operationGenerator{
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
recorder: recorder,
blkUtil: blkUtil,
}
}
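// Usage sketch (illustrative, not part of the original source): the kubelet and the
// attach/detach controller wire an OperationGenerator into an OperationExecutor.
// All variables below are assumed to exist in the caller:
//
//	og := NewOperationGenerator(kubeClient, volumePluginMgr, recorder, blkUtil)
//	oe := NewOperationExecutor(og)
//	_ = oe // subsequent AttachVolume/MountVolume/... calls go through oe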
// OperationGenerator is the interface that extracts the operation-generation functions out of operation_executor so that they can be dependency injected
type OperationGenerator interface {
// Generates the MountVolume function needed to perform the mount of a volume plugin
GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) volumetypes.GeneratedOperations
// Generates the UnmountVolume function needed to perform the unmount of a volume plugin
GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error)
// Generates the AttachVolume function needed to perform attach of a volume plugin
GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations
// Generates the DetachVolume function needed to perform the detach of a volume plugin
GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
// Generates the VolumesAreAttached function needed to verify if volume plugins are attached
GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
// Generates the UnmountDevice function needed to perform the unmount of a device
GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter hostutil.HostUtils) (volumetypes.GeneratedOperations, error)
// Generates the function needed to check whether the attach_detach controller has attached the volume
GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
// Generates the MapVolume function needed to perform the map of a volume plugin
GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error)
// Generates the UnmapVolume function needed to perform the unmap of a volume plugin
GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error)
// Generates the UnmapDevice function needed to perform the unmap of a device
GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter hostutil.HostUtils) (volumetypes.GeneratedOperations, error)
// GetVolumePluginMgr returns volume plugin manager
GetVolumePluginMgr() *volume.VolumePluginMgr
GenerateExpandVolumeFunc(*v1.PersistentVolumeClaim, *v1.PersistentVolume) (volumetypes.GeneratedOperations, error)
GenerateExpandAndRecoverVolumeFunc(*v1.PersistentVolumeClaim, *v1.PersistentVolume, string) (volumetypes.GeneratedOperations, error)
// Generates the volume file system resize function, which can resize the volume's file system to the expected size without unmounting the volume.
// Along with volumeToMount and actualStateOfWorld, the function expects the current size of the volume on the node as an argument. The current
// size here always refers to the capacity last recorded in actualStateOfWorld from pvc.Status.Capacity
GenerateExpandInUseVolumeFunc(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) (volumetypes.GeneratedOperations, error)
}
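// Illustrative note (not part of the original source): because OperationGenerator is
// an interface, tests can inject a fake generator instead of one that talks to real
// volume plugins. A minimal hypothetical sketch, embedding the interface so only the
// methods a test cares about need overriding:
//
//	type fakeOperationGenerator struct {
//		OperationGenerator // embedded; unoverridden methods panic if called on a nil embed
//	}
//
//	func (f *fakeOperationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { return nil }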
type inTreeResizeOpts struct {
resizerName string
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
volumeSpec *volume.Spec
volumePlugin volume.ExpandableVolumePlugin
}
type nodeResizeOperationOpts struct {
vmt VolumeToMount
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
pluginResizeOpts volume.NodeResizeOptions
volumePlugin volume.NodeExpandableVolumePlugin
actualStateOfWorld ActualStateOfWorldMounterUpdater
}
func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
attachedVolumes []AttachedVolume,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
// volumesPerPlugin maps from a volume plugin to a list of volume specs which belong
// to this type of plugin
volumesPerPlugin := make(map[string][]*volume.Spec)
// volumeSpecMap maps from a volume spec to its unique volumeName which will be used
// when calling MarkVolumeAsDetached
volumeSpecMap := make(map[*volume.Spec]v1.UniqueVolumeName)
// Iterate over each volume spec and put it into a map indexed by pluginName
for _, volumeAttached := range attachedVolumes {
if volumeAttached.VolumeSpec == nil {
klog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName)
continue
}
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec)
if err != nil || volumePlugin == nil {
klog.Error(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
continue
}
volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()]
if !pluginExists {
volumeSpecList = []*volume.Spec{}
}
volumeSpecList = append(volumeSpecList, volumeAttached.VolumeSpec)
volumesPerPlugin[volumePlugin.GetPluginName()] = volumeSpecList
// Migration: VolumeSpecMap contains original VolumeName for use in ActualStateOfWorld
volumeSpecMap[volumeAttached.VolumeSpec] = volumeAttached.VolumeName
}
volumesAreAttachedFunc := func() volumetypes.OperationContext {
// For each volume plugin, pass the list of volume specs to VolumesAreAttached to check
// whether the volumes are still attached.
for pluginName, volumesSpecs := range volumesPerPlugin {
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginByName(pluginName)
if err != nil || attachableVolumePlugin == nil {
klog.Errorf(
"VolumeAreAttached.FindAttachablePluginBySpec failed for plugin %q with: %v",
pluginName,
err)
continue
}
volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
if newAttacherErr != nil {
klog.Errorf(
"VolumesAreAttached.NewAttacher failed for getting plugin %q with: %v",
pluginName,
newAttacherErr)
continue
}
attached, areAttachedErr := volumeAttacher.VolumesAreAttached(volumesSpecs, nodeName)
if areAttachedErr != nil {
klog.Errorf(
"VolumesAreAttached failed for checking on node %q with: %v",
nodeName,
areAttachedErr)
continue
}
for spec, check := range attached {
if !check {
actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[spec], nodeName)
klog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.",
volumeSpecMap[spec], spec.Name(), nodeName)
}
}
}
// It is hard to differentiate the migrated status of all volumes for verify_volumes_are_attached_per_node, so report migrated=false
return volumetypes.NewOperationContext(nil, nil, false)
}
return volumetypes.GeneratedOperations{
OperationName: "verify_volumes_are_attached_per_node",
OperationFunc: volumesAreAttachedFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume("<n/a>", nil), "verify_volumes_are_attached_per_node"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
func (og *operationGenerator) GenerateAttachVolumeFunc(
logger klog.Logger,
volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
attachVolumeFunc := func() volumetypes.OperationContext {
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
migrated := getMigratedStatusBySpec(volumeToAttach.VolumeSpec)
if err != nil || attachableVolumePlugin == nil {
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.FindAttachablePluginBySpec failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
if newAttacherErr != nil {
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.NewAttacher failed", newAttacherErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Execute attach
devicePath, attachErr := volumeAttacher.Attach(
volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
if attachErr != nil {
uncertainNode := volumeToAttach.NodeName
if derr, ok := attachErr.(*volerr.DanglingAttachError); ok {
uncertainNode = derr.CurrentNode
}
addErr := actualStateOfWorld.MarkVolumeAsUncertain(
logger,
volumeToAttach.VolumeName,
volumeToAttach.VolumeSpec,
uncertainNode)
if addErr != nil {
klog.Errorf("AttachVolume.MarkVolumeAsUncertain fail to add the volume %q to actual state with %s", volumeToAttach.VolumeName, addErr)
}
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.Attach failed", attachErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Successful attach event is useful for user debugging
simpleMsg, _ := volumeToAttach.GenerateMsg("AttachVolume.Attach succeeded", "")
for _, pod := range volumeToAttach.ScheduledPods {
og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg)
}
klog.Info(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
// Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
logger, v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
for _, pod := range volumeToAttach.ScheduledPods {
og.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, (*err).Error())
}
}
}
attachableVolumePluginName := unknownAttachableVolumePlugin
// Get attacher plugin
attachableVolumePlugin, err :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
// It is OK to ignore the error here; this function is not expected to return an error.
// If an error occurred during function generation, the same (skipped) error will also be triggered
// when the generated function is executed, and those errors are handled during the execution of the
// generated function with a back-off policy.
if err == nil && attachableVolumePlugin != nil {
attachableVolumePluginName = attachableVolumePlugin.GetPluginName()
}
return volumetypes.GeneratedOperations{
OperationName: "volume_attach",
OperationFunc: attachVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(attachableVolumePluginName, volumeToAttach.VolumeSpec), "volume_attach"),
}
}
func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr {
return og.volumePluginMgr
}
func (og *operationGenerator) GenerateDetachVolumeFunc(
logger klog.Logger,
volumeToDetach AttachedVolume,
verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
var volumeName string
var attachableVolumePlugin volume.AttachableVolumePlugin
var pluginName string
var err error
if volumeToDetach.VolumeSpec != nil {
attachableVolumePlugin, err = findDetachablePluginBySpec(volumeToDetach.VolumeSpec, og.volumePluginMgr)
if err != nil || attachableVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.findDetachablePluginBySpec failed", err)
}
volumeName, err =
attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec)
if err != nil {
return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err)
}
} else {
// Get attacher plugin and the volumeName by splitting the volume unique name in case
// there's no VolumeSpec: this happens only on attach/detach controller crash recovery
// when a pod has been deleted during the controller downtime
pluginName, volumeName, err = util.SplitUniqueName(volumeToDetach.VolumeName)
if err != nil {
return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err)
}
attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName)
if err != nil || attachableVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginByName failed", err)
}
}
if pluginName == "" {
pluginName = attachableVolumePlugin.GetPluginName()
}
volumeDetacher, err := attachableVolumePlugin.NewDetacher()
if err != nil {
return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err)
}
detachVolumeFunc := func() volumetypes.OperationContext {
var err error
if verifySafeToDetach {
err = og.verifyVolumeIsSafeToDetach(volumeToDetach)
}
if err == nil {
err = volumeDetacher.Detach(volumeName, volumeToDetach.NodeName)
}
migrated := getMigratedStatusBySpec(volumeToDetach.VolumeSpec)
if err != nil {
// On failure, mark the volume as uncertain. Attach() must succeed before adding the volume back
// to node status as attached.
uncertainError := actualStateOfWorld.MarkVolumeAsUncertain(
logger, volumeToDetach.VolumeName, volumeToDetach.VolumeSpec, volumeToDetach.NodeName)
if uncertainError != nil {
klog.Errorf("DetachVolume.MarkVolumeAsUncertain failed to add the volume %q to actual state after detach error: %s", volumeToDetach.VolumeName, uncertainError)
}
eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.Info(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
// Update actual state of world
actualStateOfWorld.MarkVolumeAsDetached(
volumeToDetach.VolumeName, volumeToDetach.NodeName)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: DetachOperationName,
OperationFunc: detachVolumeFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), DetachOperationName),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
func (og *operationGenerator) GenerateMountVolumeFunc(
waitForAttachTimeout time.Duration,
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
isRemount bool) volumetypes.GeneratedOperations {
volumePluginName := unknownVolumePlugin
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err == nil && volumePlugin != nil {
volumePluginName = volumePlugin.GetPluginName()
}
mountVolumeFunc := func() volumetypes.OperationContext {
// Get mounter plugin
volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
migrated := getMigratedStatusBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.FindPluginBySpec failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
affinityErr := checkNodeAffinity(og, volumeToMount)
if affinityErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NodeAffinity check failed", affinityErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
volumeMounter, newMounterErr := volumePlugin.NewMounter(
volumeToMount.VolumeSpec,
volumeToMount.Pod)
if newMounterErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin)
if mountCheckError != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountOptionSupport check failed", mountCheckError)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Enforce ReadWriteOncePod access mode if it is the only one present. This is also enforced during scheduling.
if actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) &&
// Because we do not know what access mode the pod intends to use if there are multiple.
len(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes) == 1 &&
v1helper.ContainsAccessMode(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) {
err = goerrors.New("volume uses the ReadWriteOncePod access mode and is already in use by another pod")
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.SetUp failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Get attacher, if possible
attachableVolumePlugin, _ :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
var volumeAttacher volume.Attacher
if attachableVolumePlugin != nil {
volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
}
// get deviceMounter, if possible
deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec)
var volumeDeviceMounter volume.DeviceMounter
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter()
}
var fsGroup *int64
var fsGroupChangePolicy *v1.PodFSGroupChangePolicy
if podSc := volumeToMount.Pod.Spec.SecurityContext; podSc != nil {
if podSc.FSGroup != nil {
fsGroup = podSc.FSGroup
}
if podSc.FSGroupChangePolicy != nil {
fsGroupChangePolicy = podSc.FSGroupChangePolicy
}
}
devicePath := volumeToMount.DevicePath
if volumeAttacher != nil {
// Wait for attachable volumes to finish attaching
klog.InfoS(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
devicePath, err = volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, devicePath, volumeToMount.Pod, waitForAttachTimeout)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.InfoS(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)), "pod", klog.KObj(volumeToMount.Pod))
}
var resizeError error
resizeOptions := volume.NodeResizeOptions{
DevicePath: devicePath,
}
if volumeDeviceMounter != nil && actualStateOfWorld.GetDeviceMountState(volumeToMount.VolumeName) != DeviceGloballyMounted {
deviceMountPath, err :=
volumeDeviceMounter.GetDeviceMountPath(volumeToMount.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.GetDeviceMountPath failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Mount device to global mount path
err = volumeDeviceMounter.MountDevice(
volumeToMount.VolumeSpec,
devicePath,
deviceMountPath,
volume.DeviceMounterArgs{FsGroup: fsGroup, SELinuxLabel: volumeToMount.SELinuxLabel},
)
if err != nil {
og.checkForFailedMount(volumeToMount, err)
og.markDeviceErrorState(volumeToMount, devicePath, deviceMountPath, err, actualStateOfWorld)
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MountDevice failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.InfoS(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath)), "pod", klog.KObj(volumeToMount.Pod))
// Update actual state of world to reflect volume is globally mounted
markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted(
volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel)
if markDeviceMountedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MarkDeviceAsMounted failed", markDeviceMountedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// set staging path for volume expansion
resizeOptions.DeviceStagePath = deviceMountPath
}
if volumeDeviceMounter != nil && resizeOptions.DeviceStagePath == "" {
deviceStagePath, err := volumeDeviceMounter.GetDeviceMountPath(volumeToMount.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.GetDeviceMountPath failed for expansion", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
resizeOptions.DeviceStagePath = deviceStagePath
}
// Execute mount
mountErr := volumeMounter.SetUp(volume.MounterArgs{
FsUser: util.FsUserFrom(volumeToMount.Pod),
FsGroup: fsGroup,
DesiredSize: volumeToMount.DesiredSizeLimit,
FSGroupChangePolicy: fsGroupChangePolicy,
Recorder: og.recorder,
SELinuxLabel: volumeToMount.SELinuxLabel,
})
// Update actual state of world
markOpts := MarkVolumeOpts{
PodName: volumeToMount.PodName,
PodUID: volumeToMount.Pod.UID,
VolumeName: volumeToMount.VolumeName,
Mounter: volumeMounter,
VolumeGIDVolume: volumeToMount.VolumeGIDValue,
VolumeSpec: volumeToMount.VolumeSpec,
VolumeMountState: VolumeMounted,
SELinuxMountContext: volumeToMount.SELinuxLabel,
}
if mountErr != nil {
og.checkForFailedMount(volumeToMount, mountErr)
og.markVolumeErrorState(volumeToMount, markOpts, mountErr, actualStateOfWorld)
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.SetUp failed", mountErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
detailedMsg := volumeToMount.GenerateMsgDetailed("MountVolume.SetUp succeeded", "")
verbosity := klog.Level(1)
if isRemount {
verbosity = klog.Level(4)
}
klog.V(verbosity).InfoS(detailedMsg, "pod", klog.KObj(volumeToMount.Pod))
resizeOptions.DeviceMountPath = volumeMounter.GetPath()
_, resizeError = og.expandVolumeDuringMount(volumeToMount, actualStateOfWorld, resizeOptions)
if resizeError != nil {
klog.Errorf("MountVolume.NodeExpandVolume failed with %v", resizeError)
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.Setup failed while expanding volume", resizeError)
// At this point, MountVolume.SetUp has already succeeded, so we should add the volume to the actual state
// so that the reconciler can clean it up when needed. However, because the volume resize failed,
// we should not mark the volume as mounted, to prevent the pod from starting to use it.
// Considering the above, we mark the volume as uncertain here so that the reconciler will trigger
// volume teardown when the pod is deleted, and so that the pod will not start using the volume.
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts); err != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
}
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Record the total time it takes to mount a volume. This is the end-to-end time, which includes waiting for the volume to attach,
// the node to be updated, and the plugin call to succeed.
mountRequestTime := volumeToMount.MountRequestTime
totalTimeTaken := time.Since(mountRequestTime).Seconds()
util.RecordOperationLatencyMetric(util.GetFullQualifiedPluginNameForVolume(volumePluginName, volumeToMount.VolumeSpec), "overall_volume_mount", totalTimeTaken)
markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(markOpts)
if markVolMountedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.MarkVolumeAsMounted failed", markVolMountedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, (*err).Error())
}
}
return volumetypes.GeneratedOperations{
OperationName: "volume_mount",
OperationFunc: mountVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePluginName, volumeToMount.VolumeSpec), "volume_mount"),
}
}
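// Illustrative sketch (not part of the original source) of how a GeneratedOperations
// value produced above is consumed. In practice GeneratedOperations.Run handles this
// sequencing; the snippet only makes the ordering explicit, and assumes og, timeout,
// volumeToMount and asw exist in the caller:
//
//	ops := og.GenerateMountVolumeFunc(timeout, volumeToMount, asw, false /* isRemount */)
//	opCtx := ops.OperationFunc() // runs mountVolumeFunc
//	err := opCtx.DetailedErr
//	if ops.EventRecorderFunc != nil {
//		ops.EventRecorderFunc(&err) // emits a FailedMountVolume event on error
//	}
//	ops.CompleteFunc(volumetypes.CompleteFuncParam{Err: &err, Migrated: &opCtx.Migrated})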
func (og *operationGenerator) checkForFailedMount(volumeToMount VolumeToMount, mountError error) {
pv := volumeToMount.VolumeSpec.PersistentVolume
if pv == nil {
return
}
if volumetypes.IsFilesystemMismatchError(mountError) {
simpleMsg, _ := volumeToMount.GenerateMsg("MountVolume failed", mountError.Error())
og.recorder.Eventf(pv, v1.EventTypeWarning, kevents.FailedMountOnFilesystemMismatch, simpleMsg)
}
}
func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount, devicePath, deviceMountPath string, mountError error, actualStateOfWorld ActualStateOfWorldMounterUpdater) {
if volumetypes.IsOperationFinishedError(mountError) &&
actualStateOfWorld.GetDeviceMountState(volumeToMount.VolumeName) == DeviceMountUncertain {
if actualStateOfWorld.IsVolumeDeviceReconstructed(volumeToMount.VolumeName) {
klog.V(2).InfoS("MountVolume.markDeviceErrorState leaving volume uncertain", "volumeName", volumeToMount.VolumeName)
return
}
// Only devices which were uncertain can be marked as unmounted
markDeviceUnmountError := actualStateOfWorld.MarkDeviceAsUnmounted(volumeToMount.VolumeName)
if markDeviceUnmountError != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
}
return
}
if volumetypes.IsUncertainProgressError(mountError) &&
actualStateOfWorld.GetDeviceMountState(volumeToMount.VolumeName) == DeviceNotMounted {
// Only devices which are not mounted can be marked as uncertain. We do not want to mark a device
// that was previously marked as mounted as uncertain here.
markDeviceUncertainError := actualStateOfWorld.MarkDeviceAsUncertain(volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel)
if markDeviceUncertainError != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
}
}
}
func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount, markOpts MarkVolumeOpts, mountError error, actualStateOfWorld ActualStateOfWorldMounterUpdater) {
if volumetypes.IsOperationFinishedError(mountError) &&
actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeMountUncertain {
// If the volume was previously reconstructed, we do not change its state to unmounted even
// if the mount operation fails.
if actualStateOfWorld.IsVolumeReconstructed(volumeToMount.VolumeName, volumeToMount.PodName) {
klog.V(3).InfoS("MountVolume.markVolumeErrorState leaving volume uncertain", "volumeName", volumeToMount.VolumeName)
return
}
t := actualStateOfWorld.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName)
if t != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
}
return
}
if volumetypes.IsUncertainProgressError(mountError) &&
actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeNotMounted {
t := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts)
if t != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
}
}
}
func (og *operationGenerator) GenerateUnmountVolumeFunc(
volumeToUnmount MountedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
podsDir string) (volumetypes.GeneratedOperations, error) {
// Get mountable plugin
volumePlugin, err := og.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName)
if err != nil || volumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err)
}
volumeUnmounter, newUnmounterErr := volumePlugin.NewUnmounter(
volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID)
if newUnmounterErr != nil {
return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr)
}
unmountVolumeFunc := func() volumetypes.OperationContext {
subpather := og.volumePluginMgr.Host.GetSubpather()
migrated := getMigratedStatusBySpec(volumeToUnmount.VolumeSpec)
// Remove all bind-mounts for subPaths
podDir := filepath.Join(podsDir, string(volumeToUnmount.PodUID))
if err := subpather.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil {
eventErr, detailedErr := volumeToUnmount.GenerateError("error cleaning subPath mounts", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Execute unmount
unmountErr := volumeUnmounter.TearDown()
if unmountErr != nil {
// Mark the volume as uncertain, so SetUp is called for new pods. Teardown may already be in progress.
opts := MarkVolumeOpts{
PodName: volumeToUnmount.PodName,
PodUID: volumeToUnmount.PodUID,
VolumeName: volumeToUnmount.VolumeName,
VolumeGIDVolume: volumeToUnmount.VolumeGIDValue,
VolumeSpec: volumeToUnmount.VolumeSpec,
VolumeMountState: VolumeMountUncertain,
}
markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts)
if markMountUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountVolume will be re-tried shortly.
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
}
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToUnmount.GenerateError("UnmountVolume.TearDown failed", unmountErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.Infof(
"UnmountVolume.TearDown succeeded for volume %q pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGIDValue %q",
volumeToUnmount.VolumeName,
volumeToUnmount.PodName,
volumeToUnmount.PodUID,
volumeToUnmount.InnerVolumeSpecName,
volumeToUnmount.PluginName,
volumeToUnmount.VolumeGIDValue)
// Update actual state of world
markVolMountedErr := actualStateOfWorld.MarkVolumeAsUnmounted(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolMountedErr != nil {
// On failure, just log and exit
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: "volume_unmount",
OperationFunc: unmountVolumeFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "volume_unmount"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
func (og *operationGenerator) GenerateUnmountDeviceFunc(
deviceToDetach AttachedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
hostutil hostutil.HostUtils) (volumetypes.GeneratedOperations, error) {
// Get DeviceMounter plugin
deviceMountableVolumePlugin, err :=
og.volumePluginMgr.FindDeviceMountablePluginByName(deviceToDetach.PluginName)
if err != nil || deviceMountableVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindDeviceMountablePluginByName failed", err)
}
volumeDeviceUnmounter, err := deviceMountableVolumePlugin.NewDeviceUnmounter()
if err != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceUnmounter failed", err)
}
volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
if err != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceMounter failed", err)
}
unmountDeviceFunc := func() volumetypes.OperationContext {
migrated := getMigratedStatusBySpec(deviceToDetach.VolumeSpec)
//deviceMountPath := deviceToDetach.DeviceMountPath
deviceMountPath, err :=
volumeDeviceMounter.GetDeviceMountPath(deviceToDetach.VolumeSpec)
if err != nil {
// On failure other than "does not exist", return error. Caller will log and retry.
if !strings.Contains(err.Error(), "does not exist") {
eventErr, detailedErr := deviceToDetach.GenerateError("GetDeviceMountPath failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// If the mount path could not be found, don't fail the unmount, but instead log a warning and proceed,
// using the value from deviceToDetach.DeviceMountPath, so that the device can be marked as unmounted
deviceMountPath = deviceToDetach.DeviceMountPath
klog.Warning(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
"GetDeviceMountPath failed, but unmount operation will proceed using deviceMountPath=%s: %v", deviceMountPath, err), ""))
}
refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
if err != nil || util.HasMountRefs(deviceMountPath, refs) {
if err == nil {
err = fmt.Errorf("the device mount path %q is still mounted by other references %v", deviceMountPath, refs)
}
eventErr, detailedErr := deviceToDetach.GenerateError("GetDeviceMountRefs check failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Execute unmount
unmountDeviceErr := volumeDeviceUnmounter.UnmountDevice(deviceMountPath)
if unmountDeviceErr != nil {
// Mark the device as uncertain, so MountDevice is called for new pods. UnmountDevice may already be in progress.
markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
if markDeviceUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
}
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("UnmountDevice failed", unmountDeviceErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Before logging that UnmountDevice succeeded and moving on,
// use hostutil.PathIsDevice to check if the path is a device,
// if so use hostutil.DeviceOpened to check if the device is in use anywhere
// else on the system. Retry if it returns true.
deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, hostutil)
if deviceOpenedErr != nil {
return volumetypes.NewOperationContext(nil, deviceOpenedErr, migrated)
}
// The device is still in use elsewhere. Caller will log and retry.
if deviceOpened {
// Mark the device as uncertain, so MountDevice is called for new pods.
markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
if markDeviceUncertainErr != nil {
// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
}
eventErr, detailedErr := deviceToDetach.GenerateError(
"UnmountDevice failed",
goerrors.New("the device is in use when it was no longer expected to be in use"))
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.Info(deviceToDetach.GenerateMsgDetailed("UnmountDevice succeeded", ""))
// Update actual state of world
markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
deviceToDetach.VolumeName)
if markDeviceUnmountedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: "unmount_device",
OperationFunc: unmountDeviceFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(deviceMountableVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmount_device"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
// GenerateMapVolumeFunc marks a volume as mounted based on the following steps.
// If the plugin is attachable, call WaitForAttach() and then mark the device
// as mounted. Next, SetUpDevice is called regardless of the plugin type, though
// this step is mainly targeted at non-attachable plugins.
// After setup is done, create symbolic links at both the global map path and the pod
// device map path. Once the symbolic links are created, take an fd lock via
// loopback on the device to avoid silent volume replacement. This lock
// is released once no one uses the device anymore.
// If all steps are completed, the volume is marked as mounted.
func (og *operationGenerator) GenerateMapVolumeFunc(
waitForAttachTimeout time.Duration,
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
// Get block volume mapper plugin
blockVolumePlugin, err :=
og.volumePluginMgr.FindMapperPluginBySpec(volumeToMount.VolumeSpec)
if err != nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed", err)
}
if blockVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil)
}
affinityErr := checkNodeAffinity(og, volumeToMount)
if affinityErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.NodeAffinity check failed", affinityErr)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error())
return volumetypes.GeneratedOperations{}, detailedErr
}
blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper(
volumeToMount.VolumeSpec,
volumeToMount.Pod)
if newMapperErr != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.NewBlockVolumeMapper initialization failed", newMapperErr)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error())
return volumetypes.GeneratedOperations{}, detailedErr
}
// Get attacher, if possible
attachableVolumePlugin, _ :=
og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
var volumeAttacher volume.Attacher
if attachableVolumePlugin != nil {
volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
}
mapVolumeFunc := func() (operationContext volumetypes.OperationContext) {
var devicePath string
var stagingPath string
migrated := getMigratedStatusBySpec(volumeToMount.VolumeSpec)
// Enforce ReadWriteOncePod access mode. This is also enforced during scheduling.
if actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) &&
// Because we do not know what access mode the pod intends to use if there are multiple.
len(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes) == 1 &&
v1helper.ContainsAccessMode(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) {
err = goerrors.New("volume uses the ReadWriteOncePod access mode and is already in use by another pod")
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.SetUpDevice failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Set up global map path under the given plugin directory using symbolic link
globalMapPath, err :=
blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.GetGlobalMapPath failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
if volumeAttacher != nil {
// Wait for attachable volumes to finish attaching
klog.InfoS(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
devicePath, err = volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.WaitForAttach failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.InfoS(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath)), "pod", klog.KObj(volumeToMount.Pod))
}
// Call SetUpDevice if blockVolumeMapper implements CustomBlockVolumeMapper
if customBlockVolumeMapper, ok := blockVolumeMapper.(volume.CustomBlockVolumeMapper); ok && actualStateOfWorld.GetDeviceMountState(volumeToMount.VolumeName) != DeviceGloballyMounted {
var mapErr error
stagingPath, mapErr = customBlockVolumeMapper.SetUpDevice()
if mapErr != nil {
og.markDeviceErrorState(volumeToMount, devicePath, globalMapPath, mapErr, actualStateOfWorld)
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.SetUpDevice failed", mapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
// Update actual state of world to reflect volume is globally mounted
markedDevicePath := devicePath
markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted(
volumeToMount.VolumeName, markedDevicePath, globalMapPath, "")
if markDeviceMappedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
markVolumeOpts := MarkVolumeOpts{
PodName: volumeToMount.PodName,
PodUID: volumeToMount.Pod.UID,
VolumeName: volumeToMount.VolumeName,
BlockVolumeMapper: blockVolumeMapper,
VolumeGIDVolume: volumeToMount.VolumeGIDValue,
VolumeSpec: volumeToMount.VolumeSpec,
VolumeMountState: VolumeMounted,
}
// Call MapPodDevice if blockVolumeMapper implements CustomBlockVolumeMapper
if customBlockVolumeMapper, ok := blockVolumeMapper.(volume.CustomBlockVolumeMapper); ok {
// Execute driver specific map
pluginDevicePath, mapErr := customBlockVolumeMapper.MapPodDevice()
if mapErr != nil {
// On failure, return error. Caller will log and retry.
og.markVolumeErrorState(volumeToMount, markVolumeOpts, mapErr, actualStateOfWorld)
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapPodDevice failed", mapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// From now on, the volume is mapped. Mark it as uncertain on error,
// so it is unmapped when the corresponding pod is deleted.
defer func() {
if operationContext.EventErr != nil {
errText := operationContext.EventErr.Error()
og.markVolumeErrorState(volumeToMount, markVolumeOpts, volumetypes.NewUncertainProgressError(errText), actualStateOfWorld)
}
}()
// If pluginDevicePath is provided, assume the attacher may not provide a device
// path, or that the attachment flow uses SetUpDevice to get the device path
if len(pluginDevicePath) != 0 {
devicePath = pluginDevicePath
}
if len(devicePath) == 0 {
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume failed", goerrors.New("device path of the volume is empty"))
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
// When kubelet is containerized, devicePath may be a symlink at a location unavailable to
// kubelet, so evaluate it on the host and expect that it links to a device in /dev,
// which will be available to containerized kubelet. If it still does not exist,
// AttachFileDevice will fail. If kubelet is not containerized, evaluate it anyway.
kvh, ok := og.GetVolumePluginMgr().Host.(volume.KubeletVolumeHost)
if !ok {
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume type assertion error", fmt.Errorf("volume host does not implement KubeletVolumeHost interface"))
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
hu := kvh.GetHostUtil()
devicePath, err = hu.EvalHostSymlinks(devicePath)
if err != nil {
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.EvalHostSymlinks failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Update actual state of world with the devicePath again, if devicePath has changed from markedDevicePath
// TODO: This can be improved after #82492 is merged and ASW has state.
if markedDevicePath != devicePath {
markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted(
volumeToMount.VolumeName, devicePath, globalMapPath, "")
if markDeviceMappedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
// Execute common map
volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath()
mapErr := util.MapBlockVolume(og.blkUtil, devicePath, globalMapPath, volumeMapPath, volName, volumeToMount.Pod.UID)
if mapErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapBlockVolume failed", mapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Device mapping for global map path succeeded
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapPodDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath))
verbosity := klog.Level(4)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg)
klog.V(verbosity).InfoS(detailedMsg, "pod", klog.KObj(volumeToMount.Pod))
// Device mapping for pod device map path succeeded
simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapPodDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath))
verbosity = klog.Level(1)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg)
klog.V(verbosity).InfoS(detailedMsg, "pod", klog.KObj(volumeToMount.Pod))
resizeOptions := volume.NodeResizeOptions{
DevicePath: devicePath,
DeviceStagePath: stagingPath,
}
_, resizeError := og.expandVolumeDuringMount(volumeToMount, actualStateOfWorld, resizeOptions)
if resizeError != nil {
klog.Errorf("MapVolume.NodeExpandVolume failed with %v", resizeError)
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed while expanding volume", resizeError)
// At this point, the map operation has already succeeded, so we should add the volume to the actual state
// so that the reconciler can clean it up when needed. However, because nodeExpandVolume failed,
// we should not mark the volume as mounted, to prevent the pod from starting to use it.
// Considering the above, we mark the volume as uncertain here so that the reconciler will trigger
// volume teardown when the pod is deleted, and so that the pod will not start using the volume.
if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts); err != nil {
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
}
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(markVolumeOpts)
if markVolMountedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, (*err).Error())
}
}
return volumetypes.GeneratedOperations{
OperationName: "map_volume",
OperationFunc: mapVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "map_volume"),
}, nil
}
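// Illustrative note (not part of the original source): for a block volume, the two
// symlink locations referenced above follow the layouts documented later in this file
// (paths shown as templates, not literal values):
//
//	global map path:     plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID}
//	pod device map path: pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName}
//
// util.MapBlockVolume creates both links; util.UnmapBlockVolume removes them.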
// GenerateUnmapVolumeFunc marks a volume as unmounted based on the following steps.
// Remove the symbolic links from the pod device map path dir and the global map path dir.
// Once those cleanups are done, remove the pod device map path dir.
// If all steps are completed, the volume is marked as unmounted.
func (og *operationGenerator) GenerateUnmapVolumeFunc(
volumeToUnmount MountedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) {
// Get block volume unmapper plugin
blockVolumePlugin, err :=
og.volumePluginMgr.FindMapperPluginByName(volumeToUnmount.PluginName)
if err != nil {
return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err)
}
if blockVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil)
}
blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper(
volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID)
if newUnmapperErr != nil {
return volumetypes.GeneratedOperations{}, volumeToUnmount.GenerateErrorDetailed("UnmapVolume.NewUnmapper failed", newUnmapperErr)
}
unmapVolumeFunc := func() volumetypes.OperationContext {
migrated := getMigratedStatusBySpec(volumeToUnmount.VolumeSpec)
// pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName}
podDeviceUnmapPath, volName := blockVolumeUnmapper.GetPodDeviceMapPath()
// plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID}
globalUnmapPath, err := blockVolumeUnmapper.GetGlobalMapPath(volumeToUnmount.VolumeSpec)
if err != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToUnmount.GenerateError("UnmapVolume.GetGlobalMapPath failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Mark the volume as uncertain to make sure kubelet calls UnmapVolume again in all the "return err"
// cases below. The volume is marked as fully un-mapped at the end of this function, when everything
// succeeds.
markVolumeOpts := MarkVolumeOpts{
PodName: volumeToUnmount.PodName,
PodUID: volumeToUnmount.PodUID,
VolumeName: volumeToUnmount.VolumeName,
VolumeGIDVolume: volumeToUnmount.VolumeGIDValue,
VolumeSpec: volumeToUnmount.VolumeSpec,
VolumeMountState: VolumeMountUncertain,
}
markVolumeUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts)
if markVolumeUncertainErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToUnmount.GenerateError("UnmapVolume.MarkDeviceAsUncertain failed", markVolumeUncertainErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Execute common unmap
unmapErr := util.UnmapBlockVolume(og.blkUtil, globalUnmapPath, podDeviceUnmapPath, volName, volumeToUnmount.PodUID)
if unmapErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToUnmount.GenerateError("UnmapVolume.UnmapBlockVolume failed", unmapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Call UnmapPodDevice if blockVolumeUnmapper implements CustomBlockVolumeUnmapper
if customBlockVolumeUnmapper, ok := blockVolumeUnmapper.(volume.CustomBlockVolumeUnmapper); ok {
// Execute plugin specific unmap
unmapErr = customBlockVolumeUnmapper.UnmapPodDevice()
if unmapErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToUnmount.GenerateError("UnmapVolume.UnmapPodDevice failed", unmapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
klog.Infof(
"UnmapVolume succeeded for volume %q pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGIDValue %q",
volumeToUnmount.VolumeName,
volumeToUnmount.PodName,
volumeToUnmount.PodUID,
volumeToUnmount.InnerVolumeSpecName,
volumeToUnmount.PluginName,
volumeToUnmount.VolumeGIDValue)
// Update actual state of world
markVolUnmountedErr := actualStateOfWorld.MarkVolumeAsUnmounted(
volumeToUnmount.PodName, volumeToUnmount.VolumeName)
if markVolUnmountedErr != nil {
// On failure, just log and exit
klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: "unmap_volume",
OperationFunc: unmapVolumeFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "unmap_volume"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
// GenerateUnmapDeviceFunc marks the device as unmounted based on the following steps.
// Check under the globalMapPath dir that no pod symbolic links remain in it.
// If no symbolic links are there, the device isn't referenced from any Pods.
// Call the plugin's TearDownDevice to clean up the device connection and any data stored under
// globalMapPath; these operations depend on the plugin implementation.
// Once TearDownDevice completes, remove the globalMapPath dir.
// After globalMapPath is removed, the fd lock held by loopback for the device can
// be released safely because no one can consume the device at this point.
// Finally, the device open status is checked just in case.
// If all steps complete, the device is marked as unmounted.
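//
// For orientation: globalMapPath here is the device mount path, under which each
// consuming pod gets a symlink named by its pod UID (the layout template is
// plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID},
// per the mapper comments earlier in this file).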
func (og *operationGenerator) GenerateUnmapDeviceFunc(
deviceToDetach AttachedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
hostutil hostutil.HostUtils) (volumetypes.GeneratedOperations, error) {
blockVolumePlugin, err :=
og.volumePluginMgr.FindMapperPluginByName(deviceToDetach.PluginName)
if err != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed", err)
}
if blockVolumePlugin == nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil)
}
blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper(
deviceToDetach.VolumeSpec.Name(),
"" /* podUID */)
if newUnmapperErr != nil {
return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr)
}
unmapDeviceFunc := func() volumetypes.OperationContext {
migrated := getMigratedStatusBySpec(deviceToDetach.VolumeSpec)
// Search under the globalMapPath dir to check whether all symbolic links from pods have already
// been removed. If symbolic links remain, pods may still refer to the volume.
globalMapPath := deviceToDetach.DeviceMountPath
refs, err := og.blkUtil.GetDeviceBindMountRefs(deviceToDetach.DevicePath, globalMapPath)
if err != nil {
if os.IsNotExist(err) {
// Looks like SetupDevice did not complete. Fall through to TearDownDevice and mark the device as unmounted.
refs = nil
} else {
eventErr, detailedErr := deviceToDetach.GenerateError("UnmapDevice.GetDeviceBindMountRefs check failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
if len(refs) > 0 {
err = fmt.Errorf("the device %q is still referenced from other Pods %v", globalMapPath, refs)
eventErr, detailedErr := deviceToDetach.GenerateError("UnmapDevice failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Mark the device as uncertain to make sure kubelet calls UnmapDevice again in all the "return err"
// cases below. The volume is marked as fully un-mapped at the end of this function, when everything
// succeeds.
markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(
deviceToDetach.VolumeName, deviceToDetach.DevicePath, globalMapPath, "" /* seLinuxMountContext */)
if markDeviceUncertainErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("UnmapDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Call TearDownDevice if blockVolumeUnmapper implements CustomBlockVolumeUnmapper
if customBlockVolumeUnmapper, ok := blockVolumeUnmapper.(volume.CustomBlockVolumeUnmapper); ok {
// Execute tear down device
unmapErr := customBlockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath)
if unmapErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
}
// The plugin has finished TearDownDevice(). The globalMapPath dir and the plugin's data
// stored under it are no longer needed, so clean them up.
removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath)
if removeMapPathErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("UnmapDevice.RemoveMapPath failed", removeMapPathErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Before logging that UnmapDevice succeeded and moving on,
// use hostutil.PathIsDevice to check if the path is a device,
// if so use hostutil.DeviceOpened to check if the device is in use anywhere
// else on the system. Retry if it returns true.
deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, hostutil)
if deviceOpenedErr != nil {
return volumetypes.NewOperationContext(nil, deviceOpenedErr, migrated)
}
// The device is still in use elsewhere. Caller will log and retry.
if deviceOpened {
eventErr, detailedErr := deviceToDetach.GenerateError(
"UnmapDevice failed",
fmt.Errorf("the device is in use when it was no longer expected to be in use"))
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
klog.Info(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
// Update actual state of world
markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
deviceToDetach.VolumeName)
if markDeviceUnmountedErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := deviceToDetach.GenerateError("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: "unmap_device",
OperationFunc: unmapDeviceFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmap_device"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
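// GenerateVerifyControllerAttachedVolumeFunc returns an operation that verifies
// the attach/detach controller has attached the volume to this node, by reading
// the node object's status.volumesAttached, and records the result in the actual
// state of the world. Volumes whose plugin is not attachable are simply marked
// as attached.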
func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
logger klog.Logger,
volumeToMount VolumeToMount,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.FindPluginBySpec failed", err)
}
// For attachable volume types, check whether the volume is attached by reading from the node lister.
// This avoids unnecessary exponential back-off and goroutine creation. We still
// verify the attached volume's status by reading directly from the API server later on; this is
// necessary to guard against race conditions caused by cached state in the informer.
if volumeToMount.PluginIsAttachable {
cachedAttachedVolumes, _ := og.volumePluginMgr.Host.GetAttachedVolumesFromNodeStatus()
if cachedAttachedVolumes != nil {
_, volumeFound := cachedAttachedVolumes[volumeToMount.VolumeName]
if !volumeFound {
return volumetypes.GeneratedOperations{}, NewMountPreConditionFailedError(fmt.Sprintf("volume %s is not yet in node's status", volumeToMount.VolumeName))
}
}
}
verifyControllerAttachedVolumeFunc := func() volumetypes.OperationContext {
migrated := getMigratedStatusBySpec(volumeToMount.VolumeSpec)
claimSize := actualStateOfWorld.GetClaimSize(volumeToMount.VolumeName)
// only fetch claimSize if it was not set previously
if volumeToMount.VolumeSpec.PersistentVolume != nil && claimSize.IsZero() && !volumeToMount.VolumeSpec.InlineVolumeSpecForCSIMigration {
pv := volumeToMount.VolumeSpec.PersistentVolume
pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil {
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume fetching pvc failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
pvcStatusSize := pvc.Status.Capacity.Storage()
if pvcStatusSize != nil {
claimSize = pvcStatusSize.DeepCopy()
}
}
if !volumeToMount.PluginIsAttachable {
// If the volume does not implement the attacher interface, it is
// assumed to be attached and the actual state of the world is
// updated accordingly.
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */)
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
if !volumeToMount.ReportedInUse {
// If the given volume has not yet been added to the list of
// VolumesInUse in the node's volume status, do not proceed, return
// error. Caller will log and retry. The node status is updated
// periodically by kubelet, so it may take as much as 10 seconds
// before this clears.
// Issue #28141 to enable on demand status updates.
eventErr, detailedErr := volumeToMount.GenerateError("Volume has not been added to the list of VolumesInUse in the node's volume status", nil)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// Fetch current node object
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nodeName), metav1.GetOptions{})
if fetchErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
for _, attachedVolume := range node.Status.VolumesAttached {
if attachedVolume.Name == volumeToMount.VolumeName {
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
logger, v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
klog.InfoS(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
}
// Volume not attached, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("Volume not attached according to node status", nil)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
return volumetypes.GeneratedOperations{
OperationName: VerifyControllerAttachedVolumeOpName,
OperationFunc: verifyControllerAttachedVolumeFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "verify_controller_attached_volume"),
EventRecorderFunc: nil, // nil because we do not want to generate event on error
}, nil
}
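// verifyVolumeIsSafeToDetach returns an error if the node's status still lists
// the volume in VolumesInUse; otherwise the volume is considered safe to detach.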
func (og *operationGenerator) verifyVolumeIsSafeToDetach(
volumeToDetach AttachedVolume) error {
// Fetch current node object
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{})
if fetchErr != nil {
if errors.IsNotFound(fetchErr) {
klog.Warning(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
return nil
}
// On failure, return error. Caller will log and retry.
return volumeToDetach.GenerateErrorDetailed("DetachVolume failed fetching node from API server", fetchErr)
}
for _, inUseVolume := range node.Status.VolumesInUse {
if inUseVolume == volumeToDetach.VolumeName {
return volumeToDetach.GenerateErrorDetailed(
"DetachVolume failed",
fmt.Errorf("volume is still in use by node, according to Node status"))
}
}
// Volume is not marked as in use by node
klog.Info(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
return nil
}
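// GenerateExpandVolumeFunc returns an operation that expands the underlying
// volume via the expandable plugin, updates the PV size, and either marks the
// resize as finished or marks the PVC as pending a file system resize on the node.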
func (og *operationGenerator) GenerateExpandVolumeFunc(
pvc *v1.PersistentVolumeClaim,
pv *v1.PersistentVolume) (volumetypes.GeneratedOperations, error) {
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := og.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if err != nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("error finding plugin for expanding volume: %q with error %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
}
if volumePlugin == nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("can not find plugin for expanding volume: %q", util.GetPersistentVolumeClaimQualifiedName(pvc))
}
expandVolumeFunc := func() volumetypes.OperationContext {
migrated := false
newSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
statusSize := pvc.Status.Capacity[v1.ResourceStorage]
pvSize := pv.Spec.Capacity[v1.ResourceStorage]
if pvSize.Cmp(newSize) < 0 {
updatedSize, expandErr := volumePlugin.ExpandVolumeDevice(
volumeSpec,
newSize,
statusSize)
if expandErr != nil {
detailedErr := fmt.Errorf("error expanding volume %q of plugin %q: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), volumePlugin.GetPluginName(), expandErr)
return volumetypes.NewOperationContext(detailedErr, detailedErr, migrated)
}
klog.Infof("ExpandVolume succeeded for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
newSize = updatedSize
// k8s doesn't have transactions, so we can't guarantee that updating the PVC will succeed
// after updating the PV. That is why all PVCs for which pvc.Spec.Size > pvc.Status.Size must
// be reprocessed until pvc.Status.Size reflects the user-requested size.
_, updateErr := util.UpdatePVSize(pv, newSize, og.kubeClient)
if updateErr != nil {
detailedErr := fmt.Errorf("error updating PV spec capacity for volume %q with : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), updateErr)
return volumetypes.NewOperationContext(detailedErr, detailedErr, migrated)
}
klog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
}
fsVolume, _ := util.CheckVolumeModeFilesystem(volumeSpec)
// No cloud-provider resize is needed, so mark resizing as done.
// The rest of the volume expand controller code assumes the PVC is *not* resized until
// pvc.Status.Size reflects the user-requested size.
if !volumePlugin.RequiresFSResize() || !fsVolume {
klog.V(4).Infof("Controller resizing done for PVC %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
_, err := util.MarkResizeFinished(pvc, newSize, og.kubeClient)
if err != nil {
detailedErr := fmt.Errorf("error marking pvc %s as resized : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
return volumetypes.NewOperationContext(detailedErr, detailedErr, migrated)
}
successMsg := fmt.Sprintf("ExpandVolume succeeded for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
og.recorder.Eventf(pvc, v1.EventTypeNormal, kevents.VolumeResizeSuccess, successMsg)
} else {
_, err := util.MarkForFSResize(pvc, og.kubeClient)
if err != nil {
detailedErr := fmt.Errorf("error updating pvc %s condition for fs resize : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
klog.Warning(detailedErr)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
oldCapacity := pvc.Status.Capacity[v1.ResourceStorage]
err = util.AddAnnPreResizeCapacity(pv, oldCapacity, og.kubeClient)
if err != nil {
detailedErr := fmt.Errorf("error updating pv %s annotation (%s) with pre-resize capacity %s: %v", pv.ObjectMeta.Name, util.AnnPreResizeCapacity, oldCapacity.String(), err)
klog.Warning(detailedErr)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.VolumeResizeFailed, (*err).Error())
}
}
return volumetypes.GeneratedOperations{
OperationName: "expand_volume",
OperationFunc: expandVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec), "expand_volume"),
}, nil
}
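// GenerateExpandAndRecoverVolumeFunc is the variant of volume expansion used when
// recovery from expansion failure is supported; the actual work is delegated to
// expandAndRecoverFunction below.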
func (og *operationGenerator) GenerateExpandAndRecoverVolumeFunc(
pvc *v1.PersistentVolumeClaim,
pv *v1.PersistentVolume, resizerName string) (volumetypes.GeneratedOperations, error) {
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
volumePlugin, err := og.volumePluginMgr.FindExpandablePluginBySpec(volumeSpec)
if err != nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("error finding plugin for expanding volume: %q with error %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
}
if volumePlugin == nil {
return volumetypes.GeneratedOperations{}, fmt.Errorf("can not find plugin for expanding volume: %q", util.GetPersistentVolumeClaimQualifiedName(pvc))
}
expandVolumeFunc := func() volumetypes.OperationContext {
resizeOpts := inTreeResizeOpts{
pvc: pvc,
pv: pv,
resizerName: resizerName,
volumePlugin: volumePlugin,
volumeSpec: volumeSpec,
}
migrated := false
resp := og.expandAndRecoverFunction(resizeOpts)
if resp.err != nil {
return volumetypes.NewOperationContext(resp.err, resp.err, migrated)
}
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.VolumeResizeFailed, (*err).Error())
}
}
return volumetypes.GeneratedOperations{
OperationName: "expand_volume",
OperationFunc: expandVolumeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec), "expand_volume"),
}, nil
}
// Deprecated: This function should not be called by any controller code in the future and
// should be removed from the kubernetes codebase.
func (og *operationGenerator) expandAndRecoverFunction(resizeOpts inTreeResizeOpts) inTreeResizeResponse {
pvc := resizeOpts.pvc
pv := resizeOpts.pv
resizerName := resizeOpts.resizerName
volumePlugin := resizeOpts.volumePlugin
volumeSpec := resizeOpts.volumeSpec
pvcSpecSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := pvc.Status.Capacity[v1.ResourceStorage]
pvSize := pv.Spec.Capacity[v1.ResourceStorage]
resizeResponse := inTreeResizeResponse{
pvc: pvc,
pv: pv,
resizeCalled: false,
}
// By default we expand to fulfill the size requested in pvc.Spec.Resources
newSize := pvcSpecSize
var resizeStatus v1.ClaimResourceStatus
if status, ok := pvc.Status.AllocatedResourceStatuses[v1.ResourceStorage]; ok {
resizeStatus = status
}
var allocatedSize *resource.Quantity
t, ok := pvc.Status.AllocatedResources[v1.ResourceStorage]
if ok {
allocatedSize = &t
}
var err error
if pvSize.Cmp(pvcSpecSize) < 0 {
// pv is not of requested size yet and hence will require expanding
switch resizeStatus {
case v1.PersistentVolumeClaimControllerResizeInProgress,
v1.PersistentVolumeClaimNodeResizePending,
v1.PersistentVolumeClaimNodeResizeInProgress,
v1.PersistentVolumeClaimNodeResizeInfeasible:
if allocatedSize != nil {
newSize = *allocatedSize
}
default:
newSize = pvcSpecSize
}
} else {
// The PV has already been expanded, so we can be here for the following reasons:
// 1. Expansion is pending on the node and this was just a spurious update event;
// we don't need to do anything and can let kubelet handle it.
// 2. Although we successfully expanded the volume, we failed to record our work in
// the API objects, in which case we should resume the resizing operation and let
// the API objects be updated.
// 3. The controller successfully expanded the volume, but expansion is failing on
// the node, and before kubelet can retry the failed node expansion, the controller
// must verify that it is safe to do so.
// 4. While expansion was still pending on the node, the user reduced the pvc size.
switch resizeStatus {
case v1.PersistentVolumeClaimNodeResizeInProgress,
v1.PersistentVolumeClaimNodeResizePending:
// we don't need to do any work. We could be here because of a spurious update event.
// This is case #1
return resizeResponse
case v1.PersistentVolumeClaimNodeResizeInfeasible:
// This is case #3
pvc, err = og.markForPendingNodeExpansion(pvc, pv)
resizeResponse.pvc = pvc
resizeResponse.err = err
return resizeResponse
case v1.PersistentVolumeClaimControllerResizeInProgress,
v1.PersistentVolumeClaimControllerResizeInfeasible:
// This is case #2, or it could also be case #4 if the user manually shrunk the PVC
// after expanding it.
if allocatedSize != nil {
newSize = *allocatedSize
}
default:
// ResizeStatus should never be "" while allocatedSize is non-nil, but if we somehow
// end up in this state, it is safest to resume expansion to the last recorded size in
// the allocatedSize variable.
if resizeStatus == "" && allocatedSize != nil {
newSize = *allocatedSize
} else {
newSize = pvcSpecSize
}
}
}
pvc, err = util.MarkControllerReisizeInProgress(pvc, resizerName, newSize, og.kubeClient)
if err != nil {
msg := fmt.Errorf("error updating pvc %s with resize in progress: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
resizeResponse.err = msg
resizeResponse.pvc = pvc
return resizeResponse
}
updatedSize, err := volumePlugin.ExpandVolumeDevice(volumeSpec, newSize, pvcStatusSize)
resizeResponse.resizeCalled = true
if err != nil {
msg := fmt.Errorf("error expanding pvc %s: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
resizeResponse.err = msg
resizeResponse.pvc = pvc
return resizeResponse
}
// update PV size
var updateErr error
pv, updateErr = util.UpdatePVSize(pv, updatedSize, og.kubeClient)
// If updating the PV failed, leave the PVC in the ControllerResizeInProgress state so that
// expansion can be retried to the previously set allocatedSize value.
if updateErr != nil {
msg := fmt.Errorf("error updating pv for pvc %s: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), updateErr)
resizeResponse.err = msg
return resizeResponse
}
resizeResponse.pv = pv
fsVolume, _ := util.CheckVolumeModeFilesystem(volumeSpec)
if !volumePlugin.RequiresFSResize() || !fsVolume {
pvc, err = util.MarkResizeFinished(pvc, updatedSize, og.kubeClient)
if err != nil {
msg := fmt.Errorf("error marking pvc %s as resized: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
resizeResponse.err = msg
return resizeResponse
}
resizeResponse.pvc = pvc
successMsg := fmt.Sprintf("ExpandVolume succeeded for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
og.recorder.Eventf(pvc, v1.EventTypeNormal, kevents.VolumeResizeSuccess, successMsg)
} else {
pvc, err = og.markForPendingNodeExpansion(pvc, pv)
resizeResponse.pvc = pvc
if err != nil {
msg := fmt.Errorf("error marking pvc %s for node expansion: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
resizeResponse.err = msg
return resizeResponse
}
}
return resizeResponse
}
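// markForPendingNodeExpansion marks the PVC as pending a file system resize on the
// node and records the pre-resize capacity on the PV, so that expansion can still
// complete on the node even if the PVC is deleted in the meantime.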
func (og *operationGenerator) markForPendingNodeExpansion(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
var err error
pvc, err = util.MarkForFSResize(pvc, og.kubeClient)
if err != nil {
msg := fmt.Errorf("error marking pvc %s for node expansion: %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
return pvc, msg
}
// Store the old PVC capacity in the pv, so that if the PVC gets deleted while node
// expansion is pending, we can restore the pvc's size from the PV annotation and still
// perform expansion on the node.
oldCapacity := pvc.Status.Capacity[v1.ResourceStorage]
err = util.AddAnnPreResizeCapacity(pv, oldCapacity, og.kubeClient)
if err != nil {
detailedErr := fmt.Errorf("error updating pv %s annotation (%s) with pre-resize capacity %s: %v", pv.ObjectMeta.Name, util.AnnPreResizeCapacity, oldCapacity.String(), err)
klog.Warning(detailedErr)
return pvc, detailedErr
}
return pvc, nil
}
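// GenerateExpandInUseVolumeFunc returns an operation that expands the file system
// (or block device) of a volume that is already mounted/mapped on the node, i.e.
// an online expansion.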
func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater, currentSize resource.Quantity) (volumetypes.GeneratedOperations, error) {
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("NodeExpandVolume.FindPluginBySpec failed", err)
}
fsResizeFunc := func() volumetypes.OperationContext {
var resizeDone bool
var eventErr, detailedErr error
migrated := false
if currentSize.IsZero() || volumeToMount.DesiredPersistentVolumeSize.IsZero() {
err := fmt.Errorf("current or new size of the volume is not set")
eventErr, detailedErr = volumeToMount.GenerateError("NodeExpandvolume.expansion failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
resizeOptions := volume.NodeResizeOptions{
VolumeSpec: volumeToMount.VolumeSpec,
DevicePath: volumeToMount.DevicePath,
OldSize: currentSize,
NewSize: volumeToMount.DesiredPersistentVolumeSize,
}
fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
if err != nil {
eventErr, detailedErr = volumeToMount.GenerateError("NodeExpandvolume.CheckVolumeModeFilesystem failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
if fsVolume {
volumeMounter, newMounterErr := volumePlugin.NewMounter(
volumeToMount.VolumeSpec,
volumeToMount.Pod)
if newMounterErr != nil {
eventErr, detailedErr = volumeToMount.GenerateError("NodeExpandVolume.NewMounter initialization failed", newMounterErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
resizeOptions.DeviceMountPath = volumeMounter.GetPath()
deviceMountableVolumePlugin, _ := og.volumePluginMgr.FindDeviceMountablePluginBySpec(volumeToMount.VolumeSpec)
var volumeDeviceMounter volume.DeviceMounter
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, _ = deviceMountableVolumePlugin.NewDeviceMounter()
}
if volumeDeviceMounter != nil {
deviceStagePath, err := volumeDeviceMounter.GetDeviceMountPath(volumeToMount.VolumeSpec)
if err != nil {
eventErr, detailedErr = volumeToMount.GenerateError("NodeExpandVolume.GetDeviceMountPath failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
resizeOptions.DeviceStagePath = deviceStagePath
}
} else {
// Get block volume mapper plugin
blockVolumePlugin, err :=
og.volumePluginMgr.FindMapperPluginBySpec(volumeToMount.VolumeSpec)
if err != nil {
eventErr, detailedErr = volumeToMount.GenerateError("MapVolume.FindMapperPluginBySpec failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
if blockVolumePlugin == nil {
eventErr, detailedErr = volumeToMount.GenerateError("MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper(
volumeToMount.VolumeSpec,
volumeToMount.Pod)
if newMapperErr != nil {
eventErr, detailedErr = volumeToMount.GenerateError("MapVolume.NewBlockVolumeMapper initialization failed", newMapperErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
// If the plugin supports custom mappers, add the DeviceStagePath
if customBlockVolumeMapper, ok := blockVolumeMapper.(volume.CustomBlockVolumeMapper); ok {
resizeOptions.DeviceStagePath = customBlockVolumeMapper.GetStagingPath()
}
}
// If we are doing online expansion, the volume is already published
resizeDone, eventErr, detailedErr = og.doOnlineExpansion(volumeToMount, actualStateOfWorld, resizeOptions)
if eventErr != nil || detailedErr != nil {
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
if resizeDone {
return volumetypes.NewOperationContext(nil, nil, migrated)
}
klog.InfoS("Waiting for volume to be expandable on the node", "volumeName", volumeToMount.VolumeName)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
eventRecorderFunc := func(err *error) {
if *err != nil {
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.VolumeResizeFailed, (*err).Error())
}
}
return volumetypes.GeneratedOperations{
OperationName: "volume_fs_resize",
OperationFunc: fsResizeFunc,
EventRecorderFunc: eventRecorderFunc,
CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_fs_resize"),
}, nil
}
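// doOnlineExpansion runs node expansion for an already published volume and, on
// success, records the new size in the actual state of the world. It returns
// (resizeDone, eventErr, detailedErr).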
func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
resizeOptions volume.NodeResizeOptions) (bool, error, error) {
resizeDone, newSize, err := og.nodeExpandVolume(volumeToMount, actualStateOfWorld, resizeOptions)
if err != nil {
e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err)
klog.Error(e2.Error())
return false, e1, e2
}
if resizeDone {
markingDone := actualStateOfWorld.MarkVolumeAsResized(volumeToMount.VolumeName, newSize)
if !markingDone {
// On failure, return error. Caller will log and retry.
genericFailureError := fmt.Errorf("unable to mark volume as resized")
e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.MarkVolumeAsResized failed", genericFailureError)
return false, e1, e2
}
return true, nil, nil
}
return false, nil, nil
}
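// expandVolumeDuringMount expands the volume while it is being mounted, when the
// PVC's status capacity is smaller than the PV's spec capacity. It returns true
// when there is nothing (left) to resize.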
func (og *operationGenerator) expandVolumeDuringMount(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, rsOpts volume.NodeResizeOptions) (bool, error) {
supportsExpansion, expandablePlugin := og.checkIfSupportsNodeExpansion(volumeToMount)
if supportsExpansion {
pv := volumeToMount.VolumeSpec.PersistentVolume
pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil {
// Return error rather than leave the file system un-resized, caller will log and retry
return false, fmt.Errorf("mountVolume.NodeExpandVolume get PVC failed : %v", err)
}
pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage]
pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage]
if pvcStatusCap.Cmp(pvSpecCap) < 0 {
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
klog.Warning(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, nil
}
rsOpts.NewSize = pvSpecCap
rsOpts.OldSize = pvcStatusCap
rsOpts.VolumeSpec = volumeToMount.VolumeSpec
resizeOp := nodeResizeOperationOpts{
vmt: volumeToMount,
pvc: pvc,
pv: pv,
pluginResizeOpts: rsOpts,
volumePlugin: expandablePlugin,
actualStateOfWorld: actualStateOfWorld,
}
if og.checkForRecoveryFromExpansion(pvc, volumeToMount) {
// if recovery feature is enabled, we can use allocated size from PVC status as new size
rsOpts.NewSize = pvc.Status.AllocatedResources[v1.ResourceStorage]
resizeOp.pluginResizeOpts = rsOpts
nodeExpander := newNodeExpander(resizeOp, og.kubeClient, og.recorder)
resizeFinished, _, err := nodeExpander.expandOnPlugin()
return resizeFinished, err
} else {
return og.legacyCallNodeExpandOnPlugin(resizeOp)
}
}
}
return true, nil
}
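// checkIfSupportsNodeExpansion reports whether the volume supports expansion on
// the node and, if so, returns the node-expandable plugin to use.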
func (og *operationGenerator) checkIfSupportsNodeExpansion(volumeToMount VolumeToMount) (bool, volume.NodeExpandableVolumePlugin) {
if volumeToMount.VolumeSpec != nil &&
volumeToMount.VolumeSpec.InlineVolumeSpecForCSIMigration {
klog.V(4).Infof("This volume %s is a migrated inline volume and is not resizable", volumeToMount.VolumeName)
return false, nil
}
// Get expander, if possible
expandableVolumePlugin, _ :=
og.volumePluginMgr.FindNodeExpandablePluginBySpec(volumeToMount.VolumeSpec)
if expandableVolumePlugin != nil &&
expandableVolumePlugin.RequiresFSResize() &&
volumeToMount.VolumeSpec.PersistentVolume != nil {
return true, expandableVolumePlugin
}
return false, nil
}
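// nodeExpandVolume expands the volume on the node if the new size is larger than
// the old size. It returns whether the resize is done, the resulting size, and
// any error.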
func (og *operationGenerator) nodeExpandVolume(
volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
rsOpts volume.NodeResizeOptions) (bool, resource.Quantity, error) {
supportsExpansion, expandableVolumePlugin := og.checkIfSupportsNodeExpansion(volumeToMount)
if supportsExpansion {
// Use the sizes handed over to us by the caller for comparison
if rsOpts.NewSize.Cmp(rsOpts.OldSize) > 0 {
pv := volumeToMount.VolumeSpec.PersistentVolume
pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil {
// Return an error rather than leave the file system un-resized; the caller will log and retry.
// Note: pvc must not be dereferenced here, because it is nil when the Get fails.
return false, rsOpts.OldSize, fmt.Errorf("mountVolume.NodeExpandVolume get PVC failed : %w", err)
}
currentSize := pvc.Status.Capacity.Storage()
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
klog.Warning(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, *currentSize, nil
}
resizeOp := nodeResizeOperationOpts{
vmt: volumeToMount,
pvc: pvc,
pv: pv,
pluginResizeOpts: rsOpts,
volumePlugin: expandableVolumePlugin,
actualStateOfWorld: actualStateOfWorld,
}
if og.checkForRecoveryFromExpansion(pvc, volumeToMount) {
// if recovery feature is enabled, we can use allocated size from PVC status as new size
newSize := pvc.Status.AllocatedResources[v1.ResourceStorage]
rsOpts.NewSize = newSize
resizeOp.pluginResizeOpts.NewSize = newSize
nodeExpander := newNodeExpander(resizeOp, og.kubeClient, og.recorder)
resizeFinished, newSize, err := nodeExpander.expandOnPlugin()
return resizeFinished, newSize, err
} else {
resizeFinished, err := og.legacyCallNodeExpandOnPlugin(resizeOp)
return resizeFinished, rsOpts.NewSize, err
}
}
}
return true, rsOpts.OldSize, nil
}
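// checkForRecoveryFromExpansion reports whether the recovery-from-expansion-failure
// code path should be used for this PVC. Summarizing the logic below:
//
//	feature gate | resizeStatus/allocatedResources populated | recovery path
//	disabled     | yes                                       | yes
//	disabled     | no                                        | no
//	enabled      | no (older external resize controller)     | no
//	enabled      | yes                                       | yes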
func (og *operationGenerator) checkForRecoveryFromExpansion(pvc *v1.PersistentVolumeClaim, volumeToMount VolumeToMount) bool {
resizeStatus := pvc.Status.AllocatedResourceStatuses[v1.ResourceStorage]
allocatedResource := pvc.Status.AllocatedResources
featureGateStatus := utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure)
if !featureGateStatus {
// Even though the RecoverVolumeExpansionFailure feature gate is disabled, we should consider it
// enabled if resizeStatus is not empty or allocatedResources is set
if resizeStatus != "" || allocatedResource != nil {
return true
}
return false
}
// Even though the RecoverVolumeExpansionFailure feature gate is enabled, it appears that we are
// running with an older version of the resize controller, which does not populate allocatedResources
// and resizeStatus. This can happen because of version skew, so keep expanding using the older logic.
if resizeStatus == "" && allocatedResource == nil {
_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume running with", "older external resize controller")
klog.Warning(detailedMsg)
return false
}
return true
}
// legacyCallNodeExpandOnPlugin is the old version of calling node expansion on the plugin; it does
// not support recovery from volume expansion failure.
// TODO: Remove this code when the RecoverVolumeExpansionFailure feature goes GA.
func (og *operationGenerator) legacyCallNodeExpandOnPlugin(resizeOp nodeResizeOperationOpts) (bool, error) {
pvc := resizeOp.pvc
volumeToMount := resizeOp.vmt
rsOpts := resizeOp.pluginResizeOpts
actualStateOfWorld := resizeOp.actualStateOfWorld
expandableVolumePlugin := resizeOp.volumePlugin
pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage]
nodeName := volumeToMount.Pod.Spec.NodeName
var err error
// File system resize was requested, proceed
klog.V(4).InfoS(volumeToMount.GenerateMsgDetailed("MountVolume.NodeExpandVolume entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
rsOpts.VolumeSpec = volumeToMount.VolumeSpec
_, resizeErr := expandableVolumePlugin.NodeExpand(rsOpts)
if resizeErr != nil {
// This is a workaround for now, until the RecoverVolumeExpansionFailure feature goes GA.
// If the RecoverVolumeExpansionFailure feature is enabled, we will never hit this state, because
// we will wait for VolumeExpansionPendingOnNode before trying to expand the volume in kubelet.
if volumetypes.IsOperationNotSupportedError(resizeErr) {
klog.V(4).InfoS(volumeToMount.GenerateMsgDetailed("MountVolume.NodeExpandVolume failed", "NodeExpandVolume not supported"), "pod", klog.KObj(volumeToMount.Pod))
return true, nil
}
// if driver returned FailedPrecondition error that means
// volume expansion should not be retried on this node but
// expansion operation should not block mounting
if volumetypes.IsFailedPreconditionError(resizeErr) {
actualStateOfWorld.MarkForInUseExpansionError(volumeToMount.VolumeName)
klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
return true, nil
}
return false, resizeErr
}
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume succeeded", nodeName)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
og.recorder.Eventf(pvc, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
klog.InfoS(detailedMsg, "pod", klog.KObj(volumeToMount.Pod))
// if PVC already has new size, there is no need to update it.
if pvcStatusCap.Cmp(rsOpts.NewSize) >= 0 {
return true, nil
}
// File system resize succeeded, now update the PVC's Capacity to match the PV's
_, err = util.MarkFSResizeFinished(pvc, rsOpts.NewSize, og.kubeClient)
if err != nil {
// On retry, NodeExpandVolume will be called again but will do nothing
return false, fmt.Errorf("mountVolume.NodeExpandVolume update PVC status failed : %v", err)
}
return true, nil
}
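// checkMountOptionSupport returns an error when mount options are specified in
// the volume spec but the volume plugin does not support them.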
func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error {
mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec)
if len(mountOptions) > 0 && !plugin.SupportsMountOption() {
return fmt.Errorf("mount options are not supported for this volume type")
}
return nil
}
// checkNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
// This ensures that we don't mount a volume that doesn't belong to this node
func checkNodeAffinity(og *operationGenerator, volumeToMount VolumeToMount) error {
pv := volumeToMount.VolumeSpec.PersistentVolume
if pv != nil {
nodeLabels, err := og.volumePluginMgr.Host.GetNodeLabels()
if err != nil {
return err
}
err = storagehelpers.CheckNodeAffinity(pv, nodeLabels)
if err != nil {
return err
}
}
return nil
}
// isDeviceOpened checks the device status if the device is in use anywhere else on the system
func isDeviceOpened(deviceToDetach AttachedVolume, hostUtil hostutil.HostUtils) (bool, error) {
isDevicePath, devicePathErr := hostUtil.PathIsDevice(deviceToDetach.DevicePath)
var deviceOpened bool
var deviceOpenedErr error
if !isDevicePath && devicePathErr == nil ||
(devicePathErr != nil && strings.Contains(devicePathErr.Error(), "does not exist")) {
// not a device path or path doesn't exist
//TODO: refer to #36092
klog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath)
deviceOpened = false
} else if devicePathErr != nil {
return false, deviceToDetach.GenerateErrorDetailed("PathIsDevice failed", devicePathErr)
} else {
deviceOpened, deviceOpenedErr = hostUtil.DeviceOpened(deviceToDetach.DevicePath)
if deviceOpenedErr != nil {
return false, deviceToDetach.GenerateErrorDetailed("DeviceOpened failed", deviceOpenedErr)
}
}
return deviceOpened, nil
}
// findDetachablePluginBySpec is a variant of the VolumePluginMgr.FindAttachablePluginByName() function.
// The difference is that it bypasses the CanAttach() check for the CSI plugin, i.e. it assumes all CSI plugins support detach.
// The intention here is that a CSI plugin volume can end up in an Uncertain state, so a detach
// operation will help it detach regardless of whether it actually has the ability to attach/detach.
func findDetachablePluginBySpec(spec *volume.Spec, pm *volume.VolumePluginMgr) (volume.AttachableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if attachableVolumePlugin, ok := volumePlugin.(volume.AttachableVolumePlugin); ok {
if attachableVolumePlugin.GetPluginName() == "kubernetes.io/csi" {
return attachableVolumePlugin, nil
}
if canAttach, err := attachableVolumePlugin.CanAttach(spec); err != nil {
return nil, err
} else if canAttach {
return attachableVolumePlugin, nil
}
}
return nil, nil
}
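// getMigratedStatusBySpec returns whether the given volume spec has been migrated
// to CSI; a nil spec is treated as not migrated.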
func getMigratedStatusBySpec(spec *volume.Spec) bool {
migrated := false
if spec != nil {
migrated = spec.Migrated
}
return migrated
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"context"
"errors"
"fmt"
"sync"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// RecycleEventRecorder is a func that defines how to record RecycleEvent.
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. Deletion of the recycler pod is always
// attempted before returning.
//
// In case a pod with the same namespace+name is already running, this
// function deletes it, as it is not able to judge whether it is an old recycler
// or a fake recycler forged by a user to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with unique name based on PV.Name.
// client - kube client for API operations.
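//
// A minimal illustrative call; the recycler pod template, client, and recorder
// are assumed to be supplied by the volume plugin and its host:
//
//	err := RecycleVolumeByWatchingPodUntilCompletion(pv.Name, recyclerPod, kubeClient, recorder)
//	if err != nil {
//		// recycling failed; the PV controller will retry later
//	}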
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
klog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
klog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if apierrors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// The recycler will try again and the old pod will hopefully be deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
klog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
klog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, deleteErr)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
klog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return errors.New(pod.Status.Message)
}
return fmt.Errorf("pod failed, pod.Status.Message unknown")
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
klog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
// WatchPod watches a pod and events related to it. It sends pod updates and events over the returned channel.
// It continues until stopChannel is closed.
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(context.TODO(), options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(context.TODO(), metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
var wg sync.WaitGroup
wg.Add(2)
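// Close eventCh only after both forwarding goroutines below have exited, so
// receivers observe a single clean channel close.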
go func() {
defer close(eventCh)
wg.Wait()
}()
go func() {
defer eventWatch.Stop()
defer wg.Done()
for {
select {
case <-stopChannel:
return
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
return
}
eventCh <- eventEvent
}
}
}()
go func() {
defer podWatch.Stop()
defer wg.Done()
for {
select {
case <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
return
}
eventCh <- podEvent
}
}
}()
return eventCh, nil
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"encoding/json"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/mount-utils"
"k8s.io/utils/exec"
)
var (
knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
v1.PersistentVolumeClaimControllerResizeError: true,
v1.PersistentVolumeClaimNodeResizeError: true,
}
// AnnPreResizeCapacity annotation is added to a PV when expanding a volume.
// Its value is the status capacity of the PVC prior to the volume expansion.
// Its value will be set by the external-resizer when it deems that a filesystem resize is required after resizing the volume.
// Its value will be used by pv_controller to determine the pvc's status capacity when binding the pvc and pv.
AnnPreResizeCapacity = "volume.alpha.kubernetes.io/pre-resize-capacity"
)
type resizeProcessStatus struct {
condition v1.PersistentVolumeClaimCondition
processed bool
}
// UpdatePVSize updates just the pv size after cloud-provider resizing is successful
func UpdatePVSize(
pv *v1.PersistentVolume,
newSize resource.Quantity,
kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
pvClone := pv.DeepCopy()
pvClone.Spec.Capacity[v1.ResourceStorage] = newSize
return PatchPV(pv, pvClone, kubeClient)
}
// AddAnnPreResizeCapacity adds volume.alpha.kubernetes.io/pre-resize-capacity to the pv
func AddAnnPreResizeCapacity(
pv *v1.PersistentVolume,
oldCapacity resource.Quantity,
kubeClient clientset.Interface) error {
// if the pv already has a resize annotation skip the process
if metav1.HasAnnotation(pv.ObjectMeta, AnnPreResizeCapacity) {
return nil
}
pvClone := pv.DeepCopy()
if pvClone.ObjectMeta.Annotations == nil {
pvClone.ObjectMeta.Annotations = make(map[string]string)
}
pvClone.ObjectMeta.Annotations[AnnPreResizeCapacity] = oldCapacity.String()
_, err := PatchPV(pv, pvClone, kubeClient)
return err
}
// DeleteAnnPreResizeCapacity deletes volume.alpha.kubernetes.io/pre-resize-capacity from the pv
func DeleteAnnPreResizeCapacity(
pv *v1.PersistentVolume,
kubeClient clientset.Interface) error {
// if the pv does not have a resize annotation skip the entire process
if !metav1.HasAnnotation(pv.ObjectMeta, AnnPreResizeCapacity) {
return nil
}
pvClone := pv.DeepCopy()
delete(pvClone.ObjectMeta.Annotations, AnnPreResizeCapacity)
_, err := PatchPV(pv, pvClone, kubeClient)
return err
}
// PatchPV creates and executes a patch for pv
func PatchPV(
oldPV *v1.PersistentVolume,
newPV *v1.PersistentVolume,
kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
oldData, err := json.Marshal(oldPV)
if err != nil {
return oldPV, fmt.Errorf("unexpected error marshaling old PV %q with error : %v", oldPV.Name, err)
}
newData, err := json.Marshal(newPV)
if err != nil {
return oldPV, fmt.Errorf("unexpected error marshaling new PV %q with error : %v", newPV.Name, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPV)
if err != nil {
return oldPV, fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", oldPV.Name, err)
}
updatedPV, err := kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), oldPV.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return oldPV, fmt.Errorf("error Patching PV %q with error : %v", oldPV.Name, err)
}
return updatedPV, nil
}
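// A minimal usage sketch: a caller clones the PV, mutates the clone, and lets
// PatchPV compute and send the strategic merge patch. The client kubeClient,
// the PV pv, and the annotation key are hypothetical.
//
//	pvClone := pv.DeepCopy()
//	metav1.SetMetaDataAnnotation(&pvClone.ObjectMeta, "example.com/marker", "true")
//	updated, err := PatchPV(pv, pvClone, kubeClient)
//	if err != nil {
//		// on error, the original unpatched PV is returned
//	}
//	_ = updated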
// MarkResizeInProgressWithResizer marks cloudprovider resizing as in progress
// and also annotates the PVC with the name of the resizer.
func MarkResizeInProgressWithResizer(
pvc *v1.PersistentVolumeClaim,
resizerName string,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
// Mark PVC as Resize Started
progressCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = setResizer(newPVC, resizerName)
return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}
// MarkControllerReisizeInProgress marks controller-side resizing as in progress on the PVC
// and records the requested size in status.allocatedResources.
func MarkControllerReisizeInProgress(pvc *v1.PersistentVolumeClaim, resizerName string, newSize resource.Quantity, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
// Mark PVC as Resize Started
progressCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimControllerResizeInProgress)
newPVC = mergeStorageAllocatedResources(newPVC, newSize)
newPVC = setResizer(newPVC, resizerName)
return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}
// SetClaimResizer sets resizer annotation on PVC
func SetClaimResizer(
pvc *v1.PersistentVolumeClaim,
resizerName string,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC = setResizer(newPVC, resizerName)
return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}
func setResizer(pvc *v1.PersistentVolumeClaim, resizerName string) *v1.PersistentVolumeClaim {
if val, ok := pvc.Annotations[volumetypes.VolumeResizerKey]; ok && val == resizerName {
return pvc
}
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volumetypes.VolumeResizerKey, resizerName)
return pvc
}
// MarkForFSResize marks file system resizing as pending
func MarkForFSResize(
pvc *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: "Waiting for user to (re-)start a pod to finish file system resize of volume on node.",
}
conditions := []v1.PersistentVolumeClaimCondition{pvcCondition}
newPVC := pvc.DeepCopy()
if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizePending)
}
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, true /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
// MarkResizeFinished marks all resizing as done
func MarkResizeFinished(
pvc *v1.PersistentVolumeClaim,
newSize resource.Quantity,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
return MarkFSResizeFinished(pvc, newSize, kubeClient)
}
// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
pvc *v1.PersistentVolumeClaim,
newSize resource.Quantity,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity[v1.ResourceStorage] = newSize
// if RecoverVolumeExpansionFailure is enabled, remove the storage entry from AllocatedResourceStatuses
if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
allocatedResourceStatusMap := newPVC.Status.AllocatedResourceStatuses
delete(allocatedResourceStatusMap, v1.ResourceStorage)
if len(allocatedResourceStatusMap) == 0 {
newPVC.Status.AllocatedResourceStatuses = nil
} else {
newPVC.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
}
}
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
// MarkNodeExpansionFinishedWithRecovery updates the PVC capacity and removes the storage
// entry from AllocatedResourceStatuses once node expansion has finished.
func MarkNodeExpansionFinishedWithRecovery(
pvc *v1.PersistentVolumeClaim,
newSize resource.Quantity,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity[v1.ResourceStorage] = newSize
allocatedResourceStatusMap := newPVC.Status.AllocatedResourceStatuses
delete(allocatedResourceStatusMap, v1.ResourceStorage)
if len(allocatedResourceStatusMap) == 0 {
newPVC.Status.AllocatedResourceStatuses = nil
} else {
newPVC.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
}
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
// MarkNodeExpansionInfeasible marks node expansion of the PVC as infeasible. Kubelet should not
// retry expansion of volumes that are in an infeasible state.
func MarkNodeExpansionInfeasible(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInfeasible)
errorCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimNodeResizeError,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: fmt.Sprintf("failed to expand pvc with %v", err),
}
newPVC = MergeResizeConditionOnPVC(newPVC,
[]v1.PersistentVolumeClaimCondition{errorCondition},
true /* keepOldResizeConditions */)
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if updateErr != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
}
return updatedClaim, nil
}
// MarkNodeExpansionFailedCondition records a node resize error condition on the PVC,
// keeping any other resize conditions intact.
func MarkNodeExpansionFailedCondition(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
errorCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimNodeResizeError,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: fmt.Sprintf("failed to expand pvc with %v", err),
}
newPVC = MergeResizeConditionOnPVC(newPVC,
[]v1.PersistentVolumeClaimCondition{errorCondition},
true /* keepOldResizeConditions */)
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if updateErr != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, updateErr)
}
return updatedClaim, nil
}
// MarkNodeExpansionInProgress marks pvc expansion in progress on node
func MarkNodeExpansionInProgress(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInProgress)
updatedPVC, err := PatchPVCStatus(pvc /* oldPVC */, newPVC, kubeClient)
return updatedPVC, err
}
// PatchPVCStatus updates PVC status using the PATCH verb.
// Don't use Update: this can be called from kubelet, and if kubelet has an older client its
// updates would overwrite new fields. To avoid writing to a stale object, ResourceVersion is
// added to the patch so that Patch fails if the patch's RV != the actual up-to-date RV, just as Update would.
func PatchPVCStatus(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
patchBytes, err := createPVCPatch(oldPVC, newPVC, true /* addResourceVersionCheck */)
if err != nil {
return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if updateErr != nil {
return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr)
}
return updatedClaim, nil
}
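// A minimal usage sketch, assuming a hypothetical pvc (with an initialized
// status capacity map) and kubeClient: prepare a deep copy, modify only
// status fields, and patch.
//
//	newPVC := pvc.DeepCopy()
//	newPVC.Status.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
//	updated, err := PatchPVCStatus(pvc, newPVC, kubeClient)
//	// err is non-nil if the PVC changed since it was read (the embedded
//	// resourceVersion no longer matches) or the patch could not be applied.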
func createPVCPatch(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim, addResourceVersionCheck bool) ([]byte, error) {
oldData, err := json.Marshal(oldPVC)
if err != nil {
return nil, fmt.Errorf("failed to marshal old data: %v", err)
}
newData, err := json.Marshal(newPVC)
if err != nil {
return nil, fmt.Errorf("failed to marshal new data: %v", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
if err != nil {
return nil, fmt.Errorf("failed to create 2 way merge patch: %v", err)
}
if addResourceVersionCheck {
patchBytes, err = addResourceVersion(patchBytes, oldPVC.ResourceVersion)
if err != nil {
return nil, fmt.Errorf("failed to add resource version: %v", err)
}
}
return patchBytes, nil
}
func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, error) {
var patchMap map[string]interface{}
err := json.Unmarshal(patchBytes, &patchMap)
if err != nil {
return nil, fmt.Errorf("error unmarshalling patch: %v", err)
}
u := unstructured.Unstructured{Object: patchMap}
a, err := meta.Accessor(&u)
if err != nil {
return nil, fmt.Errorf("error creating accessor: %v", err)
}
a.SetResourceVersion(resourceVersion)
versionBytes, err := json.Marshal(patchMap)
if err != nil {
return nil, fmt.Errorf("error marshalling json patch: %v", err)
}
return versionBytes, nil
}
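// An illustrative example of the transformation: given patchBytes of
// {"status":{"capacity":{"storage":"2Gi"}}} and resourceVersion "42",
// addResourceVersion returns (modulo key order):
//
//	{"metadata":{"resourceVersion":"42"},"status":{"capacity":{"storage":"2Gi"}}}
//
// The apiserver then rejects the patch if the object's current resourceVersion
// is no longer "42", which is what makes PatchPVCStatus conflict-safe.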
// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition, keepOldResizeConditions bool) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
oldConditions := pvc.Status.Conditions
newConditions := []v1.PersistentVolumeClaimCondition{}
for _, condition := range oldConditions {
// If the condition is not a resize-related type, keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
if newCondition.condition.Status != condition.Status {
newConditions = append(newConditions, newCondition.condition)
} else {
newConditions = append(newConditions, condition)
}
newCondition.processed = true
} else if keepOldResizeConditions {
// if keepOldResizeConditions is true, we keep the old resize conditions that were present in the
// existing pvc.Status.Conditions field.
newConditions = append(newConditions, condition)
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
pvc.Status.Conditions = newConditions
return pvc
}
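// An illustrative sketch of the merge semantics: passing an empty condition
// list with keepOldResizeConditions=false drops all resize-related conditions
// while preserving unrelated ones. The condition values are hypothetical.
//
//	pvc.Status.Conditions = []v1.PersistentVolumeClaimCondition{
//		{Type: v1.PersistentVolumeClaimResizing, Status: v1.ConditionTrue},
//		{Type: "SomeOtherCondition", Status: v1.ConditionTrue},
//	}
//	pvc = MergeResizeConditionOnPVC(pvc, nil, false /* keepOldResizeConditions */)
//	// pvc.Status.Conditions now contains only "SomeOtherCondition".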
func mergeStorageResourceStatus(pvc *v1.PersistentVolumeClaim, status v1.ClaimResourceStatus) *v1.PersistentVolumeClaim {
allocatedResourceStatusMap := pvc.Status.AllocatedResourceStatuses
if allocatedResourceStatusMap == nil {
pvc.Status.AllocatedResourceStatuses = map[v1.ResourceName]v1.ClaimResourceStatus{
v1.ResourceStorage: status,
}
return pvc
}
allocatedResourceStatusMap[v1.ResourceStorage] = status
pvc.Status.AllocatedResourceStatuses = allocatedResourceStatusMap
return pvc
}
func mergeStorageAllocatedResources(pvc *v1.PersistentVolumeClaim, size resource.Quantity) *v1.PersistentVolumeClaim {
allocatedResourcesMap := pvc.Status.AllocatedResources
if allocatedResourcesMap == nil {
pvc.Status.AllocatedResources = map[v1.ResourceName]resource.Quantity{
v1.ResourceStorage: size,
}
return pvc
}
allocatedResourcesMap[v1.ResourceStorage] = size
pvc.Status.AllocatedResources = allocatedResourcesMap
return pvc
}
// GenericResizeFS calls the generic filesystem resizer for plugins that don't have any special filesystem resize requirements
func GenericResizeFS(host volume.VolumeHost, devicePath, deviceMountPath string) (bool, error) {
resizer := mount.NewResizeFs(exec.New())
return resizer.Resize(devicePath, deviceMountPath)
}
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
)
// SELinuxLabelTranslator translates v1.SELinuxOptions of a process to SELinux file label.
type SELinuxLabelTranslator interface {
// SELinuxOptionsToFileLabel returns SELinux file label for given SELinuxOptions
// of a container process.
// When Role, User or Type are empty, they're read from the system defaults.
// It returns "" and no error on platforms that do not have SELinux enabled
// or don't support SELinux at all.
SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error)
// SELinuxEnabled returns true when the OS has enabled SELinux support.
SELinuxEnabled() bool
}
// translator is the real implementation of the interface.
// On Linux with SELinux enabled it translates; otherwise it always returns an empty string and no error.
type translator struct{}
var _ SELinuxLabelTranslator = &translator{}
// NewSELinuxLabelTranslator returns new SELinuxLabelTranslator for the platform.
func NewSELinuxLabelTranslator() SELinuxLabelTranslator {
return &translator{}
}
// SELinuxOptionsToFileLabel returns SELinux file label for given SELinuxOptions
// of a container process.
// When Role, User or Type are empty, they're read from the system defaults.
// It returns "" and no error on platforms that do not have SELinux enabled
// or don't support SELinux at all.
func (l *translator) SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error) {
if opts == nil {
return "", nil
}
args := contextOptions(opts)
if len(args) == 0 {
return "", nil
}
processLabel, fileLabel, err := label.InitLabels(args)
if err != nil {
// In theory, this should be unreachable. InitLabels can fail only when args contain an unknown option,
// and all options returned by contextOptions are known.
return "", &SELinuxLabelTranslationError{msg: err.Error()}
}
// InitLabels() may allocate a new unique SELinux label in kubelet memory. The label is *not* allocated
// in the container runtime. Clear it to avoid memory problems.
// ReleaseLabel on non-allocated label is NOOP.
selinux.ReleaseLabel(processLabel)
return fileLabel, nil
}
// Convert SELinuxOptions to []string accepted by label.InitLabels
func contextOptions(opts *v1.SELinuxOptions) []string {
if opts == nil {
return nil
}
args := make([]string, 0, 3)
if opts.User != "" {
args = append(args, "user:"+opts.User)
}
if opts.Role != "" {
args = append(args, "role:"+opts.Role)
}
if opts.Type != "" {
args = append(args, "type:"+opts.Type)
}
if opts.Level != "" {
args = append(args, "level:"+opts.Level)
}
return args
}
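// An illustrative example of the mapping performed by contextOptions:
//
//	opts := &v1.SELinuxOptions{Type: "container_t", Level: "s0:c1,c2"}
//	args := contextOptions(opts)
//	// args == []string{"type:container_t", "level:s0:c1,c2"}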
func (l *translator) SELinuxEnabled() bool {
return selinux.GetEnabled()
}
// Fake implementation of the interface for unit tests.
type fakeTranslator struct{}
var _ SELinuxLabelTranslator = &fakeTranslator{}
// NewFakeSELinuxLabelTranslator returns a fake translator for unit tests.
// It imitates a real translator on platforms that do not have SELinux enabled
// or don't support SELinux at all.
func NewFakeSELinuxLabelTranslator() SELinuxLabelTranslator {
return &fakeTranslator{}
}
// SELinuxOptionsToFileLabel returns SELinux file label for given options.
func (l *fakeTranslator) SELinuxOptionsToFileLabel(opts *v1.SELinuxOptions) (string, error) {
if opts == nil {
return "", nil
}
// Fill empty values from "system defaults" (taken from Fedora Linux).
user := opts.User
if user == "" {
user = "system_u"
}
role := opts.Role
if role == "" {
role = "object_r"
}
// opts is context of the *process* to run in a container. Translate
// process type "container_t" to file label type "container_file_t".
// (The rest of the context is the same for processes and files).
fileType := opts.Type
if fileType == "" || fileType == "container_t" {
fileType = "container_file_t"
}
level := opts.Level
if level == "" {
// A real system would allocate an empty level randomly; the fake uses a fixed value.
level = "s0:c998,c999"
}
ctx := fmt.Sprintf("%s:%s:%s:%s", user, role, fileType, level)
return ctx, nil
}
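// An illustrative example: with only the level set, the fake fills in the
// remaining parts from its fixed "defaults":
//
//	opts := &v1.SELinuxOptions{Level: "s0:c1,c2"}
//	lbl, _ := NewFakeSELinuxLabelTranslator().SELinuxOptionsToFileLabel(opts)
//	// lbl == "system_u:object_r:container_file_t:s0:c1,c2"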
func (l *fakeTranslator) SELinuxEnabled() bool {
return true
}
type SELinuxLabelTranslationError struct {
msg string
}
func (e *SELinuxLabelTranslationError) Error() string {
return e.msg
}
func IsSELinuxLabelTranslationError(err error) bool {
var seLinuxError *SELinuxLabelTranslationError
return errors.As(err, &seLinuxError)
}
// SupportsSELinuxContextMount checks if the given volumeSpec supports mounting with -o context
func SupportsSELinuxContextMount(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) (bool, error) {
plugin, _ := volumePluginMgr.FindPluginBySpec(volumeSpec)
if plugin != nil {
return plugin.SupportsSELinuxContextMount(volumeSpec)
}
return false, nil
}
// VolumeSupportsSELinuxMount returns true if given volume access mode can support mount with SELinux mount options.
func VolumeSupportsSELinuxMount(volumeSpec *volume.Spec) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
return false
}
if volumeSpec.PersistentVolume == nil {
return false
}
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMount) {
return true
}
// Only SELinuxMountReadWriteOncePod feature is enabled
if len(volumeSpec.PersistentVolume.Spec.AccessModes) != 1 {
// ReadWriteOncePod must be the only access mode of the volume
return false
}
if !v1helper.ContainsAccessMode(volumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) {
// Not a RWOP volume
return false
}
// RWOP volume
return true
}
// MultipleSELinuxLabelsError tells that one volume in a pod is mounted in multiple containers and each has a different SELinux label.
type MultipleSELinuxLabelsError struct {
labels []string
}
func (e *MultipleSELinuxLabelsError) Error() string {
return fmt.Sprintf("multiple SELinux labels found: %s", strings.Join(e.labels, ","))
}
func (e *MultipleSELinuxLabelsError) Labels() []string {
return e.labels
}
func IsMultipleSELinuxLabelsError(err error) bool {
var multiError *MultipleSELinuxLabelsError
return errors.As(err, &multiError)
}
// AddSELinuxMountOption adds -o context="XYZ" mount option to a given list
func AddSELinuxMountOption(options []string, seLinuxContext string) []string {
if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
return options
}
// Use double quotes to support a comma "," in the SELinux context string.
// For example: dirsync,context="system_u:object_r:container_file_t:s0:c15,c25",noatime
return append(options, fmt.Sprintf("context=%q", seLinuxContext))
}
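// An illustrative example, assuming the SELinuxMountReadWriteOncePod feature
// gate is enabled:
//
//	opts := AddSELinuxMountOption([]string{"noatime"}, "system_u:object_r:container_file_t:s0:c15,c25")
//	// opts == []string{"noatime", `context="system_u:object_r:container_file_t:s0:c15,c25"`}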
// SELinuxLabelInfo contains information about SELinux labels that should be used to mount a volume for a Pod.
type SELinuxLabelInfo struct {
// SELinuxMountLabel is the SELinux label that should be used to mount the volume.
// The volume plugin supports SELinuxMount and the Pod did not opt out via SELinuxChangePolicy.
// Empty string otherwise.
SELinuxMountLabel string
// SELinuxProcessLabel is the SELinux label that the container runtime will use for the Pod.
// It is set regardless of whether the volume plugin supports SELinuxMount or the Pod opted out via SELinuxChangePolicy.
SELinuxProcessLabel string
// PluginSupportsSELinuxContextMount is true if the volume plugin supports SELinux mount.
PluginSupportsSELinuxContextMount bool
}
// GetMountSELinuxLabel returns SELinux labels that should be used to mount the given volume volumeSpec and podSecurityContext.
// It expects effectiveSELinuxContainerLabels as returned by volumeutil.GetPodVolumeNames, i.e. with all SELinuxOptions
// from all containers that use the volume in the pod, potentially expanded with PodSecurityContext.SELinuxOptions,
// if container's SELinuxOptions are nil.
// It does not evaluate the volume access mode! It's up to the caller to check the SELinuxMount
// feature gate; the caller may need to bump different metrics based on feature gates / access modes / labels anyway.
func GetMountSELinuxLabel(volumeSpec *volume.Spec, effectiveSELinuxContainerLabels []*v1.SELinuxOptions, podSecurityContext *v1.PodSecurityContext, volumePluginMgr *volume.VolumePluginMgr, seLinuxTranslator SELinuxLabelTranslator) (SELinuxLabelInfo, error) {
info := SELinuxLabelInfo{}
if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
return info, nil
}
if !seLinuxTranslator.SELinuxEnabled() {
return info, nil
}
pluginSupportsSELinuxContextMount, err := SupportsSELinuxContextMount(volumeSpec, volumePluginMgr)
if err != nil {
return info, err
}
info.PluginSupportsSELinuxContextMount = pluginSupportsSELinuxContextMount
// Collect all SELinux options from all containers that use this volume.
// A set will squash any duplicates.
labels := sets.New[string]()
for _, containerLabel := range effectiveSELinuxContainerLabels {
lbl, err := seLinuxTranslator.SELinuxOptionsToFileLabel(containerLabel)
if err != nil {
fullErr := fmt.Errorf("failed to construct SELinux label from context %q: %w", containerLabel, err)
return info, fullErr
}
labels.Insert(lbl)
}
// Ensure that all containers use the same SELinux label.
if labels.Len() > 1 {
// This volume is used with more than one SELinux label in the pod.
return info, &MultipleSELinuxLabelsError{labels: labels.UnsortedList()}
}
if labels.Len() == 0 {
return info, nil
}
lbl, _ := labels.PopAny()
info.SELinuxProcessLabel = lbl
info.SELinuxMountLabel = lbl
if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxChangePolicy) &&
podSecurityContext != nil &&
podSecurityContext.SELinuxChangePolicy != nil &&
*podSecurityContext.SELinuxChangePolicy == v1.SELinuxChangePolicyRecursive {
// The pod has opted into recursive SELinux label changes. Do not mount with -o context.
info.SELinuxMountLabel = ""
}
if !pluginSupportsSELinuxContextMount {
// The volume plugin does not support SELinux mount. Do not mount with -o context.
info.SELinuxMountLabel = ""
}
return info, nil
}
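// A minimal usage sketch with hypothetical volumeSpec, containerSELinuxOptions,
// pod, volumePluginMgr, and mountOptions values:
//
//	info, err := GetMountSELinuxLabel(volumeSpec, containerSELinuxOptions, pod.Spec.SecurityContext, volumePluginMgr, NewSELinuxLabelTranslator())
//	if IsMultipleSELinuxLabelsError(err) {
//		// containers disagree on the label; the volume cannot be mounted with -o context
//	}
//	if err == nil && info.SELinuxMountLabel != "" {
//		mountOptions = AddSELinuxMountOption(mountOptions, info.SELinuxMountLabel)
//	}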
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"sort"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
storagev1listers "k8s.io/client-go/listers/storage/v1"
"k8s.io/klog/v2"
)
const (
// IsDefaultStorageClassAnnotation is the StorageClass annotation that
// marks a class as the default StorageClass.
IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
// TODO: remove Beta when no longer used
BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
)
// GetDefaultClass returns the default StorageClass from the store, or nil.
func GetDefaultClass(lister storagev1listers.StorageClassLister) (*storagev1.StorageClass, error) {
list, err := lister.List(labels.Everything())
if err != nil {
return nil, err
}
defaultClasses := []*storagev1.StorageClass{}
for _, class := range list {
if IsDefaultAnnotation(class.ObjectMeta) {
defaultClasses = append(defaultClasses, class)
klog.V(4).Infof("GetDefaultClass added: %s", class.Name)
}
}
if len(defaultClasses) == 0 {
return nil, nil
}
// Primary sort by creation timestamp, newest first
// Secondary sort by class name, ascending order
sort.Slice(defaultClasses, func(i, j int) bool {
if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() {
return defaultClasses[i].Name < defaultClasses[j].Name
}
return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano()
})
if len(defaultClasses) > 1 {
klog.V(4).Infof("%d default StorageClasses were found, choosing: %s", len(defaultClasses), defaultClasses[0].Name)
}
return defaultClasses[0], nil
}
// IsDefaultAnnotation returns true if the default storage class
// annotation is set.
// TODO: remove Beta when no longer needed
func IsDefaultAnnotation(obj metav1.ObjectMeta) bool {
if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" {
return true
}
if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" {
return true
}
return false
}
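// An illustrative sketch of a StorageClass that GetDefaultClass would pick as
// the default (the name is hypothetical):
//
//	sc := &storagev1.StorageClass{
//		ObjectMeta: metav1.ObjectMeta{
//			Name:        "fast-ssd",
//			Annotations: map[string]string{IsDefaultStorageClassAnnotation: "true"},
//		},
//	}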
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subpath
import "os"
// Interface defines the set of methods all subpathers must implement
type Interface interface {
// CleanSubPaths removes any bind-mounts created by PrepareSafeSubpath in given
// pod volume directory.
CleanSubPaths(podDir string, volumeName string) error
// PrepareSafeSubpath does everything that's necessary to prepare a subPath
// that's 1) inside given volumePath and 2) immutable after this call.
//
// newHostPath - location of the prepared subPath. It should be used instead of
// hostPath when running the container.
// cleanupAction - action to run when the container is running or it failed to start.
//
// CleanupAction must be called immediately after the container with given
// subpath starts. On the other hand, Interface.CleanSubPaths must be called
// when the pod finishes.
PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error)
// SafeMakeDir creates a subdir within the given base. It makes sure that the
// created directory does not escape the given base directory by mis-using
// symlinks. Note that the function only makes sure that it creates the directory
// somewhere under the base, nothing else. E.g. if the directory already
// exists, it may exist outside of the base due to symlinks.
// This method should be used if the directory to create is inside volume
// that's under user control. User must not be able to use symlinks to
// escape the volume to create directories somewhere else.
SafeMakeDir(subdir string, base string, perm os.FileMode) error
}
// Subpath defines the attributes of a subpath
type Subpath struct {
// index of the VolumeMount for this container
VolumeMountIndex int
// Full path to the subpath directory on the host
Path string
// Name of the volume; must be a valid directory name.
VolumeName string
// Full path to the volume path
VolumePath string
// Path to the pod's directory, including pod UID
PodDir string
// Name of the container
ContainerName string
}
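// An illustrative Subpath for the first volumeMount of container "app" into
// the "data" volume; all paths are hypothetical and <pod-uid> is a placeholder.
//
//	sp := Subpath{
//		VolumeMountIndex: 0,
//		Path:             "/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/data/logs",
//		VolumeName:       "data",
//		VolumePath:       "/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/data",
//		PodDir:           "/var/lib/kubelet/pods/<pod-uid>",
//		ContainerName:    "app",
//	}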
// Compile-time check for all implementers of the subpath interface
var _ Interface = &subpath{}
var _ Interface = &FakeSubpath{}
// FakeSubpath is a subpather implementation for testing
type FakeSubpath struct{}
// PrepareSafeSubpath is a fake implementation of PrepareSafeSubpath. Always returns
// newHostPath == subPath.Path
func (fs *FakeSubpath) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) {
return subPath.Path, nil, nil
}
// CleanSubPaths is a fake implementation of CleanSubPaths. It is a noop
func (fs *FakeSubpath) CleanSubPaths(podDir string, volumeName string) error {
return nil
}
// SafeMakeDir is a fake implementation of SafeMakeDir. It is a noop
func (fs *FakeSubpath) SafeMakeDir(pathname string, base string, perm os.FileMode) error {
return nil
}
//go:build linux
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subpath
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"golang.org/x/sys/unix"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
)
const (
// place for subpath mounts
// TODO: pass in directory using kubelet_getters instead
containerSubPathDirectoryName = "volume-subpaths"
// syscall.Openat flags used to traverse directories not following symlinks
nofollowFlags = unix.O_RDONLY | unix.O_NOFOLLOW
// flags for getting file descriptor without following the symlink
openFDFlags = unix.O_NOFOLLOW | unix.O_PATH
)
type subpath struct {
mounter mount.Interface
}
// New returns a subpath.Interface for the current system
func New(mounter mount.Interface) Interface {
return &subpath{
mounter: mounter,
}
}
func (sp *subpath) CleanSubPaths(podDir string, volumeName string) error {
return doCleanSubPaths(sp.mounter, podDir, volumeName)
}
func (sp *subpath) SafeMakeDir(subdir string, base string, perm os.FileMode) error {
realBase, err := filepath.EvalSymlinks(base)
if err != nil {
return fmt.Errorf("error resolving symlinks in %s: %s", base, err)
}
realFullPath := filepath.Join(realBase, subdir)
return doSafeMakeDir(realFullPath, realBase, perm)
}
func (sp *subpath) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) {
newHostPath, err = doBindSubPath(sp.mounter, subPath)
// There is no action when the container starts. Bind-mount will be cleaned
// when container stops by CleanSubPaths.
cleanupAction = nil
return newHostPath, cleanupAction, err
}
// safeOpenSubPath opens subpath and returns its fd.
func safeOpenSubPath(mounter mount.Interface, subpath Subpath) (int, error) {
if !mount.PathWithinBase(subpath.Path, subpath.VolumePath) {
return -1, fmt.Errorf("subpath %q not within volume path %q", subpath.Path, subpath.VolumePath)
}
fd, err := doSafeOpen(subpath.Path, subpath.VolumePath)
if err != nil {
return -1, fmt.Errorf("error opening subpath %v: %v", subpath.Path, err)
}
return fd, nil
}
// prepareSubpathTarget creates the target for a bind-mount of the subpath. It returns
// "true" when the target already exists and something is mounted there.
func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, string, error) {
// Early check for already bind-mounted subpath.
bindPathTarget := getSubpathBindTarget(subpath)
notMount, err := mount.IsNotMountPoint(mounter, bindPathTarget)
if err != nil {
if !os.IsNotExist(err) {
return false, "", fmt.Errorf("error checking path %s for mount: %s", bindPathTarget, err)
}
// Ignore ErrorNotExist: the file/directory will be created below if it does not exist yet.
notMount = true
}
if !notMount {
// It's already mounted, so check if it's bind-mounted to the same path
samePath, err := checkSubPathFileEqual(subpath, bindPathTarget)
if err != nil {
return false, "", fmt.Errorf("error checking subpath mount info for %s: %s", bindPathTarget, err)
}
if !samePath {
// It's already mounted but not what we want, unmount it
if err = mounter.Unmount(bindPathTarget); err != nil {
return false, "", fmt.Errorf("error ummounting %s: %s", bindPathTarget, err)
}
} else {
// It's already mounted
klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget)
return true, bindPathTarget, nil
}
}
// bindPathTarget is in /var/lib/kubelet and thus reachable without any
// translation, even for a containerized kubelet.
bindParent := filepath.Dir(bindPathTarget)
err = os.MkdirAll(bindParent, 0750)
if err != nil && !os.IsExist(err) {
return false, "", fmt.Errorf("error creating directory %s: %s", bindParent, err)
}
t, err := os.Lstat(subpath.Path)
if err != nil {
return false, "", fmt.Errorf("lstat %s failed: %s", subpath.Path, err)
}
if t.Mode()&os.ModeDir > 0 {
if err = os.Mkdir(bindPathTarget, 0750); err != nil && !os.IsExist(err) {
return false, "", fmt.Errorf("error creating directory %s: %s", bindPathTarget, err)
}
} else {
// "/bin/touch <bindPathTarget>".
// A file is enough for all possible targets (symlink, device, pipe,
// socket, ...), bind-mounting them into a file correctly changes type
// of the target file.
if err = ioutil.WriteFile(bindPathTarget, []byte{}, 0640); err != nil {
return false, "", fmt.Errorf("error creating file %s: %s", bindPathTarget, err)
}
}
return false, bindPathTarget, nil
}
func checkSubPathFileEqual(subpath Subpath, bindMountTarget string) (bool, error) {
s, err := os.Lstat(subpath.Path)
if err != nil {
return false, fmt.Errorf("stat %s failed: %s", subpath.Path, err)
}
t, err := os.Lstat(bindMountTarget)
if err != nil {
return false, fmt.Errorf("lstat %s failed: %s", bindMountTarget, err)
}
if !os.SameFile(s, t) {
return false, nil
}
return true, nil
}
func getSubpathBindTarget(subpath Subpath) string {
// containerName is DNS label, i.e. safe as a directory name.
return filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName, strconv.Itoa(subpath.VolumeMountIndex))
}
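// For illustration, the bind target has the shape
//
//	<podDir>/volume-subpaths/<volumeName>/<containerName>/<volumeMountIndex>
//
// e.g. /var/lib/kubelet/pods/<pod-uid>/volume-subpaths/data/app/0 for the
// hypothetical Subpath sketched earlier.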
func doBindSubPath(mounter mount.Interface, subpath Subpath) (hostPath string, err error) {
// Linux, kubelet runs on the host:
// - safely open the subpath
// - bind-mount /proc/<pid of kubelet>/fd/<fd> to subpath target
// User can't change /proc/<pid of kubelet>/fd/<fd> to point to a bad place.
// Evaluate all symlinks here once for all subsequent functions.
newVolumePath, err := filepath.EvalSymlinks(subpath.VolumePath)
if err != nil {
return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.VolumePath, err)
}
newPath, err := filepath.EvalSymlinks(subpath.Path)
if err != nil {
return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err)
}
klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath)
subpath.VolumePath = newVolumePath
subpath.Path = newPath
fd, err := safeOpenSubPath(mounter, subpath)
if err != nil {
return "", err
}
defer syscall.Close(fd)
alreadyMounted, bindPathTarget, err := prepareSubpathTarget(mounter, subpath)
if err != nil {
return "", err
}
if alreadyMounted {
return bindPathTarget, nil
}
success := false
defer func() {
// Cleanup subpath on error
if !success {
klog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget)
if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil {
klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr)
}
}
}()
kubeletPid := os.Getpid()
mountSource := fmt.Sprintf("/proc/%d/fd/%v", kubeletPid, fd)
// Do the bind mount
options := []string{"bind"}
mountFlags := []string{"--no-canonicalize"}
klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget)
if err = mounter.MountSensitiveWithoutSystemdWithMountFlags(mountSource, bindPathTarget, "" /*fstype*/, options, nil /* sensitiveOptions */, mountFlags); err != nil {
return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err)
}
success = true
klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget)
return bindPathTarget, nil
}
// doCleanSubPaths tears down the subpath bind mounts for a pod
func doCleanSubPaths(mounter mount.Interface, podDir string, volumeName string) error {
// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*
subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
klog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir)
containerDirs, err := ioutil.ReadDir(subPathDir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("error reading %s: %s", subPathDir, err)
}
for _, containerDir := range containerDirs {
if !containerDir.IsDir() {
klog.V(4).Infof("Container file is not a directory: %s", containerDir.Name())
continue
}
klog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name())
// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/*
fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name())
// The original traversal method here was ReadDir, which was not robust in handling errors such as "stale NFS file handle",
// so it was replaced with filepath.Walk in a later patch, which passes errors through to be handled by the callback WalkFunc.
// Since Go 1.16, WalkDir is available; it is more efficient than Walk because the callback WalkDirFunc is called before
// reading a directory, saving time when a container's subPath contains lots of dirs.
// See https://github.com/kubernetes/kubernetes/pull/71804 and https://github.com/kubernetes/kubernetes/issues/107667 for more details.
err = filepath.WalkDir(fullContainerDirPath, func(path string, info os.DirEntry, _ error) error {
if path == fullContainerDirPath {
// Skip top level directory
return nil
}
// pass through errors and let doCleanSubPath handle them
if err = doCleanSubPath(mounter, fullContainerDirPath, filepath.Base(path)); err != nil {
return err
}
// We need to check that info is not nil. This may happen when the incoming err is not nil due to stale mounts or permission errors.
if info != nil && info.IsDir() {
// skip subdirs of the volume: only the first level matters for unmounting, otherwise we would try to unmount subdirs of the volume
return filepath.SkipDir
}
return nil
})
if err != nil {
return fmt.Errorf("error processing %s: %s", fullContainerDirPath, err)
}
// Whole container has been processed, remove its directory.
if err := os.Remove(fullContainerDirPath); err != nil {
return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err)
}
klog.V(5).Infof("Removed %s", fullContainerDirPath)
}
// Whole pod volume subpaths have been cleaned up, remove its subpath directory.
if err := os.Remove(subPathDir); err != nil {
return fmt.Errorf("error deleting %s: %s", subPathDir, err)
}
klog.V(5).Infof("Removed %s", subPathDir)
// Remove entire subpath directory if it's the last one
podSubPathDir := filepath.Join(podDir, containerSubPathDirectoryName)
if err := os.Remove(podSubPathDir); err != nil && !os.IsExist(err) {
return fmt.Errorf("error deleting %s: %s", podSubPathDir, err)
}
klog.V(5).Infof("Removed %s", podSubPathDir)
return nil
}
// doCleanSubPath tears down the single subpath bind mount
func doCleanSubPath(mounter mount.Interface, fullContainerDirPath, subPathIndex string) error {
// process /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/<container name>/<subPathName>
klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex)
fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex)
if err := mount.CleanupMountPoint(fullSubPath, mounter, true); err != nil {
return fmt.Errorf("error cleaning subpath mount %s: %s", fullSubPath, err)
}
klog.V(4).Infof("Successfully cleaned subpath directory %s", fullSubPath)
return nil
}
// cleanSubPath will tear down the subpath bind mount and remove any directories if they are empty
func cleanSubPath(mounter mount.Interface, subpath Subpath) error {
containerDir := filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName)
// Clean subdir bindmount
if err := doCleanSubPath(mounter, containerDir, strconv.Itoa(subpath.VolumeMountIndex)); err != nil && !os.IsNotExist(err) {
return err
}
// Recursively remove directories if empty
if err := removeEmptyDirs(subpath.PodDir, containerDir); err != nil {
return err
}
return nil
}
// removeEmptyDirs works backwards from endDir to baseDir and removes each directory
// if it is empty. It stops once it encounters a directory that has content.
func removeEmptyDirs(baseDir, endDir string) error {
if !mount.PathWithinBase(endDir, baseDir) {
return fmt.Errorf("endDir %q is not within baseDir %q", endDir, baseDir)
}
for curDir := endDir; curDir != baseDir; curDir = filepath.Dir(curDir) {
s, err := os.Stat(curDir)
if err != nil {
if os.IsNotExist(err) {
klog.V(5).Infof("curDir %q doesn't exist, skipping", curDir)
continue
}
return fmt.Errorf("error stat %q: %v", curDir, err)
}
if !s.IsDir() {
return fmt.Errorf("path %q not a directory", curDir)
}
err = os.Remove(curDir)
if os.IsExist(err) {
klog.V(5).Infof("Directory %q not empty, not removing", curDir)
break
} else if err != nil {
return fmt.Errorf("error removing directory %q: %v", curDir, err)
}
klog.V(5).Infof("Removed directory %q", curDir)
}
return nil
}
// doSafeMakeDir creates a directory at pathname, but only if it is within base.
func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
klog.V(4).Infof("Creating directory %q within base %q", pathname, base)
if !mount.PathWithinBase(pathname, base) {
return fmt.Errorf("path %s is outside of allowed base %s", pathname, base)
}
// Quick check if the directory already exists
s, err := os.Stat(pathname)
if err == nil {
// Path exists
if s.IsDir() {
// The directory already exists. It can be outside of the parent,
// but there is no race-proof check.
klog.V(4).Infof("Directory %s already exists", pathname)
return nil
}
return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR}
}
// Find all existing directories
existingPath, toCreate, err := findExistingPrefix(base, pathname)
if err != nil {
return fmt.Errorf("error opening directory %s: %s", pathname, err)
}
// Ensure the existing directory is inside allowed base
fullExistingPath, err := filepath.EvalSymlinks(existingPath)
if err != nil {
return fmt.Errorf("error opening directory %s: %s", existingPath, err)
}
if !mount.PathWithinBase(fullExistingPath, base) {
return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err)
}
klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...))
parentFD, err := doSafeOpen(fullExistingPath, base)
if err != nil {
return fmt.Errorf("cannot open directory %s: %s", existingPath, err)
}
childFD := -1
defer func() {
if parentFD != -1 {
if err = syscall.Close(parentFD); err != nil {
klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
}
}
if childFD != -1 {
if err = syscall.Close(childFD); err != nil {
klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err)
}
}
}()
currentPath := fullExistingPath
// create the directories one by one, making sure nobody can change
// created directory into symlink.
for _, dir := range toCreate {
currentPath = filepath.Join(currentPath, dir)
klog.V(4).Infof("Creating %s", dir)
err = syscall.Mkdirat(parentFD, currentPath, uint32(perm))
if err != nil {
return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
}
// Dive into the created directory
childFD, err = syscall.Openat(parentFD, dir, nofollowFlags|unix.O_CLOEXEC, 0)
if err != nil {
return fmt.Errorf("cannot open %s: %s", currentPath, err)
}
// We can be sure that childFD is safe to use. It could be changed
// by user after Mkdirat() and before Openat(), however:
// - it could not be changed to symlink - we use nofollowFlags
// - it could be changed to a file (or device, pipe, socket, ...)
// but either subsequent Mkdirat() fails or we mount this file
// to the user's container. Security is not violated in either case,
// and the user either gets an error or a file that they can already access.
if err = syscall.Close(parentFD); err != nil {
klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
}
parentFD = childFD
childFD = -1
}
// Everything was created. Mkdirat(..., perm) above was affected by the current
// umask, so we must apply the right permissions to the last created directory
// (that's the one that will be available to the container as the subpath),
// so the user can read/write it. parentFD is the last created directory.
// Translate perm (os.FileMode) to the uint32 that fchmod() expects.
kernelPerm := uint32(perm & os.ModePerm)
if perm&os.ModeSetgid > 0 {
kernelPerm |= syscall.S_ISGID
}
if perm&os.ModeSetuid > 0 {
kernelPerm |= syscall.S_ISUID
}
if perm&os.ModeSticky > 0 {
kernelPerm |= syscall.S_ISVTX
}
if err = syscall.Fchmod(parentFD, kernelPerm); err != nil {
return fmt.Errorf("chmod %q failed: %s", currentPath, err)
}
return nil
}
// findExistingPrefix finds the prefix of pathname that exists. In addition, it
// returns a list of the remaining directories that don't exist yet.
func findExistingPrefix(base, pathname string) (string, []string, error) {
rel, err := filepath.Rel(base, pathname)
if err != nil {
return base, nil, err
}
dirs := strings.Split(rel, string(filepath.Separator))
// Do OpenAt in a loop to find the first non-existing dir. Resolve symlinks.
// This should be faster than looping through all dirs and calling os.Stat()
// on each of them, as the symlinks are resolved only once with OpenAt().
currentPath := base
fd, err := syscall.Open(currentPath, syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
if err != nil {
return pathname, nil, fmt.Errorf("error opening %s: %s", currentPath, err)
}
defer func() {
if err = syscall.Close(fd); err != nil {
klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
}
}()
for i, dir := range dirs {
// Using O_PATH here will prevent hangs in case user replaces directory with
// fifo
childFD, err := syscall.Openat(fd, dir, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
if os.IsNotExist(err) {
return currentPath, dirs[i:], nil
}
return base, nil, err
}
if err = syscall.Close(fd); err != nil {
klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
}
fd = childFD
currentPath = filepath.Join(currentPath, dir)
}
return pathname, []string{}, nil
}
// doSafeOpen opens the given path and returns its fd.
// Symlinks are disallowed (pathname must already resolve symlinks),
// and the path must be within the base directory.
func doSafeOpen(pathname string, base string) (int, error) {
pathname = filepath.Clean(pathname)
base = filepath.Clean(base)
// Calculate segments to follow
subpath, err := filepath.Rel(base, pathname)
if err != nil {
return -1, err
}
segments := strings.Split(subpath, string(filepath.Separator))
// Assumption: base is the only directory that we have under control.
// Base dir is not allowed to be a symlink.
parentFD, err := syscall.Open(base, nofollowFlags|unix.O_CLOEXEC, 0)
if err != nil {
return -1, fmt.Errorf("cannot open directory %s: %s", base, err)
}
defer func() {
if parentFD != -1 {
if err = syscall.Close(parentFD); err != nil {
klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err)
}
}
}()
childFD := -1
defer func() {
if childFD != -1 {
if err = syscall.Close(childFD); err != nil {
klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err)
}
}
}()
currentPath := base
// Follow the segments one by one using openat() to make
// sure the user cannot change already existing directories into symlinks.
for _, seg := range segments {
var deviceStat unix.Stat_t
currentPath = filepath.Join(currentPath, seg)
if !mount.PathWithinBase(currentPath, base) {
return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base)
}
// Trigger auto mount if it's an auto-mounted directory, ignore error if not a directory.
// Notice the trailing slash is mandatory, see "automount" in openat(2) and open_by_handle_at(2).
unix.Fstatat(parentFD, seg+"/", &deviceStat, unix.AT_SYMLINK_NOFOLLOW)
klog.V(5).Infof("Opening path %s", currentPath)
childFD, err = syscall.Openat(parentFD, seg, openFDFlags|unix.O_CLOEXEC, 0)
if err != nil {
return -1, fmt.Errorf("cannot open %s: %s", currentPath, err)
}
err := unix.Fstat(childFD, &deviceStat)
if err != nil {
return -1, fmt.Errorf("error running fstat on %s with %v", currentPath, err)
}
fileFmt := deviceStat.Mode & syscall.S_IFMT
if fileFmt == syscall.S_IFLNK {
return -1, fmt.Errorf("unexpected symlink found %s", currentPath)
}
// Close parentFD
if err = syscall.Close(parentFD); err != nil {
return -1, fmt.Errorf("closing fd for %q failed: %v", filepath.Dir(currentPath), err)
}
// Set child to new parent
parentFD = childFD
childFD = -1
}
// We made it to the end, return this fd, don't close it
finalFD := parentFD
parentFD = -1
return finalFD, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package types defines types used only by volume components
package types
import (
"errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/mount-utils"
)
var (
// NodeExpansionNotRequired is the annotation key used to indicate that node
// expansion of the volume is not required.
NodeExpansionNotRequired = "volume.kubernetes.io/node-expansion-not-required"
)
// UniquePodName defines the type to key pods off of
type UniquePodName types.UID
// UniquePVCName defines the type to key pvc off
type UniquePVCName types.UID
// GeneratedOperations contains the operation that is created as well as
// supporting functions required for the operation executor
type GeneratedOperations struct {
// Name of operation - could be used for resetting shared exponential backoff
OperationName string
OperationFunc func() (context OperationContext)
EventRecorderFunc func(*error)
CompleteFunc func(CompleteFuncParam)
}
// OperationContext carries the results of an operation: the event-level error,
// the detailed error, and whether the volume was migrated to CSI.
type OperationContext struct {
EventErr error
DetailedErr error
Migrated bool
}
// NewOperationContext creates a new OperationContext
func NewOperationContext(eventErr, detailedErr error, migrated bool) OperationContext {
return OperationContext{
EventErr: eventErr,
DetailedErr: detailedErr,
Migrated: migrated,
}
}
// CompleteFuncParam carries pointers to the operation results for use by CompleteFunc
type CompleteFuncParam struct {
Err *error
Migrated *bool
}
// Run executes the operation and its supporting functions
func (o *GeneratedOperations) Run() (eventErr, detailedErr error) {
var context OperationContext
if o.CompleteFunc != nil {
c := CompleteFuncParam{
Err: &context.DetailedErr,
Migrated: &context.Migrated,
}
defer o.CompleteFunc(c)
}
if o.EventRecorderFunc != nil {
defer o.EventRecorderFunc(&eventErr)
}
// Handle panic, if any, from operationFunc()
defer runtime.RecoverFromPanic(&detailedErr)
context = o.OperationFunc()
return context.EventErr, context.DetailedErr
}
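// A minimal wiring sketch; all closures are hypothetical placeholders for what
// the operation executor normally provides:
//
//	op := GeneratedOperations{
//		OperationName: "volume_mount",
//		OperationFunc: func() OperationContext {
//			return NewOperationContext(nil, nil, false /* migrated */)
//		},
//		EventRecorderFunc: func(err *error) { /* record an event when *err != nil */ },
//		CompleteFunc:      func(p CompleteFuncParam) { /* finish metrics using *p.Err, *p.Migrated */ },
//	}
//	eventErr, detailedErr := op.Run()
//	_, _ = eventErr, detailedErr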
// FailedPrecondition error indicates CSI operation returned failed precondition
// error
type FailedPrecondition struct {
msg string
}
func (err *FailedPrecondition) Error() string {
return err.msg
}
// NewFailedPreconditionError returns a new FailedPrecondition error instance
func NewFailedPreconditionError(msg string) *FailedPrecondition {
return &FailedPrecondition{msg: msg}
}
// IsFailedPreconditionError checks if the given error is of a type that indicates
// the operation failed with a precondition error
func IsFailedPreconditionError(err error) bool {
var failedPreconditionError *FailedPrecondition
return errors.As(err, &failedPreconditionError)
}
// InfeasibleError errors are a subset of OperationFinished or final error
// codes. In terms of CSI, this usually means that the operation is not possible
// in the current state with the given arguments.
type InfeasibleError struct {
msg string
}
func (err *InfeasibleError) Error() string {
return err.msg
}
// NewInfeasibleError returns a new instance of InfeasibleError
func NewInfeasibleError(msg string) *InfeasibleError {
return &InfeasibleError{msg: msg}
}
// IsInfeasibleError checks if the given error is of type InfeasibleError
func IsInfeasibleError(err error) bool {
var infeasibleError *InfeasibleError
return errors.As(err, &infeasibleError)
}
// OperationNotSupported error indicates that the requested volume operation is not supported
type OperationNotSupported struct {
msg string
}
func (err *OperationNotSupported) Error() string {
return err.msg
}
// NewOperationNotSupportedError returns a new OperationNotSupported error instance
func NewOperationNotSupportedError(msg string) *OperationNotSupported {
return &OperationNotSupported{msg: msg}
}
// IsOperationNotSupportedError checks if the given error is of type OperationNotSupported
func IsOperationNotSupportedError(err error) bool {
var operationNotSupportedError *OperationNotSupported
return errors.As(err, &operationNotSupportedError)
}
// TransientOperationFailure indicates operation failed with a transient error
// and may fix itself when retried.
type TransientOperationFailure struct {
msg string
}
func (err *TransientOperationFailure) Error() string {
return err.msg
}
// NewTransientOperationFailure creates an instance of TransientOperationFailure error
func NewTransientOperationFailure(msg string) *TransientOperationFailure {
return &TransientOperationFailure{msg: msg}
}
// UncertainProgressError indicates the operation failed with a non-final error
// and may still be in progress in the background.
type UncertainProgressError struct {
msg string
}
func (err *UncertainProgressError) Error() string {
return err.msg
}
// NewUncertainProgressError creates an instance of UncertainProgressError type
func NewUncertainProgressError(msg string) *UncertainProgressError {
return &UncertainProgressError{msg: msg}
}
// IsOperationFinishedError checks if the given error is of a type that indicates
// the operation is finished with a FINAL error.
func IsOperationFinishedError(err error) bool {
if _, ok := err.(*UncertainProgressError); ok {
return false
}
if _, ok := err.(*TransientOperationFailure); ok {
return false
}
return true
}
// IsFilesystemMismatchError checks if mount failed because requested filesystem
// on PVC and actual filesystem on disk did not match
func IsFilesystemMismatchError(err error) bool {
mountError := mount.MountError{}
return errors.As(err, &mountError) && mountError.Type == mount.FilesystemMismatch
}
// IsUncertainProgressError checks if the given error is of a type that indicates
// the operation might be in progress in the background.
func IsUncertainProgressError(err error) bool {
if _, ok := err.(*UncertainProgressError); ok {
return true
}
return false
}
const (
// VolumeResizerKey is the key that will be used to store the resizer used
// for resizing the PVC. The generated key/value pair will be added
// as an annotation to the PVC.
VolumeResizerKey = "volume.kubernetes.io/storage-resizer"
)
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
utypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/mount-utils"
"k8s.io/utils/io"
utilstrings "k8s.io/utils/strings"
)
const (
readyFileName = "ready"
// ControllerManagedAttachAnnotation is the key of the annotation on Node
// objects that indicates attach/detach operations for the node should be
// managed by the attach/detach controller
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
// name to create the place for volume mounts in the global PD path.
MountsInGlobalPDPath = "mounts"
// VolumeGidAnnotationKey is the key of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
// VolumeDynamicallyCreatedByKey is the key of the annotation on a
// dynamically created PersistentVolume object
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
// kubernetesPluginPathPrefix is the prefix of kubernetes plugin mount paths.
kubernetesPluginPathPrefix = "/plugins/kubernetes.io/"
)
// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
// true if that file exists.
func IsReady(dir string) bool {
readyFile := filepath.Join(dir, readyFileName)
s, err := os.Stat(readyFile)
if err != nil {
return false
}
if !s.Mode().IsRegular() {
klog.Errorf("ready-file is not a file: %s", readyFile)
return false
}
return true
}
// SetReady creates a file called 'ready' in the given
// directory. It logs an error if the file cannot be
// created.
func SetReady(dir string) {
if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) {
klog.Errorf("Can't mkdir %s: %v", dir, err)
return
}
readyFile := filepath.Join(dir, readyFileName)
file, err := os.Create(readyFile)
if err != nil {
klog.Errorf("Can't touch %s: %v", readyFile, err)
return
}
file.Close()
}
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
if secrets.Type != v1.SecretType(volumePluginName) {
return secret, fmt.Errorf("cannot get secret of type %s", volumePluginName)
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
return nil, fmt.Errorf("file path not specified")
}
podDef, err := os.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
}
if len(podDef) == 0 {
return nil, fmt.Errorf("file was empty: %s", filePath)
}
pod := &v1.Pod{}
codec := legacyscheme.Codecs.UniversalDecoder()
if err := apiruntime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err)
}
return pod, nil
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
}
return timeout
}
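// sketchRecycleTimeout is a minimal sketch of the arithmetic above, using a
// hypothetical 100Gi PV: with minimumTimeout=60 and timeoutIncrement=30 the
// result is 100*30 = 3000 seconds, since that exceeds the minimum.
func sketchRecycleTimeout() int64 {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("100Gi"),
},
},
}
return CalculateTimeoutForVolume(60, 30, pv) // 3000
}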
// GetPath returns the path from the mounter, or an error if the path is empty.
func GetPath(mounter volume.Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
return "", fmt.Errorf("path is empty %s", reflect.TypeOf(mounter).String())
}
return path, nil
}
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
if err != nil {
return err
}
return wrapped.TearDownAt(dir)
}
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
pv := spec.PersistentVolume
if pv != nil {
// Use beta annotation first
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
moList := strings.Split(mo, ",")
return JoinMountOptions(moList, options)
}
if len(pv.Spec.MountOptions) > 0 {
return JoinMountOptions(pv.Spec.MountOptions, options)
}
}
return options
}
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.New[string]()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
allMountOptions.Insert(mountOption)
}
}
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return sets.List(allMountOptions)
}
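// sketchJoinMountOptions is a minimal sketch of the dedup behavior above:
// empty user options are dropped, duplicates collapse, and the result comes
// back as a sorted list. The option strings here are hypothetical.
func sketchJoinMountOptions() []string {
user := []string{"ro", "noatime", ""}
system := []string{"ro", "nfsvers=4"}
return JoinMountOptions(user, system) // ["nfsvers=4", "noatime", "ro"]
}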
// ContainsAccessMode returns whether the requested mode is contained by modes
func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// ContainsAllAccessModes returns whether all of the requested modes are contained by modes
func ContainsAllAccessModes(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !ContainsAccessMode(indexedModes, mode) {
return false
}
}
return true
}
// GetWindowsPath converts a path to a Windows path, anchoring rooted paths to the c: drive
func GetWindowsPath(path string) string {
windowsPath := strings.ReplaceAll(path, "/", "\\")
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}
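// sketchGetWindowsPath is a minimal sketch: slashes are flipped to
// backslashes and a rooted path is anchored to the c: drive.
func sketchGetWindowsPath() string {
return GetWindowsPath("/var/lib/kubelet") // `c:\var\lib\kubelet`
}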
// GetUniquePodName returns a unique identifier to reference a pod by
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
return types.UniquePodName(pod.UID)
}
// GetUniqueVolumeName returns a unique name representing the volume/plugin.
// Caller should ensure that volumeName is a name/ID uniquely identifying the
// actual backing device, directory, path, etc. for a particular volume.
// The returned name can be used to uniquely reference the volume, for example,
// to prevent operations (attach/detach or mount/unmount) from being triggered
// on the same volume.
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}
// GetUniqueVolumeNameFromSpecWithPod returns a unique volume name with pod
// name included. This is useful to generate different names for different pods
// on same volume.
func GetUniqueVolumeNameFromSpecWithPod(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
return v1.UniqueVolumeName(
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
}
// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
// name representing the volume defined in the specified volume spec.
// This returned name can be used to uniquely reference the actual backing
// device, directory, path, etc. referenced by the given volumeSpec.
// If the given plugin does not support the volume spec, this returns an error.
func GetUniqueVolumeNameFromSpec(
volumePlugin volume.VolumePlugin,
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
if volumePlugin == nil {
return "", fmt.Errorf(
"volumePlugin should not be nil. volumeSpec.Name=%q",
volumeSpec.Name())
}
volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
if err != nil || volumeName == "" {
return "", fmt.Errorf(
"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
return GetUniqueVolumeName(
volumePlugin.GetPluginName(),
volumeName),
nil
}
// IsPodTerminated checks if pod is terminated
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
// TODO: the guarantees provided by kubelet status are not sufficient to guarantee it's safe to ignore a deleted pod,
// even if everything is notRunning (kubelet does not guarantee that when pod status is waiting that it isn't trying
// to start a container).
return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.InitContainerStatuses) && notRunning(podStatus.ContainerStatuses) && notRunning(podStatus.EphemeralContainerStatuses))
}
// notRunning returns true if every status is terminated or waiting, or the status list
// is empty.
func notRunning(statuses []v1.ContainerStatus) bool {
for _, status := range statuses {
if status.State.Terminated == nil && status.State.Waiting == nil {
return false
}
}
return true
}
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
// the unique volume names.
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
components := strings.SplitN(string(uniqueName), "/", 3)
if len(components) != 3 {
return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
}
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
return pluginName, components[2], nil
}
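// sketchSplitUniqueName is a minimal sketch of the round trip between
// GetUniqueVolumeName and SplitUniqueName, using a hypothetical namespaced
// plugin name and volume ID; the plugin name keeps its internal slash.
func sketchSplitUniqueName() (string, string, error) {
unique := GetUniqueVolumeName("kubernetes.io/iscsi", "volume-id-0001")
// unique == "kubernetes.io/iscsi/volume-id-0001"
return SplitUniqueName(unique) // "kubernetes.io/iscsi", "volume-id-0001", nil
}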
// GetVolumeMode retrieves VolumeMode from pv.
// If the volume doesn't have a PersistentVolume, it's an inline volume and
// volumeMode is returned as Filesystem to keep the existing behavior.
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
return v1.PersistentVolumeFilesystem, nil
}
if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
}
return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
}
// GetPersistentVolumeClaimQualifiedName returns a qualified name for pvc.
func GetPersistentVolumeClaimQualifiedName(claim *v1.PersistentVolumeClaim) string {
return utilstrings.JoinQualifiedName(claim.GetNamespace(), claim.GetName())
}
// CheckVolumeModeFilesystem checks VolumeMode.
// If the mode is Filesystem, return true otherwise return false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
volumeMode, err := GetVolumeMode(volumeSpec)
if err != nil {
return true, err
}
if volumeMode == v1.PersistentVolumeBlock {
return false, nil
}
return true, nil
}
// CheckPersistentVolumeClaimModeBlock checks VolumeMode.
// If the mode is Block, return true otherwise return false.
func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool {
return pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock
}
// IsWindowsUNCPath checks if path is prefixed with \\
// This can be used to skip any processing of paths
// that point to SMB shares, local named pipes and local UNC paths
func IsWindowsUNCPath(goos, path string) bool {
if goos != "windows" {
return false
}
// Check for UNC prefix \\
if strings.HasPrefix(path, `\\`) {
return true
}
return false
}
// IsWindowsLocalPath checks if path is a local path
// prefixed with "/" or "\" like "/foo/bar" or "\foo\bar"
func IsWindowsLocalPath(goos, path string) bool {
if goos != "windows" {
return false
}
if IsWindowsUNCPath(goos, path) {
return false
}
if strings.Contains(path, ":") {
return false
}
if !(strings.HasPrefix(path, `/`) || strings.HasPrefix(path, `\`)) {
return false
}
return true
}
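// sketchWindowsPathChecks is a minimal sketch of how the two predicates above
// partition Windows paths: UNC prefixes, bare local paths, and drive-qualified
// paths are each treated differently. The paths are hypothetical.
func sketchWindowsPathChecks() (bool, bool, bool) {
unc := IsWindowsUNCPath("windows", `\\server\share`) // true
local := IsWindowsLocalPath("windows", `\foo\bar`)   // true
drive := IsWindowsLocalPath("windows", `c:\foo\bar`) // false: contains a colon
return unc, local, drive
}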
// MakeAbsolutePath converts a path to an absolute path according to GOOS
func MakeAbsolutePath(goos, path string) string {
if goos != "windows" {
return filepath.Clean("/" + path)
}
// These are all for windows
// If there is a colon, give up.
if strings.Contains(path, ":") {
return path
}
// If there is a slash, but no drive, add 'c:'
if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
return "c:" + path
}
// Otherwise, add 'c:\'
return "c:\\" + path
}
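// sketchMakeAbsolutePath is a minimal sketch of the GOOS-dependent behavior
// above: on linux the path is cleaned and rooted, on windows a c: drive is
// prepended when no drive is present.
func sketchMakeAbsolutePath() (string, string) {
return MakeAbsolutePath("linux", "var/lib"), // "/var/lib"
MakeAbsolutePath("windows", `foo\bar`) // `c:\foo\bar`
}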
// MapBlockVolume is a utility function to provide a common way of mapping
// block device path for a specified volume and pod. This function should be
// called by volume plugins that implement the volume.BlockVolumeMapper.Map() method.
func MapBlockVolume(
blkUtil volumepathhandler.BlockVolumePathHandler,
devicePath,
globalMapPath,
podVolumeMapPath,
volumeMapName string,
podUID utypes.UID,
) error {
// map devicePath to global node path as bind mount
mapErr := blkUtil.MapDevice(devicePath, globalMapPath, string(podUID), true /* bindMount */)
if mapErr != nil {
return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, globalMapPath:%s, podUID: %s, bindMount: %v: %v",
devicePath, globalMapPath, string(podUID), true, mapErr)
}
// map devicePath to pod volume path
mapErr = blkUtil.MapDevice(devicePath, podVolumeMapPath, volumeMapName, false /* bindMount */)
if mapErr != nil {
return fmt.Errorf("blkUtil.MapDevice failed. devicePath: %s, podVolumeMapPath:%s, volumeMapName: %s, bindMount: %v: %v",
devicePath, podVolumeMapPath, volumeMapName, false, mapErr)
}
// Take a file descriptor lock to keep the block device open. Otherwise, the
// block device could be silently removed and another device attached under the
// same name. The container runtime can't handle this problem, so an fd lock on
// the block device is required to avoid this unexpected condition.
_, mapErr = blkUtil.AttachFileDevice(filepath.Join(globalMapPath, string(podUID)))
if mapErr != nil {
return fmt.Errorf("blkUtil.AttachFileDevice failed. globalMapPath:%s, podUID: %s: %v",
globalMapPath, string(podUID), mapErr)
}
return nil
}
// UnmapBlockVolume is a utility function to provide a common way of unmapping
// block device path for a specified volume and pod. This function should be
// called by volume plugins that implement the volume.BlockVolumeMapper.Map() method.
func UnmapBlockVolume(
blkUtil volumepathhandler.BlockVolumePathHandler,
globalUnmapPath,
podDeviceUnmapPath,
volumeMapName string,
podUID utypes.UID,
) error {
// Release file descriptor lock.
err := blkUtil.DetachFileDevice(filepath.Join(globalUnmapPath, string(podUID)))
if err != nil {
return fmt.Errorf("blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s: %v",
globalUnmapPath, string(podUID), err)
}
// unmap devicePath from pod volume path
unmapDeviceErr := blkUtil.UnmapDevice(podDeviceUnmapPath, volumeMapName, false /* bindMount */)
if unmapDeviceErr != nil {
return fmt.Errorf("blkUtil.DetachFileDevice failed. podDeviceUnmapPath:%s, volumeMapName: %s, bindMount: %v: %v",
podDeviceUnmapPath, volumeMapName, false, unmapDeviceErr)
}
// unmap devicePath from global node path
unmapDeviceErr = blkUtil.UnmapDevice(globalUnmapPath, string(podUID), true /* bindMount */)
if unmapDeviceErr != nil {
return fmt.Errorf("blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s, bindMount: %v: %v",
globalUnmapPath, string(podUID), true, unmapDeviceErr)
}
return nil
}
// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume vs. some other type
// Local means the volume is using storage from the local disk that is managed by kubelet.
// Ephemeral means the lifecycle of the volume is the same as the Pod.
func IsLocalEphemeralVolume(volume v1.Volume) bool {
return volume.GitRepo != nil ||
(volume.EmptyDir != nil && volume.EmptyDir.Medium == v1.StorageMediumDefault) ||
volume.ConfigMap != nil
}
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device.
// To save another sweep through containers, SELinux options are optionally collected too.
func GetPodVolumeNames(pod *v1.Pod, collectSELinuxOptions bool) (mounts sets.Set[string], devices sets.Set[string], seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
mounts = sets.New[string]()
devices = sets.New[string]()
seLinuxContainerContexts = make(map[string][]*v1.SELinuxOptions)
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
var seLinuxOptions *v1.SELinuxOptions
if collectSELinuxOptions {
effectiveContainerSecurity := securitycontext.DetermineEffectiveSecurityContext(pod, container)
if effectiveContainerSecurity != nil {
seLinuxOptions = effectiveContainerSecurity.SELinuxOptions
}
}
if container.VolumeMounts != nil {
for _, mount := range container.VolumeMounts {
mounts.Insert(mount.Name)
if seLinuxOptions != nil && collectSELinuxOptions {
seLinuxContainerContexts[mount.Name] = append(seLinuxContainerContexts[mount.Name], seLinuxOptions.DeepCopy())
}
}
}
if container.VolumeDevices != nil {
for _, device := range container.VolumeDevices {
devices.Insert(device.Name)
}
}
return true
})
return
}
// FsUserFrom returns FsUser of pod, which is determined by the runAsUser
// attributes.
func FsUserFrom(pod *v1.Pod) *int64 {
var fsUser *int64
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
runAsUser, ok := securitycontext.DetermineEffectiveRunAsUser(pod, container)
// A container doesn't specify a user, or the containers specify
// more than one distinct non-root UID.
if !ok || (fsUser != nil && *fsUser != *runAsUser) {
fsUser = nil
return false
}
if fsUser == nil {
fsUser = runAsUser
}
return true
})
return fsUser
}
// HasMountRefs checks if the given mountPath has mountRefs.
// TODO: this is a workaround for the unmount device issue caused by gci mounter.
// In GCI cluster, if gci mounter is used for mounting, the container started by mounter
// script will cause additional mounts created in the container. Since these mounts are
// irrelevant to the original mounts, they should not be considered when checking the
// mount references. The current solution is to filter out those mount paths that contain
// the k8s plugin suffix of original mount path.
func HasMountRefs(mountPath string, mountRefs []string) bool {
// A mountPath typically is like
// /var/lib/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-XXXX
// Mount refs can look like
// /home/somewhere/var/lib/kubelet/plugins/kubernetes.io/some-plugin/...
// but if /var/lib/kubelet is mounted to a different device a ref might be like
// /mnt/some-other-place/kubelet/plugins/kubernetes.io/some-plugin/...
// Neither of the above should be counted as a mount ref as those are handled
// by the kubelet. What we're concerned about is a path like
// /data/local/some/manual/mount
// As unmounting could interrupt usage from that mountpoint.
//
// So instead of looking for the entire /var/lib/... path, the plugins/kubernetes.io/
// suffix is trimmed off and searched for.
//
// If there isn't a /plugins/... path, the whole mountPath is used instead.
pathToFind := mountPath
if i := strings.Index(mountPath, kubernetesPluginPathPrefix); i > -1 {
pathToFind = mountPath[i:]
}
for _, ref := range mountRefs {
if !strings.Contains(ref, pathToFind) {
return true
}
}
return false
}
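// sketchHasMountRefs is a minimal sketch with hypothetical paths: the ref that
// still contains the plugins/kubernetes.io/ suffix is ignored, while the
// manual mount outside the kubelet tree counts as a reference.
func sketchHasMountRefs() bool {
mountPath := "/var/lib/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-1"
refs := []string{
"/mnt/other/kubelet/plugins/kubernetes.io/some-plugin/mounts/volume-1", // ignored
"/data/local/some/manual/mount", // counted
}
return HasMountRefs(mountPath, refs) // true
}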
// IsMultiAttachAllowed checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the
// attacher to fail fast in such cases.
// Please see https://github.com/kubernetes/kubernetes/issues/40669 and https://github.com/kubernetes/kubernetes/pull/40148#discussion_r98055047
func IsMultiAttachAllowed(volumeSpec *volume.Spec) bool {
if volumeSpec == nil {
// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
return true
}
if volumeSpec.Volume != nil {
// Check for volume types which are known to fail slow or cause trouble when trying to multi-attach
if volumeSpec.Volume.AzureDisk != nil ||
volumeSpec.Volume.Cinder != nil {
return false
}
}
// Only if this volume is a persistent volume, we have reliable information on whether it's allowed or not to
// multi-attach. We trust in the individual volume implementations to not allow unsupported access modes
if volumeSpec.PersistentVolume != nil {
// Check for persistent volume types which do not fail when trying to multi-attach
if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 {
// No access mode specified so we don't know for sure. Let the attacher fail if needed
return true
}
// check if this volume is allowed to be attached to multiple PODs/nodes; if yes, return true
for _, accessMode := range volumeSpec.PersistentVolume.Spec.AccessModes {
if accessMode == v1.ReadWriteMany || accessMode == v1.ReadOnlyMany {
return true
}
}
return false
}
// we don't know if it's supported or not and let the attacher fail later in cases it's not supported
return true
}
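// sketchIsMultiAttachAllowed is a minimal sketch: a PV that only offers
// ReadWriteOnce is definitely not multi-attachable, while a ReadWriteMany PV
// may be.
func sketchIsMultiAttachAllowed() (bool, bool) {
rwo := &volume.Spec{PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
},
}}
rwx := &volume.Spec{PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
},
}}
return IsMultiAttachAllowed(rwo), IsMultiAttachAllowed(rwx) // false, true
}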
// IsAttachableVolume checks if the given volumeSpec is an attachable volume or not
func IsAttachableVolume(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) bool {
attachableVolumePlugin, _ := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if attachableVolumePlugin != nil {
volumeAttacher, err := attachableVolumePlugin.NewAttacher()
if err == nil && volumeAttacher != nil {
return true
}
}
return false
}
// IsDeviceMountableVolume checks if the given volumeSpec is a device mountable volume or not
func IsDeviceMountableVolume(volumeSpec *volume.Spec, volumePluginMgr *volume.VolumePluginMgr) bool {
deviceMountableVolumePlugin, _ := volumePluginMgr.FindDeviceMountablePluginBySpec(volumeSpec)
if deviceMountableVolumePlugin != nil {
volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
if err == nil && volumeDeviceMounter != nil {
return true
}
}
return false
}
// GetReliableMountRefs calls mounter.GetMountRefs and retries on IsInconsistentReadError.
// To be used in volume reconstruction of volume plugins that don't have any protection
// against mounting a single volume on multiple nodes (such as attach/detach).
func GetReliableMountRefs(mounter mount.Interface, mountPath string) ([]string, error) {
var paths []string
var lastErr error
err := wait.PollImmediate(10*time.Millisecond, time.Minute, func() (bool, error) {
var err error
paths, err = mounter.GetMountRefs(mountPath)
if io.IsInconsistentReadError(err) {
lastErr = err
return false, nil
}
if err != nil {
return false, err
}
return true, nil
})
if wait.Interrupted(err) {
return nil, lastErr
}
return paths, err
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumepathhandler
import (
"fmt"
"os"
"path/filepath"
"runtime"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilexec "k8s.io/utils/exec"
"k8s.io/apimachinery/pkg/types"
)
const (
losetupPath = "losetup"
ErrDeviceNotFound = "device not found"
)
// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
// MapDevice creates a symbolic link to block device under specified map path
MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error
// UnmapDevice removes a symbolic link to block device under specified map path
UnmapDevice(mapPath string, linkName string, bindMount bool) error
// RemovePath removes a file or directory on specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist returns true if specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// IsDeviceBindMountExist returns true if specified bind mount exists
IsDeviceBindMountExist(mapPath string) (bool, error)
// GetDeviceBindMountRefs searches bind mounts under global map path
GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error)
// FindGlobalMapPathUUIDFromPod finds the {pod uuid} symbolic link under globalMapPath
// corresponding to the map path symlink, and then returns the global map path with the pod uuid.
FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
AttachFileDevice(path string) (string, error)
// DetachFileDevice takes a path to the attached block device and
// detaches it from the block device.
DetachFileDevice(path string) error
// GetLoopDevice returns the full path to the loop device associated with the given path.
GetLoopDevice(path string) (string, error)
}
// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
var volumePathHandler VolumePathHandler
return volumePathHandler
}
// VolumePathHandler is path related operation handlers for block volume
type VolumePathHandler struct {
}
// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string, bindMount bool) error {
// Example of global map path:
// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
// linkName: {podUid}
//
// Example of pod device map path:
// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// linkName: {volumeName}
if len(devicePath) == 0 {
return fmt.Errorf("failed to map device to map path. devicePath is empty")
}
if len(mapPath) == 0 {
return fmt.Errorf("failed to map device to map path. mapPath is empty")
}
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("the map path should be absolute: map path: %s", mapPath)
}
klog.V(5).Infof("MapDevice: devicePath %s", devicePath)
klog.V(5).Infof("MapDevice: mapPath %s", mapPath)
klog.V(5).Infof("MapDevice: linkName %s", linkName)
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot validate map path: %s: %v", mapPath, err)
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
return fmt.Errorf("failed to mkdir %s: %v", mapPath, err)
}
if bindMount {
return mapBindMountDevice(devicePath, mapPath, linkName)
}
return mapSymlinkDevice(devicePath, mapPath, linkName)
}
func mapBindMountDevice(devicePath string, mapPath string, linkName string) error {
// Check bind mount exists
linkPath := filepath.Join(mapPath, string(linkName))
file, err := os.Stat(linkPath)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to stat file %s: %v", linkPath, err)
}
// Create file
newFile, err := os.OpenFile(linkPath, os.O_CREATE|os.O_RDWR, 0750)
if err != nil {
return fmt.Errorf("failed to open file %s: %v", linkPath, err)
}
if err := newFile.Close(); err != nil {
return fmt.Errorf("failed to close file %s: %v", linkPath, err)
}
} else {
// Check if device file
// TODO: Need to check if this device file is actually the expected bind mount
if file.Mode()&os.ModeDevice == os.ModeDevice {
klog.Warningf("Warning: Map skipped because bind mount already exist on the path: %v", linkPath)
return nil
}
klog.Warningf("Warning: file %s is already exist but not mounted, skip creating file", linkPath)
}
// Bind mount file
mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()}
if err := mounter.MountSensitiveWithoutSystemd(devicePath, linkPath, "" /* fsType */, []string{"bind"}, nil); err != nil {
return fmt.Errorf("failed to bind mount devicePath: %s to linkPath %s: %v", devicePath, linkPath, err)
}
return nil
}
func mapSymlinkDevice(devicePath string, mapPath string, linkName string) error {
// Remove the old symbolic link (or file), then create a new one.
// This must be done because the existing symbolic link becomes
// stale across node reboots.
linkPath := filepath.Join(mapPath, string(linkName))
if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
}
return os.Symlink(devicePath, linkPath)
}
// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string, bindMount bool) error {
if len(mapPath) == 0 {
return fmt.Errorf("failed to unmap device from map path. mapPath is empty")
}
klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
klog.V(5).Infof("UnmapDevice: linkName %s", linkName)
if bindMount {
return unmapBindMountDevice(v, mapPath, linkName)
}
return unmapSymlinkDevice(v, mapPath, linkName)
}
func unmapBindMountDevice(v VolumePathHandler, mapPath string, linkName string) error {
// Check bind mount exists
linkPath := filepath.Join(mapPath, string(linkName))
if isMountExist, checkErr := v.IsDeviceBindMountExist(linkPath); checkErr != nil {
return checkErr
} else if !isMountExist {
klog.Warningf("Warning: Unmap skipped because bind mount does not exist on the path: %v", linkPath)
// Check if linkPath still exists
if _, err := os.Stat(linkPath); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to check if path %s exists: %v", linkPath, err)
}
// linkPath has already been removed
return nil
}
// Remove file
if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
}
return nil
}
// Unmount file
mounter := &mount.SafeFormatAndMount{Interface: mount.New(""), Exec: utilexec.New()}
if err := mounter.Unmount(linkPath); err != nil {
return fmt.Errorf("failed to unmount linkPath %s: %v", linkPath, err)
}
// Remove file
if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove file %s: %v", linkPath, err)
}
return nil
}
func unmapSymlinkDevice(v VolumePathHandler, mapPath string, linkName string) error {
// Check symbolic link exists
linkPath := filepath.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
}
return os.Remove(linkPath)
}
// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("failed to remove map path. mapPath is empty")
}
klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove directory %s: %v", mapPath, err)
}
return nil
}
// IsSymlinkExist returns true if the specified file exists and its type is a symbolic link (or an irregular file on Windows).
// If the file doesn't exist, or it exists but is not a symbolic link, return false with no error.
// Otherwise, return false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err != nil {
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err)
}
// If the file exists and is a symbolic link, return true and no error
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return true, nil
}
// go1.23 behavior change: https://github.com/golang/go/issues/63703#issuecomment-2535941458
if (runtime.GOOS == "windows") && (fi.Mode()&os.ModeIrregular != 0) {
return true, nil
}
// If the file exists but is not a symbolic link, return false and no error
return false, nil
}
// IsDeviceBindMountExist returns true if the specified file exists and its type is a device.
// If the file doesn't exist, or it exists but is not a device, return false with no error.
// Otherwise, return false with the error from Lstat().
func (v VolumePathHandler) IsDeviceBindMountExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err != nil {
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, fmt.Errorf("failed to Lstat file %s: %v", mapPath, err)
}
// If the file exists and is a device, return true and no error
if fi.Mode()&os.ModeDevice == os.ModeDevice {
return true, nil
}
// If the file exists but is not a device, return false and no error
return false, nil
}
// GetDeviceBindMountRefs searches bind mounts under global map path
func (v VolumePathHandler) GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error) {
var refs []string
files, err := os.ReadDir(mapPath)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Type()&os.ModeDevice != os.ModeDevice {
continue
}
filename := file.Name()
// TODO: Might need to check if the file is actually linked to devPath
refs = append(refs, filepath.Join(mapPath, filename))
}
klog.V(5).Infof("GetDeviceBindMountRefs: refs %v", refs)
return refs, nil
}
//go:build linux
// +build linux
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumepathhandler
import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
blockDevicePath, err := v.GetLoopDevice(path)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err)
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
klog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", fmt.Errorf("makeLoopDevice failed for path %s: %v", path, err)
}
}
return blockDevicePath, nil
}
// DetachFileDevice takes a path to the attached block device and
// detaches it from the block device.
func (v VolumePathHandler) DetachFileDevice(path string) error {
loopPath, err := v.GetLoopDevice(path)
if err != nil {
if err.Error() == ErrDeviceNotFound {
klog.Warningf("couldn't find loopback device which takes file descriptor lock. Skip detaching device. device path: %q", path)
} else {
return fmt.Errorf("GetLoopDevice failed for path %s: %v", path, err)
}
} else {
if len(loopPath) != 0 {
err = removeLoopDevice(loopPath)
if err != nil {
return fmt.Errorf("removeLoopDevice failed for path %s: %v", path, err)
}
}
}
return nil
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrDeviceNotFound)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
return getLoopDeviceFromSysfs(path)
}
func makeLoopDevice(path string) (string, error) {
args := []string{"-f", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
klog.V(2).Infof("Failed device create command for path: %s %v %s", path, err, out)
return "", fmt.Errorf("losetup %s failed: %v", strings.Join(args, " "), err)
}
return getLoopDeviceFromSysfs(path)
}
// removeLoopDevice removes specified loopback device
func removeLoopDevice(device string) error {
args := []string{"-d", device}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if _, err := os.Stat(device); os.IsNotExist(err) {
return nil
}
klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out)
return fmt.Errorf("losetup -d %s failed: %v", device, err)
}
return nil
}
// getLoopDeviceFromSysfs finds the backing file for a loop
// device from sysfs via "/sys/block/loop*/loop/backing_file".
func getLoopDeviceFromSysfs(path string) (string, error) {
// Resolve the path in case it is a symlink.
realPath, err := filepath.EvalSymlinks(path)
if err != nil {
return "", fmt.Errorf("failed to evaluate path %s: %s", path, err)
}
devices, err := filepath.Glob("/sys/block/loop*")
if err != nil {
return "", fmt.Errorf("failed to list loop devices in sysfs: %s", err)
}
for _, device := range devices {
backingFile := fmt.Sprintf("%s/loop/backing_file", device)
// The contents of this file are the absolute path of "path".
data, err := os.ReadFile(backingFile)
if err != nil {
continue
}
// Return the first match.
backingFilePath := cleanBackingFilePath(string(data))
if backingFilePath == path || backingFilePath == realPath {
return fmt.Sprintf("/dev/%s", filepath.Base(device)), nil
}
}
return "", errors.New(ErrDeviceNotFound)
}
// cleanBackingFilePath removes any trailing substrings that are not part of the backing file path.
func cleanBackingFilePath(path string) string {
// If the block device was deleted, the path will contain a "(deleted)" suffix
path = strings.TrimSpace(path)
path = strings.TrimSuffix(path, "(deleted)")
return strings.TrimSpace(path)
}
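// sketchCleanBackingFilePath is a minimal sketch: sysfs reports a deleted
// backing file with a "(deleted)" suffix, which is stripped along with the
// surrounding whitespace. The path is hypothetical.
func sketchCleanBackingFilePath() string {
return cleanBackingFilePath("/var/lib/kubelet/file.img (deleted)\n")
// returns "/var/lib/kubelet/file.img"
}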
// FindGlobalMapPathUUIDFromPod finds the {pod uuid} bind mount under globalMapPath
// corresponding to the map path symlink, and then returns the global map path with the pod uuid.
// (See pkg/volume/volume.go for details on a global map path and a pod device map path.)
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
//
// globalMapPath/{pod uuid} bind mount: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
var globalMapPathUUID string
// Find symbolic link named pod uuid under plugin dir
err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if (fi.Mode()&os.ModeDevice == os.ModeDevice) && (fi.Name() == string(podUID)) {
klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareBindMountAndSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
}
return nil
})
if err != nil {
return "", fmt.Errorf("FindGlobalMapPathUUIDFromPod failed: %v", err)
}
klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
// compareBindMountAndSymlinks returns whether the global path (bind mount) and
// the pod path (symlink) point to the same device.
// If the check itself fails, it returns an error.
func compareBindMountAndSymlinks(global, pod string) (bool, error) {
// To check if bind mount and symlink are pointing to the same device,
// we need to check if they are pointing to the devices that have same major/minor number.
// Get the major/minor number for global path
devNumGlobal, err := getDeviceMajorMinor(global)
if err != nil {
return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", global, err)
}
// Get the symlinked device from the pod path
devPod, err := os.Readlink(pod)
if err != nil {
return false, fmt.Errorf("failed to readlink path %s: %v", pod, err)
}
// Get the major/minor number for the symlinked device from the pod path
devNumPod, err := getDeviceMajorMinor(devPod)
if err != nil {
return false, fmt.Errorf("getDeviceMajorMinor failed for path %s: %v", devPod, err)
}
klog.V(5).Infof("CompareBindMountAndSymlinks: devNumGlobal %s, devNumPod %s", devNumGlobal, devNumPod)
// Check if the major/minor number are the same
if devNumGlobal == devNumPod {
return true, nil
}
return false, nil
}
// getDeviceMajorMinor returns the major:minor numbers (in hex) for the path,
// e.g. "fc:10".
func getDeviceMajorMinor(path string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(path, &stat); err != nil {
return "", fmt.Errorf("failed to stat path %s: %v", path, err)
}
devNumber := uint64(stat.Rdev)
major := unix.Major(devNumber)
minor := unix.Minor(devNumber)
return fmt.Sprintf("%x:%x", major, minor), nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"errors"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
)
// ValidatePersistentVolume validates PV object for plugin specific validation
// We can put here validations which are specific to volume types.
func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
return checkMountOption(pv)
}
func checkMountOption(pv *api.PersistentVolume) field.ErrorList {
allErrs := field.ErrorList{}
// If the PV is one of these types, we don't return errors,
// since mount options are supported
if pv.Spec.GCEPersistentDisk != nil ||
pv.Spec.AWSElasticBlockStore != nil ||
pv.Spec.Glusterfs != nil ||
pv.Spec.NFS != nil ||
pv.Spec.RBD != nil ||
pv.Spec.Quobyte != nil ||
pv.Spec.ISCSI != nil ||
pv.Spec.Cinder != nil ||
pv.Spec.CephFS != nil ||
pv.Spec.AzureFile != nil ||
pv.Spec.VsphereVolume != nil ||
pv.Spec.AzureDisk != nil ||
pv.Spec.PhotonPersistentDisk != nil {
return allErrs
}
// For any other type, return an error if a mount option is present
if _, ok := pv.Annotations[api.MountOptionAnnotation]; ok {
metaField := field.NewPath("metadata")
allErrs = append(allErrs, field.Forbidden(metaField.Child("annotations", api.MountOptionAnnotation), "may not specify mount options for this volume type"))
}
return allErrs
}
// ValidatePathNoBacksteps will make sure the targetPath does not have any element which is ".."
func ValidatePathNoBacksteps(targetPath string) error {
parts := strings.Split(filepath.ToSlash(targetPath), "/")
for _, item := range parts {
if item == ".." {
return errors.New("must not contain '..'")
}
}
return nil
}
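// sketchValidatePathNoBacksteps is a minimal sketch: a clean relative path
// passes, while any ".." element is rejected. The paths are hypothetical.
func sketchValidatePathNoBacksteps() (error, error) {
ok := ValidatePathNoBacksteps("foo/bar")        // nil
bad := ValidatePathNoBacksteps("foo/../secret") // error: must not contain '..'
return ok, bad
}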
//go:build linux
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"context"
"fmt"
"path/filepath"
"strings"
"syscall"
"os"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
rwMask = os.FileMode(0660)
roMask = os.FileMode(0440)
execMask = os.FileMode(0110)
)
var (
// function that will be used for changing file permissions on linux.
// It is stored here as a variable so that it can be replaced in tests.
filePermissionChangeFunc = changeFilePermission
progressReportDuration = 60 * time.Second
firstEventReportDuration = 30 * time.Second
)
// NewVolumeOwnership returns an interface that can be used to recursively change volume permissions and ownership
func NewVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) VolumeOwnershipChanger {
vo := &VolumeOwnership{
mounter: mounter,
dir: dir,
fsGroup: fsGroup,
fsGroupChangePolicy: fsGroupChangePolicy,
completionCallback: completeFunc,
}
vo.fileCounter.Store(0)
return vo
}
// AddProgressNotifier sets the pod and event recorder used to report progress
// of slow permission and ownership changes.
func (vo *VolumeOwnership) AddProgressNotifier(pod *v1.Pod, recorder record.EventRecorder) VolumeOwnershipChanger {
vo.pod = pod
vo.recorder = recorder
return vo
}
// ChangePermissions recursively changes ownership and permissions of the volume
// directory, honoring the fsGroup change policy.
func (vo *VolumeOwnership) ChangePermissions() error {
if vo.fsGroup == nil {
return nil
}
if skipPermissionChange(vo.mounter, vo.dir, vo.fsGroup, vo.fsGroupChangePolicy) {
klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", vo.dir)
return nil
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
timer := time.AfterFunc(firstEventReportDuration, func() {
vo.initiateProgressMonitor(ctx)
})
defer timer.Stop()
return vo.changePermissionsRecursively()
}
func (vo *VolumeOwnership) initiateProgressMonitor(ctx context.Context) {
klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", vo.dir)
if vo.pod != nil {
go vo.monitorProgress(ctx)
}
}
func (vo *VolumeOwnership) changePermissionsRecursively() error {
err := walkDeep(vo.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
vo.fileCounter.Add(1)
return filePermissionChangeFunc(path, vo.fsGroup, vo.mounter.GetAttributes().ReadOnly, info)
})
if vo.completionCallback != nil {
vo.completionCallback(types.CompleteFuncParam{
Err: &err,
})
}
return err
}
func (vo *VolumeOwnership) monitorProgress(ctx context.Context) {
dirName := getDirnameToReport(vo.dir, string(vo.pod.UID))
msg := fmt.Sprintf("Setting volume ownership for %s is taking longer than expected, consider using OnRootMismatch - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods", dirName)
vo.recorder.Event(vo.pod, v1.EventTypeWarning, events.VolumePermissionChangeInProgress, msg)
ticker := time.NewTicker(progressReportDuration)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
vo.logWarning()
}
}
}
// getDirnameToReport returns everything in the dir string after, and including, podUID
func getDirnameToReport(dir, podUID string) string {
podUIDIndex := strings.Index(dir, podUID)
if podUIDIndex == -1 {
return dir
}
return dir[podUIDIndex:]
}
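// sketchGetDirnameToReport is a minimal sketch with a hypothetical pod UID:
// everything before the UID is dropped so that events and log lines stay short.
func sketchGetDirnameToReport() string {
dir := "/var/lib/kubelet/pods/pod-uid-1234/volumes/kubernetes.io~csi/pvc-1"
return getDirnameToReport(dir, "pod-uid-1234")
// returns "pod-uid-1234/volumes/kubernetes.io~csi/pvc-1"
}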
func (vo *VolumeOwnership) logWarning() {
dirName := getDirnameToReport(vo.dir, string(vo.pod.UID))
msg := fmt.Sprintf("Setting volume ownership for %s, processed %d files.", dirName, vo.fileCounter.Load())
klog.Warning(msg)
vo.recorder.Event(vo.pod, v1.EventTypeWarning, events.VolumePermissionChangeInProgress, msg)
}
func changeFilePermission(filename string, fsGroup *int64, readonly bool, info os.FileInfo) error {
err := os.Lchown(filename, -1, int(*fsGroup))
if err != nil {
klog.ErrorS(err, "Lchown failed", "path", filename)
}
// chmod passes through to the underlying file for symlinks.
// Symlinks have a mode of 777 but this really doesn't mean anything.
// The permissions of the underlying file are what matter.
// However, if one reads the mode of a symlink then chmods the symlink
// with that mode, it changes the mode of the underlying file, overriding
// the defaultMode and permissions initialized by the volume plugin, which
// is not what we want; thus, we skip chmod for symlinks.
if info.Mode()&os.ModeSymlink != 0 {
return nil
}
mask := rwMask
if readonly {
mask = roMask
}
if info.IsDir() {
mask |= os.ModeSetgid
mask |= execMask
}
err = os.Chmod(filename, info.Mode()|mask)
if err != nil {
klog.ErrorS(err, "chmod failed", "path", filename)
}
return nil
}
func skipPermissionChange(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {
if fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch {
klog.V(4).InfoS("Perform recursive ownership change for directory", "path", dir)
return false
}
return !requiresPermissionChange(dir, fsGroup, mounter.GetAttributes().ReadOnly)
}
func requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool {
fsInfo, err := os.Stat(rootDir)
if err != nil {
klog.ErrorS(err, "Performing recursive ownership change on rootDir because reading permissions of root volume failed", "path", rootDir)
return true
}
stat, ok := fsInfo.Sys().(*syscall.Stat_t)
if !ok || stat == nil {
klog.ErrorS(nil, "Performing recursive ownership change on rootDir because reading permissions of root volume failed", "path", rootDir)
return true
}
if int(stat.Gid) != int(*fsGroup) {
klog.V(4).InfoS("Expected group ownership of volume did not match with Gid", "path", rootDir, "GID", stat.Gid)
return true
}
unixPerms := rwMask
if readonly {
unixPerms = roMask
}
// if rootDir is not a directory then we should apply the permission change anyway
if !fsInfo.IsDir() {
return true
}
unixPerms |= execMask
filePerm := fsInfo.Mode().Perm()
// We need to check if actual permissions of root directory is a superset of permissions required by unixPerms.
// This is done by checking if permission bits expected in unixPerms is set in actual permissions of the directory.
// We use bitwise AND operation to check set bits. For example:
// unixPerms: 770, filePerms: 775 : 770&775 = 770 (perms on directory is a superset)
// unixPerms: 770, filePerms: 770 : 770&770 = 770 (perms on directory is a superset)
// unixPerms: 770, filePerms: 750 : 770&750 = 750 (perms on directory is NOT a superset)
// We also need to check if setgid bits are set in permissions of the directory.
if (unixPerms&filePerm != unixPerms) || (fsInfo.Mode()&os.ModeSetgid == 0) {
klog.V(4).InfoS("Performing recursive ownership change on rootDir because of mismatching mode", "path", rootDir)
return true
}
return false
}
// readDirNames reads the directory named by dirname and returns
// a list of directory entries.
// We do not use filepath's readDirNames because, for performance reasons, we do
// not want to sort the files found in a directory before changing their permissions.
func readDirNames(dirname string) ([]string, error) {
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
names, err := f.Readdirnames(-1)
f.Close()
if err != nil {
return nil, err
}
return names, nil
}
// walkDeep can be used to traverse directories and has two minor differences
// from filepath.Walk:
// - List of files/dirs is not sorted for performance reasons
// - callback walkFunc is invoked on root directory after visiting children dirs and files
func walkDeep(root string, walkFunc filepath.WalkFunc) error {
info, err := os.Lstat(root)
if err != nil {
return walkFunc(root, nil, err)
}
return walk(root, info, walkFunc)
}
func walk(path string, info os.FileInfo, walkFunc filepath.WalkFunc) error {
if !info.IsDir() {
return walkFunc(path, info, nil)
}
names, err := readDirNames(path)
if err != nil {
return err
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := os.Lstat(filename)
if err != nil {
if err := walkFunc(filename, fileInfo, err); err != nil {
return err
}
} else {
err = walk(filename, fileInfo, walkFunc)
if err != nil {
return err
}
}
}
return walkFunc(path, info, nil)
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
stdlibAes "crypto/aes"
"context"
"crypto/cipher"
"encoding/hex"
"fmt"
"reflect"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"k8s.io/apiserver/pkg/storage/value"
aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
)
// FuzzAesRoundtrip checks that fuzzed data transformed to storage by an AES
// transformer (GCM or CBC) round-trips back from storage unchanged.
func FuzzAesRoundtrip(data []byte) int {
f := fuzz.NewConsumer(data)
cipherBytes, err := f.GetBytes()
if err != nil {
return 0
}
if len(cipherBytes) == 0 {
return 0
}
randBytes, err := f.GetBytes()
if err != nil {
return 0
}
if len(randBytes) == 0 {
return 0
}
aesBlock, err := stdlibAes.NewCipher(cipherBytes)
if err != nil {
return 0
}
callGCMT, err := f.GetBool()
if err != nil {
return 0
}
if callGCMT {
err = testGCMTTransformer(randBytes, aesBlock)
if err != nil {
panic(err)
}
} else {
err = testCBCTransformer(randBytes, aesBlock)
if err != nil {
panic(err)
}
}
return 1
}
func testGCMTTransformer(randBytes []byte, aesBlock cipher.Block) error {
transformer, err := aestransformer.NewGCMTransformer(aesBlock)
if err != nil {
return err
}
defaultContext := value.DefaultContext("")
ciphertext, err := transformer.TransformToStorage(context.Background(), randBytes, defaultContext)
if err != nil {
return fmt.Errorf("TransformToStorage error = %v\n", err)
}
result, stale, err := transformer.TransformFromStorage(context.Background(), ciphertext, defaultContext)
if err != nil {
return fmt.Errorf("TransformFromStorage error = %v\n", err)
}
if stale {
return fmt.Errorf("unexpected stale output\n")
}
if !reflect.DeepEqual(randBytes, result) {
return fmt.Errorf("Round trip failed len=%d\noriginal:\n%s\nresult:\n%s\n", len(randBytes), hex.Dump(randBytes), hex.Dump(result))
}
return nil
}
func testCBCTransformer(randBytes []byte, aesBlock cipher.Block) error {
transformer := aestransformer.NewCBCTransformer(aesBlock)
defaultContext := value.DefaultContext("")
ciphertext, err := transformer.TransformToStorage(context.Background(), randBytes, defaultContext)
if err != nil {
return fmt.Errorf("TransformToStorage error = %v\n", err)
}
result, stale, err := transformer.TransformFromStorage(context.Background(), ciphertext, defaultContext)
if err != nil {
return fmt.Errorf("TransformFromStorage error = %v\n", err)
}
if stale {
return fmt.Errorf("unexpected stale output\n")
}
if !reflect.DeepEqual(randBytes, result) {
return fmt.Errorf("Round trip failed len=%d\noriginal:\n%s\nresult:\n%s\n", len(randBytes), hex.Dump(randBytes), hex.Dump(result))
}
return nil
}
package fuzzing
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/AdaLogics/go-fuzz-headers/bytesource"
"testing"
admissionv1 "k8s.io/api/admission/v1"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
admissionregv1 "k8s.io/api/admissionregistration/v1"
admissionregv1beta1 "k8s.io/api/admissionregistration/v1beta1"
apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
authenticationv1 "k8s.io/api/authentication/v1"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
authorizationv1 "k8s.io/api/authorization/v1"
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
eventsv1 "k8s.io/api/events/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
flowcontrolv1 "k8s.io/api/flowcontrol/v1"
flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
nodev1 "k8s.io/api/node/v1"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
nodev1beta1 "k8s.io/api/node/v1beta1"
policyv1 "k8s.io/api/policy/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
storagev1 "k8s.io/api/storage/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
"k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
genericfuzzer "k8s.io/apimachinery/pkg/apis/meta/fuzzer"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
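// groups lists the SchemeBuilders for every API group/version that
// FuzzApiRoundtrip registers before round-tripping objects.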
var groups = []runtime.SchemeBuilder{
admissionv1beta1.SchemeBuilder,
admissionv1.SchemeBuilder,
admissionregv1beta1.SchemeBuilder,
admissionregv1.SchemeBuilder,
apiserverinternalv1alpha1.SchemeBuilder,
appsv1beta1.SchemeBuilder,
appsv1beta2.SchemeBuilder,
appsv1.SchemeBuilder,
authenticationv1beta1.SchemeBuilder,
authenticationv1.SchemeBuilder,
authorizationv1beta1.SchemeBuilder,
authorizationv1.SchemeBuilder,
autoscalingv1.SchemeBuilder,
autoscalingv2.SchemeBuilder,
autoscalingv2beta1.SchemeBuilder,
autoscalingv2beta2.SchemeBuilder,
batchv1beta1.SchemeBuilder,
batchv1.SchemeBuilder,
certificatesv1.SchemeBuilder,
certificatesv1beta1.SchemeBuilder,
coordinationv1.SchemeBuilder,
coordinationv1beta1.SchemeBuilder,
corev1.SchemeBuilder,
discoveryv1.SchemeBuilder,
discoveryv1beta1.SchemeBuilder,
eventsv1.SchemeBuilder,
eventsv1beta1.SchemeBuilder,
extensionsv1beta1.SchemeBuilder,
flowcontrolv1.SchemeBuilder,
flowcontrolv1beta1.SchemeBuilder,
flowcontrolv1beta2.SchemeBuilder,
flowcontrolv1beta3.SchemeBuilder,
imagepolicyv1alpha1.SchemeBuilder,
networkingv1.SchemeBuilder,
networkingv1beta1.SchemeBuilder,
nodev1.SchemeBuilder,
nodev1alpha1.SchemeBuilder,
nodev1beta1.SchemeBuilder,
policyv1.SchemeBuilder,
policyv1beta1.SchemeBuilder,
rbacv1alpha1.SchemeBuilder,
rbacv1beta1.SchemeBuilder,
rbacv1.SchemeBuilder,
schedulingv1alpha1.SchemeBuilder,
schedulingv1beta1.SchemeBuilder,
schedulingv1.SchemeBuilder,
storagev1alpha1.SchemeBuilder,
storagev1beta1.SchemeBuilder,
storagev1.SchemeBuilder,
}
func init() {
testing.Init()
}
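// FuzzApiRoundtrip builds a scheme from all groups above and runs the
// AdaLogics external-types roundtrip against it with a fuzzer seeded from
// the input data.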
func FuzzApiRoundtrip(data []byte) int {
t := &testing.T{}
f := fuzz.NewConsumer(data)
seed := bytesource.New(data)
scheme := runtime.NewScheme()
codecs := serializer.NewCodecFactory(scheme)
for _, builder := range groups {
err := builder.AddToScheme(scheme)
if err != nil {
return 0
}
}
fuzzer := fuzzer.FuzzerFor(genericfuzzer.Funcs, seed, codecs)
roundtrip.RoundTripExternalTypesAdaLogics(f, t, scheme, codecs, fuzzer, nil)
return 1
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"context"
"errors"
"fmt"
"sync"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiServerValidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
"k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/util/jsonpath"
kubeopenapispec "k8s.io/kube-openapi/pkg/validation/spec"
)
var scheme = runtime.NewScheme()
var fuzzInitter sync.Once
func setup() {
if err := apiextensions.AddToScheme(scheme); err != nil {
panic(err)
}
}
func setup2() {
// add internal and external types to scheme
if err := apiextensions.AddToScheme(scheme); err != nil {
panic(err)
}
if err := apiextensionsv1.AddToScheme(scheme); err != nil {
panic(err)
}
}
func FuzzApiServerRoundtrip(data []byte) int {
// scheme.Convert in apiextensionsRoundtrip requires both the internal and
// the v1 external types; fuzzInitter is otherwise unused, so this is
// assumed to be the intended place for the one-time scheme setup.
fuzzInitter.Do(setup2)
err := apiextensionsRoundtrip(data)
if err != nil {
panic(err)
}
return 1
}
// apiextensionsRoundtrip checks the conversion to go-openapi types:
// internal -> go-openapi -> JSON -> external -> internal.
// Similar to https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation_test.go#L37
func apiextensionsRoundtrip(data []byte) error {
f := fuzz.NewConsumer(data)
for i := 0; i < 20; i++ {
// fuzz internal types
internal := &apiextensions.JSONSchemaProps{}
err := f.GenerateStruct(&internal)
if err != nil {
return nil
}
// internal -> go-openapi
openAPITypes := &kubeopenapispec.Schema{}
if err := apiServerValidation.ConvertJSONSchemaProps(internal, openAPITypes); err != nil {
return err
}
// go-openapi -> JSON
openAPIJSON, err := json.Marshal(openAPITypes)
if err != nil {
return err
}
// JSON -> in-memory JSON -> stripIntOrStringType -> JSON
var j interface{}
if err := json.Unmarshal(openAPIJSON, &j); err != nil {
return err
}
j = stripIntOrStringType(j)
openAPIJSON, err = json.Marshal(j)
if err != nil {
return err
}
// JSON -> external
external := &apiextensionsv1.JSONSchemaProps{}
if err := json.Unmarshal(openAPIJSON, external); err != nil {
return err
}
// external -> internal
internalRoundTripped := &apiextensions.JSONSchemaProps{}
if err := scheme.Convert(external, internalRoundTripped, nil); err != nil {
return err
}
if !apiequality.Semantic.DeepEqual(internal, internalRoundTripped) {
return fmt.Errorf("%d: expected\n\t%#v, got \n\t%#v", i, internal, internalRoundTripped)
}
fmt.Println("Ran full fuzzer")
}
return nil
}
func stripIntOrStringType(x interface{}) interface{} {
switch x := x.(type) {
case map[string]interface{}:
if t, found := x["type"]; found {
switch t := t.(type) {
case []interface{}:
if len(t) == 2 && t[0] == "integer" && t[1] == "string" && x["x-kubernetes-int-or-string"] == true {
delete(x, "type")
}
}
}
for k := range x {
x[k] = stripIntOrStringType(x[k])
}
return x
case []interface{}:
for i := range x {
x[i] = stripIntOrStringType(x[i])
}
return x
default:
return x
}
}
func newJSONPath(name string, jsonPathExpression string) *jsonpath.JSONPath {
jp := jsonpath.New(name)
_ = jp.Parse(jsonPathExpression)
return jp
}
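// FuzzConvertToTable builds a table convertor from fuzzed column
// definitions and feeds it a fuzzed runtime object.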
func FuzzConvertToTable(data []byte) int {
f := fuzz.NewConsumer(data)
crdColumns := createCRCDs(f)
if len(crdColumns) == 0 {
return 0
}
c, err := tableconvertor.New(crdColumns)
if err != nil {
return 0
}
o, err := getObject(f)
if err != nil {
return 0
}
table, err := c.ConvertToTable(context.Background(), o, nil)
if err != nil {
return 0
}
_ = table
return 1
}
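// createCRCDs generates up to 20 fuzzed CustomResourceColumnDefinitions.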
func createCRCDs(f *fuzz.ConsumeFuzzer) []apiextensionsv1.CustomResourceColumnDefinition {
crcds := make([]apiextensionsv1.CustomResourceColumnDefinition, 0)
noOfCRCDs, err := f.GetInt()
if err != nil {
return crcds
}
for i := 0; i < noOfCRCDs%20; i++ {
crcd := apiextensionsv1.CustomResourceColumnDefinition{}
err = f.GenerateStruct(&crcd)
if err != nil {
return crcds
}
crcds = append(crcds, crcd)
}
return crcds
}
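// getObject returns one of three fuzzed runtime.Objects, selected by a
// fuzzer-provided integer.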
func getObject(f *fuzz.ConsumeFuzzer) (runtime.Object, error) {
emptyObject := &unstructured.Unstructured{}
typeOfObject, err := f.GetInt()
if err != nil {
return emptyObject, err
}
switch typeOfObject % 3 {
case 0:
o := &metav1beta1.PartialObjectMetadata{}
if err := f.GenerateStruct(o); err != nil {
return emptyObject, err
}
return o, nil
case 1:
o := &metav1beta1.PartialObjectMetadataList{}
if err := f.GenerateStruct(o); err != nil {
return emptyObject, err
}
return o, nil
case 2:
o := &unstructured.Unstructured{}
if err := f.GenerateStruct(o); err != nil {
return emptyObject, err
}
return o, nil
}
return emptyObject, fmt.Errorf("could not create object")
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"encoding/json"
"fmt"
fuzz "github.com/AdaLogics/go-fuzz-headers"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
auditpolicy "k8s.io/apiserver/pkg/audit/policy"
)
func FuzzLoadPolicyFromBytes(data []byte) int {
_, _ = auditpolicy.LoadPolicyFromBytes(data)
return 1
}
// tests https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go#L386
func RegistryFuzzer(data []byte) int {
f := fuzz.NewConsumer(data)
in := &metav1.UpdateOptions{}
// set in.DryRun
dryRuns := make([]string, 0)
noOfDryRuns, err := f.GetInt()
if err != nil {
return 0
}
for i := 0; i < noOfDryRuns%30; i++ {
dr, err := f.GetString()
if err != nil {
return 0
}
dryRuns = append(dryRuns, dr)
}
in.DryRun = dryRuns
// set in.FieldManager
fm, err := f.GetString()
if err != nil {
return 0
}
in.FieldManager = fm
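// Stamp in with the CreateOptions kind; newCreateOptionsFromUpdateOptions
// does the same for out, so the two JSON encodings are directly comparable.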
in.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
out := newCreateOptionsFromUpdateOptions(in)
inBytes, err := json.Marshal(in)
if err != nil {
panic(fmt.Sprintf("failed to json.Marshal(in): %v\n", err))
}
outBytes, err := json.Marshal(out)
if err != nil {
panic(fmt.Sprintf("failed to json.Marshal(out): %v\n", err))
}
inMap := map[string]interface{}{}
if err := json.Unmarshal(inBytes, &inMap); err != nil {
panic(fmt.Sprintf("failed to json.Unmarshal(in): %v\n", err))
}
outMap := map[string]interface{}{}
if err := json.Unmarshal(outBytes, &outMap); err != nil {
panic(fmt.Sprintf("failed to json.Unmarshal(out): %v\n", err))
}
// Compare the results.
inBytes, err = json.Marshal(inMap)
if err != nil {
panic(fmt.Sprintf("failed to json.Marshal(in): %v\n", err))
}
outBytes, err = json.Marshal(outMap)
if err != nil {
panic(fmt.Sprintf("failed to json.Marshal(out): %v\n", err))
}
if i, o := string(inBytes), string(outBytes); i != o {
panic(fmt.Sprintf("output != input:\n want: %s\n got: %s\n", i, o))
}
return 1
}
func newCreateOptionsFromUpdateOptions(in *metav1.UpdateOptions) *metav1.CreateOptions {
co := &metav1.CreateOptions{
DryRun: in.DryRun,
FieldManager: in.FieldManager,
}
co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
return co
}
package fuzzing
import (
"fmt"
admissioncel "k8s.io/apiserver/pkg/admission/plugin/cel"
celgo "github.com/google/cel-go/cel"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/cel/environment"
"k8s.io/apiserver/pkg/cel/library"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
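// fakeValidationCondition is a minimal expression accessor that hands the
// fuzzed CEL expression to the compiler and declares a bool return type.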
type fakeValidationCondition struct {
Expression string
}
func (v *fakeValidationCondition) GetExpression() string {
return v.Expression
}
func (v *fakeValidationCondition) ReturnTypes() []*celgo.Type {
return []*celgo.Type{celgo.BoolType}
}
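// FuzzCelCompile compiles a fuzzed CEL expression in an extended base
// environment; note that it reports 1 only when compilation yields an error.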
func FuzzCelCompile(data []byte) int {
f := fuzz.NewConsumer(data)
expr, err := f.GetString()
if err != nil {
//fmt.Println(err)
return 0
}
// Extend the base environment with the test library, which makes the test()
// function available in the storage environment during fuzzing
base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)
extended, err := base.Extend(environment.VersionedOptions{
IntroducedVersion: version.MajorMinor(1, 999),
EnvOptions: []celgo.EnvOption{library.Test()},
})
if err != nil {
fmt.Println(err)
return 0
}
compiler := admissioncel.NewCompiler(extended)
options := admissioncel.OptionalVariableDeclarations{HasParams: true, HasAuthorizer: true}
result := compiler.CompileCELExpression(&fakeValidationCondition{
Expression: expr,
}, options, environment.NewExpressions)
if result.Error != nil {
fmt.Sprintf("Got error: %s", result.Error)
return 1
}
return 0
}
// Copyright 2025 Shielder SpA
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"fmt"
admissioncel "k8s.io/apiserver/pkg/admission/plugin/cel"
validatingadmissionpolicy "k8s.io/apiserver/pkg/admission/plugin/policy/validating"
"k8s.io/apiserver/pkg/cel/environment"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
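// FuzzCelDataCompile stores a fuzzed expression as the composited variable
// "foo" and then compiles a set of fixed validation expressions that
// reference it.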
func FuzzCelDataCompile(data []byte) int {
f := fuzz.NewConsumer(data)
expr, err := f.GetString()
if err != nil {
//fmt.Println("Error getting string:", err)
return 0
}
compositionEnvTemplate, err := admissioncel.NewCompositionEnv(admissioncel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false))
if err != nil {
//fmt.Println("Error creating composition env:", err)
return 0
}
compiler := admissioncel.NewCompositedCompilerFromTemplate(compositionEnvTemplate)
options := admissioncel.OptionalVariableDeclarations{HasParams: true, HasAuthorizer: false}
variable := &validatingadmissionpolicy.Variable{
Name: "foo",
Expression: expr,
}
variables := []admissioncel.NamedExpressionAccessor{variable}
compiler.CompileAndStoreVariables(variables, options, environment.StoredExpressions)
// Compile a set of fixed expressions that reference the stored variable
tests := []struct {
expression string
}{
{expression: "variables.foo > 1"},
{expression: "variables.foo in [1, 2, 3]"},
{expression: "variables.foo.startsWith('s')"},
{expression: "variables.foo.matches('[0-9]+')"},
{expression: "isURL(variables.foo)"},
}
for _, test := range tests {
validation := &validatingadmissionpolicy.ValidationCondition{
Expression: test.expression,
}
result := compiler.CompileCELExpression(validation, options, environment.StoredExpressions)
// there's a bug in CompileCELExpression that returns nil error even if the expression is invalid
if err := result.Error; err != nil {
fmt.Printf("Got error: %s\n", result.Error)
return 1
}
}
return 0
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"fmt"
"reflect"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
)
type A struct {
A int `json:"aa,omitempty"`
B string `json:"ab,omitempty"`
C bool `json:"ac,omitempty"`
}
type F struct {
A string `json:"fa"`
B map[string]string `json:"fb"`
C []A `json:"fc"`
D int `json:"fd"`
E float32 `json:"fe"`
F []string `json:"ff"`
G []int `json:"fg"`
H []bool `json:"fh"`
I []float32 `json:"fi"`
}
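// FuzzUnrecognized feeds the raw input as JSON into the two decode paths
// compared by doUnrecognized, using the F type above as the target.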
func FuzzUnrecognized(data []byte) int {
_ = doUnrecognized(string(data), &F{})
return 1
}
var simpleEquality = conversion.EqualitiesOrDie(
func(a, b time.Time) bool {
return a.UTC() == b.UTC()
},
)
// Verifies that:
// 1) serialized json -> object
// 2) serialized json -> map[string]interface{} -> object
// produces the same object.
func doUnrecognized(jsonData string, item interface{}) error {
unmarshalledObj := reflect.New(reflect.TypeOf(item).Elem()).Interface()
err := json.Unmarshal([]byte(jsonData), unmarshalledObj)
if err != nil {
return err
}
unstr := make(map[string]interface{})
err = json.Unmarshal([]byte(jsonData), &unstr)
if err != nil {
return err
}
newObj := reflect.New(reflect.TypeOf(item).Elem()).Interface()
err = runtime.NewTestUnstructuredConverter(simpleEquality).FromUnstructured(unstr, newObj)
if err != nil {
return err
}
if !reflect.DeepEqual(unmarshalledObj, newObj) {
fmt.Println(cmp.Diff(unmarshalledObj, newObj))
panic(fmt.Sprintf("DeepEqual failed\n"))
}
return nil
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"bytes"
"errors"
"reflect"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// FuzzDeepCopy implements a fuzzer for the logic defined in:
// https://github.com/kubernetes/kubernetes/blob/master/pkg/api/testing/copy_test.go
func FuzzDeepCopy(data []byte) int {
f := fuzz.NewConsumer(data)
// get groupversion
versionIndex, err := f.GetInt()
if err != nil {
return 0
}
groupVersions := []schema.GroupVersion{{Group: "", Version: runtime.APIVersionInternal}, {Group: "", Version: "v1"}}
version := groupVersions[versionIndex%len(groupVersions)]
// pick a kind and do the deepcopy test
knownTypes := legacyscheme.Scheme.KnownTypes(version)
kindIndex, err := f.GetInt()
if err != nil {
return 0
}
kindIndex = kindIndex % len(knownTypes)
// Iterate to the kindIndex-th kind; map iteration order is random, which is
// acceptable for fuzzing. Previously i was never incremented, so either no
// kind or every kind was tested.
i := 0
for kind := range knownTypes {
if i == kindIndex {
doDeepCopyTest(version.WithKind(kind), f)
break
}
i++
}
return 1
}
func doDeepCopyTest(kind schema.GroupVersionKind, f *fuzz.ConsumeFuzzer) error {
item, err := legacyscheme.Scheme.New(kind)
if err != nil {
return err
}
err = f.GenerateStruct(item)
if err != nil {
return err
}
itemCopy := item.DeepCopyObject()
if !reflect.DeepEqual(item, itemCopy) {
panic("Items should be equal but are not.")
}
prefuzzData := &bytes.Buffer{}
if err := legacyscheme.Codecs.LegacyCodec(kind.GroupVersion()).Encode(item, prefuzzData); err != nil {
return errors.New("Could not encode original")
}
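// Mutate only the copy. Re-encoding the original afterwards must produce
// the same bytes, proving the deep copy shares no state with the original.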
err = f.GenerateStruct(itemCopy)
if err != nil {
return err
}
postfuzzData := &bytes.Buffer{}
if err := legacyscheme.Codecs.LegacyCodec(kind.GroupVersion()).Encode(item, postfuzzData); err != nil {
return errors.New("Could not encode the copy")
}
if !bytes.Equal(prefuzzData.Bytes(), postfuzzData.Bytes()) {
panic("Bytes should be equal but are not")
}
return nil
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"sync"
"testing"
"github.com/AdaLogics/go-fuzz-headers/bytesource"
"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
"k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/api/legacyscheme"
apitesting "k8s.io/kubernetes/pkg/api/testing"
controllerFuzzer "k8s.io/kubernetes/pkg/controller/apis/config/fuzzer"
controllerScheme "k8s.io/kubernetes/pkg/controller/apis/config/scheme"
kubeletFuzzer "k8s.io/kubernetes/pkg/kubelet/apis/config/fuzzer"
kubeletScheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
proxyFuzzer "k8s.io/kubernetes/pkg/proxy/apis/config/fuzzer"
proxyScheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
)
var (
initter sync.Once
)
func initForFuzzing() {
testing.Init()
}
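// FuzzRoundTripSpecificKind round-trips a fuzzed internal apps/DaemonSet
// through the legacy scheme codecs.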
func FuzzRoundTripSpecificKind(data []byte) int {
initter.Do(initForFuzzing)
t := &testing.T{}
internalGVK := schema.GroupVersionKind{Group: "apps", Version: runtime.APIVersionInternal, Kind: "DaemonSet"}
seed := bytesource.New(data)
fuzzer := fuzzer.FuzzerFor(apitesting.FuzzerFuncs, seed, legacyscheme.Codecs)
roundtrip.RoundTripSpecificKind(t, internalGVK, legacyscheme.Scheme, legacyscheme.Codecs, fuzzer, nil)
return 1
}
func FuzzControllerRoundtrip(data []byte) int {
initter.Do(initForFuzzing)
t := &testing.T{}
seed := bytesource.New(data)
f := fuzzer.FuzzerFor(controllerFuzzer.Funcs, seed, legacyscheme.Codecs)
codecFactory := runtimeserializer.NewCodecFactory(controllerScheme.Scheme)
roundtrip.RoundTripTypesWithoutProtobuf(t, controllerScheme.Scheme, codecFactory, f, nil)
return 1
}
func FuzzKubeletSchemeRoundtrip(data []byte) int {
initter.Do(initForFuzzing)
t := &testing.T{}
seed := bytesource.New(data)
f := fuzzer.FuzzerFor(kubeletFuzzer.Funcs, seed, legacyscheme.Codecs)
klScheme, _, err := kubeletScheme.NewSchemeAndCodecs()
if err != nil {
return 0
}
codecFactory := runtimeserializer.NewCodecFactory(klScheme)
roundtrip.RoundTripTypesWithoutProtobuf(t, klScheme, codecFactory, f, nil)
return 1
}
func FuzzProxySchemeRoundtrip(data []byte) int {
initter.Do(initForFuzzing)
t := &testing.T{}
seed := bytesource.New(data)
f := fuzzer.FuzzerFor(proxyFuzzer.Funcs, seed, legacyscheme.Codecs)
codecFactory := runtimeserializer.NewCodecFactory(proxyScheme.Scheme)
roundtrip.RoundTripTypesWithoutProtobuf(t, proxyScheme.Scheme, codecFactory, f, nil)
return 1
}
func FuzzRoundTripType(data []byte) int {
initter.Do(initForFuzzing)
t := &testing.T{}
seed := bytesource.New(data)
f := fuzzer.FuzzerFor(apitesting.FuzzerFuncs, seed, legacyscheme.Codecs)
nonRoundTrippableTypes := map[schema.GroupVersionKind]bool{}
roundtrip.RoundTripTypes(t, legacyscheme.Scheme, legacyscheme.Codecs, f, nonRoundTrippableTypes)
return 1
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"bytes"
"context"
"os"
"time"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
apitesting "k8s.io/cri-api/pkg/apis/testing"
"k8s.io/cri-client/pkg/logs"
"k8s.io/klog/v2"
)
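// FuzzReadLogs writes fuzzed bytes to a log file and reads them back through
// the CRI log reader with fuzzed pod log options against a fake runtime.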
func FuzzReadLogs(data []byte) int {
f := fuzz.NewConsumer(data)
logFileBytes, err := f.GetBytes()
if err != nil {
return 0
}
logFile, err := os.Create("/tmp/logfile")
if err != nil {
return 0
}
defer logFile.Close()
_, err = logFile.Write(logFileBytes)
if err != nil {
return 0
}
containerID := "fake-container-id"
podLogOptions := &v1.PodLogOptions{}
err = f.GenerateStruct(podLogOptions)
if err != nil {
return 0
}
fakeRuntimeService := &apitesting.FakeRuntimeService{
Containers: map[string]*apitesting.FakeContainer{
containerID: {
ContainerStatus: runtimeapi.ContainerStatus{
State: runtimeapi.ContainerState_CONTAINER_RUNNING,
},
},
},
}
// If follow is specified, mark the container as exited; otherwise ReadLogs would tail the log indefinitely
if podLogOptions.Follow {
fakeRuntimeService.Containers[containerID].State = runtimeapi.ContainerState_CONTAINER_EXITED
}
opts := logs.NewLogOptions(podLogOptions, time.Now())
stdoutBuf := bytes.NewBuffer(nil)
stderrBuf := bytes.NewBuffer(nil)
logger := klog.Background()
logs.ReadLogs(context.Background(), &logger, "/tmp/logfile", containerID, opts, fakeRuntimeService, stdoutBuf, stderrBuf)
return 1
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"reflect"
"sync"
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/legacyscheme"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
_ "k8s.io/kubernetes/pkg/apis/networking/install"
_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
appsv1SGV "k8s.io/kubernetes/pkg/apis/apps/v1"
appsv1beta1SGV "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
appsv1beta2SGV "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
autoscalingV1SGV "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
autoscalingV2beta1SGV "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1"
autoscalingV2beta2SGV "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2"
batchV1SGV "k8s.io/kubernetes/pkg/apis/batch/v1"
batchV1beta1SGV "k8s.io/kubernetes/pkg/apis/batch/v1beta1"
corev1SGV "k8s.io/kubernetes/pkg/apis/core/v1"
extensionsV1beta1SGV "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
networkingV1SGV "k8s.io/kubernetes/pkg/apis/networking/v1"
networkingV1beta1SGV "k8s.io/kubernetes/pkg/apis/networking/v1beta1"
)
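// totalFuncs is the number of fuzz targets FuzzRoundtrip can dispatch to.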
var (
initLocalTest sync.Once
totalFuncs = 39
)
// initTesting calls testing.Init. It is invoked
// via sync.Once, and only a few of the fuzzers
// need it, so its invocation is isolated to those.
func initTesting() {
testing.Init()
}
/*
FuzzRoundtrip implements a fuzzer for the logic
of the following roundtrip tests:
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/apps/v1/defaults_test.go#L585
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/apps/v1beta1/defaults_test.go#L199
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/apps/v1beta2/defaults_test.go#L551
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/autoscaling/v1/defaults_test.go#L137
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/autoscaling/v2beta1/defaults_test.go#L176
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/autoscaling/v2beta2/defaults_test.go#L306
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/batch/v1/defaults_test.go#L310
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/batch/v1beta1/defaults_test.go#L93
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/v1/defaults_test.go#L396
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/extensions/v1beta1/defaults_test.go#L736
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/networking/v1/defaults_test.go#L367
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/networking/v1beta1/defaults_test.go#L82
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/scheduling/v1/defaults_test.go#L35
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/scheduling/v1alpha1/defaults_test.go#L35
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/scheduling/v1beta1/defaults_test.go#L35
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/storage/v1/defaults_test.go#L33
- https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/storage/v1beta1/defaults_test.go#L33
*/
func FuzzRoundtrip(data []byte) int {
if len(data) < 10 {
return 0
}
op := int(data[0])
inputData := data[1:]
switch op % totalFuncs {
case 0:
return FuzzAppsV1DaemonSet(inputData)
case 1:
return FuzzAppsV1StatefulSet(inputData)
case 2:
return FuzzAppsV1Deployment(inputData)
case 3:
return FuzzAppsV1beta1(inputData)
case 4:
return FuzzAppsV1beta2StatefulSet(inputData)
case 5:
return FuzzAppsV1beta2Deployment(inputData)
case 6:
return FuzzAppsV1beta2ReplicaSet(inputData)
case 7:
return FuzzAppsV1beta2DaemonSet(inputData)
case 8:
return FuzzAutoscalingV1HorizontalPodAutoscaler(inputData)
case 9:
return FuzzAutoscalingV2beta1HorizontalPodAutoscaler(inputData)
case 10:
return FuzzAutoscalingV2beta2HorizontalPodAutoscaler(inputData)
case 11:
return FuzzBatchV1Job(inputData)
case 12:
return FuzzBatchV1CronJob(inputData)
case 13:
return FuzzBatchV1beta1CronJob(inputData)
case 14:
return FuzzCoreV1ReplicationController(inputData)
case 15:
return FuzzCoreV1Pod(inputData)
case 16:
return FuzzCoreV1Secret(inputData)
case 17:
return FuzzCoreV1PersistentVolume(inputData)
case 18:
return FuzzCoreV1PersistentVolumeClaim(inputData)
case 19:
return FuzzCoreV1Endpoints(inputData)
case 20:
return FuzzCoreV1Service(inputData)
case 21:
return FuzzCoreV1Namespace(inputData)
case 22:
return FuzzCoreV1Node(inputData)
case 23:
// duplicate of case 19, kept to preserve the original dispatch table
return FuzzCoreV1Endpoints(inputData)
case 24:
return FuzzCoreV1LimitRange(inputData)
case 25:
return FuzzExtensionsV1beta1DaemonSet(inputData)
case 26:
return FuzzExtensionsV1beta1Deployment(inputData)
case 27:
return FuzzExtensionsV1beta1ReplicaSet(inputData)
case 28:
return FuzzExtensionsV1beta1NetworkPolicy(inputData)
case 29:
return FuzzNetworkingV1NetworkPolicy(inputData)
case 30:
return FuzzNetworkingV1IngressClass(inputData)
case 31:
return FuzzNetworkingV1beta1Ingress(inputData)
case 32:
return FuzzSchedulingV1PriorityClass(inputData)
case 33:
return FuzzSchedulingV1alpha1PriorityClass(inputData)
case 34:
return FuzzSchedulingV1beta1PriorityClass(inputData)
case 35:
return FuzzStorageV1CSIDriver(inputData)
case 36:
return FuzzStorageV1StorageClass(inputData)
case 37:
return FuzzStorageV1beta1CSIDriver(inputData)
case 38:
return FuzzStorageV1beta1StorageClass(inputData)
}
return 1
}
func FuzzAppsV1DaemonSet(data []byte) int {
o := &appsv1.DaemonSet{}
sgv := appsv1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1StatefulSet(data []byte) int {
o := &appsv1.StatefulSet{}
sgv := appsv1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1Deployment(data []byte) int {
o := &appsv1.Deployment{}
sgv := appsv1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1beta1(data []byte) int {
o := &appsv1beta1.Deployment{}
sgv := appsv1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1beta2StatefulSet(data []byte) int {
o := &appsv1beta2.StatefulSet{}
sgv := appsv1beta2SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1beta2Deployment(data []byte) int {
o := &appsv1beta2.Deployment{}
sgv := appsv1beta2SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1beta2ReplicaSet(data []byte) int {
o := &appsv1beta2.ReplicaSet{}
sgv := appsv1beta2SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAppsV1beta2DaemonSet(data []byte) int {
o := &appsv1beta2.DaemonSet{}
sgv := appsv1beta2SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAutoscalingV1HorizontalPodAutoscaler(data []byte) int {
o := &autoscalingv1.HorizontalPodAutoscaler{}
sgv := autoscalingV1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAutoscalingV2beta1HorizontalPodAutoscaler(data []byte) int {
o := &autoscalingv2beta1.HorizontalPodAutoscaler{}
sgv := autoscalingV2beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzAutoscalingV2beta2HorizontalPodAutoscaler(data []byte) int {
o := &autoscalingv2beta2.HorizontalPodAutoscaler{}
sgv := autoscalingV2beta2SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzBatchV1Job(data []byte) int {
o := &batchv1.Job{}
sgv := batchV1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzBatchV1CronJob(data []byte) int {
o := &batchv1.CronJob{}
sgv := batchV1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
// Scheme IS registered
func FuzzBatchV1beta1CronJob(data []byte) int {
o := &batchv1beta1.CronJob{}
sgv := batchV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1ReplicationController(data []byte) int {
o := &v1.ReplicationController{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Pod(data []byte) int {
o := &v1.Pod{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Secret(data []byte) int {
o := &v1.Secret{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1PersistentVolume(data []byte) int {
o := &v1.PersistentVolume{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1PersistentVolumeClaim(data []byte) int {
o := &v1.PersistentVolumeClaim{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Endpoints(data []byte) int {
o := &v1.Endpoints{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Service(data []byte) int {
o := &v1.Service{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Namespace(data []byte) int {
o := &v1.Namespace{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1Node(data []byte) int {
o := &v1.Node{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzCoreV1LimitRange(data []byte) int {
o := &v1.LimitRange{}
sgv := corev1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzExtensionsV1beta1DaemonSet(data []byte) int {
o := &extensionsv1beta1.DaemonSet{}
sgv := extensionsV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzExtensionsV1beta1Deployment(data []byte) int {
o := &extensionsv1beta1.Deployment{}
sgv := extensionsV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzExtensionsV1beta1ReplicaSet(data []byte) int {
o := &extensionsv1beta1.ReplicaSet{}
sgv := extensionsV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzExtensionsV1beta1NetworkPolicy(data []byte) int {
o := &extensionsv1beta1.NetworkPolicy{}
sgv := extensionsV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzNetworkingV1NetworkPolicy(data []byte) int {
o := &networkingv1.NetworkPolicy{}
sgv := networkingV1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzNetworkingV1IngressClass(data []byte) int {
o := &networkingv1.IngressClass{}
sgv := networkingV1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzNetworkingV1beta1Ingress(data []byte) int {
o := &networkingv1beta1.Ingress{}
sgv := networkingV1beta1SGV.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzSchedulingV1PriorityClass(data []byte) int {
o := &schedulingv1.PriorityClass{}
sgv := schedulingv1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzSchedulingV1alpha1PriorityClass(data []byte) int {
o := &schedulingv1alpha1.PriorityClass{}
sgv := schedulingv1alpha1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzSchedulingV1beta1PriorityClass(data []byte) int {
o := &schedulingv1beta1.PriorityClass{}
sgv := schedulingv1beta1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzStorageV1CSIDriver(data []byte) int {
initLocalTest.Do(initTesting)
o := &storagev1.CSIDriver{}
sgv := storagev1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzStorageV1StorageClass(data []byte) int {
initLocalTest.Do(initTesting)
o := &storagev1.StorageClass{}
sgv := storagev1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzStorageV1beta1CSIDriver(data []byte) int {
initLocalTest.Do(initTesting)
o := &storagev1beta1.CSIDriver{}
sgv := storagev1beta1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
func FuzzStorageV1beta1StorageClass(data []byte) int {
initLocalTest.Do(initTesting)
o := &storagev1beta1.StorageClass{}
sgv := storagev1beta1.SchemeGroupVersion
prepAndDoRoundtrip(sgv, o, data)
return 1
}
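// prepAndDoRoundtrip fills o from the fuzzer input, round-trips it through
// the legacy codecs, and additionally decodes raw fuzzer bytes that were
// not produced by an encoder.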
func prepAndDoRoundtrip(sgv schema.GroupVersion, o runtime.Object, data []byte) error {
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(o)
if err != nil {
return err
}
// First test:
// Do a roundtrip
_ = roundTrip(runtime.Object(o), sgv)
// Second test:
// Call only runtime.Decode with the Legacy codec and the UniversalDecoder
// and some input data that hasn't been derived from encoding.
data2, err := f.GetBytes()
if err != nil {
return err
}
_, _ = runtime.Decode(legacyscheme.Codecs.LegacyCodec(sgv), data2)
_, _ = runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data2)
return nil
}
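// roundTrip encodes obj with the legacy codec for sgv, decodes it with the
// universal decoder, and converts the result back into obj's concrete type,
// panicking if decode or convert fails.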
func roundTrip(obj runtime.Object, sgv schema.GroupVersion) runtime.Object {
data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(sgv), obj)
if err != nil {
return obj
}
obj2, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data)
if err != nil {
panic(err)
}
obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
err = legacyscheme.Scheme.Convert(obj2, obj3, nil)
if err != nil {
panic(err)
}
return obj3
}
// Copyright 2021 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package fuzzing
import (
"context"
fuzz "github.com/AdaLogics/go-fuzz-headers"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensionsValidation "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/apis/audit"
auditValidation "k8s.io/apiserver/pkg/apis/audit/validation"
"k8s.io/kubernetes/pkg/apis/apiserverinternal"
apiServerInternalValidation "k8s.io/kubernetes/pkg/apis/apiserverinternal/validation"
"k8s.io/kubernetes/pkg/apis/apps"
appsValidation "k8s.io/kubernetes/pkg/apis/apps/validation"
"k8s.io/kubernetes/pkg/apis/autoscaling"
autoscalingValidation "k8s.io/kubernetes/pkg/apis/autoscaling/validation"
"k8s.io/kubernetes/pkg/apis/batch"
batchValidation "k8s.io/kubernetes/pkg/apis/batch/validation"
"k8s.io/kubernetes/pkg/apis/certificates"
certificatesValidation "k8s.io/kubernetes/pkg/apis/certificates/validation"
"k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/policy"
policyValidation "k8s.io/kubernetes/pkg/apis/policy/validation"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacValidation "k8s.io/kubernetes/pkg/apis/rbac/validation"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
const maxFuzzers = 50
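// FuzzAllValidation dispatches the first input byte to one of the
// validation fuzzers below.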
func FuzzAllValidation(data []byte) int {
if len(data) < 10 {
return 0
}
op := int(data[0]) % maxFuzzers
inputData := data[1:]
switch op {
case 0:
return FuzzValidatePodCreate(inputData)
case 1:
return FuzzValidatePodUpdate(inputData)
case 2:
return FuzzValidatePodStatusUpdate(inputData)
case 3:
return FuzzValidatePodEphemeralContainersUpdate(inputData)
case 4:
return FuzzValidatePersistentVolumeUpdate(inputData)
case 5:
return FuzzValidatePersistentVolumeClaimUpdate(inputData)
case 6:
return FuzzValidateServiceCreate(inputData)
case 7:
return FuzzValidateServiceUpdate(inputData)
case 8:
return FuzzValidateEndpointsCreate(inputData)
case 9:
return FuzzValidateNodeUpdate(inputData)
case 10:
return FuzzValidateLimitRange(inputData)
case 11:
return FuzzValidateStatefulSet(inputData)
case 12:
return FuzzValidateStatefulSetUpdate(inputData)
case 13:
return FuzzValidateDaemonSet(inputData)
case 14:
return FuzzValidateDaemonSetUpdate(inputData)
case 15:
return FuzzValidateDeployment(inputData)
case 16:
return FuzzValidateDeploymentUpdate(inputData)
case 17:
return FuzzValidateJob(inputData)
case 18:
return FuzzValidateJobUpdate(inputData)
case 19:
return FuzzValidateCronJobCreate(inputData)
case 20:
return FuzzValidateCronJobUpdate(inputData)
case 21:
return FuzzValidateScale(inputData)
case 22:
return FuzzValidateHorizontalPodAutoscaler(inputData)
case 23:
return FuzzValidateHorizontalPodAutoscalerUpdate(inputData)
case 24:
// duplicate of case 15, kept to preserve the original dispatch table
return FuzzValidateDeployment(inputData)
case 25:
return FuzzValidatePodDisruptionBudget(inputData)
case 26:
return FuzzValidatePodDisruptionBudgetStatusUpdate(inputData)
// ops 27-30 are currently unassigned and fall through to the final return 0
case 31:
return FuzzValidateCertificateSigningRequestCreate(inputData)
case 32:
return FuzzValidateCertificateSigningRequestUpdate(inputData)
case 33:
return FuzzValidateCertificateSigningRequestStatusUpdate(inputData)
case 34:
return FuzzValidateCertificateSigningRequestApprovalUpdate(inputData)
case 35:
return FuzzValidateCustomResourceDefinition(inputData)
case 36:
return FuzzValidateStorageVersion(inputData)
case 37:
return FuzzValidateStorageVersionName(inputData)
case 38:
return FuzzValidateStorageVersionStatusUpdate(inputData)
case 39:
return FuzzValidatePolicy(inputData)
case 40:
return FuzzLoadPolicyFromBytes(inputData)
case 41:
return FuzzValidateRoleUpdate(inputData)
case 42:
return FuzzValidateClusterRoleUpdate(inputData)
case 43:
return FuzzValidateRoleBindingUpdate(inputData)
case 44:
return FuzzValidateClusterRoleBindingUpdate(inputData)
case 45:
return FuzzCompactRules(inputData)
case 46:
return FuzzValidateResourceQuotaSpec(inputData)
case 47:
return FuzzValidateResourceQuotaUpdate(inputData)
case 48:
return FuzzValidateResourceQuotaStatusUpdate(inputData)
case 49:
return FuzzValidateServiceStatusUpdate(inputData)
}
return 0
}
//// Pod validation
func FuzzValidatePodCreate(data []byte) int {
f := fuzz.NewConsumer(data)
pod := &core.Pod{}
err := f.GenerateStruct(pod)
if err != nil {
return 0
}
if errs := validation.ValidatePodCreate(pod, validation.PodValidationOptions{}); len(errs) > 0 {
return 0
}
// Now test conversion
v1Pod := &v1.Pod{}
_ = k8s_api_v1.Convert_core_Pod_To_v1_Pod(pod, v1Pod, nil)
return 1
}
func FuzzValidatePodUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
pod1 := &core.Pod{}
err := f.GenerateStruct(pod1)
if err != nil {
return 0
}
pod2 := &core.Pod{}
err = f.GenerateStruct(pod2)
if err != nil {
return 0
}
_ = validation.ValidatePodUpdate(pod1, pod2, validation.PodValidationOptions{})
return 1
}
func FuzzValidatePodStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
pod1 := &core.Pod{}
err := f.GenerateStruct(pod1)
if err != nil {
return 0
}
pod2 := &core.Pod{}
err = f.GenerateStruct(pod2)
if err != nil {
return 0
}
_ = validation.ValidatePodStatusUpdate(pod1, pod2, validation.PodValidationOptions{})
return 1
}
func FuzzValidatePodEphemeralContainersUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
pod1 := &core.Pod{}
err := f.GenerateStruct(pod1)
if err != nil {
return 0
}
pod2 := &core.Pod{}
err = f.GenerateStruct(pod2)
if err != nil {
return 0
}
_ = validation.ValidatePodEphemeralContainersUpdate(pod1, pod2, validation.PodValidationOptions{})
return 1
}
// Persistent volume validation
func FuzzValidatePersistentVolumeUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
pv1 := &core.PersistentVolume{}
err := f.GenerateStruct(pv1)
if err != nil {
return 0
}
pv2 := &core.PersistentVolume{}
err = f.GenerateStruct(pv2)
if err != nil {
return 0
}
opts := validation.PersistentVolumeSpecValidationOptions{}
_ = validation.ValidatePersistentVolumeUpdate(pv1, pv2, opts)
return 1
}
// Persistent Volume claim validation
func FuzzValidatePersistentVolumeClaimUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
pvc1 := &core.PersistentVolumeClaim{}
err := f.GenerateStruct(pvc1)
if err != nil {
return 0
}
pvc2 := &core.PersistentVolumeClaim{}
err = f.GenerateStruct(pvc2)
if err != nil {
return 0
}
opts := validation.PersistentVolumeClaimSpecValidationOptions{}
_ = validation.ValidatePersistentVolumeClaimUpdate(pvc1, pvc2, opts)
return 1
}
//// Service validation
func FuzzValidateServiceCreate(data []byte) int {
service := &core.Service{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(service)
if err != nil {
return 0
}
_ = validation.ValidateServiceCreate(service)
return 1
}
func FuzzValidateServiceUpdate(data []byte) int {
service1 := &core.Service{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(service1)
if err != nil {
return 0
}
service2 := &core.Service{}
err = f.GenerateStruct(service2)
if err != nil {
return 0
}
_ = validation.ValidateServiceUpdate(service1, service2)
return 1
}
//// Endpoints validation
func FuzzValidateEndpointsCreate(data []byte) int {
endpoints := &core.Endpoints{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(endpoints)
if err != nil {
return 0
}
_ = validation.ValidateEndpointsCreate(endpoints)
return 1
}
// Node validation
func FuzzValidateNodeUpdate(data []byte) int {
node1 := &core.Node{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(node1)
if err != nil {
return 0
}
node2 := &core.Node{}
err = f.GenerateStruct(node2)
if err != nil {
return 0
}
_ = validation.ValidateNodeUpdate(node1, node2)
return 1
}
// Limit Range validation
func FuzzValidateLimitRange(data []byte) int {
limitRange := &core.LimitRange{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(limitRange)
if err != nil {
return 0
}
_ = validation.ValidateLimitRange(limitRange)
return 1
}
// apps validation
func FuzzValidateStatefulSet(data []byte) int {
//fmt.Println("Calling FuzzValidateStatefulSet")
statefulset := &apps.StatefulSet{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(statefulset)
if err != nil {
return 0
}
if errs := appsValidation.ValidateStatefulSet(statefulset, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateStatefulSetUpdate(data []byte) int {
//fmt.Println("Calling FuzzValidateStatefulSetUpdate")
f := fuzz.NewConsumer(data)
statefulset1 := &apps.StatefulSet{}
err := f.GenerateStruct(statefulset1)
if err != nil {
return 0
}
statefulset2 := &apps.StatefulSet{}
err = f.GenerateStruct(statefulset2)
if err != nil {
return 0
}
opts := validation.PodValidationOptions{}
err = f.GenerateStruct(&opts)
if err != nil {
return 0
}
if errs := appsValidation.ValidateStatefulSetUpdate(statefulset1, statefulset2, opts); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateDaemonSet(data []byte) int {
//fmt.Println("Calling FuzzValidateDaemonSet")
daemonset := &apps.DaemonSet{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(daemonset)
if err != nil {
return 0
}
if errs := appsValidation.ValidateDaemonSet(daemonset, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateDaemonSetUpdate(data []byte) int {
//fmt.Println("Calling FuzzValidateDaemonSetUpdate")
f := fuzz.NewConsumer(data)
daemonset1 := &apps.DaemonSet{}
err := f.GenerateStruct(daemonset1)
if err != nil {
return 0
}
daemonset2 := &apps.DaemonSet{}
err = f.GenerateStruct(daemonset2)
if err != nil {
return 0
}
if errs := appsValidation.ValidateDaemonSetUpdate(daemonset1, daemonset2, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateDeployment(data []byte) int {
//fmt.Println("Calling FuzzValidateDeployment")
deployment := &apps.Deployment{}
f := fuzz.NewConsumer(data)
err := f.GenerateStruct(deployment)
if err != nil {
return 0
}
if errs := appsValidation.ValidateDeployment(deployment, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateDeploymentUpdate(data []byte) int {
//fmt.Println("Calling FuzzValidateDeploymentUpdate")
f := fuzz.NewConsumer(data)
deployment1 := &apps.Deployment{}
err := f.GenerateStruct(deployment1)
if err != nil {
return 0
}
deployment2 := &apps.Deployment{}
err = f.GenerateStruct(deployment2)
if err != nil {
return 0
}
if errs := appsValidation.ValidateDeploymentUpdate(deployment1, deployment2, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
//fmt.Println(err)
_ = err
}
return 0
}
return 1
}
// batch validation
func FuzzValidateJob(data []byte) int {
f := fuzz.NewConsumer(data)
job := &batch.Job{}
err := f.GenerateStruct(job)
if err != nil {
return 0
}
if errs := batchValidation.ValidateJob(job, batchValidation.JobValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateJobUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
job1 := &batch.Job{}
err := f.GenerateStruct(job1)
if err != nil {
return 0
}
job2 := &batch.Job{}
err = f.GenerateStruct(job2)
if err != nil {
return 0
}
if errs := batchValidation.ValidateJobUpdate(job1, job2, batchValidation.JobValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateCronJobCreate(data []byte) int {
f := fuzz.NewConsumer(data)
cronjob := &batch.CronJob{}
err := f.GenerateStruct(cronjob)
if err != nil {
return 0
}
if errs := batchValidation.ValidateCronJobCreate(cronjob, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateCronJobUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
cronjob1 := &batch.CronJob{}
err := f.GenerateStruct(cronjob1)
if err != nil {
return 0
}
cronjob2 := &batch.CronJob{}
err = f.GenerateStruct(cronjob2)
if err != nil {
return 0
}
if errs := batchValidation.ValidateCronJobUpdate(cronjob1, cronjob2, validation.PodValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
// autoscaling validation
func FuzzValidateScale(data []byte) int {
f := fuzz.NewConsumer(data)
scale := &autoscaling.Scale{}
err := f.GenerateStruct(scale)
if err != nil {
return 0
}
if errs := autoscalingValidation.ValidateScale(scale); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateHorizontalPodAutoscaler(data []byte) int {
f := fuzz.NewConsumer(data)
autoscaler := &autoscaling.HorizontalPodAutoscaler{}
err := f.GenerateStruct(autoscaler)
if err != nil {
return 0
}
opts := &autoscalingValidation.HorizontalPodAutoscalerSpecValidationOptions{}
err = f.GenerateStruct(opts)
if err != nil {
return 0
}
if errs := autoscalingValidation.ValidateHorizontalPodAutoscaler(autoscaler, *opts); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidateHorizontalPodAutoscalerUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
autoscaler1 := &autoscaling.HorizontalPodAutoscaler{}
err := f.GenerateStruct(autoscaler1)
if err != nil {
return 0
}
autoscaler2 := &autoscaling.HorizontalPodAutoscaler{}
err = f.GenerateStruct(autoscaler2)
if err != nil {
return 0
}
opts := &autoscalingValidation.HorizontalPodAutoscalerSpecValidationOptions{}
err = f.GenerateStruct(opts)
if err != nil {
return 0
}
if errs := autoscalingValidation.ValidateHorizontalPodAutoscalerUpdate(autoscaler1, autoscaler2, *opts); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
// policy validation
func FuzzValidatePodDisruptionBudget(data []byte) int {
f := fuzz.NewConsumer(data)
pdb := &policy.PodDisruptionBudget{}
err := f.GenerateStruct(pdb)
if err != nil {
return 0
}
if errs := policyValidation.ValidatePodDisruptionBudget(pdb, policyValidation.PodDisruptionBudgetValidationOptions{}); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
func FuzzValidatePodDisruptionBudgetStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
status := policy.PodDisruptionBudgetStatus{}
err := f.GenerateStruct(&status)
if err != nil {
return 0
}
oldStatus := policy.PodDisruptionBudgetStatus{}
err = f.GenerateStruct(&oldStatus)
if err != nil {
return 0
}
if errs := policyValidation.ValidatePodDisruptionBudgetStatusUpdate(status, oldStatus, field.NewPath("status"), policy.SchemeGroupVersion); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
if errs := policyValidation.ValidatePodDisruptionBudgetStatusUpdate(status, oldStatus, field.NewPath("status"), policyv1beta1.SchemeGroupVersion); len(errs) > 0 {
for _, err := range errs {
_ = err
//fmt.Println(err)
}
return 0
}
return 1
}
// certificates
func FuzzValidateCertificateSigningRequestCreate(data []byte) int {
f := fuzz.NewConsumer(data)
csr := &certificates.CertificateSigningRequest{}
err := f.GenerateStruct(csr)
if err != nil {
return 0
}
//fmt.Println(csr)
_ = certificatesValidation.ValidateCertificateSigningRequestCreate(csr)
return 1
}
func FuzzValidateCertificateSigningRequestUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
csr1 := &certificates.CertificateSigningRequest{}
err := f.GenerateStruct(csr1)
if err != nil {
return 0
}
//fmt.Println(csr1)
csr2 := &certificates.CertificateSigningRequest{}
err = f.GenerateStruct(csr2)
if err != nil {
return 0
}
//fmt.Println(csr2)
_ = certificatesValidation.ValidateCertificateSigningRequestUpdate(csr1, csr2)
return 1
}
func FuzzValidateCertificateSigningRequestStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
csr1 := &certificates.CertificateSigningRequest{}
err := f.GenerateStruct(csr1)
if err != nil {
return 0
}
//fmt.Println(csr1)
csr2 := &certificates.CertificateSigningRequest{}
err = f.GenerateStruct(csr2)
if err != nil {
return 0
}
//fmt.Println(csr2)
_ = certificatesValidation.ValidateCertificateSigningRequestStatusUpdate(csr1, csr2)
return 1
}
func FuzzValidateCertificateSigningRequestApprovalUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
csr1 := &certificates.CertificateSigningRequest{}
err := f.GenerateStruct(csr1)
if err != nil {
return 0
}
//fmt.Println(csr1)
csr2 := &certificates.CertificateSigningRequest{}
err = f.GenerateStruct(csr2)
if err != nil {
return 0
}
//fmt.Println(csr2)
_ = certificatesValidation.ValidateCertificateSigningRequestApprovalUpdate(csr1, csr2)
return 1
}
// apiextensions-apiserver
func FuzzValidateCustomResourceDefinition(data []byte) int {
f := fuzz.NewConsumer(data)
crd := &apiextensions.CustomResourceDefinition{}
err := f.GenerateStruct(crd)
if err != nil {
return 0
}
//fmt.Println(crd)
_ = apiextensionsValidation.ValidateCustomResourceDefinition(context.Background(), crd)
return 1
}
// apiserverinternal
func FuzzValidateStorageVersion(data []byte) int {
f := fuzz.NewConsumer(data)
sv := &apiserverinternal.StorageVersion{}
err := f.GenerateStruct(sv)
if err != nil {
return 0
}
//fmt.Println(sv)
_ = apiServerInternalValidation.ValidateStorageVersion(sv)
return 1
}
func FuzzValidateStorageVersionName(data []byte) int {
_ = apiServerInternalValidation.ValidateStorageVersionName(string(data), false)
return 1
}
func FuzzValidateStorageVersionStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
sv1 := &apiserverinternal.StorageVersion{}
err := f.GenerateStruct(sv1)
if err != nil {
return 0
}
//fmt.Println(sv1)
sv2 := &apiserverinternal.StorageVersion{}
err = f.GenerateStruct(sv2)
if err != nil {
return 0
}
//fmt.Println(sv2)
_ = apiServerInternalValidation.ValidateStorageVersionStatusUpdate(sv1, sv2)
return 1
}
// apiserver audit
func FuzzValidatePolicy(data []byte) int {
f := fuzz.NewConsumer(data)
p := &audit.Policy{}
err := f.GenerateStruct(p)
if err != nil {
return 0
}
//fmt.Println(p)
_ = auditValidation.ValidatePolicy(p)
return 1
}
// rbac validation
func FuzzValidateRoleUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
role1 := &rbac.Role{}
err := f.GenerateStruct(role1)
if err != nil {
return 0
}
role2 := &rbac.Role{}
err = f.GenerateStruct(role2)
if err != nil {
return 0
}
_ = rbacValidation.ValidateRoleUpdate(role1, role2)
return 1
}
func FuzzValidateClusterRoleUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
clusterRole1 := &rbac.ClusterRole{}
err := f.GenerateStruct(clusterRole1)
if err != nil {
return 0
}
clusterRole2 := &rbac.ClusterRole{}
err = f.GenerateStruct(clusterRole2)
if err != nil {
return 0
}
_ = rbacValidation.ValidateClusterRoleUpdate(clusterRole1, clusterRole2, rbacValidation.ClusterRoleValidationOptions{})
return 1
}
func FuzzValidateRoleBindingUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
roleBinding1 := &rbac.RoleBinding{}
err := f.GenerateStruct(roleBinding1)
if err != nil {
return 0
}
roleBinding2 := &rbac.RoleBinding{}
err = f.GenerateStruct(roleBinding2)
if err != nil {
return 0
}
_ = rbacValidation.ValidateRoleBindingUpdate(roleBinding1, roleBinding2)
return 1
}
func FuzzValidateClusterRoleBindingUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
clusterRoleBinding1 := &rbac.ClusterRoleBinding{}
err := f.GenerateStruct(clusterRoleBinding1)
if err != nil {
return 0
}
clusterRoleBinding2 := &rbac.ClusterRoleBinding{}
err = f.GenerateStruct(clusterRoleBinding2)
if err != nil {
return 0
}
_ = rbacValidation.ValidateClusterRoleBindingUpdate(clusterRoleBinding1, clusterRoleBinding2)
return 1
}
func FuzzCompactRules(data []byte) int {
f := fuzz.NewConsumer(data)
rules := make([]rbacv1.PolicyRule, 0)
err := f.CreateSlice(&rules)
if err != nil {
return 0
}
_, _ = rbacregistryvalidation.CompactRules(rules)
return 1
}
func FuzzValidateResourceQuotaSpec(data []byte) int {
f := fuzz.NewConsumer(data)
resourceQuotaSpec := &core.ResourceQuotaSpec{}
err := f.GenerateStruct(resourceQuotaSpec)
if err != nil {
return 0
}
fld := &field.Path{}
err = f.GenerateStruct(fld)
if err != nil {
return 0
}
_ = validation.ValidateResourceQuotaSpec(resourceQuotaSpec, fld)
return 1
}
func FuzzValidateResourceQuotaUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
newResourceQuota := &core.ResourceQuota{}
err := f.GenerateStruct(newResourceQuota)
if err != nil {
return 0
}
oldResourceQuota := &core.ResourceQuota{}
err = f.GenerateStruct(oldResourceQuota)
if err != nil {
return 0
}
_ = validation.ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota)
return 1
}
func FuzzValidateResourceQuotaStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
newResourceQuota := &core.ResourceQuota{}
err := f.GenerateStruct(newResourceQuota)
if err != nil {
return 0
}
oldResourceQuota := &core.ResourceQuota{}
err = f.GenerateStruct(oldResourceQuota)
if err != nil {
return 0
}
_ = validation.ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota)
return 1
}
func FuzzValidateServiceStatusUpdate(data []byte) int {
f := fuzz.NewConsumer(data)
service := &core.Service{}
err := f.GenerateStruct(service)
if err != nil {
return 0
}
oldService := &core.Service{}
err = f.GenerateStruct(oldService)
if err != nil {
return 0
}
_ = validation.ValidateServiceStatusUpdate(service, oldService)
return 1
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package json implements fuzzers for json deserialization routines in
// Kubernetes. These targets are compatible with the github.com/dvyukov/go-fuzz
// fuzzing framework.
package json
import (
"bytes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
)
var (
gvk = &schema.GroupVersionKind{Version: "v1"}
strictOpt = json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}
strictYamlOpt = json.SerializerOptions{Yaml: true, Pretty: false, Strict: true}
strictPrettyOpt = json.SerializerOptions{Yaml: false, Pretty: true, Strict: true}
nonstrictOpt = json.SerializerOptions{Yaml: false, Pretty: false, Strict: false}
nonstrictYamlOpt = json.SerializerOptions{Yaml: true, Pretty: false, Strict: false}
nonstrictPrettyOpt = json.SerializerOptions{Yaml: false, Pretty: true, Strict: false}
scheme = runtime.NewScheme()
strictSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, strictOpt)
ysSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, strictYamlOpt)
psSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, strictPrettyOpt)
nonstrictSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, nonstrictOpt)
ynsSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, nonstrictYamlOpt)
pnsSer = json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, nonstrictPrettyOpt)
)
// FuzzStrictDecode is a fuzz target for "k8s.io/apimachinery/pkg/runtime/serializer/json" strict decoding.
func FuzzStrictDecode(data []byte) int {
obj0, _, err0 := strictSer.Decode(data, gvk, nil)
obj1, _, err1 := nonstrictSer.Decode(data, gvk, nil)
obj2, _, err2 := ysSer.Decode(data, gvk, nil)
obj3, _, err3 := psSer.Decode(data, gvk, nil)
if obj0 == nil {
if obj1 != nil {
panic("NonStrict is stricter than Strict")
}
if obj2 != nil {
panic("Yaml strict different from plain strict")
}
if obj3 != nil {
panic("Pretty strict different from plain strict")
}
if err0 == nil || err1 == nil || err2 == nil || err3 == nil {
panic("no error")
}
return 0
}
if err0 != nil {
panic("got object and error for strict")
}
if err2 != nil {
panic("got object and error for yaml strict")
}
if err3 != nil {
panic("got object and error pretty strict")
}
var b0 bytes.Buffer
err4 := strictSer.Encode(obj0, &b0)
if err4 != nil {
panic("Can't encode decoded data")
}
if !bytes.Equal(b0.Bytes(), data) {
panic("Encoded data doesn't match original")
}
b0.Reset()
err5 := ysSer.Encode(obj2, &b0)
if err5 != nil {
panic("Can't encode yaml strict decoded data")
}
if !bytes.Equal(b0.Bytes(), data) {
panic("Encoded yaml strict data doesn't match original")
}
b0.Reset()
err6 := psSer.Encode(obj3, &b0)
if err6 != nil {
panic("Can't encode pretty strict decoded data")
}
if !bytes.Equal(b0.Bytes(), data) {
panic("Encoded pretty strict data doesn't match original")
}
b0.Reset()
err7 := nonstrictSer.Encode(obj1, &b0)
if err7 != nil {
panic("Can't encode nonstrict decoded data")
}
if !bytes.Equal(b0.Bytes(), data) {
panic("Encoded nonstrict data doesn't match original")
}
return 1
}
// FuzzNonStrictDecode is a fuzz target for "k8s.io/apimachinery/pkg/runtime/serializer/json" non-strict decoding.
func FuzzNonStrictDecode(data []byte) int {
obj0, _, err0 := nonstrictSer.Decode(data, gvk, nil)
if err0 != nil {
return 0
}
var b0 bytes.Buffer
err1 := nonstrictSer.Encode(obj0, &b0)
if err1 != nil {
panic("Can't nonstrict encode decoded data")
}
_, _, err2 := nonstrictSer.Decode(b0.Bytes(), gvk, nil)
if err2 != nil {
panic("Can't nonstrict decode encoded data")
}
b0.Reset()
err3 := ynsSer.Encode(obj0, &b0)
if err3 != nil {
panic("Can't yaml strict encode decoded data")
}
_, _, err4 := nonstrictSer.Decode(b0.Bytes(), gvk, nil)
if err4 != nil {
panic("Can't nonstrict decode encoded data")
}
b0.Reset()
err5 := pnsSer.Encode(obj0, &b0)
if err5 != nil {
panic("Can't pretty strict encode decoded data")
}
_, _, err6 := nonstrictSer.Decode(b0.Bytes(), gvk, nil)
if err6 != nil {
panic("Can't nonstrict decode encoded data")
}
return 1
}
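// These targets follow the go-fuzz convention: they take raw input bytes and
// return 1 when the input was interesting. A rough sketch of driving one of
// them with the github.com/dvyukov/go-fuzz toolchain (the package path and
// archive name are placeholders):
//
// go-fuzz-build k8s.io/kubernetes/test/fuzz/json
// go-fuzz -bin=json-fuzz.zip -func=FuzzStrictDecode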
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package yaml implements fuzzers for yaml deserialization routines in
// Kubernetes. These targets are compatible with the github.com/dvyukov/go-fuzz
// fuzzing framework.
package yaml
import (
"fmt"
"strings"
yaml "go.yaml.in/yaml/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sigyaml "sigs.k8s.io/yaml"
)
// FuzzDurationStrict is a fuzz target for strict-unmarshaling Duration defined
// in "k8s.io/apimachinery/pkg/apis/meta/v1". This target also checks that the
// unmarshaled result can be marshaled back to the input.
func FuzzDurationStrict(b []byte) int {
var durationHolder struct {
D metav1.Duration `json:"d"`
}
if err := sigyaml.UnmarshalStrict(b, &durationHolder); err != nil {
return 0
}
result, err := sigyaml.Marshal(&durationHolder)
if err != nil {
panic(err)
}
// Result is in the format "d: <duration>\n", so strip off the trailing
// newline and convert durationHolder.D to the expected format.
resultStr := strings.TrimSpace(string(result))
inputStr := fmt.Sprintf("d: %s", durationHolder.D.Duration)
if resultStr != inputStr {
panic(fmt.Sprintf("result(%v) != input(%v)", resultStr, inputStr))
}
return 1
}
// FuzzMicroTimeStrict is a fuzz target for strict-unmarshaling MicroTime
// defined in "k8s.io/apimachinery/pkg/apis/meta/v1". This target also checks
// that the unmarshaled result can be marshaled back to the input.
func FuzzMicroTimeStrict(b []byte) int {
var microTimeHolder struct {
T metav1.MicroTime `json:"t"`
}
if err := sigyaml.UnmarshalStrict(b, &microTimeHolder); err != nil {
return 0
}
result, err := sigyaml.Marshal(&microTimeHolder)
if err != nil {
panic(err)
}
// Result is in the format "t: <time>\n", so strip off the trailing
// newline and convert microTimeHolder.T to the expected format. If
// time is zero, the value is marshaled to "null".
resultStr := strings.TrimSpace(string(result))
var inputStr string
if microTimeHolder.T.Time.IsZero() {
inputStr = "t: null"
} else {
inputStr = fmt.Sprintf("t: %s", microTimeHolder.T.Time)
}
if resultStr != inputStr {
panic(fmt.Sprintf("result(%v) != input(%v)", resultStr, inputStr))
}
return 1
}
// FuzzSigYaml is a fuzz target for "sigs.k8s.io/yaml" unmarshaling.
func FuzzSigYaml(b []byte) int {
t := struct{}{}
m := map[string]interface{}{}
var out int
if err := sigyaml.Unmarshal(b, &m); err == nil {
out = 1
}
if err := sigyaml.Unmarshal(b, &t); err == nil {
out = 1
}
return out
}
// FuzzTimeStrict is a fuzz target for strict-unmarshaling Time defined in
// "k8s.io/apimachinery/pkg/apis/meta/v1". This target also checks that the
// unmarshaled result can be marshaled back to the input.
func FuzzTimeStrict(b []byte) int {
var timeHolder struct {
T metav1.Time `json:"t"`
}
if err := sigyaml.UnmarshalStrict(b, &timeHolder); err != nil {
return 0
}
result, err := sigyaml.Marshal(&timeHolder)
if err != nil {
panic(err)
}
// Result is in the format "t: <time>\n", so strip off the trailing
// newline and convert timeHolder.T to the expected format. If time is
// zero, the value is marshaled to "null".
resultStr := strings.TrimSpace(string(result))
var inputStr string
if timeHolder.T.Time.IsZero() {
inputStr = "t: null"
} else {
inputStr = fmt.Sprintf("t: %s", timeHolder.T.Time)
}
if resultStr != inputStr {
panic(fmt.Sprintf("result(%v) != input(%v)", resultStr, inputStr))
}
return 1
}
// FuzzYamlV2 is a fuzz target for "go.yaml.in/yaml/v2" unmarshaling.
func FuzzYamlV2(b []byte) int {
t := struct{}{}
m := map[string]interface{}{}
var out int
if err := yaml.Unmarshal(b, &m); err == nil {
out = 1
}
if err := yaml.Unmarshal(b, &t); err == nil {
out = 1
}
return out
}
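// The targets in this package can also be driven by Go's native fuzzing. A
// minimal wrapper sketch (the wrapper name and seed input are assumptions):
//
// func FuzzYamlV2Native(f *testing.F) {
// f.Add([]byte("a: 1")) // seed corpus entry
// f.Fuzz(func(t *testing.T, b []byte) {
// _ = FuzzYamlV2(b)
// })
// }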
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package format is an extension of Gomega's format package which
// improves printing of objects that can be serialized well as YAML,
// like the structs in the Kubernetes API.
//
// Just importing it is enough to activate this special YAML support
// in Gomega.
package format
import (
"reflect"
"strings"
"github.com/onsi/gomega/format"
"sigs.k8s.io/yaml"
)
func init() {
format.RegisterCustomFormatter(handleYAML)
}
// Object makes Gomega's [format.Object] available without having to import that
// package.
func Object(object interface{}, indentation uint) string {
return format.Object(object, indentation)
}
// handleYAML formats all values as YAML where the result
// is likely to look better as YAML:
// - pointer to struct or struct where all fields
// have `json` tags
// - slices containing such a value
// - maps where the key or value are such a value
func handleYAML(object interface{}) (string, bool) {
value := reflect.ValueOf(object)
if !useYAML(value.Type()) {
return "", false
}
y, err := yaml.Marshal(object)
if err != nil {
return "", false
}
return "\n" + strings.TrimSpace(string(y)), true
}
func useYAML(t reflect.Type) bool {
switch t.Kind() {
case reflect.Pointer, reflect.Slice, reflect.Array:
return useYAML(t.Elem())
case reflect.Map:
return useYAML(t.Key()) || useYAML(t.Elem())
case reflect.Struct:
// All fields must have a `json` tag.
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if _, ok := field.Tag.Lookup("json"); !ok {
return false
}
}
return true
default:
return false
}
}
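// As an illustration (not part of this package's API), a struct whose fields
// all carry `json` tags is printed as YAML by format.Object once this package
// is imported:
//
// type item struct {
// Name string `json:"name"`
// }
// // format.Object(item{Name: "x"}, 1) renders roughly as "name: x" on its
// // own line instead of the default Go-syntax dump.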
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"context"
"errors"
"fmt"
"strings"
"github.com/onsi/gomega"
"github.com/onsi/gomega/format"
)
// FailureError is an error where the error string is meant to be passed to
// [TContext.Fatal] directly, i.e. adding some prefix like "unexpected error" is not
// necessary. It is also not necessary to dump the error struct.
type FailureError struct {
Msg string
FullStackTrace string
}
func (f FailureError) Error() string {
return f.Msg
}
func (f FailureError) Backtrace() string {
return f.FullStackTrace
}
func (f FailureError) Is(target error) bool {
return target == ErrFailure
}
// ErrFailure is an empty error that can be wrapped to indicate that an error
// is a FailureError. It can also be used to test for a FailureError:
//
// return fmt.Errorf("some problem%w", ErrFailure)
// ...
// err := someOperation()
// if errors.Is(err, ErrFailure) {
// ...
// }
var ErrFailure error = FailureError{}
func expect(tCtx TContext, actual interface{}, extra ...interface{}) gomega.Assertion {
tCtx.Helper()
return gomega.NewWithT(tCtx).Expect(actual, extra...)
}
// suppressUnexpectedErrorLoggingKeyType is the type for a key which, if set to true in a context,
// suppresses logging of an unexpected error. The context returned by WithError uses this because
// the caller catches all failures in an error and then decides about logging.
type suppressUnexpectedErrorLoggingKeyType struct{}
var suppressUnexpectedErrorLoggingKey suppressUnexpectedErrorLoggingKeyType
func expectNoError(tCtx TContext, err error, explain ...interface{}) {
if err == nil {
return
}
tCtx.Helper()
value, ok := tCtx.Value(suppressUnexpectedErrorLoggingKey).(bool)
suppressLogging := ok && value
description := buildDescription(explain...)
if errors.Is(err, ErrFailure) {
var failure FailureError
if !suppressLogging && errors.As(err, &failure) {
if backtrace := failure.Backtrace(); backtrace != "" {
if description != "" {
tCtx.Log(description)
}
tCtx.Logf("Failed at:\n %s", strings.ReplaceAll(backtrace, "\n", "\n "))
}
}
if description != "" {
tCtx.Fatalf("%s: %s", description, err.Error())
}
tCtx.Fatal(err.Error())
}
if description == "" {
description = "Unexpected error"
}
if !suppressLogging {
tCtx.Logf("%s:\n%s", description, format.Object(err, 1))
}
tCtx.Fatalf("%s: %v", description, err.Error())
}
func buildDescription(explain ...interface{}) string {
switch len(explain) {
case 0:
return ""
case 1:
if describe, ok := explain[0].(func() string); ok {
return describe()
}
}
return fmt.Sprintf(explain[0].(string), explain[1:]...)
}
// Eventually wraps [gomega.Eventually] such that a failure will be reported via
// TContext.Fatal.
//
// In contrast to [gomega.Eventually], the parameter is strongly typed. It must
// accept a TContext as first argument and return one value, the one which is
// then checked with the matcher.
//
// In contrast to direct usage of [gomega.Eventually], making additional
// assertions inside the callback is okay as long as they use the TContext that
// is passed in. For example, errors can be checked with ExpectNoError:
//
// cb := func(tCtx ktesting.TContext) int {
// value, err := doSomething(...)
// tCtx.ExpectNoError(err, "something failed")
// assert(tCtx, 42, value, "the answer")
// return value
// }
// ktesting.Eventually(tCtx, cb).Should(gomega.Equal(42), "should be the answer to everything")
//
// If there is no value, then an error can be returned:
//
// cb := func(tCtx ktesting.TContext) error {
// err := doSomething(...)
// return err
// }
// ktesting.Eventually(tCtx, cb).Should(gomega.Succeed(), "foobar should succeed")
//
// The default Gomega poll interval and timeout are used. Setting a specific
// timeout may be useful:
//
// ktesting.Eventually(tCtx, cb).WithTimeout(5 * time.Second).Should(gomega.Succeed(), "foobar should succeed")
//
// Canceling the context in the callback only affects code in the callback. The
// context passed to Eventually is not getting canceled. To abort polling
// immediately because the expected condition is known to not be reached
// anymore, use [gomega.StopTrying]:
//
// cb := func(tCtx ktesting.TContext) int {
// value, err := doSomething(...)
// if errors.Is(err, SomeFinalErr) {
// // This message completely replaces the normal
// // failure message and thus should include all
// // relevant information.
// //
// // github.com/onsi/gomega/format is a good way
// // to format arbitrary data. It uses indention
// // and falls back to YAML for Kubernetes API
// // structs for readability.
// gomega.StopTrying("permanent failure, last value:\n%s", format.Object(value, 1 /* indent one level */)).
// Wrap(err).Now()
// }
// tCtx.ExpectNoError(err, "something failed")
// return value
// }
// ktesting.Eventually(tCtx, cb).Should(gomega.Equal(42), "should be the answer to everything")
//
// To poll again after some specific timeout, use [gomega.TryAgainAfter]. This is
// particularly useful in [Consistently] to ignore some intermittent error.
//
// cb := func(tCtx ktesting.TContext) int {
// value, err := doSomething(...)
// var intermittentErr SomeIntermittentError
// if errors.As(err, &intermittentErr) {
// gomega.TryAgainAfter(intermittentErr.RetryPeriod).Wrap(err).Now()
// }
// tCtx.ExpectNoError(err, "something failed")
// return value
// }
// ktesting.Eventually(tCtx, cb).Should(gomega.Equal(42), "should be the answer to everything")
func Eventually[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
tCtx.Helper()
return gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) (val T, err error) {
tCtx := WithContext(tCtx, ctx)
tCtx, finalize := WithError(tCtx, &err)
defer finalize()
tCtx = WithCancel(tCtx)
return cb(tCtx), nil
})
}
// Consistently wraps [gomega.Consistently] the same way as [Eventually] wraps
// [gomega.Eventually].
func Consistently[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
tCtx.Helper()
return gomega.NewWithT(tCtx).Consistently(tCtx, func(ctx context.Context) (val T, err error) {
tCtx := WithContext(tCtx, ctx)
tCtx, finalize := WithError(tCtx, &err)
defer finalize()
return cb(tCtx), nil
})
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"fmt"
"github.com/onsi/gomega"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/klog/v2"
)
// WithRESTConfig initializes all client-go clients with new clients
// created for the config. The current test name gets included in the UserAgent.
func WithRESTConfig(tCtx TContext, cfg *rest.Config) TContext {
cfg = rest.CopyConfig(cfg)
cfg.UserAgent = fmt.Sprintf("%s -- %s", rest.DefaultKubernetesUserAgent(), tCtx.Name())
cCtx := clientContext{
TContext: tCtx,
restConfig: cfg,
client: clientset.NewForConfigOrDie(cfg),
dynamic: dynamic.NewForConfigOrDie(cfg),
apiextensions: apiextensions.NewForConfigOrDie(cfg),
}
cachedDiscovery := memory.NewMemCacheClient(cCtx.client.Discovery())
cCtx.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery)
return &cCtx
}
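// Illustrative usage (the restConfig variable and the namespace are
// assumptions made for the example):
//
// tCtx = ktesting.WithRESTConfig(tCtx, restConfig)
// pods, err := tCtx.Client().CoreV1().Pods("default").List(tCtx, metav1.ListOptions{})
// tCtx.ExpectNoError(err, "list pods")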
// WithClients uses an existing config and clients.
func WithClients(tCtx TContext, cfg *rest.Config, mapper *restmapper.DeferredDiscoveryRESTMapper, client clientset.Interface, dynamic dynamic.Interface, apiextensions apiextensions.Interface) TContext {
return clientContext{
TContext: tCtx,
restConfig: cfg,
restMapper: mapper,
client: client,
dynamic: dynamic,
apiextensions: apiextensions,
}
}
type clientContext struct {
TContext
restConfig *rest.Config
restMapper *restmapper.DeferredDiscoveryRESTMapper
client clientset.Interface
dynamic dynamic.Interface
apiextensions apiextensions.Interface
}
func (cCtx clientContext) CleanupCtx(cb func(TContext)) {
cCtx.Helper()
cleanupCtx(cCtx, cb)
}
func (cCtx clientContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
cCtx.Helper()
return expect(cCtx, actual, extra...)
}
func (cCtx clientContext) ExpectNoError(err error, explain ...interface{}) {
cCtx.Helper()
expectNoError(cCtx, err, explain...)
}
func (cCtx clientContext) Run(name string, cb func(tCtx TContext)) bool {
return run(cCtx, name, cb)
}
func (cCtx clientContext) Logger() klog.Logger {
return klog.FromContext(cCtx)
}
func (cCtx clientContext) RESTConfig() *rest.Config {
if cCtx.restConfig == nil {
return nil
}
return rest.CopyConfig(cCtx.restConfig)
}
func (cCtx clientContext) RESTMapper() *restmapper.DeferredDiscoveryRESTMapper {
return cCtx.restMapper
}
func (cCtx clientContext) Client() clientset.Interface {
return cCtx.client
}
func (cCtx clientContext) Dynamic() dynamic.Interface {
return cCtx.dynamic
}
func (cCtx clientContext) APIExtensions() apiextensions.Interface {
return cCtx.apiextensions
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"context"
"fmt"
"time"
)
// cleanupErr creates a cause when canceling a context because the test has completed.
// It is a context.Canceled error.
func cleanupErr(testName string) error {
return canceledError(fmt.Sprintf("test %s is cleaning up", testName))
}
type canceledError string
func (c canceledError) Error() string { return string(c) }
func (c canceledError) Is(target error) bool {
return target == context.Canceled
}
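// Callers can therefore treat such a cause like a regular cancellation, for
// example (sketch):
//
// if errors.Is(context.Cause(ctx), context.Canceled) {
// // canceled by ktesting, context.Cause(ctx) explains why
// }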
// withTimeout corresponds to [context.WithTimeout]. In contrast to
// [context.WithTimeout], it automatically cancels during test cleanup, provides
// the given cause when the deadline is reached, and its cancel function
// requires a cause.
func withTimeout(ctx context.Context, tb TB, timeout time.Duration, timeoutCause string) (context.Context, func(cause string)) {
tb.Helper()
now := time.Now()
cancelCtx, cancel := context.WithCancelCause(ctx)
after := time.NewTimer(timeout)
stopCtx, stop := context.WithCancel(ctx) // Only used internally, doesn't need a cause.
tb.Cleanup(func() {
cancel(cleanupErr(tb.Name()))
stop()
})
go func() {
select {
case <-stopCtx.Done():
after.Stop()
// No need to set a cause here. The cause or error of
// the parent context will be used.
case <-after.C:
// Code using this tCtx may or may not log the
// information above when it runs into the
// cancellation. It's better if we do it, just to be on
// the safe side.
//
// Would be nice to log this with the source code location
// of our caller, but testing.Logf does not support that.
tb.Logf("\nINFO: canceling context: %s\n", timeoutCause)
cancel(canceledError(timeoutCause))
}
}()
// Determine which deadline is sooner: ours or that of our parent.
deadline := now.Add(timeout)
if parentDeadline, ok := ctx.Deadline(); ok {
if deadline.After(parentDeadline) {
deadline = parentDeadline
}
}
// We always have a deadline.
return deadlineContext{Context: cancelCtx, deadline: deadline}, func(cause string) {
var cancelCause error
if cause != "" {
cancelCause = canceledError(cause)
}
cancel(cancelCause)
}
}
type deadlineContext struct {
context.Context
deadline time.Time
}
func (d deadlineContext) Deadline() (time.Time, bool) {
return d.deadline, true
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"errors"
"fmt"
"strings"
"sync"
"github.com/onsi/gomega"
"k8s.io/klog/v2"
)
// WithError creates a context where test failures are collected and stored in
// the provided error instance when the caller is done. Use it like this:
//
// func doSomething(tCtx ktesting.TContext) (finalErr error) {
// tCtx, finalize := WithError(tCtx, &finalErr)
// defer finalize()
// ...
// tCtx.Fatal("some failure")
//
// Any error already stored in the variable will get overwritten by finalize if
// there were test failures, otherwise the variable is left unchanged.
// If there were multiple test errors, then the error will wrap all of
// them with errors.Join.
//
// Test failures are not propagated to the parent context.
func WithError(tCtx TContext, err *error) (TContext, func()) {
return withError(tCtx, err, true)
}
// WithErrorLogging, in contrast to WithError, uses the normal ExpectNoError implementation
// where an error is first logged and then the context is marked as failed.
//
// This is only useful if ExpectNoError is called just once. If it is called repeatedly
// and the resulting error is handled by the caller (for example, in a polling
// function), then WithError is more suitable.
func WithErrorLogging(tCtx TContext, err *error) (TContext, func()) {
return withError(tCtx, err, false)
}
func withError(tCtx TContext, err *error, suppressUnexpectedErrorLogging bool) (TContext, func()) {
eCtx := &errorContext{
TContext: tCtx,
suppressUnexpectedErrorLogging: suppressUnexpectedErrorLogging,
}
return eCtx, func() {
// Recover has to be called in the deferred function. When called inside
// a function called by a deferred function (like finalize below), it
// returns nil.
if e := recover(); e != nil {
if _, ok := e.(fatalWithError); !ok {
// Not our own panic, pass it on instead of setting the error.
panic(e)
}
}
eCtx.finalize(err)
}
}
type errorContext struct {
TContext
mutex sync.Mutex
errors []error
failed bool
suppressUnexpectedErrorLogging bool
}
func (eCtx *errorContext) Value(key any) any {
if key == suppressUnexpectedErrorLoggingKey {
return eCtx.suppressUnexpectedErrorLogging
}
return eCtx.TContext.Value(key)
}
func (eCtx *errorContext) finalize(err *error) {
eCtx.mutex.Lock()
defer eCtx.mutex.Unlock()
if !eCtx.failed {
return
}
errs := eCtx.errors
if len(errs) == 0 {
errs = []error{errFailedWithNoExplanation}
}
*err = errors.Join(errs...)
}
func (eCtx *errorContext) Error(args ...any) {
eCtx.mutex.Lock()
defer eCtx.mutex.Unlock()
// Gomega adds a leading newline in https://github.com/onsi/gomega/blob/f804ac6ada8d36164ecae0513295de8affce1245/internal/gomega.go#L37
// Let's strip that at start and end because ktesting will make errors
// stand out more with the "ERROR" prefix, so there's no need for additional
// line breaks.
eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintln(args...))))
eCtx.failed = true
}
func (eCtx *errorContext) Errorf(format string, args ...any) {
eCtx.mutex.Lock()
defer eCtx.mutex.Unlock()
eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintf(format, args...))))
eCtx.failed = true
}
func (eCtx *errorContext) Fail() {
eCtx.mutex.Lock()
defer eCtx.mutex.Unlock()
eCtx.failed = true
}
func (eCtx *errorContext) FailNow() {
eCtx.Helper()
eCtx.Fail()
panic(failed)
}
func (eCtx *errorContext) Failed() bool {
eCtx.mutex.Lock()
defer eCtx.mutex.Unlock()
return eCtx.failed
}
func (eCtx *errorContext) Fatal(args ...any) {
eCtx.Error(args...)
eCtx.FailNow()
}
func (eCtx *errorContext) Fatalf(format string, args ...any) {
eCtx.Errorf(format, args...)
eCtx.FailNow()
}
func (eCtx *errorContext) CleanupCtx(cb func(TContext)) {
eCtx.Helper()
cleanupCtx(eCtx, cb)
}
func (eCtx *errorContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
eCtx.Helper()
return expect(eCtx, actual, extra...)
}
func (eCtx *errorContext) ExpectNoError(err error, explain ...interface{}) {
eCtx.Helper()
expectNoError(eCtx, err, explain...)
}
func (eCtx *errorContext) Run(name string, cb func(tCtx TContext)) bool {
return run(eCtx, name, cb)
}
func (eCtx *errorContext) Logger() klog.Logger {
return klog.FromContext(eCtx)
}
// fatalWithError is the internal type that should never get propagated up. The
// only case where that can happen is when the developer forgot to call
// finalize via defer. The string explains that, in case that developers get to
// see it.
type fatalWithError string
const failed = fatalWithError("WithError TContext encountered a fatal error, but the finalize function was not called via defer as it should have been.")
var errFailedWithNoExplanation = errors.New("WithError context was marked as failed without recording an error")
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package initoption
import "k8s.io/kubernetes/test/utils/ktesting/internal"
// InitOption is a functional option for Init and InitCtx.
type InitOption func(c *internal.InitConfig)
// PerTestOutput controls whether a per-test logger gets
// set up by Init. Has no effect in InitCtx.
func PerTestOutput(enabled bool) InitOption {
return func(c *internal.InitConfig) {
c.PerTestOutput = enabled
}
}
// BufferLogs controls whether log entries are captured in memory in addition
// to being printed. Off by default. Unit tests that want to verify that
// log entries are emitted as expected can turn this on and then retrieve
// the captured log through the Underlier LogSink interface.
func BufferLogs(enabled bool) InitOption {
return func(c *internal.InitConfig) {
c.BufferLogs = enabled
}
}
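// A minimal sketch of retrieving the captured log afterwards, assuming the
// test uses Init and the Underlier alias exported by the ktesting package:
//
// tCtx := ktesting.Init(t, initoption.BufferLogs(true))
// tCtx.Logger().Info("hello")
// if underlier, ok := tCtx.Logger().GetSink().(ktesting.Underlier); ok {
// _ = underlier.GetBuffer().String()
// }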
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"fmt"
"strings"
"time"
)
var timeNow = time.Now // Can be stubbed out for testing.
// withKlogHeader creates a TB where the same "I<date> <time>]" prefix
// gets added to all log output, just as in the klog test logger.
// This is used internally when constructing a TContext for unit testing.
func withKlogHeader(tb TB) TB {
return klogTB{
TB: tb,
}
}
type klogTB struct {
TB
}
func (k klogTB) Log(args ...any) {
k.Helper()
k.TB.Log(header() + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (k klogTB) Logf(format string, args ...any) {
k.Helper()
k.TB.Log(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
func (k klogTB) Error(args ...any) {
k.Helper()
k.TB.Error(header() + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (k klogTB) Errorf(format string, args ...any) {
k.Helper()
k.TB.Error(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
func (k klogTB) Fatal(args ...any) {
k.Helper()
k.TB.Fatal(header() + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (k klogTB) Fatalf(format string, args ...any) {
k.Helper()
k.TB.Fatal(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
func header() string {
now := timeNow()
_, month, day := now.Date()
hour, minute, second := now.Clock()
return fmt.Sprintf("I%02d%02d %02d:%02d:%02d.%06d] ",
month, day, hour, minute, second, now.Nanosecond()/1000)
}
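// For example, a message logged on January 2 at 15:04:05.000001 gets the
// prefix "I0102 15:04:05.000001] ", matching the klog text format.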
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"flag"
"fmt"
"testing"
"k8s.io/klog/v2"
// Initialize command line parameters.
_ "k8s.io/component-base/logs/testinit"
)
func init() {
// This is a good default for unit tests. Benchmarks should add their own
// init function or TestMain to lower the default, for example to 2.
SetDefaultVerbosity(5)
}
// SetDefaultVerbosity can be called during init to modify the default
// log verbosity of the program.
//
// Note that this immediately reconfigures the klog verbosity, already before
// flag parsing. If the verbosity is non-zero and SetDefaultVerbosity is called
// during init, then other init functions might start logging where normally
// they wouldn't log anything. Should this occur, then the right fix is to
// remove those log calls because logging during init is discouraged. It leads
// to unpredictable output (init order is not specified) and/or is useless
// (logging not initialized during init and thus conditional log output gets
// omitted).
func SetDefaultVerbosity(v int) {
f := flag.CommandLine.Lookup("v")
_ = f.Value.Set(fmt.Sprintf("%d", v))
}
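// For example, a benchmark package could lower the default verbosity in its
// own init function:
//
// func init() {
// ktesting.SetDefaultVerbosity(2)
// }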
// NewTestContext is a replacement for ktesting.NewTestContext
// which returns a more versatile context.
func NewTestContext(tb testing.TB) (klog.Logger, TContext) {
tCtx := Init(tb)
return tCtx.Logger(), tCtx
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"context"
"errors"
"io"
"os"
"os/signal"
"strings"
"sync"
)
var (
interruptCtx context.Context
defaultProgressReporter = new(progressReporter)
defaultSignalChannel chan os.Signal
)
const ginkgoSpecContextKey = "GINKGO_SPEC_CONTEXT"
type ginkgoReporter interface {
AttachProgressReporter(reporter func() string) func()
}
func init() {
// Setting up signals is intentionally done in an init function because
// then importing ktesting in a unit or integration test is sufficient
// to activate the signal behavior.
signalCtx, _ := signal.NotifyContext(context.Background(), os.Interrupt)
cancelCtx, cancel := context.WithCancelCause(context.Background())
go func() {
<-signalCtx.Done()
cancel(errors.New("received interrupt signal"))
}()
// This reimplements the contract between Ginkgo and Gomega for progress reporting.
// When using Ginkgo contexts, Ginkgo will implement it. This here is for "go test".
//
// nolint:staticcheck // It complains about using a plain string. This can only be fixed
// by Ginkgo and Gomega formalizing this interface and define a type (somewhere...
// probably cannot be in either Ginkgo or Gomega).
interruptCtx = context.WithValue(cancelCtx, ginkgoSpecContextKey, defaultProgressReporter)
defaultSignalChannel = make(chan os.Signal, 1)
// progressSignals will be empty on Windows.
if len(progressSignals) > 0 {
signal.Notify(defaultSignalChannel, progressSignals...)
}
// os.Stderr gets redirected by "go test". "go test -v" has to be
// used to see the output while a test runs.
defaultProgressReporter.setOutput(os.Stderr)
go defaultProgressReporter.run(interruptCtx, defaultSignalChannel)
}
type progressReporter struct {
mutex sync.Mutex
reporterCounter int64
reporters map[int64]func() string
out io.Writer
}
var _ ginkgoReporter = &progressReporter{}
func (p *progressReporter) setOutput(out io.Writer) io.Writer {
p.mutex.Lock()
defer p.mutex.Unlock()
oldOut := p.out
p.out = out
return oldOut
}
// AttachProgressReporter implements Gomega's contextWithAttachProgressReporter.
func (p *progressReporter) AttachProgressReporter(reporter func() string) func() {
p.mutex.Lock()
defer p.mutex.Unlock()
// TODO (?): identify the caller and record that for dumpProgress.
p.reporterCounter++
id := p.reporterCounter
if p.reporters == nil {
p.reporters = make(map[int64]func() string)
}
p.reporters[id] = reporter
return func() {
p.detachProgressReporter(id)
}
}
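// Illustrative sketch: long-running code could attach its own reporter while
// waiting, so that a progress dump includes it. The message below is a
// placeholder.
//
// detach := defaultProgressReporter.AttachProgressReporter(func() string {
// return "waiting for the server to become healthy"
// })
// defer detach()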
func (p *progressReporter) detachProgressReporter(id int64) {
p.mutex.Lock()
defer p.mutex.Unlock()
delete(p.reporters, id)
}
func (p *progressReporter) run(ctx context.Context, progressSignalChannel chan os.Signal) {
for {
select {
case <-ctx.Done():
return
case <-progressSignalChannel:
p.dumpProgress()
}
}
}
// dumpProgress is less useful than the Ginkgo progress report. We can't fix
// that we don't know which tests are currently running and instead have to
// rely on "go test -v" for that.
//
// But perhaps dumping goroutines and their callstacks is useful anyway? TODO:
// look at how Ginkgo does it and replicate some of it.
func (p *progressReporter) dumpProgress() {
p.mutex.Lock()
defer p.mutex.Unlock()
var buffer strings.Builder
buffer.WriteString("You requested a progress report.\n")
if len(p.reporters) == 0 {
buffer.WriteString("Currently there is no information about test progress available.\n")
}
for _, reporter := range p.reporters {
report := reporter()
buffer.WriteRune('\n')
buffer.WriteString(report)
if !strings.HasSuffix(report, "\n") {
buffer.WriteRune('\n')
}
}
_, _ = p.out.Write([]byte(buffer.String()))
}
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"fmt"
"strings"
"time"
"k8s.io/klog/v2"
)
// WithStep creates a context where a prefix is added to all errors and log
// messages, similar to how errors are wrapped. This can be nested, leaving a
// trail of "bread crumbs" that help figure out where in a test some problem
// occurred or why some log output gets written:
//
// ERROR: bake cake: set heat for baking: oven not found
//
// The string should describe the operation that is about to happen ("starting
// the controller", "list items") or what is being operated on ("HTTP server").
// Multiple different prefixes get concatenated with a colon.
func WithStep(tCtx TContext, what string) TContext {
sCtx := &stepContext{
TContext: WithLogger(tCtx, klog.LoggerWithName(tCtx.Logger(), what)),
parentCtx: tCtx,
what: what,
start: time.Now(),
}
return sCtx
}
// Step is useful when the context with the step information is
// used more than once:
//
// ktesting.Step(tCtx, "step 1", func(tCtx ktesting.TContext) {
// tCtx.Log(...)
// if ... {
// tCtx.Fatalf(...)
// }
// })
//
// Inside the callback, the tCtx variable is the one where the step
// has been added. This avoids the need to introduce multiple different
// context variables and risk of using the wrong one.
func Step(tCtx TContext, what string, cb func(tCtx TContext)) {
tCtx.Helper()
cb(WithStep(tCtx, what))
}
// Begin and End can be used instead of Step to execute some instructions
// with a new context without using a callback method. This is useful
// when some local variables need to be set which are read later on.
// Log entries document the start and end of the step, including its duration.
//
// tCtx = ktesting.Begin(tCtx, "step 1")
// .. do something with tCtx
// tCtx = ktesting.End(tCtx)
func Begin(tCtx TContext, what string) TContext {
tCtx.Helper()
tCtx = WithStep(tCtx, what)
tCtx.Log("Starting...")
return tCtx
}
// End complements Begin and returns the original context that was passed to Begin.
// It must be called on the context returned by Begin.
func End(tCtx TContext) TContext {
tCtx.Helper()
sCtx, ok := tCtx.(*stepContext)
if !ok {
tCtx.Fatalf("expected result of Begin, got instead %T", tCtx)
}
tCtx.Logf("Done, duration %s", time.Since(sCtx.start))
return sCtx.parentCtx
}
type stepContext struct {
TContext
parentCtx TContext
what string
start time.Time
}
func (sCtx *stepContext) Log(args ...any) {
sCtx.Helper()
sCtx.TContext.Log(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (sCtx *stepContext) Logf(format string, args ...any) {
sCtx.Helper()
sCtx.TContext.Log(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
func (sCtx *stepContext) Error(args ...any) {
sCtx.Helper()
sCtx.TContext.Error(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (sCtx *stepContext) Errorf(format string, args ...any) {
sCtx.Helper()
sCtx.TContext.Error(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
func (sCtx *stepContext) Fatal(args ...any) {
sCtx.Helper()
sCtx.TContext.Fatal(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
}
func (sCtx *stepContext) Fatalf(format string, args ...any) {
sCtx.Helper()
sCtx.TContext.Fatal(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
}
// Value intercepts a search for the special "GINKGO_SPEC_CONTEXT".
func (sCtx *stepContext) Value(key any) any {
if s, ok := key.(string); ok && s == ginkgoSpecContextKey {
if reporter, ok := sCtx.TContext.Value(key).(ginkgoReporter); ok {
return ginkgoReporter(&stepReporter{reporter: reporter, what: sCtx.what})
}
}
return sCtx.TContext.Value(key)
}
type stepReporter struct {
reporter ginkgoReporter
what string
}
var _ ginkgoReporter = &stepReporter{}
func (s *stepReporter) AttachProgressReporter(reporter func() string) func() {
return s.reporter.AttachProgressReporter(func() string {
report := reporter()
return s.what + ": " + report
})
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"context"
"flag"
"fmt"
"strings"
"testing"
"time"
"github.com/onsi/gomega"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/test/utils/format"
"k8s.io/kubernetes/test/utils/ktesting/initoption"
"k8s.io/kubernetes/test/utils/ktesting/internal"
)
// Underlier is the additional interface implemented by the per-test LogSink
// behind [TContext.Logger]. Together with [initoption.BufferLogs] it can be
// used to capture log output in memory to check it in tests.
type Underlier = ktesting.Underlier
// CleanupGracePeriod is the time that a [TContext] gets canceled before the
// deadline of its underlying test suite (usually determined via "go test
// -timeout"). This gives the running test(s) time to fail with an informative
// timeout error. After that, all cleanup callbacks then have the remaining
// time to complete before the test binary is killed.
//
// For this to work, each blocking call in a test must respect the
// cancellation of the [TContext].
//
// When using Ginkgo to manage the test suite and running tests, the
// CleanupGracePeriod is ignored because Ginkgo itself manages timeouts.
const CleanupGracePeriod = 5 * time.Second
// TContext combines [context.Context], [TB] and some additional
// methods. Log output is associated with the current test. Errors ([Error],
// [Errorf]) are recorded with "ERROR" as prefix, fatal errors ([Fatal],
// [Fatalf]) with "FATAL ERROR".
//
// TContext provides features offered by Ginkgo also when using normal Go [testing]:
// - The context contains a deadline that expires soon enough before
// the overall timeout that cleanup code can still run.
// - Cleanup callbacks can get their own, separate contexts when
// registered via [CleanupCtx].
// - CTRL-C aborts, prints a progress report, and then cleans up
// before terminating.
// - SIGUSR1 prints a progress report without aborting.
//
// Progress reporting is more informative when doing polling with
// [gomega.Eventually] and [gomega.Consistently]. Without that, it
// can only report which tests are active.
type TContext interface {
context.Context
TB
// Parallel signals that this test is to be run in parallel with (and
// only with) other parallel tests. In other words, it needs to be
// called in each test which is meant to run in parallel.
//
// Only supported in Go unit tests. When such a test is run multiple
// times due to use of -test.count or -test.cpu, multiple instances of
// a single test never run in parallel with each other.
Parallel()
// Run runs f as a subtest of t called name. It blocks until f returns or
// calls t.Parallel to become a parallel test.
//
// Only supported in Go unit tests or benchmarks. It fails the current
// test when called elsewhere.
Run(name string, f func(tCtx TContext)) bool
// Cancel can be invoked to cancel the context before the test is completed.
// Tests which use the context to control goroutines and then wait for
// termination of those goroutines must call Cancel to avoid a deadlock.
//
// The cause, if non-empty, is turned into an error which is equivalent
// to context.Canceled. context.Cause will return that error for the
// context.
Cancel(cause string)
// Cleanup registers a callback that will get invoked when the test
// has finished. Callbacks get invoked in last-in-first-out order (LIFO).
//
// Beware of context cancellation. The following cleanup code
// will use a canceled context, which is not desirable:
//
// tCtx.Cleanup(func() { /* do something with tCtx */ })
// tCtx.Cancel("done")
//
// A safer way to run cleanup code is:
//
// tCtx.CleanupCtx(func(tCtx ktesting.TContext) { /* do something with cleanup tCtx */ })
Cleanup(func())
// CleanupCtx is an alternative for Cleanup. The callback is passed a
// new TContext with the same logger and clients as the one CleanupCtx
// was invoked for.
CleanupCtx(func(TContext))
// Expect wraps [gomega.Expect] such that a failure will be reported via
// [TContext.Fatal]. As with [gomega.Expect], additional values
// may get passed. Those values then all must be nil for the assertion
// to pass. This can be used with functions which return a value
// plus error:
//
// myAmazingThing := func() (int, error) { ... }
// tCtx.Expect(myAmazingThing()).Should(gomega.Equal(1))
Expect(actual interface{}, extra ...interface{}) gomega.Assertion
// ExpectNoError asserts that no error has occurred.
//
// As in [gomega], the optional explanation can be:
// - a [fmt.Sprintf] format string plus its arguments
// - a function returning a string, which will be called
// lazily to construct the explanation if needed
//
// If an explanation is provided, then it replaces the default "Unexpected
// error" in the failure message. It's combined with additional details by
// adding a colon at the end, as when wrapping an error. Therefore it should
// not end with a punctuation mark or line break.
//
// Using ExpectNoError instead of the corresponding Gomega or testify
// assertions has the advantage that the failure message is short (good for
// aggregation in https://go.k8s.io/triage) with more details captured in the
// test log output (good when investigating one particular failure).
ExpectNoError(err error, explain ...interface{})
// Logger returns a logger for the current test. This is a shortcut
// for calling klog.FromContext.
//
// Output emitted via this logger and the TB interface (like Logf)
// is formatted consistently. The TB interface generates a single
// message string, while Logger enables structured logging and can
// be passed down into code which expects a logger.
//
// To skip intermediate helper functions during stack unwinding,
// TB.Helper can be called in those functions.
Logger() klog.Logger
// TB returns the underlying TB. This can be used to "break the glass"
// and cast back into a testing.T or TB. Calling TB is necessary
// because TContext wraps the underlying TB.
TB() TB
// RESTConfig returns a config for a rest client with the UserAgent set
// to include the current test name or nil if not available. Several
// typed clients using this config are available through [Client],
// [Dynamic], [APIExtensions].
RESTConfig() *rest.Config
RESTMapper() *restmapper.DeferredDiscoveryRESTMapper
Client() clientset.Interface
Dynamic() dynamic.Interface
APIExtensions() apiextensions.Interface
// The following methods must be implemented by every implementation
// of TContext to ensure that the leaf TContext is used, not some
// embedded TContext:
// - CleanupCtx
// - Expect
// - ExpectNoError
// - Run
// - Logger
//
// Usually these methods would be stand-alone functions with a TContext
// parameter. Offering them as methods simplifies the test code.
}
// TB is the interface common to [testing.T], [testing.B], [testing.F] and
// [github.com/onsi/ginkgo/v2]. In contrast to [testing.TB], it can be
// implemented also outside of the testing package.
type TB interface {
Cleanup(func())
Error(args ...any)
Errorf(format string, args ...any)
Fail()
FailNow()
Failed() bool
Fatal(args ...any)
Fatalf(format string, args ...any)
Helper()
Log(args ...any)
Logf(format string, args ...any)
Name() string
Setenv(key, value string)
Skip(args ...any)
SkipNow()
Skipf(format string, args ...any)
Skipped() bool
TempDir() string
}
// ContextTB adds support for cleanup callbacks with an explicit context
// parameter. This is used when integrating with Ginkgo, where CleanupCtx
// gets implemented via ginkgo.DeferCleanup.
type ContextTB interface {
TB
CleanupCtx(func(ctx context.Context))
}
// Init can be called in a unit or integration test to create
// a test context which:
// - has a per-test logger with verbosity derived from the -v command line flag
// - gets canceled when the test finishes (via [TB.Cleanup])
//
// Note that the test context supports the interfaces of [TB] and
// [context.Context] and thus can be used like one of those where needed.
// It also has additional methods for retrieving the logger and canceling
// the context early, which can be useful in tests which want to wait
// for goroutines to terminate after cancellation.
//
// If the [TB] implementation also implements [ContextTB], then
// [TContext.CleanupCtx] uses [ContextTB.CleanupCtx] and uses
// the context passed into that callback. This can be used to let
// Ginkgo create a fresh context for cleanup code.
//
// Init can be called more than once per test to get different contexts with
// independent cancellation. The default behavior described above can be
// modified via optional functional options defined in [initoption].
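//
// A minimal usage sketch (doWork is a hypothetical helper that accepts
// a context.Context):
//
//     func TestSomething(t *testing.T) {
//         tCtx := ktesting.Init(t)
//         tCtx.Logger().V(2).Info("test started")
//         doWork(tCtx) // canceled automatically when the test finishes
//     }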
func Init(tb TB, opts ...InitOption) TContext {
tb.Helper()
c := internal.InitConfig{
PerTestOutput: true,
}
for _, opt := range opts {
opt(&c)
}
// We don't require a Deadline implementation; testing.B doesn't have one.
// But if we do have one, we'll use it to set a timeout shortly before
// the deadline. This needs to come before we wrap tb.
deadlineTB, deadlineOK := tb.(interface {
Deadline() (time.Time, bool)
})
ctx := interruptCtx
if c.PerTestOutput {
logger := newLogger(tb, c.BufferLogs)
ctx = klog.NewContext(interruptCtx, logger)
tb = withKlogHeader(tb)
}
if deadlineOK {
if deadline, ok := deadlineTB.Deadline(); ok {
timeLeft := time.Until(deadline)
timeLeft -= CleanupGracePeriod
ctx, cancel := withTimeout(ctx, tb, timeLeft, fmt.Sprintf("test suite deadline (%s) is close, need to clean up before the %s cleanup grace period", deadline.Truncate(time.Second), CleanupGracePeriod))
tCtx := tContext{
Context: ctx,
testingTB: testingTB{TB: tb},
cancel: cancel,
}
return tCtx
}
}
tCtx := WithCancel(InitCtx(ctx, tb))
tCtx.Cleanup(func() {
tCtx.Cancel(cleanupErr(tCtx.Name()).Error())
})
return tCtx
}
func newLogger(tb TB, bufferLogs bool) klog.Logger {
config := ktesting.NewConfig(
ktesting.AnyToString(func(v interface{}) string {
// For basic types where the string
// representation is "obvious" we use
// fmt.Sprintf because format.Object always
// adds a <"type"> prefix, which is too long
// for simple values.
switch v := v.(type) {
case int, int32, int64, uint, uint32, uint64, float32, float64, bool:
return fmt.Sprintf("%v", v)
case string:
return v
default:
return strings.TrimSpace(format.Object(v, 1))
}
}),
ktesting.VerbosityFlagName("v"),
ktesting.VModuleFlagName("vmodule"),
ktesting.BufferLogs(bufferLogs),
)
// Copy klog settings instead of making the ktesting logger
// configurable directly.
var fs flag.FlagSet
config.AddFlags(&fs)
for _, name := range []string{"v", "vmodule"} {
from := flag.CommandLine.Lookup(name)
to := fs.Lookup(name)
if err := to.Value.Set(from.Value.String()); err != nil {
panic(err)
}
}
// Ensure consistent logging: this klog.Logger writes to tb, adding the
// date/time header, and our own wrapper emulates that behavior for
// Log/Logf/...
logger := ktesting.NewLogger(tb, config)
return logger
}
type InitOption = initoption.InitOption
// InitCtx is a variant of [Init] which uses an already existing context and
// whatever logger and timeouts are stored there.
// Functional options are part of the API, but currently
// there are none which have an effect.
func InitCtx(ctx context.Context, tb TB, _ ...InitOption) TContext {
tCtx := tContext{
Context: ctx,
testingTB: testingTB{TB: tb},
}
return tCtx
}
// WithTB constructs a new TContext with a different TB instance.
// This can be used to set up some of the context, in particular
// clients, in the root test and then run sub-tests:
//
// func TestSomething(t *testing.T) {
// tCtx := ktesting.Init(t)
// ...
// tCtx = ktesting.WithRESTConfig(tCtx, config)
//
// t.Run("sub", func (t *testing.T) {
// tCtx := ktesting.WithTB(tCtx, t)
// ...
// })
// }
//
// WithTB sets up cancellation for the sub-test and uses per-test output.
//
// A simpler API is to use TContext.Run as replacement
// for [testing.T.Run].
func WithTB(parentCtx TContext, tb TB) TContext {
tCtx := InitCtx(klog.NewContext(parentCtx, newLogger(tb, false /* don't buffer log output */)), tb)
tCtx = WithCancel(tCtx)
tCtx = WithClients(tCtx,
parentCtx.RESTConfig(),
parentCtx.RESTMapper(),
parentCtx.Client(),
parentCtx.Dynamic(),
parentCtx.APIExtensions(),
)
return tCtx
}
// run implements the different Run methods. It's not an exported
// method because tCtx.Run is more discoverable (same usage as
// with normal Go).
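//
// A usage sketch from within a test (mirrors [testing.T.Run]):
//
//     tCtx.Run("scenario", func(tCtx ktesting.TContext) {
//         tCtx.Log("runs as a sub-test with its own cancellation and per-test output")
//     })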
func run(tCtx TContext, name string, cb func(tCtx TContext)) bool {
tCtx.Helper()
switch tb := tCtx.TB().(type) {
case interface {
Run(string, func(t *testing.T)) bool
}:
return tb.Run(name, func(t *testing.T) { cb(WithTB(tCtx, t)) })
case interface {
Run(string, func(t *testing.B)) bool
}:
return tb.Run(name, func(b *testing.B) { cb(WithTB(tCtx, b)) })
default:
tCtx.Fatalf("Run not implemented, underlying %T does not support it", tCtx.TB())
}
return false
}
// WithContext constructs a new TContext with a different Context instance.
// This can be used in callbacks which receive a Context, for example
// from Gomega:
//
// gomega.Eventually(tCtx, func(ctx context.Context) {
// tCtx := ktesting.WithContext(tCtx, ctx)
// ...
// })
//
// This is important because the Context in the callback could have
// a different deadline than in the parent TContext.
func WithContext(parentCtx TContext, ctx context.Context) TContext {
tCtx := InitCtx(ctx, parentCtx.TB())
tCtx = WithClients(tCtx,
parentCtx.RESTConfig(),
parentCtx.RESTMapper(),
parentCtx.Client(),
parentCtx.Dynamic(),
parentCtx.APIExtensions(),
)
return tCtx
}
// WithValue wraps context.WithValue such that the result is again a TContext.
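//
// Usage sketch with a hypothetical unexported key type:
//
//     type myKey struct{}
//     tCtx = ktesting.WithValue(tCtx, myKey{}, "payload")
//     payload := tCtx.Value(myKey{}).(string)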
func WithValue(parentCtx TContext, key, val any) TContext {
ctx := context.WithValue(parentCtx, key, val)
return WithContext(parentCtx, ctx)
}
type tContext struct {
context.Context
testingTB
cancel func(cause string)
}
// testingTB is needed to avoid a name conflict
// between field and method in tContext.
type testingTB struct {
TB
}
func (tCtx tContext) Parallel() {
if tb, ok := tCtx.TB().(interface{ Parallel() }); ok {
tb.Parallel()
}
}
func (tCtx tContext) Cancel(cause string) {
if tCtx.cancel != nil {
tCtx.cancel(cause)
}
}
func (tCtx tContext) CleanupCtx(cb func(TContext)) {
tCtx.Helper()
cleanupCtx(tCtx, cb)
}
func (tCtx tContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
tCtx.Helper()
return expect(tCtx, actual, extra...)
}
func (tCtx tContext) ExpectNoError(err error, explain ...interface{}) {
tCtx.Helper()
expectNoError(tCtx, err, explain...)
}
func cleanupCtx(tCtx TContext, cb func(TContext)) {
tCtx.Helper()
if tb, ok := tCtx.TB().(ContextTB); ok {
// Use context from base TB (most likely Ginkgo).
tb.CleanupCtx(func(ctx context.Context) {
tCtx := WithContext(tCtx, ctx)
cb(tCtx)
})
return
}
tCtx.Cleanup(func() {
// Use new context. This is the code path for "go test". The
// context then has *no* deadline. In the code path above for
// Ginkgo, Ginkgo is more sophisticated and also applies
// timeouts to cleanup calls which accept a context.
childCtx := WithContext(tCtx, context.WithoutCancel(tCtx))
cb(childCtx)
})
}
func (tCtx tContext) Run(name string, cb func(tCtx TContext)) bool {
return run(tCtx, name, cb)
}
func (tCtx tContext) Logger() klog.Logger {
return klog.FromContext(tCtx)
}
func (tCtx tContext) Error(args ...any) {
tCtx.Helper()
args = append([]any{"ERROR:"}, args...)
tCtx.testingTB.Error(args...)
}
func (tCtx tContext) Errorf(format string, args ...any) {
tCtx.Helper()
msg := fmt.Sprintf(format, args...)
msg = "ERROR: " + msg
tCtx.testingTB.Error(msg)
}
func (tCtx tContext) Fatal(args ...any) {
tCtx.Helper()
args = append([]any{"FATAL ERROR:"}, args...)
tCtx.testingTB.Fatal(args...)
}
func (tCtx tContext) Fatalf(format string, args ...any) {
tCtx.Helper()
msg := fmt.Sprintf(format, args...)
msg = "FATAL ERROR: " + msg
tCtx.testingTB.Fatal(msg)
}
func (tCtx tContext) TB() TB {
// Might have to unwrap twice, depending on how
// this tContext was constructed.
tb := tCtx.testingTB.TB
if k, ok := tb.(klogTB); ok {
return k.TB
}
return tb
}
func (tCtx tContext) RESTConfig() *rest.Config {
return nil
}
func (tCtx tContext) RESTMapper() *restmapper.DeferredDiscoveryRESTMapper {
return nil
}
func (tCtx tContext) Client() clientset.Interface {
return nil
}
func (tCtx tContext) Dynamic() dynamic.Interface {
return nil
}
func (tCtx tContext) APIExtensions() apiextensions.Interface {
return nil
}
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ktesting
import (
"context"
"time"
"github.com/onsi/gomega"
"k8s.io/klog/v2"
)
// WithCancel sets up cancellation in a [TContext.Cleanup] callback and
// constructs a new TContext where [TContext.Cancel] cancels only the new
// context.
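//
// A usage sketch (produceEvents is a hypothetical goroutine that stops
// when the context is canceled):
//
//     tCtx := ktesting.WithCancel(tCtx)
//     go produceEvents(tCtx)
//     ...
//     tCtx.Cancel("producer no longer needed")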
func WithCancel(tCtx TContext) TContext {
ctx, cancel := context.WithCancelCause(tCtx)
return withContext{
TContext: tCtx,
Context: ctx,
cancel: func(cause string) {
var cancelCause error
if cause != "" {
cancelCause = canceledError(cause)
}
cancel(cancelCause)
},
}
}
// WithoutCancel causes the returned context to ignore cancellation of its parent.
func WithoutCancel(tCtx TContext) TContext {
tCtx.Helper()
ctx := context.WithoutCancel(tCtx)
return WithContext(tCtx, ctx)
}
// WithTimeout sets up a new context with a timeout. Canceling the timeout gets
// registered in a [TContext.Cleanup] callback. [TContext.Cancel] cancels only
// the new context. The cause is used as the reason why the context is canceled
// once the timeout is reached. It may be empty, in which case the usual
// "context canceled" error is used.
func WithTimeout(tCtx TContext, timeout time.Duration, timeoutCause string) TContext {
tCtx.Helper()
ctx, cancel := withTimeout(tCtx, tCtx.TB(), timeout, timeoutCause)
return withContext{
TContext: tCtx,
Context: ctx,
cancel: cancel,
}
}
// WithLogger constructs a new context with a different logger.
func WithLogger(tCtx TContext, logger klog.Logger) TContext {
ctx := klog.NewContext(tCtx, logger)
return withContext{
TContext: tCtx,
Context: ctx,
cancel: tCtx.Cancel,
}
}
// withContext combines some TContext with a new [context.Context] derived
// from it. Because both provide the [context.Context] interface, methods must
// be defined which pick the newer one.
type withContext struct {
TContext
context.Context
cancel func(cause string)
}
func (wCtx withContext) Cancel(cause string) {
wCtx.cancel(cause)
}
func (wCtx withContext) CleanupCtx(cb func(TContext)) {
wCtx.Helper()
cleanupCtx(wCtx, cb)
}
func (wCtx withContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
wCtx.Helper()
return expect(wCtx, actual, extra...)
}
func (wCtx withContext) ExpectNoError(err error, explain ...interface{}) {
wCtx.Helper()
expectNoError(wCtx, err, explain...)
}
func (wCtx withContext) Run(name string, cb func(tCtx TContext)) bool {
return run(wCtx, name, cb)
}
func (wCtx withContext) Logger() klog.Logger {
return klog.FromContext(wCtx)
}
func (wCtx withContext) Deadline() (time.Time, bool) {
return wCtx.Context.Deadline()
}
func (wCtx withContext) Done() <-chan struct{} {
return wCtx.Context.Done()
}
func (wCtx withContext) Err() error {
return wCtx.Context.Err()
}
func (wCtx withContext) Value(key any) any {
return wCtx.Context.Value(key)
}
package expansion
import (
"bytes"
)
const (
operator = '$'
referenceOpener = '('
referenceCloser = ')'
)
// syntaxWrap returns the input string wrapped by the expansion syntax.
func syntaxWrap(input string) string {
return string(operator) + string(referenceOpener) + input + string(referenceCloser)
}
// MappingFuncFor returns a mapping function for use with Expand that
// implements the expansion semantics defined in the expansion spec; it
// returns the input string wrapped in the expansion syntax if no mapping
// for the input is found.
func MappingFuncFor(context ...map[string]string) func(string) string {
return func(input string) string {
for _, vars := range context {
val, ok := vars[input]
if ok {
return val
}
}
return syntaxWrap(input)
}
}
// Expand replaces variable references in the input string according to
// the expansion spec using the given mapping function to resolve the
// values of variables.
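//
// For example, with mapping := MappingFuncFor(map[string]string{"VAR": "value"}):
//
//     Expand("a-$(VAR)", mapping)   // "a-value"
//     Expand("a-$(OTHER)", mapping) // "a-$(OTHER)": unknown references are preserved
//     Expand("a-$$b", mapping)      // "a-$b": a doubled operator escapes itself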
func Expand(input string, mapping func(string) string) string {
var buf bytes.Buffer
checkpoint := 0
for cursor := 0; cursor < len(input); cursor++ {
if input[cursor] == operator && cursor+1 < len(input) {
// Copy the portion of the input string since the last
// checkpoint into the buffer
buf.WriteString(input[checkpoint:cursor])
// Attempt to read the variable name as defined by the
// syntax from the input string
read, isVar, advance := tryReadVariableName(input[cursor+1:])
if isVar {
// We were able to read a variable name correctly;
// apply the mapping to the variable name and copy the
// bytes into the buffer
buf.WriteString(mapping(read))
} else {
// Not a variable name; copy the read bytes into the buffer
buf.WriteString(read)
}
// Advance the cursor in the input string to account for
// bytes consumed to read the variable name expression
cursor += advance
// Advance the checkpoint in the input string
checkpoint = cursor + 1
}
}
// Return the buffer and any remaining unwritten bytes in the
// input string.
return buf.String() + input[checkpoint:]
}
// tryReadVariableName attempts to read a variable name from the input
// string and returns the content read from the input, whether that content
// represents a variable name to perform mapping on, and the number of bytes
// consumed in the input string.
//
// The input string is assumed not to contain the initial operator.
func tryReadVariableName(input string) (string, bool, int) {
switch input[0] {
case operator:
// Escaped operator; return it.
return input[0:1], false, 1
case referenceOpener:
// Scan to expression closer
for i := 1; i < len(input); i++ {
if input[i] == referenceCloser {
return input[1:i], true, i + 1
}
}
// Incomplete reference; return it.
return string(operator) + string(referenceOpener), false, 1
default:
// Not the beginning of an expression, i.e., an operator
// that doesn't begin a reference. Return the operator and
// the byte that follows it (the scan is byte-wise, not
// rune-wise).
return string(operator) + string(input[0]), false, 1
}
}
package apparmor
import (
"os"
"sync"
)
var (
appArmorEnabled bool
checkAppArmor sync.Once
)
// IsEnabled returns true if AppArmor is enabled for the host.
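//
// A typical call site simply gates AppArmor-specific behavior
// (hypothetical sketch):
//
//     if apparmor.IsEnabled() {
//         // load or verify an AppArmor profile
//     }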
func IsEnabled() bool {
checkAppArmor.Do(func() {
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil {
buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled")
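// The parameters file contains "Y\n" when AppArmor is enabled,
// hence the check for more than one byte and a leading 'Y'.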
appArmorEnabled = err == nil && len(buf) > 1 && buf[0] == 'Y'
}
})
return appArmorEnabled
}
package utils
import (
"os"
"path/filepath"
)
// CleanPath makes a path safe for use with filepath.Join. This is done by not
// only cleaning the path, but also (if the path is relative) adding a leading
// '/' and cleaning it (then removing the leading '/'). This ensures that a
// path resulting from prepending another path will always lexically resolve
// to a subdirectory of the prefixed path. This is all done lexically, so paths
// that include symlinks won't be safe as a result of using CleanPath.
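//
// For example (all purely lexical):
//
//     CleanPath("../../path") // "path"
//     CleanPath("a/b/../c")   // "a/c"
//     CleanPath("/a/../../b") // "/b"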
func CleanPath(path string) string {
// Deal with empty strings nicely.
if path == "" {
return ""
}
// Ensure that all paths are cleaned (especially problematic ones like
// "/../../../../../" which can cause lots of issues).
path = filepath.Clean(path)
// If the path isn't absolute, we need to do more processing to fix paths
// such as "../../../../<etc>/some/path". We also shouldn't convert absolute
// paths to relative ones.
if !filepath.IsAbs(path) {
path = filepath.Clean(string(os.PathSeparator) + path)
// This can't fail, as (by definition) all paths are relative to root.
path, _ = filepath.Rel(string(os.PathSeparator), path)
}
// Clean the path again for good measure.
return filepath.Clean(path)
}